MDEV-19955 make argument of handler::ha_write_row() const

MDEV-19486 and one more similar bug appeared because the handler::write_row()
interface lets a storage engine modify the row buffer it receives. Callers are
not prepared for that, so similar bugs remain possible in the future.

handler::write_row():
handler::ha_write_row(): make argument const
Eugene Kosov 2019-07-04 21:31:35 +03:00
parent 23c12ed5cb
commit c9aa495fb6
57 changed files with 86 additions and 85 deletions
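
A minimal standalone sketch of the interface change described above (simplified
types, illustrative struct names; not the real MariaDB headers): once the buffer
is const, an engine that tries to modify the caller's row no longer compiles.

typedef unsigned char uchar;

struct handler_before {
  /* old interface: the engine may silently modify the caller's buffer */
  virtual int write_row(uchar *buf) { buf[0]= 0; return 0; }   /* compiles */
  virtual ~handler_before() {}
};

struct handler_after {
  /* new interface: the same modification is now a compile error */
  virtual int write_row(const uchar *buf)
  {
    /* buf[0]= 0;  error: assignment of read-only location */
    return buf ? 0 : 1;
  }
  virtual ~handler_after() {}
};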

View file

@ -305,7 +305,7 @@ extern int maria_rsame_with_pos(MARIA_HA *file, uchar *record,
int inx, MARIA_RECORD_POS pos);
extern int maria_update(MARIA_HA *file, const uchar *old,
const uchar *new_record);
extern int maria_write(MARIA_HA *file, uchar *buff);
extern int maria_write(MARIA_HA *file, const uchar *buff);
extern MARIA_RECORD_POS maria_position(MARIA_HA *file);
extern int maria_status(MARIA_HA *info, MARIA_INFO *x, uint flag);
extern int maria_lock_database(MARIA_HA *file, int lock_type);

View file

@ -276,7 +276,7 @@ extern int mi_rsame_with_pos(struct st_myisam_info *file,uchar *record,
int inx, my_off_t pos);
extern int mi_update(struct st_myisam_info *file,const uchar *old,
const uchar *new_record);
extern int mi_write(struct st_myisam_info *file,uchar *buff);
extern int mi_write(struct st_myisam_info *file,const uchar *buff);
extern my_off_t mi_position(struct st_myisam_info *file);
extern int mi_status(struct st_myisam_info *info, MI_ISAMINFO *x, uint flag);
extern int mi_lock_database(struct st_myisam_info *file,int lock_type);

View file

@ -106,7 +106,7 @@ extern int myrg_rrnd(MYRG_INFO *file,uchar *buf,ulonglong pos);
extern int myrg_rsame(MYRG_INFO *file,uchar *record,int inx);
extern int myrg_update(MYRG_INFO *file,const uchar *old,
const uchar *new_rec);
extern int myrg_write(MYRG_INFO *info,uchar *rec);
extern int myrg_write(MYRG_INFO *info,const uchar *rec);
extern int myrg_status(MYRG_INFO *file,MYMERGE_INFO *x,int flag);
extern int myrg_lock_database(MYRG_INFO *file,int lock_type);
extern int myrg_create(const char *name, const char **table_names,

View file

@ -1364,7 +1364,7 @@ public:
virtual uint max_packed_col_length(uint max_length)
{ return max_length;}
uint offset(uchar *record) const
uint offset(const uchar *record) const
{
return (uint) (ptr - record);
}

View file

@ -4247,7 +4247,7 @@ void ha_partition::try_semi_consistent_read(bool yes)
determining which partition the row should be written to.
*/
int ha_partition::write_row(uchar * buf)
int ha_partition::write_row(const uchar * buf)
{
uint32 part_id;
int error;

View file

@ -628,7 +628,7 @@ public:
start_bulk_insert and end_bulk_insert is called before and after a
number of calls to write_row.
*/
virtual int write_row(uchar * buf);
virtual int write_row(const uchar * buf);
virtual bool start_bulk_update();
virtual int exec_bulk_update(ha_rows *dup_key_found);
virtual int end_bulk_update();

View file

@ -194,7 +194,7 @@ int ha_sequence::create(const char *name, TABLE *form,
the sequence with 'buf' as the sequence object is already up to date.
*/
int ha_sequence::write_row(uchar *buf)
int ha_sequence::write_row(const uchar *buf)
{
int error;
sequence_definition tmp_seq;

View file

@ -71,7 +71,7 @@ public:
int create(const char *name, TABLE *form,
HA_CREATE_INFO *create_info);
handler *clone(const char *name, MEM_ROOT *mem_root);
int write_row(uchar *buf);
int write_row(const uchar *buf);
Table_flags table_flags() const;
/* One can't update or delete from sequence engine */
int update_row(const uchar *old_data, const uchar *new_data)

View file

@ -6499,7 +6499,7 @@ static int wsrep_after_row(THD *thd)
#endif /* WITH_WSREP */
static int check_duplicate_long_entry_key(TABLE *table, handler *h,
uchar *new_rec, uint key_no)
const uchar *new_rec, uint key_no)
{
Field *hash_field;
int result, error= 0;
@ -6588,7 +6588,8 @@ exit:
unique constraint on long columns.
@returns 0 if no duplicate else returns error
*/
static int check_duplicate_long_entries(TABLE *table, handler *h, uchar *new_rec)
static int check_duplicate_long_entries(TABLE *table, handler *h,
const uchar *new_rec)
{
table->file->errkey= -1;
int result;
@ -6657,7 +6658,7 @@ static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *
return error;
}
int handler::ha_write_row(uchar *buf)
int handler::ha_write_row(const uchar *buf)
{
int error;
Log_func *log_func= Write_rows_log_event::binlog_row_logging_function;
@ -6746,7 +6747,7 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
Update first row. Only used by sequence tables
*/
int handler::update_first_row(uchar *new_data)
int handler::update_first_row(const uchar *new_data)
{
int error;
if (likely(!(error= ha_rnd_init(1))))
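
For illustration only (standalone sketch, hypothetical function name): the
callers of ha_write_row() need no changes because a non-const buffer such as
table->record[0] converts implicitly to const uchar*, so only the receiving
signatures move to const.

typedef unsigned char uchar;

int ha_write_row_sketch(const uchar *buf)   /* stands in for handler::ha_write_row() */
{
  return buf ? 0 : 1;
}

int caller_sketch()
{
  uchar record[16]= {0};                    /* stands in for table->record[0] */
  return ha_write_row_sketch(record);       /* uchar* -> const uchar* is implicit */
}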

View file

@ -3238,7 +3238,7 @@ public:
and delete_row() below.
*/
int ha_external_lock(THD *thd, int lock_type);
int ha_write_row(uchar * buf);
int ha_write_row(const uchar * buf);
int ha_update_row(const uchar * old_data, const uchar * new_data);
int ha_delete_row(const uchar * buf);
void ha_release_auto_increment();
@ -4548,7 +4548,7 @@ private:
*/
virtual int rnd_init(bool scan)= 0;
virtual int rnd_end() { return 0; }
virtual int write_row(uchar *buf __attribute__((unused)))
virtual int write_row(const uchar *buf __attribute__((unused)))
{
return HA_ERR_WRONG_COMMAND;
}
@ -4571,7 +4571,7 @@ private:
Optimized function for updating the first row. Only used by sequence
tables
*/
virtual int update_first_row(uchar *new_data);
virtual int update_first_row(const uchar *new_data);
virtual int delete_row(const uchar *buf __attribute__((unused)))
{
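
One consequence of changing a virtual signature, sketched here with simplified
standalone types (struct names are illustrative): an engine that keeps the old
non-const parameter no longer overrides handler::write_row() at all, so its
implementation would silently stop being called; an engine that spells the
method with 'override' (as ha_rocksdb does in this commit) gets a compile-time
error instead.

typedef unsigned char uchar;

struct handler_sketch {
  /* base fallback; returns HA_ERR_WRONG_COMMAND in the real code */
  virtual int write_row(const uchar *) { return -1; }
  virtual ~handler_sketch() {}
};

struct ha_stale_engine : handler_sketch {
  /* old signature: hides the base method instead of overriding it */
  int write_row(uchar *) { return 0; }
};

struct ha_updated_engine : handler_sketch {
  /* 'override' turns a stale signature into a compile-time error */
  int write_row(const uchar *) override { return 0; }
};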

View file

@ -371,7 +371,7 @@ int Archive_share::write_v1_metafile()
@return Length of packed row
*/
unsigned int ha_archive::pack_row_v1(uchar *record)
unsigned int ha_archive::pack_row_v1(const uchar *record)
{
uint *blob, *end;
uchar *pos;
@ -868,7 +868,7 @@ error:
/*
This is where the actual row is written out.
*/
int ha_archive::real_write_row(uchar *buf, azio_stream *writer)
int ha_archive::real_write_row(const uchar *buf, azio_stream *writer)
{
my_off_t written;
unsigned int r_pack_length;
@ -917,7 +917,7 @@ uint32 ha_archive::max_row_length(const uchar *record)
}
unsigned int ha_archive::pack_row(uchar *record, azio_stream *writer)
unsigned int ha_archive::pack_row(const uchar *record, azio_stream *writer)
{
uchar *ptr;
my_ptrdiff_t const rec_offset= record - table->record[0];
@ -959,7 +959,7 @@ unsigned int ha_archive::pack_row(uchar *record, azio_stream *writer)
for implementing start_bulk_insert() is that we could skip
setting dirty to true each time.
*/
int ha_archive::write_row(uchar *buf)
int ha_archive::write_row(const uchar *buf)
{
int rc;
uchar *read_buf= NULL;

View file

@ -95,7 +95,7 @@ class ha_archive: public handler
void destroy_record_buffer(archive_record_buffer *r);
int frm_copy(azio_stream *src, azio_stream *dst);
int frm_compare(azio_stream *src);
unsigned int pack_row_v1(uchar *record);
unsigned int pack_row_v1(const uchar *record);
public:
ha_archive(handlerton *hton, TABLE_SHARE *table_arg);
@ -131,8 +131,8 @@ public:
int index_next(uchar * buf);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(uchar * buf);
int real_write_row(uchar *buf, azio_stream *writer);
int write_row(const uchar * buf);
int real_write_row(const uchar *buf, azio_stream *writer);
int truncate();
int rnd_init(bool scan=1);
int rnd_next(uchar *buf);
@ -168,7 +168,7 @@ public:
uint32 max_row_length(const uchar *buf);
bool fix_rec_buff(unsigned int length);
int unpack_row(azio_stream *file_to_read, uchar *record);
unsigned int pack_row(uchar *record, azio_stream *writer);
unsigned int pack_row(const uchar *record, azio_stream *writer);
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
int external_lock(THD *thd, int lock_type);
private:

View file

@ -117,7 +117,7 @@ const char *ha_blackhole::index_type(uint key_number)
HA_KEY_ALG_RTREE) ? "RTREE" : "BTREE");
}
int ha_blackhole::write_row(uchar * buf)
int ha_blackhole::write_row(const uchar * buf)
{
DBUG_ENTER("ha_blackhole::write_row");
DBUG_RETURN(table->next_number_field ? update_auto_increment() : 0);

View file

@ -96,7 +96,7 @@ public:
THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
private:
virtual int write_row(uchar *buf);
virtual int write_row(const uchar *buf);
virtual int update_row(const uchar *old_data, const uchar *new_data);
virtual int delete_row(const uchar *buf);
};

View file

@ -1931,7 +1931,7 @@ void ha_cassandra::free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals,
*names= 0;
}
int ha_cassandra::write_row(uchar *buf)
int ha_cassandra::write_row(const uchar *buf)
{
my_bitmap_map *old_map;
int ires;

View file

@ -239,7 +239,7 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(uchar *buf);
int write_row(const uchar *buf);
int update_row(const uchar *old_data, const uchar *new_data);
int delete_row(const uchar *buf);

View file

@ -3576,7 +3576,7 @@ int ha_connect::close(void)
item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc and sql_update.cc
*/
int ha_connect::write_row(uchar *buf)
int ha_connect::write_row(const uchar *buf)
{
int rc= 0;
PGLOBAL& g= xp->g;

View file

@ -388,7 +388,7 @@ virtual int check(THD* thd, HA_CHECK_OPT* check_opt);
We implement this in ha_connect.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int write_row(uchar *buf);
int write_row(const uchar *buf);
/** @brief
We implement this in ha_connect.cc. It's not an obligatory method;

View file

@ -1004,7 +1004,7 @@ int ha_tina::close(void)
of the file and appends the data. In an error case it really should
just truncate to the original position (this is not done yet).
*/
int ha_tina::write_row(uchar * buf)
int ha_tina::write_row(const uchar * buf)
{
int size;
DBUG_ENTER("ha_tina::write_row");

View file

@ -136,7 +136,7 @@ public:
int open(const char *name, int mode, uint open_options);
int close(void);
int write_row(uchar * buf);
int write_row(const uchar * buf);
int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int rnd_init(bool scan=1);

View file

@ -398,7 +398,7 @@ int ha_example::close(void)
sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc and sql_update.cc
*/
int ha_example::write_row(uchar *buf)
int ha_example::write_row(const uchar *buf)
{
DBUG_ENTER("ha_example::write_row");
/*

View file

@ -180,7 +180,7 @@ public:
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int write_row(uchar *buf);
int write_row(const uchar *buf);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;

View file

@ -1825,7 +1825,7 @@ bool ha_federated::append_stmt_insert(String *query)
sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
*/
int ha_federated::write_row(uchar *buf)
int ha_federated::write_row(const uchar *buf)
{
char values_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];

View file

@ -209,7 +209,7 @@ public:
void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
int write_row(uchar *buf);
int write_row(const uchar *buf);
int update_row(const uchar *old_data, const uchar *new_data);
int delete_row(const uchar *buf);
int index_init(uint keynr, bool sorted);

View file

@ -1987,7 +1987,7 @@ bool ha_federatedx::append_stmt_insert(String *query)
sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
*/
int ha_federatedx::write_row(uchar *buf)
int ha_federatedx::write_row(const uchar *buf)
{
char values_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];

View file

@ -397,7 +397,7 @@ public:
void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
int write_row(uchar *buf);
int write_row(const uchar *buf);
int update_row(const uchar *old_data, const uchar *new_data);
int delete_row(const uchar *buf);
int index_init(uint keynr, bool sorted);

View file

@ -228,7 +228,7 @@ void ha_heap::update_key_stats()
}
int ha_heap::write_row(uchar * buf)
int ha_heap::write_row(const uchar * buf)
{
int res;
if (table->next_number_field && buf == table->record[0])

View file

@ -70,7 +70,7 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
void set_keys_for_scanning(void);
int write_row(uchar * buf);
int write_row(const uchar * buf);
int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
virtual void get_auto_increment(ulonglong offset, ulonglong increment,

View file

@ -7997,7 +7997,7 @@ handle.
int
ha_innobase::write_row(
/*===================*/
uchar* record) /*!< in: a row in MySQL format */
const uchar* record) /*!< in: a row in MySQL format */
{
dberr_t error;
#ifdef WITH_WSREP

View file

@ -112,7 +112,7 @@ public:
int delete_all_rows() override;
int write_row(uchar * buf) override;
int write_row(const uchar * buf) override;
int update_row(const uchar * old_data, const uchar * new_data) override;

View file

@ -1227,7 +1227,7 @@ int ha_maria::close(void)
}
int ha_maria::write_row(uchar * buf)
int ha_maria::write_row(const uchar * buf)
{
/*
If we have an auto_increment column and we are writing a changed row

View file

@ -73,7 +73,7 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(uchar * buf);
int write_row(const uchar * buf);
int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map,

View file

@ -83,7 +83,7 @@ my_bool _ma_write_abort_default(MARIA_HA *info __attribute__((unused)))
/* Write new record to a table */
int maria_write(MARIA_HA *info, uchar *record)
int maria_write(MARIA_HA *info, const uchar *record)
{
MARIA_SHARE *share= info->s;
uint i;

View file

@ -5852,7 +5852,7 @@ bool ha_mroonga::wrapper_have_target_index()
DBUG_RETURN(have_target_index);
}
int ha_mroonga::wrapper_write_row(uchar *buf)
int ha_mroonga::wrapper_write_row(const uchar *buf)
{
int error = 0;
THD *thd = ha_thd();
@ -5881,7 +5881,7 @@ int ha_mroonga::wrapper_write_row(uchar *buf)
DBUG_RETURN(error);
}
int ha_mroonga::wrapper_write_row_index(uchar *buf)
int ha_mroonga::wrapper_write_row_index(const uchar *buf)
{
MRN_DBUG_ENTER_METHOD();
@ -5968,7 +5968,7 @@ err:
DBUG_RETURN(error);
}
int ha_mroonga::storage_write_row(uchar *buf)
int ha_mroonga::storage_write_row(const uchar *buf)
{
MRN_DBUG_ENTER_METHOD();
int error = 0;
@ -6231,7 +6231,7 @@ err:
DBUG_RETURN(error);
}
int ha_mroonga::storage_write_row_multiple_column_index(uchar *buf,
int ha_mroonga::storage_write_row_multiple_column_index(const uchar *buf,
grn_id record_id,
KEY *key_info,
grn_obj *index_column)
@ -6268,7 +6268,7 @@ int ha_mroonga::storage_write_row_multiple_column_index(uchar *buf,
DBUG_RETURN(error);
}
int ha_mroonga::storage_write_row_multiple_column_indexes(uchar *buf,
int ha_mroonga::storage_write_row_multiple_column_indexes(const uchar *buf,
grn_id record_id)
{
MRN_DBUG_ENTER_METHOD();
@ -6380,7 +6380,7 @@ int ha_mroonga::storage_write_row_unique_index(const uchar *buf,
DBUG_RETURN(0);
}
int ha_mroonga::storage_write_row_unique_indexes(uchar *buf)
int ha_mroonga::storage_write_row_unique_indexes(const uchar *buf)
{
int error = 0;
uint i;
@ -6443,7 +6443,7 @@ err:
DBUG_RETURN(error);
}
int ha_mroonga::write_row(uchar *buf)
int ha_mroonga::write_row(const uchar *buf)
{
MRN_DBUG_ENTER_METHOD();
int error = 0;

View file

@ -451,7 +451,7 @@ public:
int extra_opt(enum ha_extra_function operation, ulong cache_size);
int delete_table(const char *name);
int write_row(uchar *buf);
int write_row(const uchar *buf);
int update_row(const uchar *old_data, const uchar *new_data);
int delete_row(const uchar *buf);
@ -908,20 +908,20 @@ private:
int storage_end_bulk_insert();
bool wrapper_is_target_index(KEY *key_info);
bool wrapper_have_target_index();
int wrapper_write_row(uchar *buf);
int wrapper_write_row_index(uchar *buf);
int storage_write_row(uchar *buf);
int storage_write_row_multiple_column_index(uchar *buf,
int wrapper_write_row(const uchar *buf);
int wrapper_write_row_index(const uchar *buf);
int storage_write_row(const uchar *buf);
int storage_write_row_multiple_column_index(const uchar *buf,
grn_id record_id,
KEY *key_info,
grn_obj *index_column);
int storage_write_row_multiple_column_indexes(uchar *buf, grn_id record_id);
int storage_write_row_multiple_column_indexes(const uchar *buf, grn_id record_id);
int storage_write_row_unique_index(const uchar *buf,
KEY *key_info,
grn_obj *index_table,
grn_obj *index_column,
grn_id *key_id);
int storage_write_row_unique_indexes(uchar *buf);
int storage_write_row_unique_indexes(const uchar *buf);
int wrapper_get_record_id(uchar *data, grn_id *record_id,
const char *context);
int wrapper_update_row(const uchar *old_data, const uchar *new_data);

View file

@ -933,7 +933,7 @@ int ha_myisam::close(void)
return mi_close(tmp);
}
int ha_myisam::write_row(uchar *buf)
int ha_myisam::write_row(const uchar *buf)
{
/*
If we have an auto_increment column and we are writing a changed row

View file

@ -69,7 +69,7 @@ class ha_myisam: public handler
void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(uchar * buf);
int write_row(const uchar * buf);
int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,

View file

@ -40,7 +40,7 @@ int _mi_ck_write_btree(register MI_INFO *info, uint keynr,uchar *key,
/* Write new record to database */
int mi_write(MI_INFO *info, uchar *record)
int mi_write(MI_INFO *info, const uchar *record)
{
MYISAM_SHARE *share=info->s;
uint i;

View file

@ -1088,7 +1088,7 @@ int ha_myisammrg::close(void)
DBUG_RETURN(rc);
}
int ha_myisammrg::write_row(uchar * buf)
int ha_myisammrg::write_row(const uchar * buf)
{
DBUG_ENTER("ha_myisammrg::write_row");
DBUG_ASSERT(this->file->children_attached);

View file

@ -111,7 +111,7 @@ public:
int detach_children(void);
virtual handler *clone(const char *name, MEM_ROOT *mem_root);
int close(void);
int write_row(uchar * buf);
int write_row(const uchar * buf);
int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,

View file

@ -18,7 +18,7 @@
#include "myrg_def.h"
int myrg_write(register MYRG_INFO *info, uchar *rec)
int myrg_write(register MYRG_INFO *info, const uchar *rec)
{
/* [phi] MERGE_WRITE_DISABLED is handled by the else case */
if (info->merge_insert_method == MERGE_INSERT_TO_FIRST)

View file

@ -799,7 +799,7 @@ void ha_oqgraph::update_key_stats()
}
int ha_oqgraph::write_row(byte * buf)
int ha_oqgraph::write_row(const byte * buf)
{
return HA_ERR_TABLE_READONLY;
}

View file

@ -83,7 +83,7 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
int write_row(const byte * buf);
int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const byte * buf);
int index_read(byte * buf, const byte * key,

View file

@ -251,7 +251,7 @@ int ha_perfschema::close(void)
DBUG_RETURN(0);
}
int ha_perfschema::write_row(uchar *buf)
int ha_perfschema::write_row(const uchar *buf)
{
int result;

View file

@ -120,7 +120,7 @@ public:
@param buf the row to write
@return 0 on success
*/
int write_row(uchar *buf);
int write_row(const uchar *buf);
void use_hidden_primary_key();

View file

@ -178,7 +178,7 @@ ha_rows PFS_engine_table_share::get_row_count(void) const
return m_records;
}
int PFS_engine_table_share::write_row(TABLE *table, unsigned char *buf,
int PFS_engine_table_share::write_row(TABLE *table, const unsigned char *buf,
Field **fields) const
{
my_bitmap_map *org_bitmap;

View file

@ -200,7 +200,7 @@ protected:
typedef PFS_engine_table* (*pfs_open_table_t)(void);
/** Callback to write a row. */
typedef int (*pfs_write_row_t)(TABLE *table,
unsigned char *buf, Field **fields);
const unsigned char *buf, Field **fields);
/** Callback to delete all rows. */
typedef int (*pfs_delete_all_rows_t)(void);
/** Callback to get a row count. */
@ -217,7 +217,7 @@ struct PFS_engine_table_share
/** Get the row count. */
ha_rows get_row_count(void) const;
/** Write a row. */
int write_row(TABLE *table, unsigned char *buf, Field **fields) const;
int write_row(TABLE *table, const unsigned char *buf, Field **fields) const;
/** Table name. */
LEX_STRING m_name;

View file

@ -52,7 +52,7 @@ PFS_engine_table* table_setup_actors::create()
return new table_setup_actors();
}
int table_setup_actors::write_row(TABLE *table, unsigned char *buf,
int table_setup_actors::write_row(TABLE *table, const unsigned char *buf,
Field **fields)
{
Field *f;

View file

@ -55,7 +55,7 @@ public:
static PFS_engine_table_share m_share;
/** Table builder. */
static PFS_engine_table* create();
static int write_row(TABLE *table, unsigned char *buf, Field **fields);
static int write_row(TABLE *table, const unsigned char *buf, Field **fields);
static int delete_all_rows();
static ha_rows get_row_count();

View file

@ -66,7 +66,7 @@ PFS_engine_table* table_setup_objects::create(void)
return new table_setup_objects();
}
int table_setup_objects::write_row(TABLE *table, unsigned char *buf,
int table_setup_objects::write_row(TABLE *table, const unsigned char *buf,
Field **fields)
{
int result;

View file

@ -58,7 +58,7 @@ public:
static PFS_engine_table_share m_share;
/** Table builder. */
static PFS_engine_table* create();
static int write_row(TABLE *table, unsigned char *buf, Field **fields);
static int write_row(TABLE *table, const unsigned char *buf, Field **fields);
static int delete_all_rows();
static ha_rows get_row_count();

View file

@ -9485,7 +9485,7 @@ const std::string ha_rocksdb::get_table_comment(const TABLE *const table_arg) {
HA_EXIT_SUCCESS OK
other HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::write_row(uchar *const buf) {
int ha_rocksdb::write_row(const uchar *const buf) {
DBUG_ENTER_FUNC();
DBUG_ASSERT(buf != nullptr);

View file

@ -640,7 +640,7 @@ public:
MY_ATTRIBUTE((__warn_unused_result__));
int close(void) override MY_ATTRIBUTE((__warn_unused_result__));
int write_row(uchar *const buf) override
int write_row(const uchar *const buf) override
MY_ATTRIBUTE((__warn_unused_result__));
int update_row(const uchar *const old_data, const uchar *const new_data) override
MY_ATTRIBUTE((__warn_unused_result__));

View file

@ -9796,7 +9796,7 @@ int ha_spider::end_bulk_insert()
}
int ha_spider::write_row(
uchar *buf
const uchar *buf
) {
int error_num;
THD *thd = ha_thd();

View file

@ -568,7 +568,7 @@ public:
#endif
int end_bulk_insert();
int write_row(
uchar *buf
const uchar *buf
);
#ifdef HA_CAN_BULK_ACCESS
int pre_write_row(

View file

@ -3714,7 +3714,7 @@ static bool do_unique_checks_fn(THD *thd) {
#endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
int ha_tokudb::do_uniqueness_checks(const uchar* record, DB_TXN* txn, THD* thd) {
int error = 0;
//
// first do uniqueness checks
@ -3757,7 +3757,7 @@ cleanup:
return error;
}
void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
void ha_tokudb::test_row_packing(const uchar* record, DBT* pk_key, DBT* pk_val) {
int error;
DBT row, key;
//
@ -3998,7 +3998,7 @@ out:
// 0 on success
// error otherwise
//
int ha_tokudb::write_row(uchar * record) {
int ha_tokudb::write_row(const uchar * record) {
TOKUDB_HANDLER_DBUG_ENTER("%p", record);
DBT row, prim_key;

View file

@ -703,11 +703,11 @@ private:
void trace_create_table_info(TABLE* form);
int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags);
int is_val_unique(bool* is_unique, const uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn);
int do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd);
int do_uniqueness_checks(const uchar* record, DB_TXN* txn, THD* thd);
void set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags);
int insert_row_to_main_dictionary(DBT* pk_key, DBT* pk_val, DB_TXN* txn);
int insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN* txn, THD* thd);
void test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val);
void test_row_packing(const uchar* record, DBT* pk_key, DBT* pk_val);
uint32_t fill_row_mutator(
uchar* buf,
uint32_t* dropped_columns,
@ -785,7 +785,7 @@ public:
int rename_table(const char *from, const char *to);
int optimize(THD * thd, HA_CHECK_OPT * check_opt);
int analyze(THD * thd, HA_CHECK_OPT * check_opt);
int write_row(uchar * buf);
int write_row(const uchar * buf);
int update_row(const uchar * old_data, const uchar * new_data);
int delete_row(const uchar * buf);
#if MYSQL_VERSION_ID >= 100000