Mirror of https://github.com/MariaDB/server.git (synced 2025-01-31 02:51:44 +01:00)

Merge chilla.local:/home/mydev/mysql-5.0
into chilla.local:/home/mydev/mysql-5.0-amerge

Commit b900484346

20 changed files with 300 additions and 86 deletions
@@ -1158,13 +1158,14 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
#ifdef HAVE_RTREE_KEYS
(keyinfo->flag & HA_SPATIAL) ?
rtree_find_first(info, key, info->lastkey, key_length,
SEARCH_SAME) :
MBR_EQUAL | MBR_DATA) :
#endif
_mi_search(info,keyinfo,info->lastkey,key_length,
SEARCH_SAME, info->s->state.key_root[key]);
if (search_result)
{
mi_check_print_error(param,"Record at: %10s Can't find key for index: %2d",
mi_check_print_error(param,"Record at: %10s "
"Can't find key for index: %2d",
llstr(start_recpos,llbuff),key+1);
if (error++ > MAXERR || !(param->testflag & T_VERBOSE))
goto err2;
@@ -59,6 +59,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
my_off_t key_root[MI_MAX_POSSIBLE_KEY],key_del[MI_MAX_KEY_BLOCK_SIZE];
MI_CREATE_INFO tmp_create_info;
DBUG_ENTER("mi_create");
DBUG_PRINT("enter", ("keys: %u columns: %u uniques: %u flags: %u",
keys, columns, uniques, flags));

if (!ci)
{

@@ -471,6 +473,16 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
uniques * MI_UNIQUEDEF_SIZE +
(key_segs + unique_key_parts)*HA_KEYSEG_SIZE+
columns*MI_COLUMNDEF_SIZE);
DBUG_PRINT("info", ("info_length: %u", info_length));
/* There are only 16 bits for the total header length. */
if (info_length > 65535)
{
my_printf_error(0, "MyISAM table '%s' has too many columns and/or "
"indexes and/or unique constraints.",
MYF(0), name + dirname_length(name));
my_errno= HA_WRONG_CREATE_OPTION;
goto err;
}

bmove(share.state.header.file_version,(byte*) myisam_file_magic,4);
ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ?

@@ -620,6 +632,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
errpos=3;
}

DBUG_PRINT("info", ("write state info and base info"));
if (mi_state_info_write(file, &share.state, 2) ||
mi_base_info_write(file, &share.base))
goto err;

@@ -633,6 +646,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
#endif

/* Write key and keyseg definitions */
DBUG_PRINT("info", ("write key and keyseg definitions"));
for (i=0 ; i < share.base.keys - uniques; i++)
{
uint sp_segs=(keydefs[i].flag & HA_SPATIAL) ? 2*SPDIMS : 0;

@@ -683,6 +697,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
}

/* Save unique definition */
DBUG_PRINT("info", ("write unique definitions"));
for (i=0 ; i < share.state.header.uniques ; i++)
{
HA_KEYSEG *keyseg_end;

@@ -713,6 +728,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
goto err;
}
}
DBUG_PRINT("info", ("write field definitions"));
for (i=0 ; i < share.base.fields ; i++)
if (mi_recinfo_write(file, &recinfo[i]))
goto err;

@@ -727,6 +743,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
#endif

/* Enlarge files */
DBUG_PRINT("info", ("enlarge to keystart: %lu", (ulong) share.base.keystart));
if (my_chsize(file,(ulong) share.base.keystart,0,MYF(0)))
goto err;
@@ -34,12 +34,24 @@ int mi_delete_table(const char *name)
#ifdef USE_RAID
{
MI_INFO *info;
/* we use 'open_for_repair' to be able to delete a crashed table */
if (!(info=mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
DBUG_RETURN(my_errno);
raid_type = info->s->base.raid_type;
raid_chunks = info->s->base.raid_chunks;
mi_close(info);
/*
When built with RAID support, we need to determine if this table
makes use of the raid feature. If yes, we need to remove all raid
chunks. This is done with my_raid_delete(). Unfortunately it is
necessary to open the table just to check this. We use
'open_for_repair' to be able to open even a crashed table. If even
this open fails, we assume no raid configuration for this table
and try to remove the normal data file only. This may however
leave the raid chunks behind.
*/
if (!(info= mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
raid_type= 0;
else
{
raid_type= info->s->base.raid_type;
raid_chunks= info->s->base.raid_chunks;
mi_close(info);
}
}
#ifdef EXTRA_DEBUG
check_table_is_closed(name,"delete");
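
The new comment in the hunk above describes a probe-and-fall-back pattern: open the table even if it is crashed just to learn its RAID layout, and if that open fails, assume no RAID and delete only the plain data file. A minimal standalone sketch of that pattern, with invented names (fopen stands in for mi_open with HA_OPEN_FOR_REPAIR; this is not the MyISAM API):

#include <stdio.h>

/* Hypothetical metadata probe: returns 0 and fills *raid_type and
   *raid_chunks if the table header could be read, non-zero otherwise. */
static int probe_table_meta(const char *name, int *raid_type, int *raid_chunks)
{
  FILE *f= fopen(name, "rb");   /* stand-in for "open even a crashed table" */
  if (!f)
    return 1;                   /* cannot open at all */
  /* A real implementation would parse the header here. */
  *raid_type= 0;
  *raid_chunks= 0;
  fclose(f);
  return 0;
}

int delete_table_sketch(const char *name)
{
  int raid_type= 0, raid_chunks= 0;
  if (probe_table_meta(name, &raid_type, &raid_chunks))
  {
    /* Open failed: assume no RAID layout and fall back to removing only
       the normal data file (RAID chunks, if any, are left behind). */
    raid_type= 0;
  }
  if (raid_type)
    printf("would delete %d raid chunks for %s\n", raid_chunks, name);
  else
    printf("would delete plain data file for %s\n", name);
  return 0;
}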
@@ -1155,6 +1155,9 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
info->rec_cache.pos_in_file <= block_info.next_filepos &&
flush_io_cache(&info->rec_cache))
goto err;
/* A corrupted table can have wrong pointers. (Bug# 19835) */
if (block_info.next_filepos == HA_OFFSET_ERROR)
goto panic;
info->rec_cache.seek_not_done=1;
if ((b_type=_mi_get_block_info(&block_info,file,
block_info.next_filepos))
@@ -64,7 +64,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
TODO: nulls processing
*/
#ifdef HAVE_SPATIAL
return sp_make_key(info,keynr,key,record,filepos);
DBUG_RETURN(sp_make_key(info,keynr,key,record,filepos));
#else
DBUG_ASSERT(0); /* mi_open should check that this never happens*/
#endif
@@ -68,6 +68,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len,

if (fast_mi_readinfo(info))
goto err;

if (share->concurrent_insert)
rw_rdlock(&share->key_root_lock[inx]);

@@ -90,24 +91,35 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len,
case HA_KEY_ALG_BTREE:
default:
if (!_mi_search(info, keyinfo, key_buff, use_key_length,
myisam_read_vec[search_flag], info->s->state.key_root[inx]))
myisam_read_vec[search_flag], info->s->state.key_root[inx]))
{
while (info->lastpos >= info->state->data_file_length)
/*
If we are searching for an exact key (including the data pointer)
and this was added by an concurrent insert,
then the result is "key not found".
*/
if ((search_flag == HA_READ_KEY_EXACT) &&
(info->lastpos >= info->state->data_file_length))
{
my_errno= HA_ERR_KEY_NOT_FOUND;
info->lastpos= HA_OFFSET_ERROR;
}
else while (info->lastpos >= info->state->data_file_length)
{
/*
Skip rows that are inserted by other threads since we got a lock
Note that this can only happen if we are not searching after an
exact key, because the keys are sorted according to position
*/

if (_mi_search_next(info, keyinfo, info->lastkey,
info->lastkey_length,
myisam_readnext_vec[search_flag],
info->s->state.key_root[inx]))
info->lastkey_length,
myisam_readnext_vec[search_flag],
info->s->state.key_root[inx]))
break;
}
}
}

if (share->concurrent_insert)
rw_unlock(&share->key_root_lock[inx]);
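
The comments in the mi_rkey hunk above explain why an exact-match search (key plus data pointer) that lands at or beyond the committed end of the data file must be reported as "key not found", while any other search simply skips such rows and moves to the next key. A self-contained sketch of just that decision, with simplified names and types (illustrative only, not the MyISAM code):

#include <stdio.h>

enum search_mode { READ_KEY_EXACT, READ_KEY_OR_NEXT };
enum action      { USE_ROW, REPORT_NOT_FOUND, SKIP_TO_NEXT_KEY };

/*
  A row position at or beyond data_file_length belongs to a row that a
  concurrent insert added after we took the key-root read lock, so it
  must not be returned.  For an exact match (key + data pointer) there
  can be no other candidate, so the answer is "key not found"; for any
  other search we step to the next key and check again.
*/
static enum action classify(enum search_mode mode,
                            unsigned long long row_pos,
                            unsigned long long data_file_length)
{
  if (row_pos < data_file_length)
    return USE_ROW;
  return (mode == READ_KEY_EXACT) ? REPORT_NOT_FOUND : SKIP_TO_NEXT_KEY;
}

int main(void)
{
  printf("%d\n", classify(READ_KEY_EXACT,   2000, 1024)); /* REPORT_NOT_FOUND */
  printf("%d\n", classify(READ_KEY_OR_NEXT, 2000, 1024)); /* SKIP_TO_NEXT_KEY */
  printf("%d\n", classify(READ_KEY_EXACT,    512, 1024)); /* USE_ROW */
  return 0;
}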
@@ -183,9 +183,11 @@ int rtree_find_first(MI_INFO *info, uint keynr, uchar *key, uint key_length,
return -1;
}

/* Save searched key */
memcpy(info->first_mbr_key, key, keyinfo->keylength -
info->s->base.rec_reflength);
/*
Save searched key, include data pointer.
The data pointer is required if the search_flag contains MBR_DATA.
*/
memcpy(info->first_mbr_key, key, keyinfo->keylength);
info->last_rkey_length = key_length;

info->rtree_recursion_depth = -1;
@@ -52,10 +52,14 @@
if (EQUAL_CMP(amin, amax, bmin, bmax)) \
return 1; \
} \
else /* if (nextflag & MBR_DISJOINT) */ \
else if (nextflag & MBR_DISJOINT) \
{ \
if (DISJOINT_CMP(amin, amax, bmin, bmax)) \
return 1; \
}\
else /* if unknown comparison operator */ \
{ \
DBUG_ASSERT(0); \
}

#define RT_CMP_KORR(type, korr_func, len, nextflag) \
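
The hunk above replaces a catch-all else, which silently treated every remaining flag as MBR_DISJOINT, with an explicit else if plus an assertion for unknown comparison operators. A small self-contained sketch of the same dispatch pattern, with made-up flag names rather than the real rt_mbr.c macros:

#include <assert.h>
#include <stdio.h>

#define CMP_EQUAL    1
#define CMP_DISJOINT 2

/* Returns 1 if the 1-D intervals [amin,amax] and [bmin,bmax] satisfy
   the requested comparison, 0 otherwise. */
static int interval_cmp(int flags, int amin, int amax, int bmin, int bmax)
{
  if (flags & CMP_EQUAL)
  {
    if (amin == bmin && amax == bmax)
      return 1;
  }
  else if (flags & CMP_DISJOINT)
  {
    if (amax < bmin || bmax < amin)
      return 1;
  }
  else
  {
    /* Fail loudly on an unknown operator instead of silently
       falling through to the DISJOINT branch. */
    assert(0);
  }
  return 0;
}

int main(void)
{
  printf("%d\n", interval_cmp(CMP_EQUAL,    1, 5, 1, 5)); /* 1 */
  printf("%d\n", interval_cmp(CMP_DISJOINT, 1, 5, 7, 9)); /* 1 */
  printf("%d\n", interval_cmp(CMP_DISJOINT, 1, 5, 4, 9)); /* 0 */
  return 0;
}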
@@ -1689,6 +1689,22 @@ id c1 c2
9 abc ppc
drop table federated.t1, federated.t2;
drop table federated.t1, federated.t2;
create table t1 (id int not null auto_increment primary key, val int);
create table t1
(id int not null auto_increment primary key, val int) engine=federated
connection='mysql://root@127.0.0.1:SLAVE_PORT/test/t1';
insert into t1 values (1,0),(2,0);
update t1 set val = NULL where id = 1;
select * from t1;
id val
1 NULL
2 0
select * from t1;
id val
1 NULL
2 0
drop table t1;
drop table t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
@@ -816,3 +816,43 @@ check table t1 extended;
Table Op Msg_type Msg_text
test.t1 check status OK
drop table t1;
CREATE TABLE t1 (
c1 geometry NOT NULL default '',
SPATIAL KEY i1 (c1(32))
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
-18.6055555000 -66.8158332999,
-18.7186111000 -66.8102777000,
-18.7211111000 -66.9269443999,
-18.6086111000 -66.9327777000))'));
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
CREATE TABLE t1 (
c1 geometry NOT NULL default '',
SPATIAL KEY i1 (c1(32))
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
-18.6055555000 -66.8158332999,
-18.7186111000 -66.8102777000,
-18.7211111000 -66.9269443999,
-18.6086111000 -66.9327777000))'));
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-65.7402776999 -96.6686111000,
-65.7372222000 -96.5516666000,
-65.8502777000 -96.5461111000,
-65.8527777000 -96.6627777000,
-65.7402776999 -96.6686111000))'));
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
-18.6055555000 -66.8158332999,
-18.7186111000 -66.8102777000,
-18.7211111000 -66.9269443999,
-18.6086111000 -66.9327777000))'));
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
@@ -1365,4 +1365,23 @@ drop table federated.t1, federated.t2;
connection slave;
drop table federated.t1, federated.t2;

#
# Bug #16494: Updates that set a column to NULL fail sometimes
#
connection slave;
create table t1 (id int not null auto_increment primary key, val int);
connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table t1
(id int not null auto_increment primary key, val int) engine=federated
connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
insert into t1 values (1,0),(2,0);
update t1 set val = NULL where id = 1;
select * from t1;
connection slave;
select * from t1;
drop table t1;
connection master;
drop table t1;

source include/federated_cleanup.inc;
@@ -187,4 +187,48 @@ check table t1 extended;

drop table t1;

#
# Bug#17877 - Corrupted spatial index
#
CREATE TABLE t1 (
c1 geometry NOT NULL default '',
SPATIAL KEY i1 (c1(32))
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
-18.6055555000 -66.8158332999,
-18.7186111000 -66.8102777000,
-18.7211111000 -66.9269443999,
-18.6086111000 -66.9327777000))'));
# This showed a missing key.
CHECK TABLE t1 EXTENDED;
DROP TABLE t1;
#
CREATE TABLE t1 (
c1 geometry NOT NULL default '',
SPATIAL KEY i1 (c1(32))
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
-18.6055555000 -66.8158332999,
-18.7186111000 -66.8102777000,
-18.7211111000 -66.9269443999,
-18.6086111000 -66.9327777000))'));
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-65.7402776999 -96.6686111000,
-65.7372222000 -96.5516666000,
-65.8502777000 -96.5461111000,
-65.8527777000 -96.6627777000,
-65.7402776999 -96.6686111000))'));
# This is the same as the first insert to get a non-unique key.
INSERT INTO t1 (c1) VALUES (
PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
-18.6055555000 -66.8158332999,
-18.7186111000 -66.8102777000,
-18.7211111000 -66.9269443999,
-18.6086111000 -66.9327777000))'));
# This showed (and still shows) OK.
CHECK TABLE t1 EXTENDED;
DROP TABLE t1;

# End of 4.1 tests
sql/field.cc (31 changed lines)
@@ -1515,7 +1515,8 @@ bool Field::optimize_range(uint idx, uint part)
}


Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table)
Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table,
bool keep_type __attribute__((unused)))
{
Field *tmp;
if (!(tmp= (Field*) memdup_root(root,(char*) this,size_of())))

@@ -1540,7 +1541,7 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table,
uint new_null_bit)
{
Field *tmp;
if ((tmp= new_field(root, new_table)))
if ((tmp= new_field(root, new_table, table == new_table)))
{
tmp->ptr= new_ptr;
tmp->null_ptr= new_null_ptr;

@@ -6227,29 +6228,21 @@ uint Field_string::max_packed_col_length(uint max_length)
}


Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table,
bool keep_type)
{
Field *new_field;

if (type() != MYSQL_TYPE_VAR_STRING || table == new_table)
return Field::new_field(root, new_table);
if (type() != MYSQL_TYPE_VAR_STRING || keep_type)
return Field::new_field(root, new_table, keep_type);

/*
Old VARCHAR field which should be modified to a VARCHAR on copy
This is done to ensure that ALTER TABLE will convert old VARCHAR fields
to now VARCHAR fields.
*/
if ((new_field= new Field_varstring(field_length, maybe_null(),
field_name, new_table, charset())))
{
/*
delayed_insert::get_local_table() needs a ptr copied from old table.
This is what other new_field() methods do too. The above method of
Field_varstring sets ptr to NULL.
*/
new_field->ptr= ptr;
}
return new_field;
return new Field_varstring(field_length, maybe_null(),
field_name, new_table, charset());
}

/****************************************************************************

@@ -6741,9 +6734,11 @@ int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr,
}


Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table)
Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table,
bool keep_type)
{
Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table);
Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table,
keep_type);
if (res)
res->length_bytes= length_bytes;
return res;
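
The field.cc hunks above add a keep_type parameter to Field::new_field(): when a field is cloned for the same table the original type is kept, otherwise an old (4.0-style) VARCHAR is upgraded to the new VARCHAR representation during the copy. A toy sketch of that idea in plain C, with invented names (the real code uses the Field class hierarchy):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum field_type { FT_OLD_VARCHAR, FT_VARCHAR, FT_INT };

struct field
{
  enum field_type type;
  unsigned length;
};

/*
  Clone a field description.  If keep_type is false (cloning into a
  different table, e.g. for ALTER TABLE), upgrade the legacy type.
*/
static struct field *field_clone(const struct field *src, int keep_type)
{
  struct field *copy= malloc(sizeof(*copy));
  if (!copy)
    return NULL;
  memcpy(copy, src, sizeof(*copy));
  if (!keep_type && copy->type == FT_OLD_VARCHAR)
    copy->type= FT_VARCHAR;            /* convert on copy */
  return copy;
}

int main(void)
{
  struct field f= { FT_OLD_VARCHAR, 30 };
  struct field *same= field_clone(&f, 1);   /* same table: keep old type */
  struct field *conv= field_clone(&f, 0);   /* new table: upgraded */
  printf("%d %d\n", same->type, conv->type); /* 0 1 */
  free(same);
  free(conv);
  return 0;
}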
@@ -211,7 +211,8 @@ public:
*/
virtual bool can_be_compared_as_longlong() const { return FALSE; }
virtual void free() {}
virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table);
virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table,
bool keep_type);
virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);

@@ -1033,7 +1034,7 @@ public:
enum_field_types real_type() const { return FIELD_TYPE_STRING; }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
Field *new_field(MEM_ROOT *root, struct st_table *new_table);
Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
};


@@ -1105,7 +1106,7 @@ public:
enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
Field *new_field(MEM_ROOT *root, struct st_table *new_table);
Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
@@ -1810,19 +1810,13 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
/*
buffers for following strings
*/
char old_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
char new_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
char field_value_buffer[STRING_BUFFER_USUAL_SIZE];
char update_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char where_buffer[FEDERATED_QUERY_BUFFER_SIZE];

/* stores the value to be replaced of the field were are updating */
String old_field_value(old_field_value_buffer,
sizeof(old_field_value_buffer),
&my_charset_bin);
/* stores the new value of the field */
String new_field_value(new_field_value_buffer,
sizeof(new_field_value_buffer),
&my_charset_bin);
/* Work area for field values */
String field_value(field_value_buffer, sizeof(field_value_buffer),
&my_charset_bin);
/* stores the update query */
String update_string(update_buffer,
sizeof(update_buffer),

@@ -1835,8 +1829,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
/*
set string lengths to 0 to avoid misc chars in string
*/
old_field_value.length(0);
new_field_value.length(0);
field_value.length(0);
update_string.length(0);
where_string.length(0);

@@ -1850,8 +1843,8 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
In this loop, we want to match column names to values being inserted
(while building INSERT statement).

Iterate through table->field (new data) and share->old_filed (old_data)
using the same index to created an SQL UPDATE statement, new data is
Iterate through table->field (new data) and share->old_field (old_data)
using the same index to create an SQL UPDATE statement. New data is
used to create SET field=value and old data is used to create WHERE
field=oldvalue
*/
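
The comment rewritten in the hunk above describes walking the old and new row images in parallel to build the SET and WHERE parts of an UPDATE statement, with NULL handled specially. A small standalone sketch of that idea in plain C with string values (the real handler works on Field objects, quotes values, and emits IS NULL in the WHERE clause):

#include <stdio.h>
#include <string.h>

/* A NULL pointer represents SQL NULL. */
static void append_assignment(char *buf, size_t size, const char *col,
                              const char *val, int for_where)
{
  char piece[128];
  if (val)
    snprintf(piece, sizeof(piece), "%s = '%s'", col, val);
  else if (for_where)
    snprintf(piece, sizeof(piece), "%s IS NULL", col);
  else
    snprintf(piece, sizeof(piece), "%s = NULL", col);
  strncat(buf, piece, size - strlen(buf) - 1);
}

int main(void)
{
  const char *cols[]=    { "id", "val" };
  const char *new_row[]= { "1", NULL };   /* new image: val set to NULL */
  const char *old_row[]= { "1", "0" };    /* old image drives the WHERE */
  char set_clause[256]= "", where_clause[256]= "";
  size_t i, n= sizeof(cols) / sizeof(cols[0]);

  for (i= 0; i < n; i++)
  {
    append_assignment(set_clause, sizeof(set_clause), cols[i], new_row[i], 0);
    append_assignment(where_clause, sizeof(where_clause), cols[i], old_row[i], 1);
    if (i + 1 < n)   /* conjunctions only between fields */
    {
      strncat(set_clause, ", ", sizeof(set_clause) - strlen(set_clause) - 1);
      strncat(where_clause, " AND ",
              sizeof(where_clause) - strlen(where_clause) - 1);
    }
  }
  printf("UPDATE t1 SET %s WHERE %s\n", set_clause, where_clause);
  return 0;
}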
@@ -1863,30 +1856,28 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
update_string.append(FEDERATED_EQ);

if ((*field)->is_null())
new_field_value.append(FEDERATED_NULL);
update_string.append(FEDERATED_NULL);
else
{
/* otherwise = */
(*field)->val_str(&new_field_value);
(*field)->quote_data(&new_field_value);

if (!field_in_record_is_null(table, *field, (char*) old_data))
where_string.append(FEDERATED_EQ);
(*field)->val_str(&field_value);
(*field)->quote_data(&field_value);
update_string.append(field_value);
field_value.length(0);
}

if (field_in_record_is_null(table, *field, (char*) old_data))
where_string.append(FEDERATED_ISNULL);
else
{
(*field)->val_str(&old_field_value,
where_string.append(FEDERATED_EQ);
(*field)->val_str(&field_value,
(char*) (old_data + (*field)->offset()));
(*field)->quote_data(&old_field_value);
where_string.append(old_field_value);
(*field)->quote_data(&field_value);
where_string.append(field_value);
field_value.length(0);
}

update_string.append(new_field_value);
new_field_value.length(0);

/*
Only append conjunctions if we have another field in which
to iterate

@@ -1896,7 +1887,6 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
update_string.append(FEDERATED_COMMA);
where_string.append(FEDERATED_AND);
}
old_field_value.length(0);
}
update_string.append(FEDERATED_WHERE);
update_string.append(where_string);
@@ -930,7 +930,7 @@ bool select_send::send_data(List<Item> &items)
Protocol *protocol= thd->protocol;
char buff[MAX_FIELD_WIDTH];
String buffer(buff, sizeof(buff), &my_charset_bin);
DBUG_ENTER("send_data");
DBUG_ENTER("select_send::send_data");

protocol->prepare_for_resend();
Item *item;

@@ -1140,7 +1140,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
bool select_export::send_data(List<Item> &items)
{

DBUG_ENTER("send_data");
DBUG_ENTER("select_export::send_data");
char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH];
bool space_inited=0;
String tmp(buff,sizeof(buff),&my_charset_bin),*res;

@@ -1297,7 +1297,7 @@ bool select_dump::send_data(List<Item> &items)
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
tmp.length(0);
Item *item;
DBUG_ENTER("send_data");
DBUG_ENTER("select_dump::send_data");

if (unit->offset_limit_cnt)
{ // using limit offset,count
@@ -17,6 +17,44 @@

/* Insert of records */

/*
  INSERT DELAYED

  Insert delayed is distinguished from a normal insert by lock_type ==
  TL_WRITE_DELAYED instead of TL_WRITE. It first tries to open a
  "delayed" table (delayed_get_table()), but falls back to
  open_and_lock_tables() on error and proceeds as normal insert then.

  Opening a "delayed" table means to find a delayed insert thread that
  has the table open already. If this fails, a new thread is created and
  waited for to open and lock the table.

  If accessing the thread succeeded, in
  delayed_insert::get_local_table() the table of the thread is copied
  for local use. A copy is required because the normal insert logic
  works on a target table, but the other threads table object must not
  be used. The insert logic uses the record buffer to create a record.
  And the delayed insert thread uses the record buffer to pass the
  record to the table handler. So there must be different objects. Also
  the copied table is not included in the lock, so that the statement
  can proceed even if the real table cannot be accessed at this moment.

  Copying a table object is not a trivial operation. Besides the TABLE
  object there are the field pointer array, the field objects and the
  record buffer. After copying the field objects, their pointers into
  the record must be "moved" to point to the new record buffer.

  After this setup the normal insert logic is used. Only that for
  delayed inserts write_delayed() is called instead of write_record().
  It inserts the rows into a queue and signals the delayed insert thread
  instead of writing directly to the table.

  The delayed insert thread awakes from the signal. It locks the table,
  inserts the rows from the queue, unlocks the table, and waits for the
  next signal. It does normally live until a FLUSH TABLES or SHUTDOWN.

*/

#include "mysql_priv.h"
#include "sp_head.h"
#include "sql_trigger.h"
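
The new comment block above notes that after the field objects are copied they still point into the original record buffer and must be "moved" by the byte distance between the old and the new buffer. A self-contained C sketch of that pointer adjustment (simplified; the field layout and names are invented, the server uses Field objects with PTR_BYTE_DIFF and move_field):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct field_ref
{
  char *ptr;          /* points at this field's bytes inside a record buffer */
  size_t length;
};

int main(void)
{
  char orig_record[16]= "AAAABBBBCCCC";
  char copy_record[16];

  /* Two fields laid out inside the original record buffer. */
  struct field_ref fields[2]= {
    { orig_record + 0, 4 },
    { orig_record + 4, 4 },
  };

  /* Copy the record contents ... */
  memcpy(copy_record, orig_record, sizeof(orig_record));

  /* ... then "move" every field pointer by the distance between the two
     buffers so it addresses the copied record, preserving each field's
     relative offset. */
  ptrdiff_t adjust= copy_record - orig_record;
  for (size_t i= 0; i < 2; i++)
    fields[i].ptr+= adjust;

  printf("%.4s %.4s\n", fields[0].ptr, fields[1].ptr);  /* AAAA BBBB */
  return 0;
}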
@@ -1441,6 +1479,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
my_ptrdiff_t adjust_ptrs;
Field **field,**org_field, *found_next_number_field;
TABLE *copy;
DBUG_ENTER("delayed_insert::get_local_table");

/* First request insert thread to get a lock */
status=1;
@@ -1464,31 +1503,47 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
}
}

/*
Allocate memory for the TABLE object, the field pointers array, and
one record buffer of reclength size. Normally a table has three
record buffers of rec_buff_length size, which includes alignment
bytes. Since the table copy is used for creating one record only,
the other record buffers and alignment are unnecessary.
*/
client_thd->proc_info="allocating local table";
copy= (TABLE*) client_thd->alloc(sizeof(*copy)+
(table->s->fields+1)*sizeof(Field**)+
table->s->reclength);
if (!copy)
goto error;

/* Copy the TABLE object. */
*copy= *table;
copy->s= &copy->share_not_to_be_used;
// No name hashing
bzero((char*) &copy->s->name_hash,sizeof(copy->s->name_hash));
/* We don't need to change the file handler here */

field=copy->field=(Field**) (copy+1);
copy->record[0]=(byte*) (field+table->s->fields+1);
memcpy((char*) copy->record[0],(char*) table->record[0],table->s->reclength);
/* Assign the pointers for the field pointers array and the record. */
field= copy->field= (Field**) (copy + 1);
copy->record[0]= (byte*) (field + table->s->fields + 1);
memcpy((char*) copy->record[0], (char*) table->record[0],
table->s->reclength);

/* Make a copy of all fields */
/*
Make a copy of all fields.
The copied fields need to point into the copied record. This is done
by copying the field objects with their old pointer values and then
"move" the pointers by the distance between the original and copied
records. That way we preserve the relative positions in the records.
*/
adjust_ptrs= PTR_BYTE_DIFF(copy->record[0], table->record[0]);

adjust_ptrs=PTR_BYTE_DIFF(copy->record[0],table->record[0]);

found_next_number_field=table->found_next_number_field;
for (org_field=table->field ; *org_field ; org_field++,field++)
found_next_number_field= table->found_next_number_field;
for (org_field= table->field; *org_field; org_field++, field++)
{
if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy)))
return 0;
if (!(*field= (*org_field)->new_field(client_thd->mem_root, copy, 1)))
DBUG_RETURN(0);
(*field)->orig_table= copy; // Remove connection
(*field)->move_field(adjust_ptrs); // Point at copy->record[0]
if (*org_field == found_next_number_field)
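
The allocation comment in this hunk describes placing the TABLE copy, the field pointer array, and a single record buffer in one allocation: copy+1 is the start of the pointer array, and the record buffer follows it. A minimal C illustration of that single-block layout, using generic names rather than the server's TABLE and Field types:

#include <stdio.h>
#include <stdlib.h>

struct table_copy
{
  size_t fields;        /* number of columns */
  void  **field;        /* points just past the struct */
  char   *record;       /* points past the field pointer array */
};

static struct table_copy *alloc_local_copy(size_t fields, size_t reclength)
{
  /* One allocation: struct + (fields + 1) pointers + one record buffer. */
  struct table_copy *copy=
    malloc(sizeof(*copy) + (fields + 1) * sizeof(void*) + reclength);
  if (!copy)
    return NULL;
  copy->fields= fields;
  copy->field=  (void**) (copy + 1);                 /* array after struct */
  copy->record= (char*) (copy->field + fields + 1);  /* buffer after array */
  return copy;
}

int main(void)
{
  struct table_copy *copy= alloc_local_copy(3, 64);
  if (!copy)
    return 1;
  printf("struct=%p field=%p record=%p\n",
         (void*) copy, (void*) copy->field, (void*) copy->record);
  free(copy);
  return 0;
}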
@@ -1515,14 +1570,14 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
/* Adjust lock_count. This table object is not part of a lock. */
copy->lock_count= 0;

return copy;
DBUG_RETURN(copy);

/* Got fatal error */
error:
tables_in_use--;
status=1;
pthread_cond_signal(&cond); // Inform thread about abort
return 0;
DBUG_RETURN(0);
}

@@ -8017,7 +8017,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field,
org_field->field_name, table,
org_field->charset());
else
new_field= org_field->new_field(thd->mem_root, table);
new_field= org_field->new_field(thd->mem_root, table,
table == org_field->table);
if (new_field)
{
if (item)

@@ -13062,7 +13063,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
saved value
*/
Field *field= item->field;
item->result_field=field->new_field(thd->mem_root,field->table);
item->result_field=field->new_field(thd->mem_root,field->table, 1);
char *tmp=(char*) sql_alloc(field->pack_length()+1);
if (!tmp)
goto err;
@@ -747,7 +747,8 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
QQ: it is supposed that it is ok to use this function for field
cloning...
*/
if (!(*old_fld= (*fld)->new_field(&table->mem_root, table)))
if (!(*old_fld= (*fld)->new_field(&table->mem_root, table,
table == (*fld)->table)))
return 1;
(*old_fld)->move_field((my_ptrdiff_t)(table->record[1] -
table->record[0]));
@@ -804,7 +804,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (!(field->flags & BLOB_FLAG))
{ // Create a new field
field=key_part->field=field->new_field(&outparam->mem_root,
outparam);
outparam,
outparam == field->table);
field->field_length=key_part->length;
}
}