mirror of https://github.com/MariaDB/server.git
Add support for NULL=NULL in keys (used in GROUP BY optimization).
Add ISAM to the Windows version.
Fix of test results.
Fixes for NULL keys in HEAP tables.
commit 4d10a0cb7e
parent 7dd4eb71fe
21 changed files with 298 additions and 104 deletions
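The behaviour this commit adds is exercised by the new test cases further down (the heap and group_by tests): a HEAP table now accepts NULL in keyed columns, and GROUP BY on a nullable column puts all NULL values into a single group, which is what the NULL=NULL key comparison is for. A minimal SQL sketch of the kind of statements involved (table and column names here are illustrative, not copied from the tests):

CREATE TABLE h1 (a int default NULL, b int default NULL, KEY a (a), UNIQUE b (b)) type=heap;
INSERT INTO h1 VALUES (NULL,99),(99,NULL),(1,1);
SELECT * FROM h1 WHERE a<=>NULL;   -- finds the row whose key value is NULL
SELECT * FROM h1 WHERE a=NULL;     -- always empty: = never matches NULL

CREATE TABLE g1 (a char(1));
INSERT INTO g1 VALUES ('A'),(NULL),('B'),(NULL);
SELECT a,count(*) FROM g1 GROUP BY a;   -- all NULLs end up in one group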
@@ -451,3 +451,4 @@ vio/test-ssl
 vio/test-sslclient
 vio/test-sslserver
 vio/viotest-ssl
+libmysqld/ha_innodb.cc
@@ -48121,10 +48121,12 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.
 
 @itemize @bullet
 @item
 Fixed bug in @code{GROUP BY BINARY column}
+@item
+Added support for @code{NULL} keys in HEAP tables.
 @item
 Use index for @code{ORDER BY} in queries of type:
-@code{SELECT * FROM t1 WHERE key_part1=1 ORDER BY key_part1 DESC,key_part2 DESC}
+@code{SELECT * FROM t WHERE key_part1=1 ORDER BY key_part1 DESC,key_part2 DESC}
 @item
 Fixed bug in @code{FLUSH QUERY CACHE}.
 @item
@@ -46,7 +46,8 @@ HP_INFO *heap_open(const char *name, int mode, uint keys, HP_KEYDEF *keydef,
 for (j=length=0 ; j < keydef[i].keysegs; j++)
 {
 length+=keydef[i].seg[j].length;
-if (keydef[i].seg[j].null_bit)
+if (keydef[i].seg[j].null_bit &&
+!(keydef[i].flag & HA_NULL_ARE_EQUAL))
 keydef[i].flag |= HA_NULL_PART_KEY;
 }
 keydef[i].length=length;
@@ -20,7 +20,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key)
 {
 byte *pos;
 HP_SHARE *share=info->s;
-DBUG_ENTER("hp_rkey");
+DBUG_ENTER("heap_rkey");
 DBUG_PRINT("enter",("base: %lx inx: %d",info,inx));
 
 if ((uint) inx >= share->keys)
@@ -238,7 +238,7 @@ int _hp_write_key(register HP_SHARE *info, HP_KEYDEF *keyinfo,
 _hp_movelink(pos,gpos,empty);
 }
 
-/* Check if dupplicated keys */
+/* Check if duplicated keys */
 if ((keyinfo->flag & HA_NOSAME) && pos == gpos &&
 (!(keyinfo->flag & HA_NULL_PART_KEY) ||
 !hp_if_null_in_key(keyinfo, record)))
@@ -255,6 +255,8 @@ inline double ulonglong2double(ulonglong value)
 #define HAVE_COMPRESS
 #define HAVE_CREATESEMAPHORE
 
+#define HAVE_ISAM /* We want to have support for ISAM in 4.0 */
+
 #ifdef NOT_USED
 #define HAVE_SNPRINTF /* Gave link error */
 #define _snprintf snprintf
@@ -150,6 +150,7 @@ enum ha_base_keytype {
 #define HA_FULLTEXT 128 /* SerG: for full-text search */
 #define HA_UNIQUE_CHECK 256 /* Check the key for uniqueness */
 #define HA_SPATIAL 1024 /* Alex Barkov: for spatial search */
+#define HA_NULL_ARE_EQUAL 2048 /* NULL in key are cmp as equal */
 
 
 /* Automatic bits in key-flag */
@@ -260,6 +261,7 @@ enum ha_base_keytype {
 #define MBR_DISJOINT 4096
 #define MBR_EQUAL 8192
 #define MBR_DATA 16384
+#define SEARCH_NULL_ARE_EQUAL 32768 /* NULL in keys are equal */
 
 /* bits in opt_flag */
 #define QUICK_USED 1
@@ -38,7 +38,7 @@ libmysqlsources = errmsg.c get_password.c password.c
 noinst_HEADERS = embedded_priv.h
 
 sqlsources = convert.cc derror.cc field.cc field_conv.cc filesort.cc \
-ha_innobase.cc ha_berkeley.cc ha_heap.cc ha_isam.cc ha_isammrg.cc \
+ha_innodb.cc ha_berkeley.cc ha_heap.cc ha_isam.cc ha_isammrg.cc \
 ha_myisam.cc ha_myisammrg.cc handler.cc sql_handler.cc \
 hostname.cc init.cc \
 item.cc item_buff.cc item_cmpfunc.cc item_create.cc \
@@ -25,7 +25,8 @@
 
 /* Functions declared in this file */
 
-static int w_search(MI_INFO *info,MI_KEYDEF *keyinfo,uchar *key,
+static int w_search(MI_INFO *info,MI_KEYDEF *keyinfo,
+uint comp_flag, uchar *key,
 uint key_length, my_off_t pos, uchar *father_buff,
 uchar *father_keypos, my_off_t father_page,
 my_bool insert_last);
@@ -245,10 +246,23 @@ int _mi_ck_write_btree(register MI_INFO *info, uint keynr, uchar *key,
 uint key_length)
 {
 int error;
+uint comp_flag;
+MI_KEYDEF *keyinfo=info->s->keyinfo+keynr;
 DBUG_ENTER("_mi_ck_write_btree");
 
+if (keyinfo->flag & HA_SORT_ALLOWS_SAME)
+comp_flag=SEARCH_BIGGER; /* Put after same key */
+else if (keyinfo->flag & HA_NOSAME)
+{
+comp_flag=SEARCH_FIND | SEARCH_UPDATE; /* No dupplicates */
+if (keyinfo->flag & HA_NULL_ARE_EQUAL)
+comp_flag|= SEARCH_NULL_ARE_EQUAL;
+}
+else
+comp_flag=SEARCH_SAME; /* Keys in rec-pos order */
+
 if (info->s->state.key_root[keynr] == HA_OFFSET_ERROR ||
-(error=w_search(info,info->s->keyinfo+keynr,key, key_length,
+(error=w_search(info, keyinfo, comp_flag, key, key_length,
 info->s->state.key_root[keynr], (uchar *) 0, (uchar*) 0,
 (my_off_t) 0, 1)) > 0)
 error=_mi_enlarge_root(info,keynr,key);
@@ -291,13 +305,12 @@ int _mi_enlarge_root(register MI_INFO *info, uint keynr, uchar *key)
 */
 
 static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
-uchar *key, uint key_length, my_off_t page,
-uchar *father_buff,
-uchar *father_keypos, my_off_t father_page,
-my_bool insert_last)
+uint comp_flag, uchar *key, uint key_length, my_off_t page,
+uchar *father_buff, uchar *father_keypos,
+my_off_t father_page, my_bool insert_last)
 {
 int error,flag;
-uint comp_flag,nod_flag, search_key_length;
+uint nod_flag, search_key_length;
 uchar *temp_buff,*keypos;
 uchar keybuff[MI_MAX_KEY_BUFF];
 my_bool was_last_key;
@@ -305,17 +318,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
 DBUG_ENTER("w_search");
 DBUG_PRINT("enter",("page: %ld",page));
 
-search_key_length=USE_WHOLE_KEY;
-if (keyinfo->flag & HA_SORT_ALLOWS_SAME)
-comp_flag=SEARCH_BIGGER; /* Put after same key */
-else if (keyinfo->flag & HA_NOSAME)
-{
-comp_flag=SEARCH_FIND | SEARCH_UPDATE; /* No dupplicates */
-search_key_length= key_length;
-}
-else
-comp_flag=SEARCH_SAME; /* Keys in rec-pos order */
-
+search_key_length= (comp_flag & SEARCH_FIND) ? key_length : USE_WHOLE_KEY;
 if (!(temp_buff= (uchar*) my_alloca((uint) keyinfo->block_length+
 MI_MAX_KEY_BUFF*2)))
 DBUG_RETURN(-1);
@@ -344,7 +347,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
 insert_last=0;
 next_page=_mi_kpos(nod_flag,keypos);
 if (next_page == HA_OFFSET_ERROR ||
-(error=w_search(info,keyinfo,key,key_length,next_page,
+(error=w_search(info, keyinfo, comp_flag, key, key_length, next_page,
 temp_buff, keypos, page, insert_last)) >0)
 {
 error=_mi_insert(info,keyinfo,key,temp_buff,keypos,keybuff,father_buff,
@@ -759,41 +762,44 @@ static int keys_compare(bulk_insert_param *param, uchar *key1, uchar *key2)
 {
 uint not_used;
 return _mi_key_cmp(param->info->s->keyinfo[param->keynr].seg,
-key1, key2, USE_WHOLE_KEY, SEARCH_SAME, &not_used);
+key1, key2, USE_WHOLE_KEY, SEARCH_SAME,
+&not_used);
 }
 
 
 static int keys_free(uchar *key, TREE_FREE mode, bulk_insert_param *param)
 {
-/* probably I can use info->lastkey here, but I'm not sure,
-and to be safe I'd better use local lastkey.
-Monty, feel free to comment on this */
+/*
+Probably I can use info->lastkey here, but I'm not sure,
+and to be safe I'd better use local lastkey.
+*/
 uchar lastkey[MI_MAX_KEY_BUFF];
 uint keylen;
 MI_KEYDEF *keyinfo;
 
 switch (mode) {
-case free_init:
-if (param->info->s->concurrent_insert)
-{
-rw_wrlock(&param->info->s->key_root_lock[param->keynr]);
-param->info->s->keyinfo[param->keynr].version++;
-}
-return 0;
-case free_free:
-keyinfo=param->info->s->keyinfo+param->keynr;
-keylen=_mi_keylength(keyinfo, key);
-memcpy(lastkey, key, keylen);
-return _mi_ck_write_btree(param->info,param->keynr,lastkey,
-keylen - param->info->s->rec_reflength);
-case free_end:
-if (param->info->s->concurrent_insert)
-rw_unlock(&param->info->s->key_root_lock[param->keynr]);
-return 0;
+case free_init:
+if (param->info->s->concurrent_insert)
+{
+rw_wrlock(&param->info->s->key_root_lock[param->keynr]);
+param->info->s->keyinfo[param->keynr].version++;
+}
+return 0;
+case free_free:
+keyinfo=param->info->s->keyinfo+param->keynr;
+keylen=_mi_keylength(keyinfo, key);
+memcpy(lastkey, key, keylen);
+return _mi_ck_write_btree(param->info,param->keynr,lastkey,
+keylen - param->info->s->rec_reflength);
+case free_end:
+if (param->info->s->concurrent_insert)
+rw_unlock(&param->info->s->key_root_lock[param->keynr]);
+return 0;
 }
 return -1;
 }
 
+
 int _mi_init_bulk_insert(MI_INFO *info)
 {
 MYISAM_SHARE *share=info->s;
@@ -263,3 +263,84 @@ score count(*)
 2 1
 1 2
 drop table t1;
+CREATE TABLE t1 (a char(1));
+INSERT INTO t1 VALUES ('A'),('B'),('A'),('B'),('A'),('B'),(NULL),('a'),('b'),(NULL),('A'),('B'),(NULL);
+SELECT a FROM t1 GROUP BY a;
+a
+NULL
+A
+B
+SELECT a,count(*) FROM t1 GROUP BY a;
+a count(*)
+NULL 3
+A 5
+B 5
+SELECT a FROM t1 GROUP BY binary a;
+a
+NULL
+A
+B
+a
+b
+SELECT a,count(*) FROM t1 GROUP BY binary a;
+a count(*)
+NULL 3
+A 4
+B 4
+a 1
+b 1
+SELECT binary a FROM t1 GROUP BY 1;
+binary a
+NULL
+A
+B
+a
+b
+SELECT binary a,count(*) FROM t1 GROUP BY 1;
+binary a count(*)
+NULL 3
+A 4
+B 4
+a 1
+b 1
+SET SQL_BIG_TABLES=1;
+SELECT a FROM t1 GROUP BY a;
+a
+NULL
+A
+B
+SELECT a,count(*) FROM t1 GROUP BY a;
+a count(*)
+NULL 3
+A 5
+B 5
+SELECT a FROM t1 GROUP BY binary a;
+a
+NULL
+A
+B
+a
+b
+SELECT a,count(*) FROM t1 GROUP BY binary a;
+a count(*)
+NULL 3
+A 4
+B 4
+a 1
+b 1
+SELECT binary a FROM t1 GROUP BY 1;
+binary a
+NULL
+A
+B
+a
+b
+SELECT binary a,count(*) FROM t1 GROUP BY 1;
+binary a count(*)
+NULL 3
+A 4
+B 4
+a 1
+b 1
+SET SQL_BIG_TABLES=0;
+drop table t1;
@@ -165,3 +165,29 @@ explain select * from t1 where btn="a" and new_col="a";
 table type possible_keys key key_len ref rows Extra
 t1 ref btn btn 11 const,const 10 where used
 drop table t1;
+CREATE TABLE t1 (
+a int default NULL,
+b int default NULL,
+KEY a (a),
+UNIQUE b (b)
+) type=heap;
+INSERT INTO t1 VALUES (NULL,99),(99,NULL),(1,1),(2,2),(1,3);
+SELECT * FROM t1 WHERE a=NULL;
+a b
+explain SELECT * FROM t1 WHERE a IS NULL;
+table type possible_keys key key_len ref rows Extra
+t1 ref a a 5 const 10 where used
+SELECT * FROM t1 WHERE a<=>NULL;
+a b
+NULL 99
+SELECT * FROM t1 WHERE b=NULL;
+a b
+explain SELECT * FROM t1 WHERE b IS NULL;
+table type possible_keys key key_len ref rows Extra
+t1 ref b b 5 const 1 where used
+SELECT * FROM t1 WHERE b<=>NULL;
+a b
+99 NULL
+INSERT INTO t1 VALUES (1,3);
+Duplicate entry '3' for key 1
+DROP TABLE t1;
@@ -40,7 +40,6 @@ insert into t1 values (null);
 select * from t1 where x != 0;
 x
 drop table t1;
-DROP TABLE IF EXISTS t1;
 CREATE TABLE t1 (
 indexed_field int default NULL,
 KEY indexed_field (indexed_field)
@@ -286,15 +286,15 @@ a b c
 1 NULL NULL
 explain select * from t1 where a = 1 order by a desc, b desc;
 table type possible_keys key key_len ref rows Extra
-t1 ref a a 4 const 5 where used; Using index; Using filesort
+t1 ref a a 4 const 5 where used; Using index
 select * from t1 where a = 1 order by a desc, b desc;
 a b c
 1 3 b
 1 1 b
 1 1 b
 1 1 NULL
 1 1 b
 1 1 b
 1 NULL NULL
 1 NULL b
 1 NULL NULL
 explain select * from t1 where a = 1 and b is null order by a desc, b desc;
 table type possible_keys key key_len ref rows Extra
 t1 ref a a 9 const,const 2 where used; Using index; Using filesort
@@ -243,3 +243,26 @@ select sql_big_result spid,sum(userid) from t1 group by spid desc;
 explain select sql_big_result score,count(*) from t1 group by score desc;
 select sql_big_result score,count(*) from t1 group by score desc;
 drop table t1;
+
+#
+# Compare with hash keys
+#
+
+CREATE TABLE t1 (a char(1));
+INSERT INTO t1 VALUES ('A'),('B'),('A'),('B'),('A'),('B'),(NULL),('a'),('b'),(NULL),('A'),('B'),(NULL);
+SELECT a FROM t1 GROUP BY a;
+SELECT a,count(*) FROM t1 GROUP BY a;
+SELECT a FROM t1 GROUP BY binary a;
+SELECT a,count(*) FROM t1 GROUP BY binary a;
+SELECT binary a FROM t1 GROUP BY 1;
+SELECT binary a,count(*) FROM t1 GROUP BY 1;
+# Do the same tests with MyISAM temporary tables
+SET SQL_BIG_TABLES=1;
+SELECT a FROM t1 GROUP BY a;
+SELECT a,count(*) FROM t1 GROUP BY a;
+SELECT a FROM t1 GROUP BY binary a;
+SELECT a,count(*) FROM t1 GROUP BY binary a;
+SELECT binary a FROM t1 GROUP BY 1;
+SELECT binary a,count(*) FROM t1 GROUP BY 1;
+SET SQL_BIG_TABLES=0;
+drop table t1;
@@ -100,3 +100,25 @@ update t1 set new_col=btn;
 explain select * from t1 where btn="a";
 explain select * from t1 where btn="a" and new_col="a";
 drop table t1;
+
+#
+# Test of NULL keys
+#
+
+CREATE TABLE t1 (
+a int default NULL,
+b int default NULL,
+KEY a (a),
+UNIQUE b (b)
+) type=heap;
+INSERT INTO t1 VALUES (NULL,99),(99,NULL),(1,1),(2,2),(1,3);
+SELECT * FROM t1 WHERE a=NULL;
+explain SELECT * FROM t1 WHERE a IS NULL;
+SELECT * FROM t1 WHERE a<=>NULL;
+SELECT * FROM t1 WHERE b=NULL;
+explain SELECT * FROM t1 WHERE b IS NULL;
+SELECT * FROM t1 WHERE b<=>NULL;
+
+--error 1062
+INSERT INTO t1 VALUES (1,3);
+DROP TABLE t1;
@@ -25,7 +25,6 @@ drop table t1;
 # Test problem med index on NULL columns and testing with =NULL;
 #
 
-DROP TABLE IF EXISTS t1;
 CREATE TABLE t1 (
 indexed_field int default NULL,
 KEY indexed_field (indexed_field)
@@ -33,7 +33,7 @@ const char **ha_heap::bas_ext() const
 
 int ha_heap::open(const char *name, int mode, uint test_if_locked)
 {
-uint key,part,parts,mem_per_row=0;
+uint key,parts,mem_per_row=0;
 ulong max_rows;
 HP_KEYDEF *keydef;
 HP_KEYSEG *seg;
@@ -48,24 +48,27 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
 for (key=0 ; key < table->keys ; key++)
 {
 KEY *pos=table->key_info+key;
+KEY_PART_INFO *key_part= pos->key_part;
+KEY_PART_INFO *key_part_end= key_part+pos->key_parts;
+
 mem_per_row += (pos->key_length + (sizeof(char*) * 2));
 
 keydef[key].keysegs=(uint) pos->key_parts;
-keydef[key].flag = (pos->flags & HA_NOSAME);
+keydef[key].flag = (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
 keydef[key].seg=seg;
 
-for (part=0 ; part < pos->key_parts ; part++)
+for (; key_part != key_part_end ; key_part++, seg++)
 {
-uint flag=pos->key_part[part].key_type;
-Field *field=pos->key_part[part].field;
+uint flag=key_part->key_type;
+Field *field=key_part->field;
 if (!f_is_packed(flag) &&
 f_packtype(flag) == (int) FIELD_TYPE_DECIMAL &&
 !(flag & FIELDFLAG_BINARY))
 seg->type= (int) HA_KEYTYPE_TEXT;
 else
 seg->type= (int) HA_KEYTYPE_BINARY;
-seg->start=(uint) pos->key_part[part].offset;
-seg->length=(uint) pos->key_part[part].length;
+seg->start=(uint) key_part->offset;
+seg->length=(uint) key_part->length;
 if (field->null_ptr)
 {
 seg->null_bit=field->null_bit;
@@ -88,7 +91,8 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
 table->max_rows : max_rows),
 table->min_rows);
 my_free((gptr) keydef,MYF(0));
-info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
+if (file)
+info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
 ref_length=sizeof(HEAP_PTR);
 return (!file ? errno : 0);
 }
@@ -435,7 +435,8 @@ class Item_func_binary :public Item_str_func
 public:
 Item_func_binary(Item *a) :Item_str_func(a) {}
 const char *func_name() const { return "binary"; }
-String *val_str(String *a) { return (args[0]->val_str(a)); }
+String *val_str(String *a)
+{ a=args[0]->val_str(a); null_value=args[0]->null_value; return a; }
 void fix_length_and_dec() { binary=1; max_length=args[0]->max_length; }
 void print(String *str) { print_op(str); }
 };
@@ -418,7 +418,8 @@ class Item_typecast :public Item_str_func
 {
 public:
 Item_typecast(Item *a) :Item_str_func(a) {}
-String *val_str(String *a) { return (args[0]->val_str(a)); }
+String *val_str(String *a)
+{ a=args[0]->val_str(a); null_value=args[0]->null_value; return a; }
 void fix_length_and_dec() { max_length=args[0]->max_length; }
 void print(String *str);
 };
@@ -557,9 +557,9 @@ pthread_handler_decl(handle_one_connection,arg)
 
 pthread_detach_this_thread();
 
-#if !defined( __WIN__) && !defined(OS2) /* Win32 calls this in pthread_create */
-if (my_thread_init()) // needed to be called first before we call
-// DBUG_ macros
+#if !defined( __WIN__) && !defined(OS2) // Win32 calls this in pthread_create
+// The following calls needs to be done before we call DBUG_ macros
+if (my_thread_init())
 {
 close_connection(&thd->net,ER_OUT_OF_RESOURCES);
 statistic_increment(aborted_connects,&LOCK_thread_count);
@@ -568,13 +568,13 @@ pthread_handler_decl(handle_one_connection,arg)
 }
 #endif
 
-// handle_one_connection() is the only way a thread would start
-// and would always be on top of the stack
-// therefore, the thread stack always starts at the address of the first
-// local variable of handle_one_connection, which is thd
-// we need to know the start of the stack so that we could check for
-// stack overruns
-
+/*
+handle_one_connection() is the only way a thread would start
+and would always be on top of the stack, therefore, the thread
+stack always starts at the address of the first local variable
+of handle_one_connection, which is thd. We need to know the
+start of the stack so that we could check for stack overruns.
+*/
 DBUG_PRINT("info", ("handle_one_connection called by thread %d\n",
 thd->thread_id));
 // now that we've called my_thread_init(), it is safe to call DBUG_*
@@ -634,12 +634,12 @@ pthread_handler_decl(handle_one_connection,arg)
 if (net->error && net->vio != 0)
 {
 if (!thd->killed && opt_warnings)
-sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
-thd->thread_id,(thd->db ? thd->db : "unconnected"),
-thd->user ? thd->user : "unauthenticated",
-thd->host_or_ip,
-(net->last_errno ? ER(net->last_errno) :
-ER(ER_UNKNOWN_ERROR)));
+sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
+thd->thread_id,(thd->db ? thd->db : "unconnected"),
+thd->user ? thd->user : "unauthenticated",
+thd->host_or_ip,
+(net->last_errno ? ER(net->last_errno) :
+ER(ER_UNKNOWN_ERROR)));
 send_error(net,net->last_errno,NullS);
 thread_safe_increment(aborted_threads,&LOCK_thread_count);
 }
@@ -1216,7 +1216,6 @@ mysql_execute_command(void)
 #endif
 }
 
-thread_safe_increment(com_stat[lex->sql_command],&LOCK_thread_count);
 /*
 Skip if we are in the slave thread, some table rules have been given
 and the table list says the query should not be replicated
@@ -183,7 +183,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
 ulong select_options,select_result *result)
 {
 TABLE *tmp_table;
-int error,tmp;
+int error, tmp_error, tmp;
 bool need_tmp,hidden_group_fields;
 bool simple_order,simple_group,no_order, skip_sort_order;
 Item::cond_result cond_value;
@@ -678,8 +678,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
 
 /* Copy data to the temporary table */
 thd->proc_info="Copying to tmp table";
-if (do_select(&join,(List<Item> *) 0,tmp_table,0))
+if ((tmp_error=do_select(&join,(List<Item> *) 0,tmp_table,0)))
+{
+error=tmp_error;
 goto err; /* purecov: inspected */
+}
 if (join.having)
 join.having=having=0; // Allready done
@@ -752,9 +755,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
 group=0;
 }
 thd->proc_info="Copying to group table";
+tmp_error= -1;
 if (make_sum_func_list(&join,all_fields) ||
-do_select(&join,(List<Item> *) 0,tmp_table2,0))
+(tmp_error=do_select(&join,(List<Item> *) 0,tmp_table2,0)))
 {
+error=tmp_error;
 free_tmp_table(thd,tmp_table2);
 goto err; /* purecov: inspected */
 }
@@ -3736,14 +3741,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
 if (maybe_null)
 {
 /*
-To be able to group on NULL, we move the null bit to be
-just before the column.
-The null byte is updated by 'end_update()'
+To be able to group on NULL, we reserve place in group_buff
+for the NULL flag just before the column.
+The field data is after this flag.
+The NULL flag is updated by 'end_update()' and 'end_write()'
 */
-key_part_info->null_bit=1;
-key_part_info->null_offset= key_part_info->offset-1;
-group->field->move_field((char*) group_buff+1, (uchar*) group_buff,
-1);
+keyinfo->flags|= HA_NULL_ARE_EQUAL; // def. that NULL == NULL
+key_part_info->null_bit=field->null_bit;
+key_part_info->null_offset= (uint) (field->null_ptr -
+(uchar*) table->record[0]);
+group->field->move_field((char*) ++group->buff);
 }
 else
 group->field->move_field((char*) group_buff);
@@ -3899,10 +3906,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
 for (uint i=0; i < keyinfo->key_parts ; i++,seg++)
 {
 Field *field=keyinfo->key_part[i].field;
-seg->flag=0;
-seg->language=MY_CHARSET_CURRENT;
-seg->length=keyinfo->key_part[i].length;
-seg->start=keyinfo->key_part[i].offset;
+seg->flag= 0;
+seg->language= MY_CHARSET_CURRENT;
+seg->length= keyinfo->key_part[i].length;
+seg->start= keyinfo->key_part[i].offset;
 if (field->flags & BLOB_FLAG)
 {
 seg->type=
@@ -3923,11 +3930,17 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
 keyinfo->key_part[i].length > 4)
 seg->flag|=HA_SPACE_PACK;
 }
-if (using_unique_constraint &&
-!(field->flags & NOT_NULL_FLAG))
+if (!(field->flags & NOT_NULL_FLAG))
 {
 seg->null_bit= field->null_bit;
 seg->null_pos= (uint) (field->null_ptr - (uchar*) table->record[0]);
+/*
+We are using a GROUP BY on something that contains NULL
+In this case we have to tell MyISAM that two NULL should
+on INSERT be compared as equal
+*/
+if (!using_unique_constraint)
+keydef.flag|= HA_NULL_ARE_EQUAL;
 }
 }
 }
@@ -4065,9 +4078,12 @@ bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
 }
 
 
-/*****************************************************************************
-** Make a join of all tables and write it on socket or to table
-*****************************************************************************/
+/****************************************************************************
+Make a join of all tables and write it on socket or to table
+Return: 0 if ok
+1 if error is sent
+-1 if error should be sent
+****************************************************************************/
 
 static int
 do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
@@ -4144,15 +4160,21 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
 if (error == -3)
 error=0; /* select_limit used */
 }
 
+/* Return 1 if error is sent; -1 if error should be sent */
 if (error < 0)
-join->result->send_error(0,NullS); /* purecov: inspected */
+{
+join->result->send_error(0,NullS); /* purecov: inspected */
+error=1; // Error sent
+}
 else
 {
-if (!table) // If sending data to client
-error=0;
+if (!table) // If sending data to client
+{
+join_free(join); // Unlock all cursors
 if (join->result->send_eof())
-error= -1;
+error= 1; // Don't send error
+}
 DBUG_PRINT("info",("%ld records output",join->send_records));
 }
@@ -4169,10 +4191,10 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
 my_errno=tmp;
 error= -1;
 }
-if (error != old_error)
+if (error == -1)
 table->file->print_error(my_errno,MYF(0));
 }
-DBUG_RETURN(error < 0);
+DBUG_RETURN(error);
 }
@@ -4926,6 +4948,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
 copy_fields(&join->tmp_table_param);
 copy_funcs(join->tmp_table_param.funcs);
 
+#ifdef TO_BE_DELETED
 if (!table->uniques) // If not unique handling
 {
 /* Copy null values from group to row */
@@ -4936,10 +4959,11 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
 if (item->maybe_null)
 {
 Field *field=item->tmp_table_field();
-field->ptr[-1]= (byte) (field->is_null() ? 0 : 1);
+field->ptr[-1]= (byte) (field->is_null() ? 1 : 0);
 }
 }
 }
+#endif
 if (!join->having || join->having->val_int())
 {
 join->found_records++;
@@ -4994,8 +5018,9 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
 {
 Item *item= *group->item;
 item->save_org_in_field(group->field);
+/* Store in the used key if the field was 0 */
 if (item->maybe_null)
-group->buff[0]=item->null_value ? 0: 1; // Save reversed value
+group->buff[-1]=item->null_value ? 1 : 0;
 }
 // table->file->index_init(0);
 if (!table->file->index_read(table->record[1],