diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 90e6c3c100d..9237eea21c4 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -51,6 +51,7 @@ georg@lmy002.wdf.sap.corp gerberb@ou800.zenez.com gluh@gluh.(none) gluh@gluh.mysql.r18.ru +gluh@mysql.com gordon@zero.local.lan greg@gcw.ath.cx greg@mysql.com diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index babf4de0c3d..07f3f25b50c 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -566,6 +566,7 @@ static void print_result() my_bool found_error=0; res = mysql_use_result(sock); + prev[0] = '\0'; for (i = 0; (row = mysql_fetch_row(res)); i++) { @@ -595,7 +596,7 @@ static void print_result() putchar('\n'); } if (found_error && opt_auto_repair && what_to_do != DO_REPAIR && - (!opt_fast || strcmp(row[3],"OK"))) + !opt_fast) insert_dynamic(&tables4repair, prev); mysql_free_result(res); } diff --git a/innobase/include/os0file.h b/innobase/include/os0file.h index d1439faf29f..ebc014df9fd 100644 --- a/innobase/include/os0file.h +++ b/innobase/include/os0file.h @@ -65,6 +65,8 @@ log. */ #define OS_FILE_OVERWRITE 53 #define OS_FILE_OPEN_RAW 54 #define OS_FILE_CREATE_PATH 55 +#define OS_FILE_OPEN_RETRY 56 /* for os_file_create() on + the first ibdata file */ #define OS_FILE_READ_ONLY 333 #define OS_FILE_READ_WRITE 444 diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c index 8d1ad895aa9..15f5bf40e51 100644 --- a/innobase/os/os0file.c +++ b/innobase/os/os0file.c @@ -402,8 +402,6 @@ os_file_lock( "InnoDB: using the same InnoDB data or log files.\n"); } - close(fd); - return(-1); } @@ -978,6 +976,7 @@ try_again: } else if (access_type == OS_FILE_READ_WRITE && os_file_lock(file, name)) { *success = FALSE; + close(file); file = -1; #endif } else { @@ -1090,6 +1089,7 @@ os_file_create_simple_no_error_handling( } else if (access_type == OS_FILE_READ_WRITE && os_file_lock(file, name)) { *success = FALSE; + close(file); file = -1; #endif } else { @@ -1141,7 +1141,8 @@ try_again: if (create_mode == OS_FILE_OPEN_RAW) { create_flag = OPEN_EXISTING; share_mode = FILE_SHARE_WRITE; - } else if (create_mode == OS_FILE_OPEN) { + } else if (create_mode == OS_FILE_OPEN + || create_mode == OS_FILE_OPEN_RETRY) { create_flag = OPEN_EXISTING; } else if (create_mode == OS_FILE_CREATE) { create_flag = CREATE_NEW; @@ -1232,7 +1233,8 @@ try_again: try_again: ut_a(name); - if (create_mode == OS_FILE_OPEN || create_mode == OS_FILE_OPEN_RAW) { + if (create_mode == OS_FILE_OPEN || create_mode == OS_FILE_OPEN_RAW + || create_mode == OS_FILE_OPEN_RETRY) { mode_str = "OPEN"; create_flag = O_RDWR; } else if (create_mode == OS_FILE_CREATE) { @@ -1305,6 +1307,23 @@ try_again: } else if (create_mode != OS_FILE_OPEN_RAW && os_file_lock(file, name)) { *success = FALSE; + if (create_mode == OS_FILE_OPEN_RETRY) { + int i; + ut_print_timestamp(stderr); + fputs(" InnoDB: Retrying to lock the first data file\n", + stderr); + for (i = 0; i < 100; i++) { + os_thread_sleep(1000000); + if (!os_file_lock(file, name)) { + *success = TRUE; + return(file); + } + } + ut_print_timestamp(stderr); + fputs(" InnoDB: Unable to open the first data file\n", + stderr); + } + close(file); file = -1; #endif } else { diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c index 44cf645f170..62df7301cc9 100644 --- a/innobase/srv/srv0start.c +++ b/innobase/srv/srv0start.c @@ -789,6 +789,11 @@ open_or_create_data_files( files[i] = os_file_create( name, OS_FILE_OPEN_RAW, OS_FILE_NORMAL, OS_DATA_FILE, &ret); + } else if (i == 0) { + files[i] = 
os_file_create( + name, OS_FILE_OPEN_RETRY, + OS_FILE_NORMAL, + OS_DATA_FILE, &ret); } else { files[i] = os_file_create( name, OS_FILE_OPEN, OS_FILE_NORMAL, diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am index b2e86d8c00e..57598f5f3e3 100644 --- a/mysql-test/Makefile.am +++ b/mysql-test/Makefile.am @@ -32,21 +32,21 @@ endif benchdir_root= $(prefix) testdir = $(benchdir_root)/mysql-test -EXTRA_SCRIPTS = mysql-test-run.sh install_test_db.sh +EXTRA_SCRIPTS = mysql-test-run.sh mysql-test-run.pl install_test_db.sh EXTRA_DIST = $(EXTRA_SCRIPTS) test_SCRIPTS = mysql-test-run install_test_db test_DATA = std_data/client-key.pem std_data/client-cert.pem std_data/cacert.pem CLEANFILES = $(test_SCRIPTS) $(test_DATA) -INCLUDES = -I$(srcdir)/../include -I../include -I.. -EXTRA_PROGRAMS = mysql_test_run_new +INCLUDES = -I$(srcdir)/../include -I../include -I.. +EXTRA_PROGRAMS = mysql_test_run_new noinst_HEADERS = my_manage.h mysql_test_run_new_SOURCES= mysql_test_run_new.c my_manage.c my_create_tables.c dist-hook: mkdir -p $(distdir)/t $(distdir)/r $(distdir)/include \ - $(distdir)/std_data + $(distdir)/std_data $(distdir)/lib $(INSTALL_DATA) $(srcdir)/t/*.test $(srcdir)/t/*.opt $(srcdir)/t/*.sh $(srcdir)/t/*.slave-mi $(distdir)/t $(INSTALL_DATA) $(srcdir)/include/*.inc $(distdir)/include $(INSTALL_DATA) $(srcdir)/r/*.result $(srcdir)/r/*.result.es $(srcdir)/r/*.require $(distdir)/r @@ -54,6 +54,8 @@ dist-hook: $(INSTALL_DATA) $(srcdir)/std_data/*.dat $(srcdir)/std_data/*.000001 $(distdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/des_key_file $(distdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/*.pem $(distdir)/std_data + $(INSTALL_DATA) $(srcdir)/lib/init_db.sql $(distdir)/lib + $(INSTALL_DATA) $(srcdir)/lib/*.pl $(distdir)/lib install-data-local: @@ -61,7 +63,8 @@ install-data-local: $(DESTDIR)$(testdir)/t \ $(DESTDIR)$(testdir)/r \ $(DESTDIR)$(testdir)/include \ - $(DESTDIR)$(testdir)/std_data + $(DESTDIR)$(testdir)/std_data \ + $(DESTDIR)$(testdir)/lib $(INSTALL_DATA) $(srcdir)/README $(DESTDIR)$(testdir) $(INSTALL_DATA) $(srcdir)/t/*.test $(DESTDIR)$(testdir)/t $(INSTALL_DATA) $(srcdir)/t/*.opt $(DESTDIR)$(testdir)/t @@ -75,6 +78,8 @@ install-data-local: $(INSTALL_DATA) $(srcdir)/std_data/des_key_file $(DESTDIR)$(testdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/Moscow_leap $(DESTDIR)$(testdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/*.pem $(DESTDIR)$(testdir)/std_data + $(INSTALL_DATA) $(srcdir)/lib/init_db.sql $(DESTDIR)$(testdir)/lib + $(INSTALL_DATA) $(srcdir)/lib/*.pl $(DESTDIR)$(testdir)/lib std_data/%.pem: @CP@ $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index ae5caa8044a..e4f14447e30 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -635,7 +635,6 @@ sub command_line_setup () { { mtr_error("Can't use --extern with --embedded-server"); } - $opt_result_ext= ".es"; } # FIXME don't understand what this is diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index a6396080ef0..12f9c3742e5 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -607,3 +607,33 @@ primary key (a)) engine=ndb max_rows=1; drop table t1; +create table t1 +(counter int(64) NOT NULL auto_increment, +datavalue char(40) default 'XXXX', +primary key (counter) +) ENGINE=ndbcluster; +insert into t1 (datavalue) values ('newval'); +insert into t1 (datavalue) values ('newval'); +select * from t1 order by counter; +counter datavalue +1 newval +2 newval 
+insert into t1 (datavalue) select datavalue from t1 where counter < 100; +select * from t1 order by counter; +counter datavalue +1 newval +2 newval +3 newval +4 newval +insert into t1 (datavalue) select datavalue from t1 where counter < 100; +select * from t1 order by counter; +counter datavalue +1 newval +2 newval +3 newval +4 newval +35 newval +36 newval +37 newval +38 newval +drop table t1; diff --git a/mysql-test/r/rpl_rewrite_db.result b/mysql-test/r/rpl_rewrite_db.result index a2c8706e3e1..da3ec0243fe 100644 --- a/mysql-test/r/rpl_rewrite_db.result +++ b/mysql-test/r/rpl_rewrite_db.result @@ -89,4 +89,5 @@ a b 2 row 2 3 row 3 0 +drop database rewrite; drop table t1; diff --git a/mysql-test/r/user_var.result b/mysql-test/r/user_var.result index 9edeea404ec..e42849abdf1 100644 --- a/mysql-test/r/user_var.result +++ b/mysql-test/r/user_var.result @@ -175,3 +175,7 @@ set @v1=null, @v2=1, @v3=1.1, @v4=now(); select coercibility(@v1),coercibility(@v2),coercibility(@v3),coercibility(@v4); coercibility(@v1) coercibility(@v2) coercibility(@v3) coercibility(@v4) 2 2 2 2 +set session @honk=99; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '@honk=99' at line 1 +set one_shot @honk=99; +ERROR HY000: The SET ONE_SHOT syntax is reserved for purposes internal to the MySQL server diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index f460c573a9d..35e1ddc5ebf 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -577,3 +577,28 @@ create table t1 engine=ndb max_rows=1; drop table t1; + +# +# Test auto_increment +# + +connect (con1,localhost,,,test); +connect (con2,localhost,,,test); + +create table t1 + (counter int(64) NOT NULL auto_increment, + datavalue char(40) default 'XXXX', + primary key (counter) + ) ENGINE=ndbcluster; + +connection con1; +insert into t1 (datavalue) values ('newval'); +insert into t1 (datavalue) values ('newval'); +select * from t1 order by counter; +insert into t1 (datavalue) select datavalue from t1 where counter < 100; +select * from t1 order by counter; +connection con2; +insert into t1 (datavalue) select datavalue from t1 where counter < 100; +select * from t1 order by counter; + +drop table t1; diff --git a/mysql-test/t/rpl_rewrite_db.test b/mysql-test/t/rpl_rewrite_db.test index b6118854037..b77d57294fa 100644 --- a/mysql-test/t/rpl_rewrite_db.test +++ b/mysql-test/t/rpl_rewrite_db.test @@ -73,5 +73,8 @@ connection slave; # The empty line last comes from the end line field in the file select * from rewrite.t1; +drop database rewrite; + connection master; drop table t1; + diff --git a/mysql-test/t/user_var.test b/mysql-test/t/user_var.test index b907f21056c..a288b7ef708 100644 --- a/mysql-test/t/user_var.test +++ b/mysql-test/t/user_var.test @@ -111,3 +111,11 @@ select FIELD( @var,'1it','Hit') as my_column; select @v, coercibility(@v); set @v1=null, @v2=1, @v3=1.1, @v4=now(); select coercibility(@v1),coercibility(@v2),coercibility(@v3),coercibility(@v4); + +# +# Bug #9286 SESSION/GLOBAL should be disallowed for user variables +# +--error 1064 +set session @honk=99; +--error 1105 +set one_shot @honk=99; diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index b5493622b70..6390a1b50b5 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -722,26 +722,28 @@ Remark: Returns a new TupleId to the application. 
Uint64 Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize) { - DEBUG_TRACE("getAutoIncrementValue"); + DBUG_ENTER("getAutoIncrementValue"); const char * internalTableName = internalizeTableName(aTableName); Ndb_local_table_info *info= theDictionary->get_local_table_info(internalTableName, false); if (info == 0) - return ~0; + DBUG_RETURN(~0); const NdbTableImpl *table= info->m_table_impl; Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize); - return tupleId; + DBUG_PRINT("info", ("value %u", tupleId)); + DBUG_RETURN(tupleId); } Uint64 Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize) { - DEBUG_TRACE("getAutoIncrementValue"); + DBUG_ENTER("getAutoIncrementValue"); if (aTable == 0) - return ~0; + DBUG_RETURN(~0); const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable); Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize); - return tupleId; + DBUG_PRINT("info", ("value %u", tupleId)); + DBUG_RETURN(tupleId); } Uint64 @@ -756,39 +758,45 @@ Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize) Uint64 Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize) { + DBUG_ENTER("getTupleIdFromNdb"); if ( theFirstTupleId[aTableId] != theLastTupleId[aTableId] ) { theFirstTupleId[aTableId]++; - return theFirstTupleId[aTableId]; + DBUG_PRINT("info", ("next cached value %u", theFirstTupleId[aTableId])); + DBUG_RETURN(theFirstTupleId[aTableId]); } else // theFirstTupleId == theLastTupleId { - return opTupleIdOnNdb(aTableId, cacheSize, 0); + DBUG_PRINT("info",("reading %u values from database", + (cacheSize == 0) ? 1 : cacheSize)); + DBUG_RETURN(opTupleIdOnNdb(aTableId, (cacheSize == 0) ? 1 : cacheSize, 0)); } } Uint64 Ndb::readAutoIncrementValue(const char* aTableName) { - DEBUG_TRACE("readtAutoIncrementValue"); + DBUG_ENTER("readtAutoIncrementValue"); const NdbTableImpl* table = theDictionary->getTable(aTableName); if (table == 0) { theError= theDictionary->getNdbError(); - return ~0; + DBUG_RETURN(~0); } Uint64 tupleId = readTupleIdFromNdb(table->m_tableId); - return tupleId; + DBUG_PRINT("info", ("value %u", tupleId)); + DBUG_RETURN(tupleId); } Uint64 Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable) { - DEBUG_TRACE("readtAutoIncrementValue"); + DBUG_ENTER("readtAutoIncrementValue"); if (aTable == 0) - return ~0; + DBUG_RETURN(~0); const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable); Uint64 tupleId = readTupleIdFromNdb(table->m_tableId); - return tupleId; + DBUG_PRINT("info", ("value %u", tupleId)); + DBUG_RETURN(tupleId); } Uint64 diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index e88f76ef513..fb2e0d673cd 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -1501,7 +1501,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, if (col->m_autoIncrement) { if (haveAutoIncrement) { m_error.code = 4335; - return -1; + DBUG_RETURN(-1); } haveAutoIncrement = true; autoIncrementValue = col->m_autoIncrementInitialValue; @@ -1622,7 +1622,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, ret= createTable(&tSignal, ptr); if (ret) - return ret; + DBUG_RETURN(ret); if (haveAutoIncrement) { if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), diff --git a/sql/field.cc b/sql/field.cc index 740c027350d..d73257a673f 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -4914,7 +4914,7 @@ void Field_datetime::sql_type(String &res) const int Field_string::store(const char *from,uint length,CHARSET_INFO *cs) { - int error= 0; 
+ int error= 0, well_formed_error; uint32 not_used; char buff[80]; String tmpstr(buff,sizeof(buff), &my_charset_bin); @@ -4942,7 +4942,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs) from,from+length, field_length/ field_charset->mbmaxlen, - &error); + &well_formed_error); memcpy(ptr,from,copy_length); if (copy_length < field_length) // Append spaces if shorter field_charset->cset->fill(field_charset,ptr+copy_length, @@ -5545,7 +5545,7 @@ void Field_blob::put_length(char *pos, uint32 length) int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) { - int error= 0; + int error= 0, well_formed_error; if (!length) { bzero(ptr,Field_blob::pack_length()); @@ -5580,7 +5580,7 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) from,from + min(length, copy_length), copy_length, - &error); + &well_formed_error); if (copy_length < length) error= 1; Field_blob::store_length(copy_length); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 22e7da76e1f..cfb7a61c864 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2946,7 +2946,11 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows) DBUG_PRINT("enter", ("rows: %d", (int)rows)); m_rows_inserted= 0; - m_rows_to_insert= rows; + if (rows == 0) + /* We don't know how many will be inserted, guess */ + m_rows_to_insert= m_autoincrement_prefetch; + else + m_rows_to_insert= rows; /* Calculate how many rows that should be inserted @@ -3955,6 +3959,10 @@ longlong ha_ndbcluster::get_auto_increment() DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); Ndb *ndb= get_ndb(); + + if (m_rows_inserted > m_rows_to_insert) + /* We guessed too low */ + m_rows_to_insert+= m_autoincrement_prefetch; int cache_size= (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? m_rows_to_insert - m_rows_inserted diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 0d1021ec9af..3f133a473ac 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1995,6 +1995,11 @@ typedef struct key_field_t { // Used when finding key fields uint level; uint optimize; bool eq_func; + /* + If true, the condition this struct represents will not be satisfied + when val IS NULL. + */ + bool null_rejecting; } KEY_FIELD; /* Values in optimize */ @@ -2011,6 +2016,12 @@ typedef struct key_field_t { // Used when finding key fields that are internally transformed to something like: SELECT * FROM t1 WHERE t1.key=outer_ref_field or t1.key IS NULL + + KEY_FIELD::null_rejecting is processed as follows: + result has null_rejecting=true if it is set for both ORed references. 
+ for example: + (t2.key = t1.field OR t2.key = t1.field) -> null_rejecting=true + (t2.key = t1.field OR t2.key <=> t1.field) -> null_rejecting=false */ static KEY_FIELD * @@ -2044,6 +2055,8 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, KEY_OPTIMIZE_EXISTS) | ((old->optimize | new_fields->optimize) & KEY_OPTIMIZE_REF_OR_NULL)); + old->null_rejecting= old->null_rejecting && + new_fields->null_rejecting; } } else if (old->eq_func && new_fields->eq_func && @@ -2055,6 +2068,8 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, KEY_OPTIMIZE_EXISTS) | ((old->optimize | new_fields->optimize) & KEY_OPTIMIZE_REF_OR_NULL)); + old->null_rejecting= old->null_rejecting && + new_fields->null_rejecting; } else if (old->eq_func && new_fields->eq_func && (old->val->is_null() || new_fields->val->is_null())) @@ -2065,6 +2080,8 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, /* Remember the NOT NULL value */ if (old->val->is_null()) old->val= new_fields->val; + /* The referred expression can be NULL: */ + old->null_rejecting= false; } else { @@ -2119,7 +2136,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, */ static void -add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, +add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, Field *field,bool eq_func,Item **value, uint num_values, table_map usable_tables) { @@ -2221,12 +2238,29 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, (*key_fields)->val= *value; (*key_fields)->level= and_level; (*key_fields)->optimize= exists_optimize; + /* + If the condition has form "tbl.keypart = othertbl.field" and + othertbl.field can be NULL, there will be no matches if othertbl.field + has NULL value. 
+ */ + (*key_fields)->null_rejecting= (cond->functype() == Item_func::EQ_FUNC) && + ((*value)->type() == Item::FIELD_ITEM) && + ((Item_field*)*value)->field->maybe_null(); (*key_fields)++; } - +/* + SYNOPSIS + add_key_fields() + key_fields Add KEY_FIELD entries to this array (and move the + pointer) + and_level AND-level (a value that is different for every n-way + AND operation) + cond Condition to analyze + usable_tables Value to pass to add_key_field +*/ static void -add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, +add_key_fields(KEY_FIELD **key_fields,uint *and_level, COND *cond, table_map usable_tables) { if (cond->type() == Item_func::COND_ITEM) @@ -2238,20 +2272,20 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, { Item *item; while ((item=li++)) - add_key_fields(stat,key_fields,and_level,item,usable_tables); + add_key_fields(key_fields,and_level,item,usable_tables); for (; org_key_fields != *key_fields ; org_key_fields++) org_key_fields->level= *and_level; } else { (*and_level)++; - add_key_fields(stat,key_fields,and_level,li++,usable_tables); + add_key_fields(key_fields,and_level,li++,usable_tables); Item *item; while ((item=li++)) { KEY_FIELD *start_key_fields= *key_fields; (*and_level)++; - add_key_fields(stat,key_fields,and_level,item,usable_tables); + add_key_fields(key_fields,and_level,item,usable_tables); *key_fields=merge_key_fields(org_key_fields,start_key_fields, *key_fields,++(*and_level)); } @@ -2363,6 +2397,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) keyuse.keypart_map= (key_part_map) 1 << part; keyuse.used_tables=key_field->val->used_tables(); keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL; + keyuse.null_rejecting= key_field->null_rejecting; VOID(insert_dynamic(keyuse_array,(gptr) &keyuse)); } } @@ -2456,8 +2491,22 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) /* Update keyuse array with all possible keys we can use to fetch rows - join_tab is a array in tablenr_order - stat is a reference array in 'prefered' order. + + SYNOPSIS + update_ref_and_keys() + thd + keyuse OUT Put here ordered array of KEYUSE structures + join_tab Array in tablenr_order + tables Number of tables in join + cond WHERE condition (note that the function analyzes + join_tab[i]->on_expr too) + normal_tables tables not inner w.r.t some outer join (ones for which + we can make ref access based on the WHERE clause) + select_lex current SELECT + + RETURN + 0 - OK + 1 - Out of memory.
*/ static bool @@ -2478,7 +2527,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, return TRUE; if (cond) { - add_key_fields(join_tab,&end,&and_level,cond,normal_tables); + add_key_fields(&end,&and_level,cond,normal_tables); for (; field != end ; field++) { add_key_part(keyuse,field); @@ -2492,7 +2541,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, { if (join_tab[i].on_expr) { - add_key_fields(join_tab,&end,&and_level,join_tab[i].on_expr, + add_key_fields(&end,&and_level,join_tab[i].on_expr, join_tab[i].table->map); } } @@ -3232,6 +3281,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, } j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length); j->ref.key_err=1; + j->ref.null_rejecting= 0; keyuse=org_keyuse; store_key **ref_key= j->ref.key_copy; @@ -3256,6 +3306,8 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, uint maybe_null= test(keyinfo->key_part[i].null_bit); j->ref.items[i]=keyuse->val; // Save for cond removal + if (keyuse->null_rejecting) + j->ref.null_rejecting |= 1 << i; keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables; if (!keyuse->used_tables && !(join->select_options & SELECT_DESCRIBE)) @@ -3410,12 +3462,98 @@ make_simple_join(JOIN *join,TABLE *tmp_table) } +inline void add_cond_and_fix(Item **e1, Item *e2) +{ + if (*e1) + { + Item *res; + if ((res= new Item_cond_and(*e1, e2))) + { + *e1= res; + res->quick_fix_field(); + } + } + else + *e1= e2; +} + + +/* + Add to join_tab->select_cond[i] "table.field IS NOT NULL" conditions we've + inferred from ref/eq_ref access performed. + + SYNOPSIS + add_not_null_conds() + join Join to process + + NOTES + This function is a part of "Early NULL-values filtering for ref access" + optimization. + + Example of this optimization: + For query SELECT * FROM t1,t2 WHERE t2.key=t1.field + and plan " any-access(t1), ref(t2.key=t1.field) " + add "t1.field IS NOT NULL" to t1's table condition. + Description of the optimization: + + We look through equalities choosen to perform ref/eq_ref access, + pick equalities that have form "tbl.part_of_key = othertbl.field" + (where othertbl is a non-const table and othertbl.field may be NULL) + and add them to conditions on correspoding tables (othertbl in this + example). + + This optimization doesn't affect the choices that ref, range, or join + optimizer make. This was intentional because this was added after 4.1 + was GA. + + Implementation overview + 1. update_ref_and_keys() accumulates info about null-rejecting + predicates in in KEY_FIELD::null_rejecting + 1.1 add_key_part saves these to KEYUSE. + 2. create_ref_for_key copies them to TABLE_REF. + 3. add_not_null_conds adds "x IS NOT NULL" to join_tab->select_cond of + appropiate JOIN_TAB members. 
+*/ + +static void add_not_null_conds(JOIN *join) +{ + DBUG_ENTER("add_not_null_conds"); + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + if ((tab->type == JT_REF || tab->type == JT_REF_OR_NULL) && + !tab->table->maybe_null) + { + for (uint keypart= 0; keypart < tab->ref.key_parts; keypart++) + { + if (tab->ref.null_rejecting & (1 << keypart)) + { + Item *item= tab->ref.items[keypart]; + DBUG_ASSERT(item->type() == Item::FIELD_ITEM); + Item_field *not_null_item= (Item_field*)item; + JOIN_TAB *referred_tab= not_null_item->field->table->reginfo.join_tab; + Item_func_isnotnull *notnull; + if (!(notnull= new Item_func_isnotnull(not_null_item))) + DBUG_VOID_RETURN; + + notnull->quick_fix_field(); + DBUG_EXECUTE("where",print_where(notnull, + referred_tab->table->table_name);); + add_cond_and_fix(&referred_tab->select_cond, notnull); + } + } + } + } + DBUG_VOID_RETURN; +} + static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { DBUG_ENTER("make_join_select"); if (select) { + add_not_null_conds(join); table_map used_tables; if (join->tables > 1) cond->update_used_tables(); // Tablenr may have changed @@ -3472,13 +3610,14 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } if (tmp) { - DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); SQL_SELECT *sel=tab->select=(SQL_SELECT*) join->thd->memdup((gptr) select, sizeof(SQL_SELECT)); if (!sel) DBUG_RETURN(1); // End of memory - tab->select_cond=sel->cond=tmp; + add_cond_and_fix(&tab->select_cond, tmp); + sel->cond= tab->select_cond; sel->head=tab->table; + DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); if (tab->quick) { /* Use quick key read if it's a constant and it's not used diff --git a/sql/sql_select.h b/sql/sql_select.h index 4ea7e1b23e7..ab3b442ef74 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -31,6 +31,11 @@ typedef struct keyuse_t { uint key, keypart, optimize; key_part_map keypart_map; ha_rows ref_table_rows; + /* + If true, the comparison this value was created from will not be + satisfied if val has NULL 'value'. + */ + bool null_rejecting; } KEYUSE; class store_key; @@ -45,6 +50,11 @@ typedef struct st_table_ref byte *key_buff2; // key_buff+key_length store_key **key_copy; // Item **items; // val()'s for each keypart + /* + (null_rejecting & (1<option_type= OPT_GLOBAL; } | LOCAL_SYM { Lex->option_type= OPT_SESSION; } | SESSION_SYM { Lex->option_type= OPT_SESSION; } - | ONE_SHOT_SYM { Lex->option_type= OPT_SESSION; Lex->one_shot_set= 1; } ; +option_type: + /* empty */ {} + | ONE_SHOT_SYM { Lex->option_type= OPT_SESSION; Lex->one_shot_set= 1; } + ; + opt_var_type: /* empty */ { $$=OPT_SESSION; } | GLOBAL_SYM { $$=OPT_GLOBAL; } @@ -5386,34 +5395,37 @@ opt_var_ident_type: | SESSION_SYM '.' 
{ $$=OPT_SESSION; } ; +sys_option_value: + internal_variable_name equal set_expr_or_default + { + LEX *lex=Lex; + lex->var_list.push_back(new set_var(lex->option_type, $1.var, + &$1.base_name, $3)); + } + | TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types + { + LEX *lex=Lex; + LEX_STRING tmp; + tmp.str=0; + tmp.length=0; + lex->var_list.push_back(new set_var(lex->option_type, + find_sys_var("tx_isolation"), + &tmp, + new Item_int((int32) $4))); + } + ; + option_value: '@' ident_or_text equal expr { Lex->var_list.push_back(new set_var_user(new Item_func_set_user_var($2,$4))); } - | internal_variable_name equal set_expr_or_default - { - LEX *lex=Lex; - lex->var_list.push_back(new set_var(lex->option_type, $1.var, - &$1.base_name, $3)); - } | '@' '@' opt_var_ident_type internal_variable_name equal set_expr_or_default - { - LEX *lex=Lex; - lex->var_list.push_back(new set_var((enum_var_type) $3, $4.var, - &$4.base_name, $6)); - } - | TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types - { - LEX *lex=Lex; - LEX_STRING tmp; - tmp.str=0; - tmp.length=0; - lex->var_list.push_back(new set_var(lex->option_type, - find_sys_var("tx_isolation"), - &tmp, - new Item_int((int32) $4))); - } + { + LEX *lex=Lex; + lex->var_list.push_back(new set_var((enum_var_type) $3, $4.var, + &$4.base_name, $6)); + } | charset old_or_new_charset_name_or_default { THD *thd= YYTHD; diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c index dbe3a24324e..cbbd035c631 100644 --- a/strings/ctype-mb.c +++ b/strings/ctype-mb.c @@ -276,7 +276,7 @@ uint my_well_formed_len_mb(CHARSET_INFO *cs, const char *b, const char *e, if ((mblen= cs->cset->mb_wc(cs, &wc, (uchar*) b, (uchar*) e)) <= 0) { - *error= 1; + *error= b < e ? 1 : 0; break; } b+= mblen;
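
The final ctype-mb.c hunk changes my_well_formed_len_mb() so that running out of input is no longer reported as an error: *error is set only when decoding stops while bytes remain (b < e). Below is a minimal standalone sketch of that boundary rule, assuming a simplified UTF-8 decoder in place of cs->cset->mb_wc(); the helper names are hypothetical and this is not the MySQL implementation.

#include <stdio.h>
#include <stddef.h>

/*
  Decode one UTF-8 character from [b, e).  Returns its length in bytes,
  0 if the buffer is empty, -1 if the bytes do not form a complete,
  valid sequence.  A simplified stand-in for cs->cset->mb_wc().
*/
static int mb_len_utf8(const unsigned char *b, const unsigned char *e)
{
  size_t len, i;
  if (b >= e)
    return 0;                            /* no bytes left: not an error */
  if (b[0] < 0x80)
    len= 1;
  else if ((b[0] & 0xE0) == 0xC0)
    len= 2;
  else if ((b[0] & 0xF0) == 0xE0)
    len= 3;
  else if ((b[0] & 0xF8) == 0xF0)
    len= 4;
  else
    return -1;                           /* invalid lead byte */
  if ((size_t) (e - b) < len)
    return -1;                           /* sequence cut off */
  for (i= 1; i < len; i++)
    if ((b[i] & 0xC0) != 0x80)
      return -1;                         /* bad continuation byte */
  return (int) len;
}

/*
  Same shape as my_well_formed_len_mb(): scan at most 'pos' characters and
  return how many bytes are well formed.  The point of the fix is the
  "*error= b < e ? 1 : 0" line: simply running out of bytes is no longer
  an error; only stopping while bytes remain is.
*/
static size_t well_formed_len(const char *b, const char *e,
                              size_t pos, int *error)
{
  const char *b_start= b;
  *error= 0;
  while (pos)
  {
    int mblen= mb_len_utf8((const unsigned char *) b,
                           (const unsigned char *) e);
    if (mblen <= 0)
    {
      *error= b < e ? 1 : 0;
      break;
    }
    b+= mblen;
    pos--;
  }
  return (size_t) (b - b_start);
}

int main(void)
{
  int err;
  const char ok[]=  "ab\xC3\xA9";        /* ends on a complete sequence  */
  const char cut[]= "ab\xC3";            /* sequence cut off at the end  */
  size_t n;

  n= well_formed_len(ok, ok + 4, 10, &err);
  printf("%zu %d\n", n, err);            /* 4 0: short input, no error   */
  n= well_formed_len(cut, cut + 3, 10, &err);
  printf("%zu %d\n", n, err);            /* 2 1: broken sequence, error  */
  return 0;
}

Earlier in the patch, the os0file.c and srv0start.c hunks add OS_FILE_OPEN_RETRY so that only the first ibdata file retries the lock (one-second sleeps, up to 100 attempts) before startup gives up, and the close() call moves out of os_file_lock() into its callers. The following rough standalone sketch shows that retry shape, assuming the fcntl(F_SETLK) whole-file advisory locking that os_file_lock() wraps on POSIX; the function names here are illustrative, not InnoDB code.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
  Take an exclusive advisory lock on the whole file.  Returns 0 on
  success, -1 on failure.  Illustrative only.
*/
static int lock_whole_file(int fd)
{
  struct flock lk;
  lk.l_type= F_WRLCK;
  lk.l_whence= SEEK_SET;
  lk.l_start= 0;
  lk.l_len= 0;                           /* 0 means "to end of file" */
  return fcntl(fd, F_SETLK, &lk) == -1 ? -1 : 0;
}

/*
  Shape of the OS_FILE_OPEN_RETRY behaviour: if the first lock attempt on
  the first data file fails, retry once a second for up to 100 seconds
  before giving up.  The descriptor stays open across retries and is
  closed only on final failure, matching the close() moved from
  os_file_lock() into its callers.
*/
static int open_first_data_file(const char *name)
{
  int i;
  int fd= open(name, O_RDWR);
  if (fd == -1)
    return -1;
  if (lock_whole_file(fd) == 0)
    return fd;

  fprintf(stderr, "Retrying to lock the first data file\n");
  for (i= 0; i < 100; i++)
  {
    sleep(1);                            /* os_thread_sleep(1000000) */
    if (lock_whole_file(fd) == 0)
      return fd;
  }
  fprintf(stderr, "Unable to open the first data file\n");
  close(fd);
  return -1;
}

int main(int argc, char **argv)
{
  int fd;
  if (argc < 2)
  {
    fprintf(stderr, "usage: %s <datafile>\n", argv[0]);
    return 2;
  }
  fd= open_first_data_file(argv[1]);
  if (fd == -1)
    return 1;
  close(fd);
  return 0;
}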