diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index 8fa70aecb6a..d378276a0a3 100644 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -45,6 +45,7 @@ cxx_warnings="$global_warnings -Woverloaded-virtual -Wsign-promo -Wreorder -Wcto alpha_cflags="-mcpu=ev6 -Wa,-mev6" # Not used yet pentium_cflags="-mcpu=pentiumpro" +pentium64_cflags="-mcpu=nocona -m64" ppc_cflags="-mpowerpc -mcpu=powerpc" sparc_cflags="" diff --git a/BUILD/compile-amd64-debug-max b/BUILD/compile-amd64-debug-max new file mode 100755 index 00000000000..46291db48b1 --- /dev/null +++ b/BUILD/compile-amd64-debug-max @@ -0,0 +1,15 @@ +#! /bin/sh +path=`dirname $0` +. "$path/SETUP.sh" +amd64_cflags="-DBIG_TABLES" +amd64_cxx_flags="-DBIG_TABLES" +amd64_configs="" +base_cxxflags="$amd64_cxx_flags $base_cxxflags" +extra_flags="$amd64_cflags $debug_cflags" +c_warnings="$c_warnings $debug_extra_warnings" +cxx_warnings="$cxx_warnings $debug_extra_warnings" +extra_configs="$amd64_configs $debug_configs" + +extra_configs="$extra_configs --with-berkeley-db --with-innodb --without-isam --with-embedded-server --with-openssl --with-raid --with-vio --with-ndbcluster" + +. "$path/FINISH.sh" diff --git a/BUILD/compile-amd64-max b/BUILD/compile-amd64-max new file mode 100755 index 00000000000..087dba7f94a --- /dev/null +++ b/BUILD/compile-amd64-max @@ -0,0 +1,17 @@ +#! /bin/sh + +path=`dirname $0` +. "$path/SETUP.sh" +amd64_cflags="-DBIG_TABLES" +amd64_cxx_flags="-DBIG_TABLES" +amd64_configs="" +base_cxxflags="$amd64_cxx_flags $base_cxxflags" +extra_flags="$amd64_cflags $fast_cflags -g" +extra_configs="$amd64_configs" +#strip=yes + +extra_configs="$extra_configs --with-innodb --with-berkeley-db \ + --with-embedded-server --enable-thread-safe-client \ + --with-openssl --with-vio --with-raid --with-ndbcluster" + +. "$path/FINISH.sh" diff --git a/BUILD/compile-pentium64-valgrind-max b/BUILD/compile-pentium64-valgrind-max new file mode 100644 index 00000000000..7f78089c3e8 --- /dev/null +++ b/BUILD/compile-pentium64-valgrind-max @@ -0,0 +1,29 @@ +#! /bin/sh + +path=`dirname $0` +. "$path/SETUP.sh" + +extra_flags="$pentium64_cflags $debug_cflags -USAFEMALLOC -UFORCE_INIT_OF_VARS -DHAVE_purify -DMYSQL_SERVER_SUFFIX=-valgrind-max" +c_warnings="$c_warnings $debug_extra_warnings" +cxx_warnings="$cxx_warnings $debug_extra_warnings" +extra_configs="$pentium_configs $debug_configs" + +# We want to test isam when building with valgrind +extra_configs="$extra_configs --with-berkeley-db --with-innodb --with-isam --with-embedded-server --with-openssl --with-vio --with-raid --with-ndbcluster" + +. "$path/FINISH.sh" + +if test -z "$just_print" +then + set +v +x + echo "\ +****************************************************************************** +Note that by default BUILD/compile-pentium64-valgrind-max calls 'configure' with +--enable-assembler. When Valgrind detects an error involving an assembly +function (for example an uninitialized value used as an argument of an +assembly function), Valgrind will not print the stacktrace and 'valgrind +--gdb-attach=yes' will not work either. If you need a stacktrace in those +cases, you have to run BUILD/compile-pentium64-valgrind-max with the +--disable-assembler argument. +******************************************************************************" +fi diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 61590de940d..b9e4478dd1a 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -94,9 +94,12 @@ jcole@sarvik.tfr.cafe.ee jcole@tetra.spaceapes.com jimw@mysql.com joerg@mysql.com +jon@gigan. 
+joreland@bk-internal.mysql.com joreland@mysql.com jorge@linux.jorge.mysql.com jplindst@t41.(none) +kaa@polly.local kaj@work.mysql.com kent@mysql.com konstantin@mysql.com @@ -107,6 +110,7 @@ lenz@mysql.com magnus@neptunus.(none) magnus@shellback.(none) marko@hundin.mysql.fi +marty@shark. mats@mysql.com matt@mysql.com matthias@three.local.lan diff --git a/config/ac-macros/ha_federated.m4 b/config/ac-macros/ha_federated.m4 new file mode 100644 index 00000000000..4383a9d8d55 --- /dev/null +++ b/config/ac-macros/ha_federated.m4 @@ -0,0 +1,29 @@ +dnl --------------------------------------------------------------------------- +dnl Macro: MYSQL_CHECK_FEDERATED +dnl Sets HAVE_FEDERATED_DB if --with-federated-storage-engine is used +dnl --------------------------------------------------------------------------- +AC_DEFUN([MYSQL_CHECK_FEDERATED], [ + AC_ARG_WITH([federated-storage-engine], + [ + --with-federated-storage-engine + Enable the MySQL Federated Storage Engine], + [federateddb="$withval"], + [federateddb=no]) + AC_MSG_CHECKING([for MySQL federated storage engine]) + + case "$federateddb" in + yes ) + AC_DEFINE([HAVE_FEDERATED_DB], [1], [Define to enable Federated Handler]) + AC_MSG_RESULT([yes]) + [federateddb=yes] + ;; + * ) + AC_MSG_RESULT([no]) + [federateddb=no] + ;; + esac + +]) +dnl --------------------------------------------------------------------------- +dnl END OF MYSQL_CHECK_FEDERATED SECTION +dnl --------------------------------------------------------------------------- diff --git a/configure.in b/configure.in index 748f86bdbb7..9e1e57630d4 100644 --- a/configure.in +++ b/configure.in @@ -17,7 +17,7 @@ SHARED_LIB_VERSION=14:0:0 # ndb version NDB_VERSION_MAJOR=5 NDB_VERSION_MINOR=0 -NDB_VERSION_BUILD=2 +NDB_VERSION_BUILD=3 NDB_VERSION_STATUS="alpha" # Set all version vars based on $VERSION. How do we do this more elegant ? @@ -38,6 +38,7 @@ sinclude(config/ac-macros/compiler_flag.m4) sinclude(config/ac-macros/ha_archive.m4) sinclude(config/ac-macros/ha_berkeley.m4) sinclude(config/ac-macros/ha_example.m4) +sinclude(config/ac-macros/ha_federated.m4) sinclude(config/ac-macros/ha_innodb.m4) sinclude(config/ac-macros/ha_isam.m4) sinclude(config/ac-macros/ha_ndbcluster.m4) @@ -748,7 +749,7 @@ AC_CHECK_HEADERS(fcntl.h float.h floatingpoint.h ieeefp.h limits.h \ strings.h string.h synch.h sys/mman.h sys/socket.h netinet/in.h arpa/inet.h \ sys/timeb.h sys/types.h sys/un.h sys/vadvise.h sys/wait.h term.h \ unistd.h utime.h sys/utime.h termio.h termios.h sched.h crypt.h alloca.h \ - sys/ioctl.h malloc.h sys/malloc.h linux/config.h sys/resource.h sys/param.h) + sys/ioctl.h malloc.h sys/malloc.h sys/ipc.h sys/shm.h linux/config.h sys/resource.h sys/param.h) #-------------------------------------------------------------------- # Check for system libraries. 
Adds the library to $LIBS @@ -775,6 +776,22 @@ AC_CHECK_FUNC(crypt, AC_DEFINE([HAVE_CRYPT], [1], [crypt])) AC_CHECK_FUNC(sem_init, , AC_CHECK_LIB(posix4, sem_init)) MYSQL_CHECK_ZLIB_WITH_COMPRESS +# For large pages support +if test "$IS_LINUX" = "true" +then + # For SHM_HUGETLB on Linux + AC_CHECK_DECLS(SHM_HUGETLB, + AC_DEFINE([HAVE_LARGE_PAGES], [1], + [Define if you have large pages support]) + AC_DEFINE([HUGETLB_USE_PROC_MEMINFO], [1], + [Define if /proc/meminfo shows the huge page size (Linux only)]) + , , + [ +#include <sys/shm.h> + ] + ) +fi + #-------------------------------------------------------------------- # Check for TCP wrapper support #-------------------------------------------------------------------- @@ -2420,6 +2437,7 @@ MYSQL_CHECK_EXAMPLEDB MYSQL_CHECK_ARCHIVEDB MYSQL_CHECK_CSVDB MYSQL_CHECK_NDBCLUSTER +MYSQL_CHECK_FEDERATED # If we have threads generate some library functions and test programs sql_server_dirs= diff --git a/dbug/dbug.c b/dbug/dbug.c index d21b4e7801a..91b7e7b6c4c 100644 --- a/dbug/dbug.c +++ b/dbug/dbug.c @@ -978,7 +978,7 @@ uint length) { fprintf(_db_fp_, "%s: ", state->func); } - sprintf(dbuff,"%s: Memory: %lx Bytes: (%d)\n", + sprintf(dbuff,"%s: Memory: 0x%lx Bytes: (%d)\n", keyword,(ulong) memory, length); (void) fputs(dbuff,_db_fp_); diff --git a/heap/_check.c b/heap/_check.c index a745aee48bf..4316a9926f7 100644 --- a/heap/_check.c +++ b/heap/_check.c @@ -123,7 +123,7 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, blength, records)) != i) { - DBUG_PRINT("error",("Record in wrong link: Link %d Record: %lx Record-link %d", i,hash_info->ptr_to_rec,rec_link)); + DBUG_PRINT("error",("Record in wrong link: Link %d Record: 0x%lx Record-link %d", i,hash_info->ptr_to_rec,rec_link)); error=1; } else @@ -180,7 +180,7 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, key_length, SEARCH_FIND | SEARCH_SAME, &not_used)) { error= 1; - DBUG_PRINT("error",("Record in wrong link: key: %d Record: %lx\n", + DBUG_PRINT("error",("Record in wrong link: key: %d Record: 0x%lx\n", keynr, recpos)); } else diff --git a/heap/hp_create.c b/heap/hp_create.c index d296c9db28b..0580c178498 100644 --- a/heap/hp_create.c +++ b/heap/hp_create.c @@ -77,14 +77,31 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef, case HA_KEYTYPE_INT8: keyinfo->seg[j].flag|= HA_SWAP_KEY; break; - case HA_KEYTYPE_VARBINARY: + case HA_KEYTYPE_VARBINARY1: /* Case-insensitiveness is handled in coll->hash_sort */ - keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT; + keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1; /* fall_through */ - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: if (!my_binary_compare(keyinfo->seg[j].charset)) keyinfo->flag|= HA_END_SPACE_KEY; keyinfo->flag|= HA_VAR_LENGTH_KEY; + /* Save number of bytes used to store length */ + keyinfo->seg[j].bit_start= 1; + break; + case HA_KEYTYPE_VARBINARY2: + /* Case-insensitiveness is handled in coll->hash_sort */ + /* fall_through */ + case HA_KEYTYPE_VARTEXT2: + if (!my_binary_compare(keyinfo->seg[j].charset)) + keyinfo->flag|= HA_END_SPACE_KEY; + keyinfo->flag|= HA_VAR_LENGTH_KEY; + /* Save number of bytes used to store length */ + keyinfo->seg[j].bit_start= 2; + /* + Make future comparison simpler by only having to check for + one type + */ + keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1; break; default: break; diff --git a/heap/hp_delete.c b/heap/hp_delete.c index 4adefde1fe9..5287533ae0a 100644 --- a/heap/hp_delete.c +++ b/heap/hp_delete.c @@ -24,7 +24,7 @@ int heap_delete(HP_INFO *info, const byte 
*record) HP_SHARE *share=info->s; HP_KEYDEF *keydef, *end, *p_lastinx; DBUG_ENTER("heap_delete"); - DBUG_PRINT("enter",("info: %lx record: %lx",info,record)); + DBUG_PRINT("enter",("info: %lx record: 0x%lx",info,record)); test_active(info); @@ -139,7 +139,7 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, /* Save for heap_rnext/heap_rprev */ info->current_hash_ptr=last_ptr; info->current_ptr = last_ptr ? last_ptr->ptr_to_rec : 0; - DBUG_PRINT("info",("Corrected current_ptr to point at: %lx", + DBUG_PRINT("info",("Corrected current_ptr to point at: 0x%lx", info->current_ptr)); } empty=pos; diff --git a/heap/hp_hash.c b/heap/hp_hash.c index 7e5f92bc7b8..3121ef71fb0 100644 --- a/heap/hp_hash.c +++ b/heap/hp_hash.c @@ -271,18 +271,21 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) } cs->coll->hash_sort(cs, pos, length, &nr, &nr2); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { CHARSET_INFO *cs= seg->charset; + uint pack_length= 2; /* Key packing is constant */ uint length= uint2korr(pos); if (cs->mbmaxlen > 1) { uint char_length; - char_length= my_charpos(cs, pos +2, pos +2 + length, + char_length= my_charpos(cs, pos +pack_length, + pos +pack_length + length, seg->length/cs->mbmaxlen); set_if_smaller(length, char_length); } - cs->coll->hash_sort(cs, pos+2, length, &nr, &nr2); + cs->coll->hash_sort(cs, pos+pack_length, length, &nr, &nr2); + key+= pack_length; } else { @@ -293,6 +296,7 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) } } } + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); return((ulong) nr); } @@ -300,7 +304,6 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) { - /*register*/ ulong nr=1, nr2=4; HA_KEYSEG *seg,*endseg; @@ -327,18 +330,20 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) } cs->coll->hash_sort(cs, pos, char_length, &nr, &nr2); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { CHARSET_INFO *cs= seg->charset; - uint length= uint2korr(pos); + uint pack_length= seg->bit_start; + uint length= (pack_length == 1 ? 
(uint) *(uchar*) pos : uint2korr(pos)); if (cs->mbmaxlen > 1) { uint char_length; - char_length= my_charpos(cs, pos + 2 , pos + 2 + length, + char_length= my_charpos(cs, pos + pack_length, + pos + pack_length + length, seg->length/cs->mbmaxlen); set_if_smaller(length, char_length); } - cs->coll->hash_sort(cs, pos+2, length, &nr, &nr2); + cs->coll->hash_sort(cs, pos+pack_length, length, &nr, &nr2); } else { @@ -349,7 +354,8 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) } } } - return((ulong) nr); + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); + return(nr); } #else @@ -392,10 +398,13 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) { seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { + uint pack_length= 2; /* Key packing is constant */ uint length= uint2korr(pos); - seg->charset->hash_sort(seg->charset, pos+2, length, &nr, NULL); + seg->charset->hash_sort(seg->charset, pos+pack_length, length, &nr, + NULL); + key+= pack_length; } else { @@ -406,7 +415,8 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) } } } - return((ulong) nr); + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); + return(nr); } /* Calc hashvalue for a key in a record */ @@ -418,7 +428,7 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++) { - uchar *pos=(uchar*) rec+seg->start,*end=pos+seg->length; + uchar *pos=(uchar*) rec+seg->start; if (seg->null_bit) { if (rec[seg->null_pos] & seg->null_bit) @@ -431,13 +441,16 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) { seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { - uint length= uint2korr(pos); - seg->charset->hash_sort(seg->charset, pos+2, length, &nr, NULL); + uint pack_length= seg->bit_start; + uint length= (pack_length == 1 ? 
(uint) *(uchar*) pos : uint2korr(pos)); + seg->charset->hash_sort(seg->charset, pos+pack_length, + length, &nr, NULL); } else { + uchar *end= pos+seg->length; for ( ; pos < end ; pos++) { nr *=16777619; @@ -445,7 +458,8 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) } } } - return((ulong) nr); + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); + return(nr); } #endif @@ -510,13 +524,25 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2, pos2,char_length2, 0)) return 1; } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { - uchar *pos1= (uchar*)rec1 + seg->start; - uchar *pos2= (uchar*)rec2 + seg->start; - uint char_length1= uint2korr(pos1); - uint char_length2= uint2korr(pos2); + uchar *pos1= (uchar*) rec1 + seg->start; + uchar *pos2= (uchar*) rec2 + seg->start; + uint char_length1, char_length2; + uint pack_length= seg->bit_start; CHARSET_INFO *cs= seg->charset; + if (pack_length == 1) + { + char_length1= (uint) *(uchar*) pos1++; + char_length2= (uint) *(uchar*) pos2++; + } + else + { + char_length1= uint2korr(pos1); + char_length2= uint2korr(pos2); + pos1+= 2; + pos2+= 2; + } if (cs->mbmaxlen > 1) { uint char_length= seg->length / cs->mbmaxlen; @@ -527,8 +553,8 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2, } if (cs->coll->strnncollsp(seg->charset, - pos1+2, char_length1, - pos2+2, char_length2, + pos1, char_length1, + pos2, char_length2, seg->flag & HA_END_SPACE_ARE_EQUAL ? 0 : diff_if_only_endspace_difference)) return 1; @@ -585,28 +611,31 @@ int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key) (uchar*) key, char_length_key, 0)) return 1; } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { uchar *pos= (uchar*) rec + seg->start; CHARSET_INFO *cs= seg->charset; - uint char_length_rec= uint2korr(pos); + uint pack_length= seg->bit_start; + uint char_length_rec= (pack_length == 1 ? (uint) *(uchar*) pos : + uint2korr(pos)); + /* Key segments are always packed with 2 bytes */ uint char_length_key= uint2korr(key); - + pos+= pack_length; + key+= 2; /* skip key pack length */ if (cs->mbmaxlen > 1) { uint char_length= seg->length / cs->mbmaxlen; - char_length_key= my_charpos(cs, key+2, key +2 + char_length_key, + char_length_key= my_charpos(cs, key, key + char_length_key, char_length); set_if_smaller(char_length_key, seg->length); - char_length_rec= my_charpos(cs, pos +2 , pos + 2 + char_length_rec, + char_length_rec= my_charpos(cs, pos, pos + char_length_rec, char_length); set_if_smaller(char_length_rec, seg->length); } - if (cs->coll->strnncollsp(seg->charset, - (uchar*) pos+2, char_length_rec, - (uchar*) key+2, char_length_key, 0)) + (uchar*) pos, char_length_rec, + (uchar*) key, char_length_key, 0)) return 1; } else @@ -638,6 +667,8 @@ void hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec) char_length / cs->mbmaxlen); set_if_smaller(char_length, seg->length); /* QQ: ok to remove? */ } + if (seg->type == HA_KEYTYPE_VARTEXT1) + char_length+= seg->bit_start; /* Copy also length */ memcpy(key,rec+seg->start,(size_t) char_length); key+= char_length; } @@ -707,11 +738,13 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key, { uchar *pos= (uchar*) rec + seg->start; uint length= seg->length; - uint tmp_length= uint2korr(pos); + uint pack_length= seg->bit_start; + uint tmp_length= (pack_length == 1 ? 
(uint) *(uchar*) pos : + uint2korr(pos)); CHARSET_INFO *cs= seg->charset; char_length= length/cs->mbmaxlen; - pos+=2; /* Skip VARCHAR length */ + pos+= pack_length; /* Skip VARCHAR length */ set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); diff --git a/heap/hp_open.c b/heap/hp_open.c index 1fa832208fb..fd937229b0d 100644 --- a/heap/hp_open.c +++ b/heap/hp_open.c @@ -63,7 +63,7 @@ HP_INFO *heap_open(const char *name, int mode) #ifndef DBUG_OFF info->opt_flag= READ_CHECK_USED; /* Check when changing */ #endif - DBUG_PRINT("exit",("heap: %lx reclength: %d records_in_block: %d", + DBUG_PRINT("exit",("heap: 0x%lx reclength: %d records_in_block: %d", info,share->reclength,share->block.records_in_block)); DBUG_RETURN(info); } @@ -82,7 +82,7 @@ HP_SHARE *hp_find_named_heap(const char *name) info= (HP_SHARE*) pos->data; if (!strcmp(name, info->name)) { - DBUG_PRINT("exit", ("Old heap_database: %lx",info)); + DBUG_PRINT("exit", ("Old heap_database: 0x%lx",info)); DBUG_RETURN(info); } } diff --git a/heap/hp_rkey.c b/heap/hp_rkey.c index a88139bbdee..f5f22a877a1 100644 --- a/heap/hp_rkey.c +++ b/heap/hp_rkey.c @@ -23,7 +23,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, HP_SHARE *share= info->s; HP_KEYDEF *keyinfo= share->keydef + inx; DBUG_ENTER("heap_rkey"); - DBUG_PRINT("enter",("base: %lx inx: %d",info,inx)); + DBUG_PRINT("enter",("base: 0x%lx inx: %d",info,inx)); if ((uint) inx >= share->keys) { diff --git a/heap/hp_rrnd.c b/heap/hp_rrnd.c index cce3ce24e51..4daa3a06377 100644 --- a/heap/hp_rrnd.c +++ b/heap/hp_rrnd.c @@ -29,7 +29,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); - DBUG_PRINT("enter",("info: %lx pos: %lx",info,pos)); + DBUG_PRINT("enter",("info: 0x%lx pos: %lx",info,pos)); info->lastinx= -1; if (!(info->current_ptr= pos)) @@ -44,7 +44,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) } info->update=HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); - DBUG_PRINT("exit",("found record at %lx",info->current_ptr)); + DBUG_PRINT("exit",("found record at 0x%lx",info->current_ptr)); info->current_hash_ptr=0; /* Can't use rnext */ DBUG_RETURN(0); } /* heap_rrnd */ @@ -64,7 +64,7 @@ int heap_rrnd_old(register HP_INFO *info, byte *record, ulong pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); - DBUG_PRINT("enter",("info: %lx pos: %ld",info,pos)); + DBUG_PRINT("enter",("info: 0x%lx pos: %ld",info,pos)); info->lastinx= -1; if (pos == (ulong) -1) @@ -98,7 +98,7 @@ end: } info->update=HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); - DBUG_PRINT("exit",("found record at %lx",info->current_ptr)); + DBUG_PRINT("exit",("found record at 0x%lx",info->current_ptr)); info->current_hash_ptr=0; /* Can't use rnext */ DBUG_RETURN(0); } /* heap_rrnd */ diff --git a/heap/hp_write.c b/heap/hp_write.c index 577c52a007d..171998e9125 100644 --- a/heap/hp_write.c +++ b/heap/hp_write.c @@ -138,7 +138,7 @@ static byte *next_free_record_pos(HP_SHARE *info) pos=info->del_link; info->del_link= *((byte**) pos); info->deleted--; - DBUG_PRINT("exit",("Used old position: %lx",pos)); + DBUG_PRINT("exit",("Used old position: 0x%lx",pos)); DBUG_RETURN(pos); } if (!(block_pos=(info->records % info->block.records_in_block))) diff --git a/include/m_ctype.h b/include/m_ctype.h index 
d7a4af08b0f..50673b6d494 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -85,7 +85,7 @@ enum my_lex_states { MY_LEX_START, MY_LEX_CHAR, MY_LEX_IDENT, MY_LEX_IDENT_SEP, MY_LEX_IDENT_START, - MY_LEX_REAL, MY_LEX_HEX_NUMBER, + MY_LEX_REAL, MY_LEX_HEX_NUMBER, MY_LEX_BIN_NUMBER, MY_LEX_CMP_OP, MY_LEX_LONG_CMP_OP, MY_LEX_STRING, MY_LEX_COMMENT, MY_LEX_END, MY_LEX_OPERATOR_OR_IDENT, MY_LEX_NUMBER_IDENT, MY_LEX_INT_OR_REAL, MY_LEX_REAL_OR_POINT, MY_LEX_BOOL, MY_LEX_EOL, MY_LEX_ESCAPE, diff --git a/include/my_base.h b/include/my_base.h index 88d3ec0b270..4d043cf6b5b 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -181,8 +181,13 @@ enum ha_base_keytype { HA_KEYTYPE_INT24=12, HA_KEYTYPE_UINT24=13, HA_KEYTYPE_INT8=14, - HA_KEYTYPE_VARTEXT=15, /* Key is sorted as letters */ - HA_KEYTYPE_VARBINARY=16 /* Key is sorted as unsigned chars */ + /* Varchar (0-255 bytes) with length packed with 1 byte */ + HA_KEYTYPE_VARTEXT1=15, /* Key is sorted as letters */ + HA_KEYTYPE_VARBINARY1=16, /* Key is sorted as unsigned chars */ + /* Varchar (0-65535 bytes) with length packed with 2 bytes */ + HA_KEYTYPE_VARTEXT2=17, /* Key is sorted as letters */ + HA_KEYTYPE_VARBINARY2=18, /* Key is sorted as unsigned chars */ + HA_KEYTYPE_BIT=19 }; #define HA_MAX_KEYTYPE 31 /* Must be log2-1 */ @@ -232,6 +237,7 @@ enum ha_base_keytype { Only needed for internal temporary tables. */ #define HA_END_SPACE_ARE_EQUAL 512 +#define HA_BIT_PART 1024 /* optionbits for database */ #define HA_OPTION_PACK_RECORD 1 @@ -390,4 +396,6 @@ typedef ulong ha_rows; #define MAX_FILE_SIZE LONGLONG_MAX #endif +#define HA_VARCHAR_PACKLENGTH(field_length) ((field_length) < 256 ? 1 :2) + #endif /* _my_base_h */ diff --git a/include/my_handler.h b/include/my_handler.h index d81c4590f8e..9ddc0c61eee 100644 --- a/include/my_handler.h +++ b/include/my_handler.h @@ -34,6 +34,8 @@ typedef struct st_HA_KEYSEG /* Key-portion */ uint32 start; /* Start of key in record */ uint32 null_pos; /* position to NULL indicator */ CHARSET_INFO *charset; + uint8 bit_length; /* Length of bit part */ + uint16 bit_pos; /* Position to bit part */ } HA_KEYSEG; #define get_key_length(length,key) \ @@ -64,6 +66,21 @@ typedef struct st_HA_KEYSEG /* Key-portion */ { *(key)=255; mi_int2store((key)+1,(length)); (key)+=3; } \ } +#define get_rec_bits(bit_ptr, bit_ofs, bit_len) \ + (((((uint16) (bit_ptr)[1] << 8) | (uint16) (bit_ptr)[0]) >> (bit_ofs)) & \ + ((1 << (bit_len)) - 1)) + +#define set_rec_bits(bits, bit_ptr, bit_ofs, bit_len) \ +{ \ + (bit_ptr)[0]= ((bit_ptr)[0] & ((1 << (bit_ofs)) - 1)) | \ + ((bits) << (bit_ofs)); \ + if ((bit_ofs) + (bit_len) > 8) \ + (bit_ptr)[1]= ((bits) & ((1 << (bit_len)) - 1)) >> (8 - (bit_ofs)); \ +} + +#define clr_rec_bits(bit_ptr, bit_ofs, bit_len) \ + set_rec_bits(0, bit_ptr, bit_ofs, bit_len) + extern int mi_compare_text(CHARSET_INFO *, uchar *, uint, uchar *, uint , my_bool, my_bool); extern int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, diff --git a/include/my_net.h b/include/my_net.h index 7b42afa1f3a..71914964e46 100644 --- a/include/my_net.h +++ b/include/my_net.h @@ -72,6 +72,11 @@ C_MODE_START #define in_addr_t uint32 #endif +/* On some operating systems (e.g. 
Solaris) INADDR_NONE is not defined */ +#ifndef INADDR_NONE +#define INADDR_NONE -1 /* Error value from inet_addr */ +#endif + /* Thread safe or portable version of some functions */ void my_inet_ntoa(struct in_addr in, char *buf); diff --git a/include/my_sys.h b/include/my_sys.h index 3de3ec9687c..e630c9bdbba 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -168,6 +168,16 @@ extern char *my_strdup_with_length(const byte *from, uint length, #define TRASH(A,B) /* nothing */ #endif +#ifdef HAVE_LARGE_PAGES +extern uint my_get_large_page_size(void); +extern gptr my_large_malloc(uint size, myf my_flags); +extern void my_large_free(gptr ptr, myf my_flags); +#else +#define my_get_large_page_size() (0) +#define my_large_malloc(A,B) my_malloc_lock((A),(B)) +#define my_large_free(A,B) my_free_lock((A),(B)) +#endif /* HAVE_LARGE_PAGES */ + #ifdef HAVE_ALLOCA #if defined(_AIX) && !defined(__GNUC__) && !defined(_AIX43) #pragma alloca @@ -213,6 +223,11 @@ extern int (*fatal_error_handler_hook)(uint my_err, const char *str, myf MyFlags); extern uint my_file_limit; +#ifdef HAVE_LARGE_PAGES +extern my_bool my_use_large_pages; +extern uint my_large_page_size; +#endif + /* charsets */ extern CHARSET_INFO *default_charset_info; extern CHARSET_INFO *all_charsets[256]; diff --git a/include/mysql.h b/include/mysql.h index cb7b4629ec0..58c314207c1 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -145,7 +145,8 @@ enum mysql_option MYSQL_OPT_PROTOCOL, MYSQL_SHARED_MEMORY_BASE_NAME, MYSQL_OPT_READ_TIMEOUT, MYSQL_OPT_WRITE_TIMEOUT, MYSQL_OPT_USE_RESULT, MYSQL_OPT_USE_REMOTE_CONNECTION, MYSQL_OPT_USE_EMBEDDED_CONNECTION, - MYSQL_OPT_GUESS_CONNECTION, MYSQL_SET_CLIENT_IP, MYSQL_SECURE_AUTH + MYSQL_OPT_GUESS_CONNECTION, MYSQL_SET_CLIENT_IP, MYSQL_SECURE_AUTH, + MYSQL_REPORT_DATA_TRUNCATION }; struct st_mysql_options { @@ -186,6 +187,8 @@ struct st_mysql_options { char *client_ip; /* Refuse client connecting to server if it uses old (pre-4.1.1) protocol */ my_bool secure_auth; + /* 0 - never report, 1 - always report (default) */ + my_bool report_data_truncation; /* function pointers for local infile support */ int (*local_infile_init)(void **, const char *, void *); diff --git a/include/mysql_com.h b/include/mysql_com.h index 6a03fe90eb5..59b2ee743ec 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -210,6 +210,7 @@ enum enum_field_types { MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, MYSQL_TYPE_DATE, MYSQL_TYPE_TIME, MYSQL_TYPE_DATETIME, MYSQL_TYPE_YEAR, MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + MYSQL_TYPE_BIT, MYSQL_TYPE_ENUM=247, MYSQL_TYPE_SET=248, MYSQL_TYPE_TINY_BLOB=249, @@ -250,6 +251,7 @@ enum enum_field_types { MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, #define FIELD_TYPE_CHAR MYSQL_TYPE_TINY #define FIELD_TYPE_INTERVAL MYSQL_TYPE_ENUM #define FIELD_TYPE_GEOMETRY MYSQL_TYPE_GEOMETRY +#define FIELD_TYPE_BIT MYSQL_TYPE_BIT /* Shutdown/kill enums and constants */ diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c index 3930ea93889..89f851709db 100644 --- a/innobase/buf/buf0buf.c +++ b/innobase/buf/buf0buf.c @@ -331,33 +331,43 @@ buf_page_is_corrupted( } } #endif - old_checksum = buf_calc_page_old_checksum(read_buf); + + /* If we use checksums validation, make additional check before returning + TRUE to ensure that the checksum is not equal to BUF_NO_CHECKSUM_MAGIC which + might be stored by InnoDB with checksums disabled. 
+ Otherwise, skip checksum calculation and return FALSE */ + + if (srv_use_checksums) { + old_checksum = buf_calc_page_old_checksum(read_buf); - old_checksum_field = mach_read_from_4(read_buf + UNIV_PAGE_SIZE + old_checksum_field = mach_read_from_4(read_buf + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM); - /* There are 2 valid formulas for old_checksum_field: - 1. Very old versions of InnoDB only stored 8 byte lsn to the start - and the end of the page. - 2. Newer InnoDB versions store the old formula checksum there. */ + /* There are 2 valid formulas for old_checksum_field: + 1. Very old versions of InnoDB only stored 8 byte lsn to the start + and the end of the page. + 2. Newer InnoDB versions store the old formula checksum there. */ - if (old_checksum_field != mach_read_from_4(read_buf + FIL_PAGE_LSN) - && old_checksum_field != old_checksum) { + if (old_checksum_field != mach_read_from_4(read_buf + FIL_PAGE_LSN) + && old_checksum_field != old_checksum + && old_checksum_field != BUF_NO_CHECKSUM_MAGIC) { - return(TRUE); - } + return(TRUE); + } - checksum = buf_calc_page_new_checksum(read_buf); - checksum_field = mach_read_from_4(read_buf + FIL_PAGE_SPACE_OR_CHKSUM); + checksum = buf_calc_page_new_checksum(read_buf); + checksum_field = mach_read_from_4(read_buf + FIL_PAGE_SPACE_OR_CHKSUM); - /* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id - (always equal to 0), to FIL_PAGE_SPACE_SPACE_OR_CHKSUM */ + /* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id + (always equal to 0), to FIL_PAGE_SPACE_SPACE_OR_CHKSUM */ - if (checksum_field != 0 && checksum_field != checksum) { - - return(TRUE); - } + if (checksum_field != 0 && checksum_field != checksum + && checksum_field != BUF_NO_CHECKSUM_MAGIC) { + return(TRUE); + } + } + return(FALSE); } @@ -379,8 +389,10 @@ buf_page_print( ut_print_buf(stderr, read_buf, UNIV_PAGE_SIZE); fputs("InnoDB: End of page dump\n", stderr); - checksum = buf_calc_page_new_checksum(read_buf); - old_checksum = buf_calc_page_old_checksum(read_buf); + checksum = srv_use_checksums ? + buf_calc_page_new_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC; + old_checksum = srv_use_checksums ? + buf_calc_page_old_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC; ut_print_timestamp(stderr); fprintf(stderr, @@ -548,7 +560,7 @@ buf_pool_init( } /*----------------------------------------*/ } else { - buf_pool->frame_mem = ut_malloc_low( + buf_pool->frame_mem = os_mem_alloc_large( UNIV_PAGE_SIZE * (n_frames + 1), TRUE, FALSE); } diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c index aff4fe92a71..a0ca614d9b3 100644 --- a/innobase/buf/buf0flu.c +++ b/innobase/buf/buf0flu.c @@ -448,7 +448,8 @@ buf_flush_init_for_writing( /* Store the new formula checksum */ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, - buf_calc_page_new_checksum(page)); + srv_use_checksums ? + buf_calc_page_new_checksum(page) : BUF_NO_CHECKSUM_MAGIC); /* We overwrite the first 4 bytes of the end lsn field to store the old formula checksum. Since it depends also on the field @@ -456,7 +457,8 @@ buf_flush_init_for_writing( new formula checksum. */ mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM, - buf_calc_page_old_checksum(page)); + srv_use_checksums ? 
+ buf_calc_page_old_checksum(page) : BUF_NO_CHECKSUM_MAGIC); } /************************************************************************ diff --git a/innobase/include/Makefile.am b/innobase/include/Makefile.am index 102d25566da..eb1e3b72877 100644 --- a/innobase/include/Makefile.am +++ b/innobase/include/Makefile.am @@ -49,7 +49,7 @@ noinst_HEADERS = btr0btr.h btr0btr.ic btr0cur.h btr0cur.ic \ thr0loc.h thr0loc.ic trx0purge.h trx0purge.ic trx0rec.h \ trx0rec.ic trx0roll.h trx0roll.ic trx0rseg.h trx0rseg.ic \ trx0sys.h trx0sys.ic trx0trx.h trx0trx.ic trx0types.h \ - trx0undo.h trx0undo.ic univ.i \ + trx0undo.h trx0undo.ic trx0xa.h univ.i \ usr0sess.h usr0sess.ic usr0types.h ut0byte.h ut0byte.ic \ ut0dbg.h ut0lst.h ut0mem.h ut0mem.ic ut0rnd.h ut0rnd.ic \ ut0sort.h ut0ut.h ut0ut.ic diff --git a/innobase/include/buf0buf.h b/innobase/include/buf0buf.h index 8df1211327f..5ee323f1b1e 100644 --- a/innobase/include/buf0buf.h +++ b/innobase/include/buf0buf.h @@ -52,6 +52,8 @@ Created 11/5/1995 Heikki Tuuri /* Modes for buf_page_get_known_nowait */ #define BUF_MAKE_YOUNG 51 #define BUF_KEEP_OLD 52 +/* Magic value to use instead of checksums when they are disabled */ +#define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL extern buf_pool_t* buf_pool; /* The buffer pool of the database */ extern ibool buf_debug_prints;/* If this is set TRUE, the program diff --git a/innobase/include/os0proc.h b/innobase/include/os0proc.h index d0d3cf82e38..b0b72e18675 100644 --- a/innobase/include/os0proc.h +++ b/innobase/include/os0proc.h @@ -12,6 +12,11 @@ Created 9/30/1995 Heikki Tuuri #include "univ.i" +#ifdef UNIV_LINUX +#include <sys/ipc.h> +#include <sys/shm.h> +#endif + typedef void* os_process_t; typedef unsigned long int os_process_id_t; @@ -27,6 +32,10 @@ page size of an Intel x86 processor. We cannot use AWE with 2 MB or 4 MB pages. */ #define OS_AWE_X86_PAGE_SIZE 4096 +extern ibool os_use_large_pages; +/* Large page size. This may be a boot-time option on some platforms */ +extern ulint os_large_page_size; + /******************************************************************** Windows AWE support. Tries to enable the "lock pages in memory" privilege for the current process so that the current process can allocate memory-locked @@ -103,6 +112,25 @@ os_mem_alloc_nocache( /* out: allocated memory */ ulint n); /* in: number of bytes */ /******************************************************************** +Allocates large pages memory. */ + +void* +os_mem_alloc_large( +/*=================*/ + /* out: allocated memory */ + ulint n, /* in: number of bytes */ + ibool set_to_zero, /* in: TRUE if allocated memory should be set + to zero if UNIV_SET_MEM_TO_ZERO is defined */ + ibool assert_on_error); /* in: if TRUE, we crash mysqld if the memory + cannot be allocated */ +/******************************************************************** +Frees large pages memory. */ + +void +os_mem_free_large( +/*=================*/ +void *ptr); /* in: number of bytes */ +/******************************************************************** Sets the priority boost for threads released from waiting within the current process. */ diff --git a/innobase/include/page0page.ic b/innobase/include/page0page.ic index 1d5ea337031..a63b5ca4238 100644 --- a/innobase/include/page0page.ic +++ b/innobase/include/page0page.ic @@ -152,6 +152,19 @@ page_header_reset_last_insert( MLOG_2BYTES, mtr); } +/**************************************************************** +Determine whether the page is in new-style compact format. 
*/ +UNIV_INLINE +ibool +page_is_comp( +/*=========*/ + /* out: TRUE if the page is in compact format + FALSE if it is in old-style format */ + page_t* page) /* in: index page */ +{ + return(!!(page_header_get_field(page, PAGE_N_HEAP) & 0x8000)); +} + /**************************************************************** Gets the first record on the page. */ UNIV_INLINE @@ -513,19 +526,6 @@ page_dir_calc_reserved_space( / PAGE_DIR_SLOT_MIN_N_OWNED); } -/**************************************************************** -Determine whether the page is in new-style compact format. */ -UNIV_INLINE -ibool -page_is_comp( -/*=========*/ - /* out: TRUE if the page is in compact format - FALSE if it is in old-style format */ - page_t* page) /* in: index page */ -{ - return(!!(page_header_get_field(page, PAGE_N_HEAP) & 0x8000)); -} - /**************************************************************** Gets the pointer to the next record on the page. */ UNIV_INLINE diff --git a/innobase/include/rem0rec.ic b/innobase/include/rem0rec.ic index db938aa9fa5..6c3dabf04a2 100644 --- a/innobase/include/rem0rec.ic +++ b/innobase/include/rem0rec.ic @@ -40,8 +40,18 @@ most significant bytes and bits are written below less significant. (1) byte offset (2) bit usage within byte downward from - origin -> 1 8 bits pointer to next record (relative) - 2 8 bits pointer to next record (relative) + origin -> 1 8 bits relative offset of next record + 2 8 bits relative offset of next record + the relative offset is an unsigned 16-bit + integer: + (offset_of_next_record + - offset_of_this_record) mod 64Ki, + where mod is the modulo as a non-negative + number; + we can calculate the offset of the next + record with the formula: + relative_offset + offset_of_this_record + mod UNIV_PAGE_SIZE 3 3 bits status: 000=conventional record 001=node pointer record (inside B-tree) @@ -252,26 +262,37 @@ UNIV_INLINE ulint rec_get_next_offs( /*==============*/ - /* out: the page offset of the next chained record */ + /* out: the page offset of the next chained record, or + 0 if none */ rec_t* rec, /* in: physical record */ ibool comp) /* in: TRUE=compact page format */ { + ulint field_value; + + ut_ad(REC_NEXT_MASK == 0xFFFFUL); + ut_ad(REC_NEXT_SHIFT == 0); + + field_value = mach_read_from_2(rec - REC_NEXT); + if (comp) { - lint ret = (int16_t) rec_get_bit_field_2(rec, REC_NEXT, - REC_NEXT_MASK, REC_NEXT_SHIFT); #if UNIV_PAGE_SIZE <= 32768 - /* with 64 KiB page size, the pointer will "wrap around", - and the following assertions are invalid */ - ut_ad(ret + ut_align_offset(rec, UNIV_PAGE_SIZE) < - UNIV_PAGE_SIZE); + /* Note that for 64 KiB pages, field_value can 'wrap around' + and the debug assertion is not valid */ + + ut_ad((int16_t)field_value + + ut_align_offset(rec, UNIV_PAGE_SIZE) + < UNIV_PAGE_SIZE); #endif - return(ret ? 
ut_align_offset(rec + ret, UNIV_PAGE_SIZE) : 0); - } - else { - ulint ret = rec_get_bit_field_2(rec, REC_NEXT, - REC_NEXT_MASK, REC_NEXT_SHIFT); - ut_ad(ret < UNIV_PAGE_SIZE); - return(ret); + if (field_value == 0) { + + return(0); + } + + return(ut_align_offset(rec + field_value, UNIV_PAGE_SIZE)); + } else { + ut_ad(field_value < UNIV_PAGE_SIZE); + + return(field_value); } } @@ -284,21 +305,31 @@ rec_set_next_offs( /*==============*/ rec_t* rec, /* in: physical record */ ibool comp, /* in: TRUE=compact page format */ - ulint next) /* in: offset of the next record */ + ulint next) /* in: offset of the next record, or 0 if none */ { ut_ad(rec); ut_ad(UNIV_PAGE_SIZE > next); + ut_ad(REC_NEXT_MASK == 0xFFFFUL); + ut_ad(REC_NEXT_SHIFT == 0); if (comp) { - rec_set_bit_field_2(rec, next - ? (next - ut_align_offset(rec, UNIV_PAGE_SIZE)) -#ifdef UNIV_DEBUG /* avoid an assertion failure */ - & (REC_NEXT_MASK >> REC_NEXT_SHIFT) -#endif - : 0, REC_NEXT, REC_NEXT_MASK, REC_NEXT_SHIFT); + ulint field_value; + + if (next) { + /* The following two statements calculate + next - offset_of_rec mod 64Ki, where mod is the modulo + as a non-negative number */ + + field_value = (ulint)((lint)next + - (lint)ut_align_offset(rec, UNIV_PAGE_SIZE)); + field_value &= REC_NEXT_MASK; + } else { + field_value = 0; + } + + mach_write_to_2(rec - REC_NEXT, field_value); } else { - rec_set_bit_field_2(rec, next, - REC_NEXT, REC_NEXT_MASK, REC_NEXT_SHIFT); + mach_write_to_2(rec - REC_NEXT, next); } } diff --git a/innobase/include/srv0srv.h b/innobase/include/srv0srv.h index d4cc7d8222f..84b7d14ca00 100644 --- a/innobase/include/srv0srv.h +++ b/innobase/include/srv0srv.h @@ -107,6 +107,7 @@ extern ibool srv_very_fast_shutdown; /* if this TRUE, do not flush the extern ibool srv_innodb_status; extern ibool srv_use_doublewrite_buf; +extern ibool srv_use_checksums; extern ibool srv_set_thread_priorities; extern int srv_query_thread_priority; diff --git a/innobase/include/trx0roll.h b/innobase/include/trx0roll.h index 893e5af6c01..9d025da4a5f 100644 --- a/innobase/include/trx0roll.h +++ b/innobase/include/trx0roll.h @@ -105,11 +105,19 @@ trx_rollback( Rollback or clean up transactions which have no user session. If the transaction already was committed, then we clean up a possible insert undo log. If the transaction was not yet committed, then we roll it back. -Note: this is done in a background thread */ +Note: this is done in a background thread. */ -void * -trx_rollback_or_clean_all_without_sess(void *); -/*============================================*/ +#ifndef __WIN__ +void* +#else +ulint +#endif +trx_rollback_or_clean_all_without_sess( +/*===================================*/ + /* out: a dummy parameter */ + void* arg __attribute__((unused))); + /* in: a dummy parameter required by + os_thread_create */ /******************************************************************** Finishes a transaction rollback. 
*/ diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c index f42f0eb8c72..5eefd32c8a6 100644 --- a/innobase/log/log0recv.c +++ b/innobase/log/log0recv.c @@ -2937,7 +2937,6 @@ recv_recovery_from_checkpoint_finish(void) #ifndef UNIV_LOG_DEBUG recv_sys_free(); #endif - if (srv_force_recovery < SRV_FORCE_NO_TRX_UNDO) { os_thread_create(trx_rollback_or_clean_all_without_sess, (void *)&i, &recovery_thread_id); diff --git a/innobase/os/os0proc.c b/innobase/os/os0proc.c index 2f155788420..167aed93de7 100644 --- a/innobase/os/os0proc.c +++ b/innobase/os/os0proc.c @@ -69,6 +69,10 @@ byte* os_awe_window; ulint os_awe_window_size; #endif +ibool os_use_large_pages; +/* Large page size. This may be a boot-time option on some platforms */ +ulint os_large_page_size; + /******************************************************************** Windows AWE support. Tries to enable the "lock pages in memory" privilege for the current process so that the current process can allocate memory-locked @@ -515,6 +519,89 @@ os_mem_alloc_nocache( #endif } +/******************************************************************** +Allocates large pages memory. */ + +void* +os_mem_alloc_large( +/*=================*/ + /* out: allocated memory */ + ulint n, /* in: number of bytes */ + ibool set_to_zero, /* in: TRUE if allocated memory should be set + to zero if UNIV_SET_MEM_TO_ZERO is defined */ + ibool assert_on_error) /* in: if TRUE, we crash mysqld if the memory + cannot be allocated */ +{ +#ifdef HAVE_LARGE_PAGES + ulint size; + int shmid; + void *ptr = NULL; + struct shmid_ds buf; + + if (!os_use_large_pages || !os_large_page_size) { + goto skip; + } + +#ifdef UNIV_LINUX + /* Align block size to os_large_page_size */ + size = ((n - 1) & ~(os_large_page_size - 1)) + os_large_page_size; + + shmid = shmget(IPC_PRIVATE, (size_t)size, SHM_HUGETLB | SHM_R | SHM_W); + if (shmid < 0) { + fprintf(stderr, "InnoDB: HugeTLB: Warning: Failed to allocate %lu bytes. " + "errno %d\n", n, errno); + } else { + ptr = shmat(shmid, NULL, 0); + if (ptr == (void *)-1) { + fprintf(stderr, "InnoDB: HugeTLB: Warning: Failed to attach shared memory " + "segment, errno %d\n", errno); + } + /* + Remove the shared memory segment so that it will be automatically freed + after memory is detached or process exits + */ + shmctl(shmid, IPC_RMID, &buf); + } +#endif + + if (ptr) { + if (set_to_zero) { +#ifdef UNIV_SET_MEM_TO_ZERO + memset(ptr, '\0', size); +#endif + } + + return(ptr); + } + + fprintf(stderr, "InnoDB HugeTLB: Warning: Using conventional memory pool\n"); +skip: +#endif /* HAVE_LARGE_PAGES */ + + return(ut_malloc_low(n, set_to_zero, assert_on_error)); +} + +/******************************************************************** +Frees large pages memory. */ + +void +os_mem_free_large( +/*=================*/ + void *ptr) /* in: number of bytes */ +{ +#ifdef HAVE_LARGE_PAGES + if (os_use_large_pages && os_large_page_size +#ifdef UNIV_LINUX + && !shmdt(ptr) +#endif + ) { + return; + } +#endif + + ut_free(ptr); +} + /******************************************************************** Sets the priority boost for threads released from waiting within the current process. 
*/ diff --git a/innobase/srv/srv0srv.c b/innobase/srv/srv0srv.c index 40befae424e..83d4fb4d39d 100644 --- a/innobase/srv/srv0srv.c +++ b/innobase/srv/srv0srv.c @@ -313,6 +313,7 @@ ibool srv_very_fast_shutdown = FALSE; /* if this TRUE, do not flush the ibool srv_innodb_status = FALSE; ibool srv_use_doublewrite_buf = TRUE; +ibool srv_use_checksums = TRUE; ibool srv_set_thread_priorities = TRUE; int srv_query_thread_priority = 0; diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c index 69341a1d7d1..a0e763d7a44 100644 --- a/innobase/srv/srv0start.c +++ b/innobase/srv/srv0start.c @@ -1403,15 +1403,13 @@ NetWare. */ fsp_header_inc_size(0, sum_of_new_sizes, &mtr); mtr_commit(&mtr); - } - if (recv_needed_recovery) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Flushing modified pages from the buffer pool...\n"); - } + /* Immediately write the log record about increased tablespace + size to disk, so that it is durable even if mysqld would crash + quickly */ - log_make_checkpoint_at(ut_dulint_max, TRUE); + log_buffer_flush_to_disk(); + } #ifdef UNIV_LOG_ARCHIVE /* Archiving is always off under MySQL */ diff --git a/innobase/trx/trx0roll.c b/innobase/trx/trx0roll.c index db5e16c7778..e5cffd2a4f3 100644 --- a/innobase/trx/trx0roll.c +++ b/innobase/trx/trx0roll.c @@ -30,9 +30,13 @@ Created 3/26/1996 Heikki Tuuri /* This many pages must be undone before a truncate is tried within rollback */ #define TRX_ROLL_TRUNC_THRESHOLD 1 +/* In crash recovery, the current trx to be rolled back */ +trx_t* trx_roll_crash_recv_trx = NULL; + /* In crash recovery we set this to the undo n:o of the current trx to be rolled back. Then we can print how many % the rollback has progressed. */ ib_longlong trx_roll_max_undo_no; + /* Auxiliary variable which tells the previous progress % we printed */ ulint trx_roll_progress_printed_pct; @@ -332,11 +336,19 @@ trx_savept_take( Rollback or clean up transactions which have no user session. If the transaction already was committed, then we clean up a possible insert undo log. If the transaction was not yet committed, then we roll it back. -Note: this is done in a background thread */ +Note: this is done in a background thread. 
*/ -void * -trx_rollback_or_clean_all_without_sess(void *i) -/*========================================*/ +#ifndef __WIN__ +void* +#else +ulint +#endif +trx_rollback_or_clean_all_without_sess( +/*===================================*/ + /* out: a dummy parameter */ + void* arg __attribute__((unused))) + /* in: a dummy parameter required by + os_thread_create */ { mem_heap_t* heap; que_fork_t* fork; @@ -361,9 +373,9 @@ trx_rollback_or_clean_all_without_sess(void *i) if (UT_LIST_GET_FIRST(trx_sys->trx_list)) { fprintf(stderr, - "InnoDB: Starting rollback of uncommitted transactions\n"); +"InnoDB: Starting in background the rollback of uncommitted transactions\n"); } else { - os_thread_exit(i); + goto leave_function; } loop: heap = mem_heap_create(512); @@ -373,7 +385,6 @@ loop: trx = UT_LIST_GET_FIRST(trx_sys->trx_list); while (trx) { - if ((trx->sess || (trx->conc_state == TRX_NOT_STARTED))) { trx = UT_LIST_GET_NEXT(trx_list, trx); } else if (trx->conc_state == TRX_PREPARED) { @@ -386,17 +397,17 @@ loop: mutex_exit(&kernel_mutex); if (trx == NULL) { + ut_print_timestamp(stderr); fprintf(stderr, - "InnoDB: Rollback of uncommitted transactions completed\n"); + " InnoDB: Rollback of uncommitted transactions completed\n"); mem_heap_free(heap); - - os_thread_exit(i); + + goto leave_function; } trx->sess = trx_dummy_sess; - if (trx->conc_state == TRX_COMMITTED_IN_MEMORY) { fprintf(stderr, "InnoDB: Cleaning up trx with id %lu %lu\n", (ulong) ut_dulint_get_high(trx->id), @@ -425,21 +436,28 @@ loop: ut_a(thr == que_fork_start_command(fork)); + trx_roll_crash_recv_trx = trx; trx_roll_max_undo_no = ut_conv_dulint_to_longlong(trx->undo_no); trx_roll_progress_printed_pct = 0; rows_to_undo = trx_roll_max_undo_no; + if (rows_to_undo > 1000000000) { rows_to_undo = rows_to_undo / 1000000; unit = "M"; } + ut_print_timestamp(stderr); fprintf(stderr, -"InnoDB: Rolling back trx with id %lu %lu, %lu%s rows to undo", +" InnoDB: Rolling back trx with id %lu %lu, %lu%s rows to undo\n", (ulong) ut_dulint_get_high(trx->id), (ulong) ut_dulint_get_low(trx->id), (ulong) rows_to_undo, unit); mutex_exit(&kernel_mutex); + trx->mysql_thread_id = os_thread_get_curr_id(); + + trx->mysql_process_no = os_proc_get_number(); + if (trx->dict_operation) { row_mysql_lock_data_dictionary(trx); } @@ -454,7 +472,7 @@ loop: fprintf(stderr, "InnoDB: Waiting for rollback of trx id %lu to end\n", - (ulong) ut_dulint_get_low(trx->id)); + (ulong) ut_dulint_get_low(trx->id)); os_thread_sleep(100000); mutex_enter(&kernel_mutex); @@ -493,9 +511,23 @@ loop: (ulong) ut_dulint_get_low(trx->id)); mem_heap_free(heap); + trx_roll_crash_recv_trx = NULL; + goto loop; - os_thread_exit(i); /* not reached */ +leave_function: + /* We count the number of threads in os_thread_exit(). A created + thread should always use that to exit and not use return() to exit. */ + + os_thread_exit(NULL); + + /* The following is dummy code to keep the compiler happy: */ + +#ifndef __WIN__ + return(NULL); +#else + return(0); +#endif } /*********************************************************************** @@ -856,16 +888,17 @@ try_again: ut_ad(ut_dulint_cmp(ut_dulint_add(undo_no, 1), trx->undo_no) == 0); /* We print rollback progress info if we are in a crash recovery - and the transaction has at least 1000 row operations to undo */ + and the transaction has at least 1000 row operations to undo. 
*/ - if (srv_is_being_started && trx_roll_max_undo_no > 1000) { - progress_pct = 100 - (ulint) + if (trx == trx_roll_crash_recv_trx && trx_roll_max_undo_no > 1000) { + + progress_pct = 100 - (ulint) ((ut_conv_dulint_to_longlong(undo_no) * 100) / trx_roll_max_undo_no); if (progress_pct != trx_roll_progress_printed_pct) { if (trx_roll_progress_printed_pct == 0) { fprintf(stderr, - "\nInnoDB: Progress in percents: %lu", (ulong) progress_pct); +"\nInnoDB: Progress in percents: %lu\n", (ulong) progress_pct); } else { fprintf(stderr, " %lu", (ulong) progress_pct); diff --git a/innobase/trx/trx0sys.c b/innobase/trx/trx0sys.c index 35e18064329..57166e98f45 100644 --- a/innobase/trx/trx0sys.c +++ b/innobase/trx/trx0sys.c @@ -124,6 +124,22 @@ trx_doublewrite_init( * sizeof(void*)); } +/******************************************************************** +Frees the doublewrite buffer. */ +static +void +trx_doublewrite_free(void) +/*======================*/ +{ + mutex_free(&(trx_doublewrite->mutex)); + + mem_free(trx_doublewrite->buf_block_arr); + ut_free(trx_doublewrite->write_buf_unaligned); + + mem_free(trx_doublewrite); + trx_doublewrite = NULL; +} + /******************************************************************** Marks the trx sys header when we have successfully upgraded to the >= 4.1.x multiple tablespace format. */ @@ -512,6 +528,9 @@ trx_sys_doublewrite_init_or_restore_pages( fil_flush_file_spaces(FIL_TABLESPACE); + if (!srv_use_doublewrite_buf) + trx_doublewrite_free(); + leave_func: ut_free(unaligned_read_buf); } diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 2e9721abbe3..4476a42f8ac 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -3428,7 +3428,7 @@ static void fetch_string_with_conversion(MYSQL_BIND *param, char *value, { char *buffer= (char *)param->buffer; int err= 0; - char *endptr; + char *endptr= value + length; /* This function should support all target buffer types: the rest @@ -3439,39 +3439,33 @@ static void fetch_string_with_conversion(MYSQL_BIND *param, char *value, break; case MYSQL_TYPE_TINY: { - longlong data= my_strntoll(&my_charset_latin1, value, length, 10, - &endptr, &err); + longlong data= my_strtoll10(value, &endptr, &err); *param->error= (IS_TRUNCATED(data, param->is_unsigned, - INT_MIN8, INT_MAX8, UINT_MAX8) | - test(err)); + INT_MIN8, INT_MAX8, UINT_MAX8) || err > 0); *buffer= (uchar) data; break; } case MYSQL_TYPE_SHORT: { - longlong data= my_strntoll(&my_charset_latin1, value, length, 10, - &endptr, &err); + longlong data= my_strtoll10(value, &endptr, &err); *param->error= (IS_TRUNCATED(data, param->is_unsigned, - INT_MIN16, INT_MAX16, UINT_MAX16) | - test(err)); + INT_MIN16, INT_MAX16, UINT_MAX16) || err > 0); shortstore(buffer, (short) data); break; } case MYSQL_TYPE_LONG: { - longlong data= my_strntoll(&my_charset_latin1, value, length, 10, - &endptr, &err); + longlong data= my_strtoll10(value, &endptr, &err); *param->error= (IS_TRUNCATED(data, param->is_unsigned, - INT_MIN32, INT_MAX32, UINT_MAX32) | - test(err)); + INT_MIN32, INT_MAX32, UINT_MAX32) || err > 0); longstore(buffer, (int32) data); break; } case MYSQL_TYPE_LONGLONG: { - longlong data= my_strntoll(&my_charset_latin1, value, length, 10, - &endptr, &err); - *param->error= test(err); + longlong data= my_strtoll10(value, &endptr, &err); + *param->error= param->is_unsigned ? 
err != 0 : + (err > 0 || (err == 0 && data < 0)); longlongstore(buffer, data); break; } @@ -3554,10 +3548,9 @@ static void fetch_string_with_conversion(MYSQL_BIND *param, char *value, */ static void fetch_long_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, - longlong value) + longlong value, my_bool is_unsigned) { char *buffer= (char *)param->buffer; - uint field_is_unsigned= field->flags & UNSIGNED_FLAG; switch (param->buffer_type) { case MYSQL_TYPE_NULL: /* do nothing */ @@ -3579,38 +3572,38 @@ static void fetch_long_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, break; case MYSQL_TYPE_LONGLONG: longlongstore(buffer, value); + *param->error= param->is_unsigned != is_unsigned && value < 0; break; case MYSQL_TYPE_FLOAT: { + /* + We need to store data in the buffer before the truncation check to + workaround Intel FPU executive precision feature. + (See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=323 for details) + AFAIU it does not guarantee to work. + */ float data; - if (field_is_unsigned) - { + if (is_unsigned) data= (float) ulonglong2double(value); - *param->error= (ulonglong) data != (ulonglong) value; - } else - { data= (float) value; - /* printf("%lld, %f\n", value, data); */ - *param->error= value != ((longlong) data); - } floatstore(buffer, data); + *param->error= is_unsigned ? + ((ulonglong) value) != ((ulonglong) (*(float*) buffer)) : + ((longlong) value) != ((longlong) (*(float*) buffer)); break; } case MYSQL_TYPE_DOUBLE: { double data; - if (field_is_unsigned) - { + if (is_unsigned) data= ulonglong2double(value); - *param->error= (ulonglong) data != (ulonglong) value; - } else - { data= value; - *param->error= (longlong) data != value; - } doublestore(buffer, data); + *param->error= is_unsigned ? + ((ulonglong) value) != ((ulonglong) (*(double*) buffer)) : + ((longlong) value) != ((longlong) (*(double*) buffer)); break; } case MYSQL_TYPE_TIME: @@ -3626,7 +3619,7 @@ static void fetch_long_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, default: { char buff[22]; /* Enough for longlong */ - char *end= longlong10_to_str(value, buff, field_is_unsigned ? 10: -10); + char *end= longlong10_to_str(value, buff, is_unsigned ? 10: -10); /* Resort to string conversion which supports all typecodes */ uint length= (uint) (end-buff); @@ -3665,74 +3658,67 @@ static void fetch_float_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, case MYSQL_TYPE_NULL: /* do nothing */ break; case MYSQL_TYPE_TINY: - { + /* + We need to _store_ data in the buffer before the truncation check to + workaround Intel FPU executive precision feature. + (See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=323 for details) + Sic: AFAIU it does not guarantee to work. + */ if (param->is_unsigned) - { - int8 data= (int8) value; - *param->error= (double) data != value; - *buffer= (uchar) data; - } + *buffer= (uint8) value; else - { - uchar data= (uchar) value; - *param->error= (double) data != value; - *buffer= data; - } + *buffer= (int8) value; + *param->error= value != (param->is_unsigned ? (double) ((uint8) *buffer) : + (double) ((int8) *buffer)); break; - } case MYSQL_TYPE_SHORT: - { if (param->is_unsigned) { ushort data= (ushort) value; - *param->error= (double) data != value; shortstore(buffer, data); } else { short data= (short) value; - *param->error= (double) data != value; shortstore(buffer, data); } + *param->error= value != (param->is_unsigned ? 
(double) (*(ushort*) buffer): + (double) (*(short*) buffer)); break; - } case MYSQL_TYPE_LONG: - { if (param->is_unsigned) { uint32 data= (uint32) value; - *param->error= (double) data != value; longstore(buffer, data); } else { int32 data= (int32) value; - *param->error= (double) data != value; longstore(buffer, data); } - break; - } + *param->error= value != (param->is_unsigned ? (double) (*(uint32*) buffer): + (double) (*(int32*) buffer)); + break; case MYSQL_TYPE_LONGLONG: - { if (param->is_unsigned) { ulonglong data= (ulonglong) value; - *param->error= (double) data != value; longlongstore(buffer, data); } else { longlong data= (longlong) value; - *param->error= (double) data != value; longlongstore(buffer, data); } + *param->error= value != (param->is_unsigned ? + (double) (*(ulonglong*) buffer) : + (double) (*(longlong*) buffer)); break; - } case MYSQL_TYPE_FLOAT: { float data= (float) value; - *param->error= data != value; floatstore(buffer, data); + *param->error= (*(float*) buffer) != value; break; } case MYSQL_TYPE_DOUBLE: @@ -3813,8 +3799,9 @@ static void fetch_datetime_with_conversion(MYSQL_BIND *param, case MYSQL_TYPE_DOUBLE: { ulonglong value= TIME_to_ulonglong(time); - return fetch_float_with_conversion(param, field, - ulonglong2double(value), DBL_DIG); + fetch_float_with_conversion(param, field, + ulonglong2double(value), DBL_DIG); + break; } case MYSQL_TYPE_TINY: case MYSQL_TYPE_SHORT: @@ -3823,7 +3810,8 @@ static void fetch_datetime_with_conversion(MYSQL_BIND *param, case MYSQL_TYPE_LONGLONG: { longlong value= (longlong) TIME_to_ulonglong(time); - return fetch_long_with_conversion(param, field, value); + fetch_long_with_conversion(param, field, value, TRUE); + break; } default: { @@ -3870,7 +3858,7 @@ static void fetch_result_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, /* sic: we need to cast to 'signed char' as 'char' may be unsigned */ longlong data= field_is_unsigned ? (longlong) value : (longlong) (signed char) value; - fetch_long_with_conversion(param, field, data); + fetch_long_with_conversion(param, field, data, 0); *row+= 1; break; } @@ -3880,7 +3868,7 @@ static void fetch_result_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, short value= sint2korr(*row); longlong data= field_is_unsigned ? (longlong) (unsigned short) value : (longlong) value; - fetch_long_with_conversion(param, field, data); + fetch_long_with_conversion(param, field, data, 0); *row+= 2; break; } @@ -3890,14 +3878,15 @@ static void fetch_result_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, long value= sint4korr(*row); longlong data= field_is_unsigned ? 
(longlong) (unsigned long) value : (longlong) value; - fetch_long_with_conversion(param, field, data); + fetch_long_with_conversion(param, field, data, 0); *row+= 4; break; } case MYSQL_TYPE_LONGLONG: { longlong value= (longlong)sint8korr(*row); - fetch_long_with_conversion(param, field, value); + fetch_long_with_conversion(param, field, value, + field->flags & UNSIGNED_FLAG); *row+= 8; break; } @@ -4140,13 +4129,13 @@ static my_bool is_binary_compatible(enum enum_field_types type1, enum enum_field_types type2) { static const enum enum_field_types - range1[]= { MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR, 0 }, - range2[]= { MYSQL_TYPE_INT24, MYSQL_TYPE_LONG, 0 }, - range3[]= { MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIMESTAMP, 0 }, + range1[]= { MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR, MYSQL_TYPE_NULL }, + range2[]= { MYSQL_TYPE_INT24, MYSQL_TYPE_LONG, MYSQL_TYPE_NULL }, + range3[]= { MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_NULL }, range4[]= { MYSQL_TYPE_ENUM, MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY, - MYSQL_TYPE_DECIMAL, 0 }, + MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NULL }, *range_list[]= { range1, range2, range3, range4 }, **range_list_end= range_list + sizeof(range_list)/sizeof(*range_list); const enum enum_field_types **range, *type; @@ -4157,7 +4146,7 @@ static my_bool is_binary_compatible(enum enum_field_types type1, { /* check that both type1 and type2 are in the same range */ bool type1_found= FALSE, type2_found= FALSE; - for (type= *range; *type; type++) + for (type= *range; *type != MYSQL_TYPE_NULL; type++) { type1_found|= type1 == *type; type2_found|= type2 == *type; @@ -4340,7 +4329,6 @@ my_bool STDCALL mysql_stmt_bind_result(MYSQL_STMT *stmt, MYSQL_BIND *bind) MYSQL_FIELD *field; ulong bind_count= stmt->field_count; uint param_count= 0; - uchar report_data_truncation= 0; DBUG_ENTER("mysql_stmt_bind_result"); DBUG_PRINT("enter",("field_count: %d", bind_count)); @@ -4378,8 +4366,6 @@ my_bool STDCALL mysql_stmt_bind_result(MYSQL_STMT *stmt, MYSQL_BIND *bind) if (!param->error) param->error= ¶m->error_value; - else - report_data_truncation= REPORT_DATA_TRUNCATION; param->param_number= param_count++; param->offset= 0; @@ -4393,7 +4379,10 @@ my_bool STDCALL mysql_stmt_bind_result(MYSQL_STMT *stmt, MYSQL_BIND *bind) DBUG_RETURN(1); } } - stmt->bind_result_done= BIND_RESULT_DONE | report_data_truncation; + stmt->bind_result_done= BIND_RESULT_DONE; + if (stmt->mysql->options.report_data_truncation) + stmt->bind_result_done|= REPORT_DATA_TRUNCATION; + DBUG_RETURN(0); } diff --git a/myisam/ft_static.c b/myisam/ft_static.c index 3b186f7b179..cdb1580e706 100644 --- a/myisam/ft_static.c +++ b/myisam/ft_static.c @@ -25,9 +25,9 @@ char ft_boolean_syntax[]="+ -><()~*:\"\"&|"; const HA_KEYSEG ft_keysegs[FT_SEGS]={ { - HA_KEYTYPE_VARTEXT, /* type */ + HA_KEYTYPE_VARTEXT2, /* type */ 63, /* language (will be overwritten) */ - 0, 0, 0, /* null_bit, bit_start, bit_end */ + 0, 2, 0, /* null_bit, bit_start, bit_end */ HA_VAR_LENGTH_PART | HA_PACK_KEY, /* flag */ HA_FT_MAXBYTELEN, /* length */ HA_FT_WLEN, /* start */ diff --git a/myisam/ft_test1.c b/myisam/ft_test1.c index a92c85924de..14be9aa1e8c 100644 --- a/myisam/ft_test1.c +++ b/myisam/ft_test1.c @@ -79,24 +79,24 @@ static int run_test(const char *filename) recinfo[0].length= (extra_field == FIELD_BLOB ? 
4 + mi_portable_sizeof_char_ptr : extra_length); if (extra_field == FIELD_VARCHAR) - recinfo[0].length+=2; + recinfo[0].length+= HA_VARCHAR_PACKLENGTH(extra_length); recinfo[1].type=key_field; recinfo[1].length= (key_field == FIELD_BLOB ? 4+mi_portable_sizeof_char_ptr : key_length); if (key_field == FIELD_VARCHAR) - recinfo[1].length+=2; + recinfo[1].length+= HA_VARCHAR_PACKLENGTH(key_length); /* Define a key over the first column */ keyinfo[0].seg=keyseg; keyinfo[0].keysegs=1; keyinfo[0].seg[0].type= key_type; - keyinfo[0].seg[0].flag= (key_field == FIELD_BLOB)?HA_BLOB_PART: - (key_field == FIELD_VARCHAR)?HA_VAR_LENGTH_PART:0; + keyinfo[0].seg[0].flag= (key_field == FIELD_BLOB) ? HA_BLOB_PART: + (key_field == FIELD_VARCHAR) ? HA_VAR_LENGTH_PART:0; keyinfo[0].seg[0].start=recinfo[0].length; keyinfo[0].seg[0].length=key_length; keyinfo[0].seg[0].null_bit= 0; keyinfo[0].seg[0].null_pos=0; - keyinfo[0].seg[0].language=MY_CHARSET_CURRENT; + keyinfo[0].seg[0].language= default_charset_info->number; keyinfo[0].flag = (no_fulltext?HA_PACK_KEY:HA_FULLTEXT); if (!silent) @@ -155,33 +155,42 @@ static int run_test(const char *filename) if (!silent) printf("- Reading rows with key\n"); for (i=0 ; i < NQUERIES ; i++) - { FT_DOCLIST *result; + { + FT_DOCLIST *result; result=ft_nlq_init_search(file,0,(char*) query[i],strlen(query[i]),1); - if(!result) { + if(!result) + { printf("Query %d: `%s' failed with errno %3d\n",i,query[i],my_errno); continue; } printf("Query %d: `%s'. Found: %d. Top five documents:\n", - i,query[i],result->ndocs); - for(j=0;j<5;j++) { double w; int err; - err=ft_nlq_read_next(result, read_record); - if(err==HA_ERR_END_OF_FILE) { - printf("No more matches!\n"); - break; - } else if (err) { - printf("ft_read_next %d failed with errno %3d\n",j,my_errno); - break; - } - w=ft_nlq_get_relevance(result); - if(key_field == FIELD_VARCHAR) { - uint l; - char *p; - p=recinfo[0].length+read_record; - l=uint2korr(p); - printf("%10.7f: %.*s\n",w,(int) l,p+2); - } else - printf("%10.7f: %.*s\n",w,recinfo[1].length, - recinfo[0].length+read_record); + i,query[i],result->ndocs); + for (j=0;j<5;j++) + { + double w; int err; + err= ft_nlq_read_next(result, read_record); + if (err==HA_ERR_END_OF_FILE) + { + printf("No more matches!\n"); + break; + } + else if (err) + { + printf("ft_read_next %d failed with errno %3d\n",j,my_errno); + break; + } + w=ft_nlq_get_relevance(result); + if (key_field == FIELD_VARCHAR) + { + uint l; + char *p; + p=recinfo[0].length+read_record; + l=uint2korr(p); + printf("%10.7f: %.*s\n",w,(int) l,p+2); + } + else + printf("%10.7f: %.*s\n",w,recinfo[1].length, + recinfo[0].length+read_record); } ft_nlq_close_search(result); } @@ -215,9 +224,14 @@ void create_record(char *pos, int n) else if (recinfo[0].type == FIELD_VARCHAR) { uint tmp; - strnmov(pos+2,data[n].f0,keyinfo[0].seg[0].length); - tmp=strlen(pos+2); - int2store(pos,tmp); + /* -1 is here because pack_length is stored in seg->length */ + uint pack_length= HA_VARCHAR_PACKLENGTH(keyinfo[0].seg[0].length-1); + strnmov(pos+pack_length,data[n].f0,keyinfo[0].seg[0].length); + tmp=strlen(pos+pack_length); + if (pack_length == 1) + *pos= (char) tmp; + else + int2store(pos,tmp); pos+=recinfo[0].length; } else @@ -239,9 +253,14 @@ void create_record(char *pos, int n) else if (recinfo[1].type == FIELD_VARCHAR) { uint tmp; - strnmov(pos+2,data[n].f2,keyinfo[0].seg[0].length); - tmp=strlen(pos+2); - int2store(pos,tmp); + /* -1 is here because pack_length is stored in seg->length */ + uint pack_length= 
HA_VARCHAR_PACKLENGTH(keyinfo[0].seg[0].length-1); + strnmov(pos+pack_length,data[n].f2,keyinfo[0].seg[0].length); + tmp=strlen(pos+1); + if (pack_length == 1) + *pos= (char) tmp; + else + int2store(pos,tmp); pos+=recinfo[1].length; } else diff --git a/myisam/ft_update.c b/myisam/ft_update.c index 8dafefe77a8..b8cd925bf4f 100644 --- a/myisam/ft_update.c +++ b/myisam/ft_update.c @@ -58,29 +58,27 @@ uint _mi_ft_segiterator(register FT_SEG_ITERATOR *ftsi) DBUG_ENTER("_mi_ft_segiterator"); if (!ftsi->num) - { DBUG_RETURN(0); - } - else - ftsi->num--; + + ftsi->num--; if (!ftsi->seg) - { DBUG_RETURN(1); - } - else - ftsi->seg--; + + ftsi->seg--; if (ftsi->seg->null_bit && (ftsi->rec[ftsi->seg->null_pos] & ftsi->seg->null_bit)) { - ftsi->pos=0; - DBUG_RETURN(1); + ftsi->pos=0; + DBUG_RETURN(1); } ftsi->pos= ftsi->rec+ftsi->seg->start; if (ftsi->seg->flag & HA_VAR_LENGTH_PART) { - ftsi->len=uint2korr(ftsi->pos); - ftsi->pos+=2; /* Skip VARCHAR length */ + uint pack_length= (ftsi->seg->bit_start); + ftsi->len= (pack_length == 1 ? (uint) *(uchar*) ftsi->pos : + uint2korr(ftsi->pos)); + ftsi->pos+= pack_length; /* Skip VARCHAR length */ DBUG_RETURN(1); } if (ftsi->seg->flag & HA_BLOB_PART) @@ -296,9 +294,11 @@ uint _ft_make_key(MI_INFO *info, uint keynr, byte *keybuf, FT_WORD *wptr, DBUG_RETURN(_mi_make_key(info,keynr,(uchar*) keybuf,buf,filepos)); } + /* convert key value to ft2 */ + uint _mi_ft_convert_to_ft2(MI_INFO *info, uint keynr, uchar *key) { my_off_t root; @@ -316,9 +316,12 @@ uint _mi_ft_convert_to_ft2(MI_INFO *info, uint keynr, uchar *key) get_key_full_length_rdonly(key_length, key); while (_mi_ck_delete(info, keynr, key, key_length) == 0) - /* nothing to do here. - _mi_ck_delete() will populate info->ft1_to_ft2 with deleted keys - */; + { + /* + nothing to do here. 
+ _mi_ck_delete() will populate info->ft1_to_ft2 with deleted keys + */ + } /* creating pageful of keys */ mi_putint(info->buff,length+2,0); diff --git a/myisam/mi_check.c b/myisam/mi_check.c index 112a371c9fe..b8f992dc21a 100644 --- a/myisam/mi_check.c +++ b/myisam/mi_check.c @@ -280,7 +280,8 @@ int chk_size(MI_CHECK *param, register MI_INFO *info) size=my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0)); if ((skr=(my_off_t) info->state->key_file_length) != size) { - if (skr > size) + /* Don't give error if file generated by myisampack */ + if (skr > size && info->s->state.key_map) { error=1; mi_check_print_error(param, diff --git a/myisam/mi_checksum.c b/myisam/mi_checksum.c index 95338434211..33a51068fb0 100644 --- a/myisam/mi_checksum.c +++ b/myisam/mi_checksum.c @@ -40,8 +40,12 @@ ha_checksum mi_checksum(MI_INFO *info, const byte *buf) } case FIELD_VARCHAR: { - length=uint2korr(buf); - pos=buf+2; + uint pack_length= HA_VARCHAR_PACKLENGTH(rec->length-1); + if (pack_length == 1) + length= (ulong) *(uchar*) buf; + else + length= uint2korr(buf); + pos= buf+pack_length; break; } default: diff --git a/myisam/mi_create.c b/myisam/mi_create.c index e139997e0c7..0164555272d 100644 --- a/myisam/mi_create.c +++ b/myisam/mi_create.c @@ -43,7 +43,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, myf create_flag; uint fields,length,max_key_length,packed,pointer,real_length_diff, key_length,info_length,key_segs,options,min_key_length_skip, - base_pos,varchar_count,long_varchar_count,varchar_length, + base_pos,long_varchar_count,varchar_length, max_key_block_length,unique_key_parts,fulltext_keys,offset; ulong reclength, real_reclength,min_pack_length; char filename[FN_REFLEN],linkname[FN_REFLEN], *linkname_ptr; @@ -99,7 +99,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, /* Start by checking fields and field-types used */ - reclength=varchar_count=varchar_length=long_varchar_count=packed= + reclength=varchar_length=long_varchar_count=packed= min_pack_length=pack_reclength=0; for (rec=recinfo, fields=0 ; fields != columns ; @@ -130,14 +130,15 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } else if (type == FIELD_VARCHAR) { - varchar_count++; - varchar_length+=rec->length-2; + varchar_length+= rec->length-1; /* Used for min_pack_length */ packed--; - pack_reclength+=1; - if (test(rec->length > 257)) - { /* May be packed on 3 bytes */ + pack_reclength++; + min_pack_length++; + /* We must test for 257 as length includes pack-length */ + if (test(rec->length >= 257)) + { long_varchar_count++; - pack_reclength+=2; + pack_reclength+= 2; /* May be packed on 3 bytes */ } } else if (type != FIELD_SKIP_ZERO) @@ -169,12 +170,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, /* We can't use checksum with static length rows */ if (!(options & HA_OPTION_PACK_RECORD)) options&= ~HA_OPTION_CHECKSUM; - if (options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) - min_pack_length+=varchar_count; /* Min length to pack */ - else - { - min_pack_length+=varchar_length+2*varchar_count; - } + if (!(options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD))) + min_pack_length+= varchar_length; if (flags & HA_CREATE_TMP_TABLE) options|= HA_OPTION_TMP_TABLE; if (flags & HA_CREATE_CHECKSUM || (options & HA_OPTION_CHECKSUM)) @@ -220,7 +217,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, reclength=pointer+1; /* reserve place for delete link */ } else - reclength+=long_varchar_count; /* We need space for this! 
*/ + reclength+= long_varchar_count; /* We need space for varchar! */ max_key_length=0; tot_length=0 ; key_segs=0; fulltext_keys=0; @@ -261,7 +258,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, j++, keyseg++) { if (keyseg->type != HA_KEYTYPE_BINARY && - keyseg->type != HA_KEYTYPE_VARBINARY) + keyseg->type != HA_KEYTYPE_VARBINARY1 && + keyseg->type != HA_KEYTYPE_VARBINARY2) { my_errno=HA_WRONG_CREATE_OPTION; goto err; @@ -285,11 +283,22 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, j++, keyseg++) { if (keyseg->type != HA_KEYTYPE_TEXT && - keyseg->type != HA_KEYTYPE_VARTEXT) + keyseg->type != HA_KEYTYPE_VARTEXT1 && + keyseg->type != HA_KEYTYPE_VARTEXT2) { my_errno=HA_WRONG_CREATE_OPTION; goto err; } + if (!(keyseg->flag & HA_BLOB_PART) && + (keyseg->type == HA_KEYTYPE_VARTEXT1 || + keyseg->type == HA_KEYTYPE_VARTEXT2)) + { + /* Make a flag that this is a VARCHAR */ + keyseg->flag|= HA_VAR_LENGTH_PART; + /* Store in bit_start number of bytes used to pack the length */ + keyseg->bit_start= ((keyseg->type == HA_KEYTYPE_VARTEXT1)? + 1 : 2); + } } fulltext_keys++; @@ -345,10 +354,19 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, case HA_KEYTYPE_INT8: keyseg->flag|= HA_SWAP_KEY; break; - case HA_KEYTYPE_VARTEXT: - case HA_KEYTYPE_VARBINARY: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: if (!(keyseg->flag & HA_BLOB_PART)) + { + /* Make a flag that this is a VARCHAR */ keyseg->flag|= HA_VAR_LENGTH_PART; + /* Store in bit_start number of bytes used to pack the length */ + keyseg->bit_start= ((keyseg->type == HA_KEYTYPE_VARTEXT1 || + keyseg->type == HA_KEYTYPE_VARBINARY1) ? + 1 : 2); + } break; default: break; @@ -368,6 +386,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } if (keyseg->flag & (HA_VAR_LENGTH_PART | HA_BLOB_PART)) { + DBUG_ASSERT(!test_all_bits(keyseg->flag, + (HA_VAR_LENGTH_PART | HA_BLOB_PART))); keydef->flag|=HA_VAR_LENGTH_KEY; length++; /* At least one length byte */ options|=HA_OPTION_PACK_KEYS; /* Using packed keys */ @@ -646,11 +666,31 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, /* Save unique definition */ for (i=0 ; i < share.state.header.uniques ; i++) { + HA_KEYSEG *keyseg_end; + keyseg= uniquedefs[i].seg; if (mi_uniquedef_write(file, &uniquedefs[i])) goto err; - for (j=0 ; j < uniquedefs[i].keysegs ; j++) + for (keyseg= uniquedefs[i].seg, keyseg_end= keyseg+ uniquedefs[i].keysegs; + keyseg < keyseg_end; + keyseg++) { - if (mi_keyseg_write(file, &uniquedefs[i].seg[j])) + switch (keyseg->type) { + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: + if (!(keyseg->flag & HA_BLOB_PART)) + { + keyseg->flag|= HA_VAR_LENGTH_PART; + keyseg->bit_start= ((keyseg->type == HA_KEYTYPE_VARTEXT1 || + keyseg->type == HA_KEYTYPE_VARBINARY1) ? 
+ 1 : 2); + } + break; + default: + break; + } + if (mi_keyseg_write(file, keyseg)) goto err; } } diff --git a/myisam/mi_dbug.c b/myisam/mi_dbug.c index 02d1c7d05d6..e782d21afe7 100644 --- a/myisam/mi_dbug.c +++ b/myisam/mi_dbug.c @@ -131,9 +131,21 @@ void _mi_print_key(FILE *stream, register HA_KEYSEG *keyseg, key=end; break; } + case HA_KEYTYPE_BIT: + { + uint i; + fputs("0x",stream); + for (i=0 ; i < keyseg->length ; i++) + fprintf(stream, "%02x", (uint) *key++); + key= end; + break; + } + #endif - case HA_KEYTYPE_VARTEXT: /* VARCHAR and TEXT */ - case HA_KEYTYPE_VARBINARY: /* VARBINARY and BLOB */ + case HA_KEYTYPE_VARTEXT1: /* VARCHAR and TEXT */ + case HA_KEYTYPE_VARTEXT2: /* VARCHAR and TEXT */ + case HA_KEYTYPE_VARBINARY1: /* VARBINARY and BLOB */ + case HA_KEYTYPE_VARBINARY2: /* VARBINARY and BLOB */ { uint tmp_length; get_key_length(tmp_length,key); diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c index 0b8d3c97872..9d8e161b8fe 100644 --- a/myisam/mi_dynrec.c +++ b/myisam/mi_dynrec.c @@ -768,11 +768,21 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from) } else if (type == FIELD_VARCHAR) { - uint tmp_length=uint2korr(from); - store_key_length_inc(to,tmp_length); - memcpy(to,from+2,tmp_length); - to+=tmp_length; - continue; + uint pack_length= HA_VARCHAR_PACKLENGTH(rec->length -1); + uint tmp_length; + if (pack_length == 1) + { + tmp_length= (uint) *(uchar*) from; + *to++= *from; + } + else + { + tmp_length= uint2korr(from); + store_key_length_inc(to,tmp_length); + } + memcpy(to, from+pack_length,tmp_length); + to+= tmp_length; + continue; } else { @@ -878,9 +888,20 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff, } else if (type == FIELD_VARCHAR) { - uint tmp_length=uint2korr(record); - to+=get_pack_length(tmp_length)+tmp_length; - continue; + uint pack_length= HA_VARCHAR_PACKLENGTH(rec->length -1); + uint tmp_length; + if (pack_length == 1) + { + tmp_length= (uint) *(uchar*) record; + to+= 1+ tmp_length; + continue; + } + else + { + tmp_length= uint2korr(record); + to+= get_pack_length(tmp_length)+tmp_length; + } + continue; } else { @@ -894,9 +915,7 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff, } } else - { - to+=length; - } + to+= length; } if (packed_length != (uint) (to - rec_buff) + test(info->s->calc_checksum) || (bit != 1 && (flag & ~(bit - 1)))) @@ -947,13 +966,27 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, { if (type == FIELD_VARCHAR) { - get_key_length(length,from); - if (length > rec_length-2) - goto err; - int2store(to,length); - memcpy(to+2,from,length); - from+=length; - continue; + uint pack_length= HA_VARCHAR_PACKLENGTH(rec_length-1); + if (pack_length == 1) + { + length= (uint) *(uchar*) from; + if (length > rec_length-1) + goto err; + *to= *from++; + } + else + { + get_key_length(length, from); + if (length > rec_length-2) + goto err; + int2store(to,length); + } + if (from+length > from_end) + goto err; + memcpy(to+pack_length, from, length); + from+= length; + min_pack_length--; + continue; } if (flag & bit) { @@ -1021,15 +1054,17 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, if (min_pack_length > (uint) (from_end - from)) goto err; min_pack_length-=rec_length; - memcpy(to,(byte*) from,(size_t) rec_length); from+=rec_length; + memcpy(to, (byte*) from, (size_t) rec_length); + from+=rec_length; } } if (info->s->calc_checksum) from++; if (to == to_end && from == from_end && (bit == 1 || !(flag & ~(bit-1)))) 
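/*
  [Editorial aside, not part of the patch] The recurring convention in these
  MyISAM hunks is that a VARCHAR value is now prefixed by either 1 length
  byte (maximum data length <= 255) or 2 length bytes stored low byte first;
  that is what HA_VARCHAR_PACKLENGTH() selects and what the
  "pack_length == 1 ? *(uchar*) p : uint2korr(p)" reads decode.  A
  self-contained sketch of that layout, written against plain C rather than
  the MyISAM headers (the helper names below are illustrative):
*/
#include <string.h>

unsigned varchar_packlength(unsigned max_data_length)
{
  return max_data_length < 256 ? 1 : 2;   /* mirrors HA_VARCHAR_PACKLENGTH */
}

/* Store length prefix + data; returns the number of bytes written. */
unsigned varchar_store(unsigned char *to, unsigned max_data_length,
                       const unsigned char *data, unsigned length)
{
  unsigned pack_length = varchar_packlength(max_data_length);
  if (pack_length == 1)
    to[0] = (unsigned char) length;
  else
  {
    to[0] = (unsigned char) (length & 0xff);   /* low byte first, as int2store */
    to[1] = (unsigned char) (length >> 8);
  }
  memcpy(to + pack_length, data, length);
  return pack_length + length;
}

/* Read the stored length back; the counterpart of *(uchar*)p / uint2korr(p). */
unsigned varchar_length(const unsigned char *from, unsigned max_data_length)
{
  if (varchar_packlength(max_data_length) == 1)
    return from[0];
  return (unsigned) from[0] | ((unsigned) from[1] << 8);
}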
DBUG_RETURN(found_length); + err: - my_errno=HA_ERR_RECORD_DELETED; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_PRINT("error",("to_end: %lx -> %lx from_end: %lx -> %lx", to,to_end,from,from_end)); DBUG_DUMP("from",(byte*) info->rec_buff,info->s->base.min_pack_length); diff --git a/myisam/mi_key.c b/myisam/mi_key.c index a775e0ba2d0..caca63452b0 100644 --- a/myisam/mi_key.c +++ b/myisam/mi_key.c @@ -34,10 +34,20 @@ static int _mi_put_key_in_record(MI_INFO *info,uint keynr,byte *record); - /* - ** Make a intern key from a record - ** Ret: Length of key - */ +/* + Make a intern key from a record + + SYNOPSIS + _mi_make_key() + info MyiSAM handler + keynr key number + key Store created key here + record Record + filepos Position to record in the data file + + RETURN + Length of key +*/ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, const byte *record, my_off_t filepos) @@ -82,6 +92,19 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, length); pos= (byte*) record+keyseg->start; + if (type == HA_KEYTYPE_BIT) + { + if (keyseg->bit_length) + { + uchar bits= get_rec_bits((uchar*) record + keyseg->bit_pos, + keyseg->bit_start, keyseg->bit_length); + *key++= bits; + length--; + } + memcpy((byte*) key, pos, length); + key+= length; + continue; + } if (keyseg->flag & HA_SPACE_PACK) { end=pos+length; @@ -104,8 +127,10 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, } if (keyseg->flag & HA_VAR_LENGTH_PART) { - uint tmp_length=uint2korr(pos); - pos+=2; /* Skip VARCHAR length */ + uint pack_length= keyseg->bit_start; + uint tmp_length= (pack_length == 1 ? (uint) *(uchar*) pos : + uint2korr(pos)); + pos+= pack_length; /* Skip VARCHAR length */ set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); @@ -333,6 +358,26 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, } record[keyseg->null_pos]&= ~keyseg->null_bit; } + if (keyseg->type == HA_KEYTYPE_BIT) + { + uint length= keyseg->length; + + if (keyseg->bit_length) + { + uchar bits= *key++; + set_rec_bits(bits, record + keyseg->bit_pos, keyseg->bit_start, + keyseg->bit_length); + length--; + } + else + { + clr_rec_bits(record + keyseg->bit_pos, keyseg->bit_start, + keyseg->bit_length); + } + memcpy(record + keyseg->start, (byte*) key, length); + key+= length; + continue; + } if (keyseg->flag & HA_SPACE_PACK) { uint length; @@ -365,9 +410,12 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, goto err; #endif /* Store key length */ - int2store(record+keyseg->start, length); + if (keyseg->bit_start == 1) + *(uchar*) (record+keyseg->start)= (uchar) length; + else + int2store(record+keyseg->start, length); /* And key data */ - memcpy(record+keyseg->start+2,(byte*) key, length); + memcpy(record+keyseg->start + keyseg->bit_start, (byte*) key, length); key+= length; } else if (keyseg->flag & HA_BLOB_PART) diff --git a/myisam/mi_open.c b/myisam/mi_open.c index 562227d2f03..58db2e47c1f 100644 --- a/myisam/mi_open.c +++ b/myisam/mi_open.c @@ -314,7 +314,9 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) { disk_pos=mi_keyseg_read(disk_pos, pos); - if (pos->type == HA_KEYTYPE_TEXT || pos->type == HA_KEYTYPE_VARTEXT) + if (pos->type == HA_KEYTYPE_TEXT || + pos->type == HA_KEYTYPE_VARTEXT1 || + pos->type == HA_KEYTYPE_VARTEXT2) { if (!pos->language) pos->charset=default_charset_info; @@ -389,7 +391,9 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) for (j=0 ; j < 
share->uniqueinfo[i].keysegs; j++,pos++) { disk_pos=mi_keyseg_read(disk_pos, pos); - if (pos->type == HA_KEYTYPE_TEXT || pos->type == HA_KEYTYPE_VARTEXT) + if (pos->type == HA_KEYTYPE_TEXT || + pos->type == HA_KEYTYPE_VARTEXT1 || + pos->type == HA_KEYTYPE_VARTEXT2) { if (!pos->language) pos->charset=default_charset_info; @@ -1049,12 +1053,13 @@ int mi_keyseg_write(File file, const HA_KEYSEG *keyseg) *ptr++ =keyseg->null_bit; *ptr++ =keyseg->bit_start; *ptr++ =keyseg->bit_end; - *ptr++ =0; /* Not used */ + *ptr++= keyseg->bit_length; mi_int2store(ptr,keyseg->flag); ptr+=2; mi_int2store(ptr,keyseg->length); ptr+=2; mi_int4store(ptr,keyseg->start); ptr+=4; - mi_int4store(ptr,keyseg->null_pos); ptr+=4; - + mi_int4store(ptr, keyseg->null_bit ? keyseg->null_pos : keyseg->bit_pos); + ptr+=4; + return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP)); } @@ -1066,12 +1071,19 @@ char *mi_keyseg_read(char *ptr, HA_KEYSEG *keyseg) keyseg->null_bit = *ptr++; keyseg->bit_start = *ptr++; keyseg->bit_end = *ptr++; - ptr++; + keyseg->bit_length = *ptr++; keyseg->flag = mi_uint2korr(ptr); ptr +=2; keyseg->length = mi_uint2korr(ptr); ptr +=2; keyseg->start = mi_uint4korr(ptr); ptr +=4; keyseg->null_pos = mi_uint4korr(ptr); ptr +=4; keyseg->charset=0; /* Will be filled in later */ + if (keyseg->null_bit) + keyseg->bit_pos= keyseg->null_pos + (keyseg->null_bit == 7); + else + { + keyseg->bit_pos= keyseg->null_pos; + keyseg->null_pos= 0; + } return ptr; } diff --git a/myisam/mi_packrec.c b/myisam/mi_packrec.c index a277c2ca9d1..62d15c03266 100644 --- a/myisam/mi_packrec.c +++ b/myisam/mi_packrec.c @@ -91,8 +91,10 @@ static void uf_zero(MI_COLUMNDEF *rec,MI_BIT_BUFF *bit_buff, uchar *to,uchar *end); static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end); -static void uf_varchar(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, - uchar *to, uchar *end); +static void uf_varchar1(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + uchar *to, uchar *end); +static void uf_varchar2(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + uchar *to, uchar *end); static void decode_bytes(MI_COLUMNDEF *rec,MI_BIT_BUFF *bit_buff, uchar *to,uchar *end); static uint decode_pos(MI_BIT_BUFF *bit_buff,MI_DECODE_TREE *decode_tree); @@ -522,14 +524,16 @@ static void (*get_unpack_function(MI_COLUMNDEF *rec)) case FIELD_BLOB: return &uf_blob; case FIELD_VARCHAR: - return &uf_varchar; + if (rec->length <= 256) /* 255 + 1 byte length */ + return &uf_varchar1; + return &uf_varchar2; case FIELD_LAST: default: return 0; /* This should never happend */ } } - /* De different functions to unpack a field */ + /* The different functions to unpack a field */ static void uf_zerofill_skip_zero(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end) @@ -773,7 +777,22 @@ static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, } } -static void uf_varchar(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + +static void uf_varchar1(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + uchar *to, uchar *end __attribute__((unused))) +{ + if (get_bit(bit_buff)) + to[0]= 0; /* Zero lengths */ + else + { + ulong length=get_bits(bit_buff,rec->space_length_bits); + *to= (uchar) length; + decode_bytes(rec,bit_buff,to+1,to+1+length); + } +} + + +static void uf_varchar2(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end __attribute__((unused))) { if (get_bit(bit_buff)) diff --git a/myisam/mi_search.c b/myisam/mi_search.c index 2fef70db9f0..2259dd17fcd 100644 --- a/myisam/mi_search.c +++ b/myisam/mi_search.c @@ -425,7 +425,8 @@ int 
_mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, if (len < cmplen) { if ((keyinfo->seg->type != HA_KEYTYPE_TEXT && - keyinfo->seg->type != HA_KEYTYPE_VARTEXT)) + keyinfo->seg->type != HA_KEYTYPE_VARTEXT1 && + keyinfo->seg->type != HA_KEYTYPE_VARTEXT2)) my_flag= -1; else { @@ -1371,7 +1372,8 @@ _mi_calc_var_pack_key_length(MI_KEYDEF *keyinfo,uint nod_flag,uchar *next_key, sort_order=0; if ((keyinfo->flag & HA_FULLTEXT) && ((keyseg->type == HA_KEYTYPE_TEXT) || - (keyseg->type == HA_KEYTYPE_VARTEXT)) && + (keyseg->type == HA_KEYTYPE_VARTEXT1) || + (keyseg->type == HA_KEYTYPE_VARTEXT2)) && !use_strnxfrm(keyseg->charset)) sort_order=keyseg->charset->sort_order; diff --git a/myisam/mi_test1.c b/myisam/mi_test1.c index 15ce7515ac2..aa6cd98ac8e 100644 --- a/myisam/mi_test1.c +++ b/myisam/mi_test1.c @@ -75,11 +75,11 @@ static int run_test(const char *filename) recinfo[1].length= (key_field == FIELD_BLOB ? 4+mi_portable_sizeof_char_ptr : key_length); if (key_field == FIELD_VARCHAR) - recinfo[1].length+=2; + recinfo[1].length+= HA_VARCHAR_PACKLENGTH(key_length);; recinfo[2].type=extra_field; recinfo[2].length= (extra_field == FIELD_BLOB ? 4 + mi_portable_sizeof_char_ptr : 24); if (extra_field == FIELD_VARCHAR) - recinfo[2].length+=2; + recinfo[2].length+= HA_VARCHAR_PACKLENGTH(recinfo[2].length); if (opt_unique) { recinfo[3].type=FIELD_CHECK; @@ -88,6 +88,9 @@ static int run_test(const char *filename) rec_length=recinfo[0].length+recinfo[1].length+recinfo[2].length+ recinfo[3].length; + if (key_type == HA_KEYTYPE_VARTEXT1 && + key_length > 255) + key_type= HA_KEYTYPE_VARTEXT2; /* Define a key over the first column */ keyinfo[0].seg=keyseg; @@ -330,7 +333,8 @@ static void create_key_part(char *key,uint rownr) { sprintf(key,"%*d",keyinfo[0].seg[0].length,rownr); } - else if (keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT) + else if (keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT1 || + keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT2) { /* Alpha record */ /* Create a key that may be easily packed */ bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B'); @@ -410,11 +414,14 @@ static void create_record(char *record,uint rownr) } else if (recinfo[1].type == FIELD_VARCHAR) { - uint tmp; - create_key_part(pos+2,rownr); - tmp=strlen(pos+2); - int2store(pos,tmp); - pos+=recinfo[1].length; + uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); + create_key_part(pos+pack_length,rownr); + tmp= strlen(pos+pack_length); + if (pack_length == 1) + *(uchar*) pos= (uchar) tmp; + else + int2store(pos,tmp); + pos+= recinfo[1].length; } else { @@ -434,10 +441,13 @@ static void create_record(char *record,uint rownr) } else if (recinfo[2].type == FIELD_VARCHAR) { - uint tmp; - sprintf(pos+2,"... row: %d", rownr); - tmp=strlen(pos+2); - int2store(pos,tmp); + uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); + sprintf(pos+pack_length, "... row: %d", rownr); + tmp= strlen(pos+pack_length); + if (pack_length == 1) + *(uchar*) pos= (uchar) tmp; + else + int2store(pos,tmp); } else { @@ -466,8 +476,9 @@ static void update_record(char *record) } else if (recinfo[1].type == FIELD_VARCHAR) { - uint length=uint2korr(pos); - my_casedn(default_charset_info,pos+2,length); + uint pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); + uint length= pack_length == 1 ? 
(uint) *(uchar*) pos : uint2korr(pos); + my_casedn(default_charset_info,pos+pack_length,length); pos+=recinfo[1].length; } else @@ -493,10 +504,14 @@ static void update_record(char *record) else if (recinfo[2].type == FIELD_VARCHAR) { /* Second field is longer than 10 characters */ - uint length=uint2korr(pos); - bfill(pos+2+length,recinfo[2].length-length-2,'.'); - length=recinfo[2].length-2; - int2store(pos,length); + uint pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); + uint length= pack_length == 1 ? (uint) *(uchar*) pos : uint2korr(pos); + bfill(pos+pack_length+length,recinfo[2].length-length-pack_length,'.'); + length=recinfo[2].length-pack_length; + if (pack_length == 1) + *(uchar*) pos= (uchar) length; + else + int2store(pos,length); } else { @@ -519,7 +534,7 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"insert_rows", 'i', "Undocumented", (gptr*) &insert_count, (gptr*) &insert_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, - {"key_alpha", 'a', "Undocumented", + {"key_alpha", 'a', "Use a key of type HA_KEYTYPE_TEXT", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"key_binary_pack", 'B', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -535,9 +550,9 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"key_space_pack", 'p', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"key_varchar", 'w', "Undocumented", + {"key_varchar", 'w', "Test VARCHAR keys", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"null_fields", 'N', "Undocumented", + {"null_fields", 'N', "Define fields with NULL", (gptr*) &null_fields, (gptr*) &null_fields, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"row_fixed_size", 'S', "Undocumented", @@ -604,7 +619,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), key_field=FIELD_BLOB; /* blob key */ extra_field= FIELD_BLOB; pack_seg|= HA_BLOB_PART; - key_type= HA_KEYTYPE_VARTEXT; + key_type= HA_KEYTYPE_VARTEXT1; break; case 'k': if (key_length < 4 || key_length > MI_MAX_KEY_LENGTH) @@ -616,11 +631,11 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case 'w': key_field=FIELD_VARCHAR; /* varchar keys */ extra_field= FIELD_VARCHAR; - key_type= HA_KEYTYPE_VARTEXT; + key_type= HA_KEYTYPE_VARTEXT1; pack_seg|= HA_VAR_LENGTH_PART; create_flag|= HA_PACK_RECORD; break; - case 'K': /* Use key cacheing */ + case 'K': /* Use key cacheing */ key_cacheing=1; break; case 'V': diff --git a/myisam/mi_test3.c b/myisam/mi_test3.c index 27d23317b5c..be4277cc65c 100644 --- a/myisam/mi_test3.c +++ b/myisam/mi_test3.c @@ -67,6 +67,7 @@ int main(int argc,char **argv) bzero((char*) keyinfo,sizeof(keyinfo)); bzero((char*) recinfo,sizeof(recinfo)); + bzero((char*) keyseg,sizeof(keyseg)); keyinfo[0].seg= &keyseg[0][0]; keyinfo[0].seg[0].start=0; keyinfo[0].seg[0].length=8; diff --git a/myisam/mi_test_all.res b/myisam/mi_test_all.res index 94355bf1aa2..16b517d3f76 100644 --- a/myisam/mi_test_all.res +++ b/myisam/mi_test_all.res @@ -1,3 +1,6 @@ +myisamchk: MyISAM file test1 +myisamchk: warning: Size of indexfile is: 1024 Should be: 2048 +MyISAM-table 'test1' is usable but should be fixed mi_test2 -s -L -K -R1 -m2000 ; Should give error 135 Error: 135 in write at record: 1105 got error: 135 when using MyISAM-database @@ -5,46 +8,46 @@ myisamchk: MyISAM file test2 myisamchk: warning: Datafile is almost full, 65532 of 65534 used MyISAM-table 'test2' is usable but should be fixed Commands Used 
count Errors Recover errors -open 17 0 0 -write 850 0 0 -update 85 0 0 -delete 850 0 0 -close 17 0 0 -extra 102 0 0 -Total 1921 0 0 +open 1 0 0 +write 50 0 0 +update 5 0 0 +delete 50 0 0 +close 1 0 0 +extra 6 0 0 +Total 113 0 0 Commands Used count Errors Recover errors -open 18 0 0 -write 900 0 0 -update 90 0 0 -delete 900 0 0 -close 18 0 0 -extra 108 0 0 -Total 2034 0 0 +open 2 0 0 +write 100 0 0 +update 10 0 0 +delete 100 0 0 +close 2 0 0 +extra 12 0 0 +Total 226 0 0 -real 0m1.054s -user 0m0.410s -sys 0m0.640s +real 0m0.791s +user 0m0.137s +sys 0m0.117s -real 0m1.077s -user 0m0.550s -sys 0m0.530s +real 0m0.659s +user 0m0.252s +sys 0m0.102s -real 0m1.100s -user 0m0.420s -sys 0m0.680s +real 0m0.571s +user 0m0.188s +sys 0m0.098s -real 0m0.783s -user 0m0.590s -sys 0m0.200s +real 0m1.111s +user 0m0.236s +sys 0m0.037s -real 0m0.764s -user 0m0.560s -sys 0m0.210s +real 0m0.621s +user 0m0.242s +sys 0m0.022s -real 0m0.699s -user 0m0.570s -sys 0m0.130s +real 0m0.698s +user 0m0.248s +sys 0m0.021s -real 0m0.991s -user 0m0.630s -sys 0m0.350s +real 0m0.683s +user 0m0.265s +sys 0m0.079s diff --git a/myisam/mi_unique.c b/myisam/mi_unique.c index c03182456df..f2d5f01be25 100644 --- a/myisam/mi_unique.c +++ b/myisam/mi_unique.c @@ -95,8 +95,10 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record) pos= record+keyseg->start; if (keyseg->flag & HA_VAR_LENGTH_PART) { - uint tmp_length=uint2korr(pos); - pos+=2; /* Skip VARCHAR length */ + uint pack_length= keyseg->bit_start; + uint tmp_length= (pack_length == 1 ? (uint) *(uchar*) pos : + uint2korr(pos)); + pos+= pack_length; /* Skip VARCHAR length */ set_if_smaller(length,tmp_length); } else if (keyseg->flag & HA_BLOB_PART) @@ -107,7 +109,8 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record) length=tmp_length; /* The whole blob */ } end= pos+length; - if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT) + if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT1 || + type == HA_KEYTYPE_VARTEXT2) { keyseg->charset->coll->hash_sort(keyseg->charset, (const uchar*) pos, length, &seed1, @@ -157,12 +160,21 @@ int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b, pos_b= b+keyseg->start; if (keyseg->flag & HA_VAR_LENGTH_PART) { - a_length= uint2korr(pos_a); - b_length= uint2korr(pos_b); - pos_a+= 2; /* Skip VARCHAR length */ - pos_b+= 2; - set_if_smaller(a_length, keyseg->length); - set_if_smaller(b_length, keyseg->length); + uint pack_length= keyseg->bit_start; + if (pack_length == 1) + { + a_length= (uint) *(uchar*) pos_a++; + b_length= (uint) *(uchar*) pos_b++; + } + else + { + a_length= uint2korr(pos_a); + b_length= uint2korr(pos_b); + pos_a+= 2; /* Skip VARCHAR length */ + pos_b+= 2; + } + set_if_smaller(a_length, keyseg->length); /* Safety */ + set_if_smaller(b_length, keyseg->length); /* safety */ } else if (keyseg->flag & HA_BLOB_PART) { @@ -182,7 +194,8 @@ int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b, memcpy_fixed((byte*) &pos_a,pos_a+keyseg->bit_start,sizeof(char*)); memcpy_fixed((byte*) &pos_b,pos_b+keyseg->bit_start,sizeof(char*)); } - if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT) + if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT1 || + type == HA_KEYTYPE_VARTEXT2) { if (mi_compare_text(keyseg->charset, (uchar *) pos_a, a_length, (uchar *) pos_b, b_length, 0, 1)) diff --git a/myisam/myisampack.c b/myisam/myisampack.c index cc520847f70..bda620a594a 100644 --- a/myisam/myisampack.c +++ b/myisam/myisampack.c @@ -849,9 +849,11 @@ static int 
get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) } else if (count->field_type == FIELD_VARCHAR) { - length=uint2korr(start_pos); - pos=start_pos+2; - end_pos=start_pos+length; + uint pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1); + length= (pack_length == 1 ? (uint) *(uchar*) start_pos : + uint2korr(start_pos)); + pos= start_pos+pack_length; + end_pos= pos+length; set_if_bigger(count->max_length,length); } if (count->field_length <= 8 && @@ -1833,17 +1835,19 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) } case FIELD_VARCHAR: { - ulong col_length= uint2korr(start_pos); + uint pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1); + ulong col_length= (pack_length == 1 ? (uint) *(uchar*) start_pos : + uint2korr(start_pos)); if (!col_length) { write_bits(1,1); /* Empty varchar */ } else { - byte *end=start_pos+2+col_length; + byte *end=start_pos+pack_length+col_length; write_bits(0,1); write_bits(col_length,count->length_bits); - for (start_pos+=2 ; start_pos < end ; start_pos++) + for (start_pos+=pack_length ; start_pos < end ; start_pos++) write_bits(tree->code[(uchar) *start_pos], (uint) tree->code_len[(uchar) *start_pos]); } diff --git a/mysql-test/include/have_federated_db.inc b/mysql-test/include/have_federated_db.inc new file mode 100644 index 00000000000..7247c5db4b2 --- /dev/null +++ b/mysql-test/include/have_federated_db.inc @@ -0,0 +1,4 @@ +-- require r/have_federated_db.require +disable_query_log; +show variables like "have_federated_db"; +enable_query_log; diff --git a/mysql-test/include/ps_conv.inc b/mysql-test/include/ps_conv.inc index 0a5bec26fe7..0b6e27619e6 100644 --- a/mysql-test/include/ps_conv.inc +++ b/mysql-test/include/ps_conv.inc @@ -104,7 +104,7 @@ drop table t5 ; # c5 integer, c6 bigint, c7 float, c8 double, # c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), # c13 date, c14 datetime, c15 timestamp(14), c16 time, -# c17 year, c18 bit, c19 bool, c20 char, +# c17 year, c18 tinyint, c19 bool, c20 char, # c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, # c25 blob, c26 text, c27 mediumblob, c28 mediumtext, # c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), diff --git a/mysql-test/include/ps_create.inc b/mysql-test/include/ps_create.inc index dfc9c494b46..306ed3f1cac 100644 --- a/mysql-test/include/ps_create.inc +++ b/mysql-test/include/ps_create.inc @@ -34,7 +34,7 @@ eval create table t9 c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, - c17 year, c18 bit, c19 bool, c20 char, + c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), diff --git a/mysql-test/include/varchar.inc b/mysql-test/include/varchar.inc index 6c9b62065c5..32140bc7146 100644 --- a/mysql-test/include/varchar.inc +++ b/mysql-test/include/varchar.inc @@ -92,6 +92,66 @@ select sql_big_result c,count(t) from t1 group by c limit 10; select t,count(*) from t1 group by t limit 10; select t,count(t) from t1 group by t limit 10; select sql_big_result t,count(t) from t1 group by t limit 10; + +# +# Test varchar > 255 bytes +# + +alter table t1 modify v varchar(300), drop key v, drop key v_2, add key v (v); +show create table t1; +select count(*) from t1 where v='a'; +select count(*) from t1 where v='a '; +select count(*) from t1 where v 
between 'a' and 'a '; +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +select count(*) from t1 where v like 'a%'; +select count(*) from t1 where v like 'a %'; +explain select count(*) from t1 where v='a '; +explain select count(*) from t1 where v like 'a%'; +explain select count(*) from t1 where v between 'a' and 'a '; +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +explain select * from t1 where v='a'; + +# GROUP BY + +select v,count(*) from t1 group by v limit 10; +select v,count(t) from t1 group by v limit 10; +select sql_big_result v,count(t) from t1 group by v limit 10; + +# +# Test varchar > 255 bytes, key < 255 +# + +alter table t1 drop key v, add key v (v(30)); +show create table t1; +select count(*) from t1 where v='a'; +select count(*) from t1 where v='a '; +select count(*) from t1 where v between 'a' and 'a '; +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +select count(*) from t1 where v like 'a%'; +select count(*) from t1 where v like 'a %'; +explain select count(*) from t1 where v='a '; +explain select count(*) from t1 where v like 'a%'; +explain select count(*) from t1 where v between 'a' and 'a '; +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +explain select * from t1 where v='a'; + +# GROUP BY + +select v,count(*) from t1 group by v limit 10; +select v,count(t) from t1 group by v limit 10; +select sql_big_result v,count(t) from t1 group by v limit 10; + +# +# Test varchar > 512 (special case for GROUP BY becasue of +# CONVERT_IF_BIGGER_TO_BLOB define) +# + +alter table t1 modify v varchar(600), drop key v, add key v (v); +show create table t1; +select v,count(*) from t1 group by v limit 10; +select v,count(t) from t1 group by v limit 10; +select sql_big_result v,count(t) from t1 group by v limit 10; + drop table t1; # diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 707f18d064e..9c5beee02d6 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -1347,7 +1347,7 @@ run_testcase () tsrcdir=$TESTDIR/$tname-src result_file="r/$tname.result" echo $tname > $CURRENT_TEST - SKIP_SLAVE=`$EXPR \( $tname : rpl \) = 0` + SKIP_SLAVE=`$EXPR \( $tname : rpl \) = 0 \& \( $tname : federated \) = 0` if [ -n "$RESULT_EXT" -a \( x$RECORD = x1 -o -f "$result_file$RESULT_EXT" \) ] ; then result_file="$result_file$RESULT_EXT" fi diff --git a/mysql-test/ndb/ndb_range_bounds.pl b/mysql-test/ndb/ndb_range_bounds.pl index 75b7f8a33e1..abe1ea28298 100644 --- a/mysql-test/ndb/ndb_range_bounds.pl +++ b/mysql-test/ndb/ndb_range_bounds.pl @@ -1,138 +1,218 @@ # # test range scan bounds -# output to mysql-test/t/ndb_range_bounds.test -# -# give option --all to generate all cases +# give option --all to test all cases +# set MYSQL_HOME to installation top # use strict; use integer; use Getopt::Long; +use DBI; my $opt_all = 0; my $opt_cnt = 5; -GetOptions("all" => \$opt_all, "cnt=i" => \$opt_cnt) - or die "options are: --all --cnt=N"; +my $opt_verbose = 0; +GetOptions("all" => \$opt_all, "cnt=i" => \$opt_cnt, "verbose" => \$opt_verbose) + or die "options are: --all --cnt=N --verbose"; + +my $mysql_home = $ENV{MYSQL_HOME}; +defined($mysql_home) or die "no MYSQL_HOME"; +my $dsn = "dbi:mysql:database=test;host=localhost;mysql_read_default_file=$mysql_home/var/my.cnf"; +my $opts = { RaiseError => 0, PrintError => 0, AutoCommit => 1, }; + +my $dbh; +my $sth; +my $sql; + +$dbh = DBI->connect($dsn, 
"root", undef, $opts) or die $DBI::errstr; my $table = 't'; -print <do($sql) or die $DBI::errstr; ---disable_warnings -drop table if exists $table; ---enable_warnings - -# test range scan bounds -# generated by mysql-test/ndb/ndb_range_bounds.pl -# all selects must return 0 - -EOF - -sub cut ($$@) { - my($op, $key, @v) = @_; +sub cut ($$$) { + my($op, $key, $val) = @_; $op = '==' if $op eq '='; - my(@w); - eval "\@w = grep(\$_ $op $key, \@v)"; + my(@w) = @$val; + eval "\@w = grep(\$_ $op $key, \@w)"; $@ and die $@; - return @w; + return [ @w ]; } -sub mkdummy (\@) { +sub mkdummy ($) { my ($val) = @_; return { 'dummy' => 1, 'exp' => '9 = 9', - 'cnt' => scalar @$val, + 'res' => $val, }; } -sub mkone ($$$\@) { +sub mkone ($$$$) { my($col, $op, $key, $val) = @_; - my $cnt = scalar cut($op, $key, @$val); + my $res = cut($op, $key, $val); return { 'exp' => "$col $op $key", - 'cnt' => $cnt, + 'res' => $res, }; } -sub mktwo ($$$$$\@) { +sub mktwo ($$$$$$) { my($col, $op1, $key1, $op2, $key2, $val) = @_; - my $cnt = scalar cut($op2, $key2, cut($op1, $key1, @$val)); + my $res = cut($op2, $key2, cut($op1, $key1, $val)); return { 'exp' => "$col $op1 $key1 and $col $op2 $key2", - 'cnt' => $cnt, + 'res' => $res, }; } -sub mkall ($$$\@) { +sub mkall ($$$$) { my($col, $key1, $key2, $val) = @_; my @a = (); - my $p = mkdummy(@$val); + my $p = mkdummy($val); push(@a, $p) if $opt_all; my @ops = qw(< <= = >= >); for my $op (@ops) { - my $p = mkone($col, $op, $key1, @$val); - push(@a, $p) if $opt_all || $p->{cnt} != 0; + my $p = mkone($col, $op, $key1, $val); + push(@a, $p) if $opt_all || @{$p->{res}} != 0; } my @ops1 = $opt_all ? @ops : qw(= >= >); my @ops2 = $opt_all ? @ops : qw(<= <); for my $op1 (@ops1) { for my $op2 (@ops2) { - my $p = mktwo($col, $op1, $key1, $op2, $key2, @$val); - push(@a, $p) if $opt_all || $p->{cnt} != 0; + my $p = mktwo($col, $op1, $key1, $op2, $key2, $val); + push(@a, $p) if $opt_all || @{$p->{res}} != 0; } } + warn scalar(@a)." cases\n" if $opt_verbose; return \@a; } +my $casecnt = 0; + +sub verify ($$$) { + my($sql, $ord, $res) = @_; + warn "$sql\n" if $opt_verbose; + $sth = $dbh->prepare($sql) or die "prepare: $sql: $DBI::errstr"; + $sth->execute() or die "execute: $sql: $DBI::errstr"; + # + # BUG: execute can return success on error so check again + # + $sth->err and die "execute: $sql: $DBI::errstr"; + my @out = (); + for my $b (@{$res->[0]}) { + for my $c (@{$res->[1]}) { + for my $d (@{$res->[2]}) { + push(@out, [$b, $c, $d]); + } + } + } + if ($ord) { + @out = sort { + $ord * ($a->[0] - $b->[0]) || + $ord * ($a->[1] - $b->[1]) || + $ord * ($a->[2] - $b->[2]) || + 0 + } @out; + } + my $cnt = scalar @out; + my $n = 0; + while (1) { + my $row = $sth->fetchrow_arrayref; + $row || last; + @$row == 3 or die "bad row: $sql: @$row"; + for my $v (@$row) { + $v =~ s/^\s+|\s+$//g; + $v =~ /^\d+$/ or die "bad value: $sql: $v"; + } + if ($ord) { + my $out = $out[$n]; + $row->[0] == $out->[0] && + $row->[1] == $out->[1] && + $row->[2] == $out->[2] or + die "$sql: row $n: got row @$row != @$out"; + } + $n++; + } + $sth->err and die "fetch: $sql: $DBI::errstr"; + $n == $cnt or die "verify: $sql: got row count $n != $cnt"; + $casecnt++; +} + for my $nn ("bcd", "") { my %nn; for my $x (qw(b c d)) { $nn{$x} = $nn =~ /$x/ ? 
"not null" : "null"; } - print <do($sql) or die $DBI::errstr; + warn "insert\n"; + $sql = "insert into $table values(?, ?, ?, ?)"; + $sth = $dbh->prepare($sql) or die $DBI::errstr; my @val = (0..($opt_cnt-1)); my $v0 = 0; for my $v1 (@val) { for my $v2 (@val) { for my $v3 (@val) { - print "insert into $table values($v0, $v1, $v2, $v3);\n"; + $sth->bind_param(1, $v0) or die $DBI::errstr; + $sth->bind_param(2, $v1) or die $DBI::errstr; + $sth->bind_param(3, $v2) or die $DBI::errstr; + $sth->bind_param(4, $v3) or die $DBI::errstr; + $sth->execute or die $DBI::errstr; $v0++; } } } + warn "generate cases\n"; my $key1 = 1; my $key2 = 3; - my $a1 = mkall('b', $key1, $key2, @val); - my $a2 = mkall('c', $key1, $key2, @val); - my $a3 = mkall('d', $key1, $key2, @val); - for my $p1 (@$a1) { - my $cnt1 = $p1->{cnt} * @val * @val; - print "select count(*) - $cnt1 from $table"; - print " where $p1->{exp};\n"; - for my $p2 (@$a2) { - my $cnt2 = $p1->{cnt} * $p2->{cnt} * @val; - print "select count(*) - $cnt2 from $table"; - print " where $p1->{exp} and $p2->{exp};\n"; - for my $p3 (@$a3) { - my $cnt3 = $p1->{cnt} * $p2->{cnt} * $p3->{cnt}; - print "select count(*) - $cnt3 from $table"; - print " where $p1->{exp} and $p2->{exp} and $p3->{exp};\n"; + my $a1 = mkall('b', $key1, $key2, \@val); + my $a2 = mkall('c', $key1, $key2, \@val); + my $a3 = mkall('d', $key1, $key2, \@val); + warn "select\n"; + for my $ord (0, +1, -1) { + my $orderby = + $ord == 0 ? "" : + $ord == +1 ? " order by b, c, d" : + $ord == -1 ? " order by b desc, c desc, d desc" : die "not here"; + for my $p1 (@$a1) { + my $res = [ $p1->{res}, \@val, \@val ]; + $sql = "select b, c, d from $table" . + " where $p1->{exp}" . + $orderby; + verify($sql, $ord, $res); + for my $p2 (@$a2) { + my $res = [ $p1->{res}, $p2->{res}, \@val ]; + $sql = "select b, c, d from $table" . + " where $p1->{exp} and $p2->{exp}" . + $orderby; + verify($sql, $ord, $res); + for my $p3 (@$a3) { + my $res = [ $p1->{res}, $p2->{res}, $p3->{res} ]; + $sql = "select b, c, d from $table" . + " where $p1->{exp} and $p2->{exp} and $p3->{exp}" . 
+ $orderby; + verify($sql, $ord, $res); + } } } } - print <do($sql) or die $DBI::errstr; } +warn "verified $casecnt cases\n"; +warn "done\n"; + # vim: set sw=2: diff --git a/mysql-test/r/archive.result b/mysql-test/r/archive.result index e215e72d43f..ce5917db4bc 100644 --- a/mysql-test/r/archive.result +++ b/mysql-test/r/archive.result @@ -2601,6 +2601,1214 @@ auto fld1 companynr fld3 fld4 fld5 fld6 2 011401 37 breaking dreaded Steinberg W 3 011402 37 Romans scholastics jarring 4 011403 37 intercepted audiology tinily +REPAIR TABLE t2; +Table Op Msg_type Msg_text +test.t2 repair status OK +SELECT * FROM t2; +auto fld1 companynr fld3 fld4 fld5 fld6 +1 000001 00 Omaha teethe neat +2 011401 37 breaking dreaded Steinberg W +3 011402 37 Romans scholastics jarring +4 011403 37 intercepted audiology tinily +5 011501 37 bewilderingly wallet balled +6 011701 37 astound parters persist W +7 011702 37 admonishing eschew attainments +8 011703 37 sumac quitter fanatic +9 012001 37 flanking neat measures FAS +10 012003 37 combed Steinberg rightfulness +11 012004 37 subjective jarring capably +12 012005 37 scatterbrain tinily impulsive +13 012301 37 Eulerian balled starlet +14 012302 36 dubbed persist terminators +15 012303 37 Kane attainments untying +16 012304 37 overlay fanatic announces FAS +17 012305 37 perturb measures featherweight FAS +18 012306 37 goblins rightfulness pessimist FAS +19 012501 37 annihilates capably daughter +20 012602 37 Wotan impulsive decliner FAS +21 012603 37 snatching starlet lawgiver +22 012604 37 concludes terminators stated +23 012605 37 laterally untying readable +24 012606 37 yelped announces attrition +25 012701 37 grazing featherweight cascade FAS +26 012702 37 Baird pessimist motors FAS +27 012703 37 celery daughter interrogate +28 012704 37 misunderstander decliner pests W +29 013601 37 handgun lawgiver stairway +30 013602 37 foldout stated dopers FAS +31 013603 37 mystic readable testicle W +32 013604 37 succumbed attrition Parsifal W +33 013605 37 Nabisco cascade leavings +34 013606 37 fingerings motors postulation W +35 013607 37 aging interrogate squeaking +36 013608 37 afield pests contrasted +37 013609 37 ammonium stairway leftover +38 013610 37 boat dopers whiteners +39 013801 37 intelligibility testicle erases W +40 013802 37 Augustine Parsifal Punjab W +41 013803 37 teethe leavings Merritt +42 013804 37 dreaded postulation Quixotism +43 013901 37 scholastics squeaking sweetish FAS +44 016001 37 audiology contrasted dogging FAS +45 016201 37 wallet leftover scornfully FAS +46 016202 37 parters whiteners bellow +47 016301 37 eschew erases bills +48 016302 37 quitter Punjab cupboard FAS +49 016303 37 neat Merritt sureties FAS +50 016304 37 Steinberg Quixotism puddings +51 018001 37 jarring sweetish tapestry +52 018002 37 tinily dogging fetters +53 018003 37 balled scornfully bivalves +54 018004 37 persist bellow incurring +55 018005 37 attainments bills Adolph +56 018007 37 fanatic cupboard pithed +57 018008 37 measures sureties emergency +58 018009 37 rightfulness puddings Miles +59 018010 37 capably tapestry trimmings +60 018012 37 impulsive fetters tragedies W +61 018013 37 starlet bivalves skulking W +62 018014 37 terminators incurring flint +63 018015 37 untying Adolph flopping W +64 018016 37 announces pithed relaxing FAS +65 018017 37 featherweight emergency offload FAS +66 018018 37 pessimist Miles suites W +67 018019 37 daughter trimmings lists FAS +68 018020 37 decliner tragedies animized FAS +69 018021 37 lawgiver skulking multilayer W +70 018022 
37 stated flint standardizes FAS +71 018023 37 readable flopping Judas +72 018024 37 attrition relaxing vacuuming W +73 018025 37 cascade offload dentally W +74 018026 37 motors suites humanness W +75 018027 37 interrogate lists inch W +76 018028 37 pests animized Weissmuller W +77 018029 37 stairway multilayer irresponsibly W +78 018030 37 dopers standardizes luckily FAS +79 018032 37 testicle Judas culled W +80 018033 37 Parsifal vacuuming medical FAS +81 018034 37 leavings dentally bloodbath FAS +82 018035 37 postulation humanness subschema W +83 018036 37 squeaking inch animals W +84 018037 37 contrasted Weissmuller Micronesia +85 018038 37 leftover irresponsibly repetitions +86 018039 37 whiteners luckily Antares +87 018040 37 erases culled ventilate W +88 018041 37 Punjab medical pityingly +89 018042 37 Merritt bloodbath interdependent +90 018043 37 Quixotism subschema Graves FAS +91 018044 37 sweetish animals neonatal +92 018045 37 dogging Micronesia scribbled FAS +93 018046 37 scornfully repetitions chafe W +94 018048 37 bellow Antares honoring +95 018049 37 bills ventilate realtor +96 018050 37 cupboard pityingly elite +97 018051 37 sureties interdependent funereal +98 018052 37 puddings Graves abrogating +99 018053 50 tapestry neonatal sorters +100 018054 37 fetters scribbled Conley +101 018055 37 bivalves chafe lectured +102 018056 37 incurring honoring Abraham +103 018057 37 Adolph realtor Hawaii W +104 018058 37 pithed elite cage +105 018059 36 emergency funereal hushes +106 018060 37 Miles abrogating Simla +107 018061 37 trimmings sorters reporters +108 018101 37 tragedies Conley Dutchman FAS +109 018102 37 skulking lectured descendants FAS +110 018103 37 flint Abraham groupings FAS +111 018104 37 flopping Hawaii dissociate +112 018201 37 relaxing cage coexist W +113 018202 37 offload hushes Beebe +114 018402 37 suites Simla Taoism +115 018403 37 lists reporters Connally +116 018404 37 animized Dutchman fetched FAS +117 018405 37 multilayer descendants checkpoints FAS +118 018406 37 standardizes groupings rusting +119 018409 37 Judas dissociate galling +120 018601 37 vacuuming coexist obliterates +121 018602 37 dentally Beebe traitor +122 018603 37 humanness Taoism resumes FAS +123 018801 37 inch Connally analyzable FAS +124 018802 37 Weissmuller fetched terminator FAS +125 018803 37 irresponsibly checkpoints gritty FAS +126 018804 37 luckily rusting firearm W +127 018805 37 culled galling minima +128 018806 37 medical obliterates Selfridge +129 018807 37 bloodbath traitor disable +130 018808 37 subschema resumes witchcraft W +131 018809 37 animals analyzable betroth W +132 018810 37 Micronesia terminator Manhattanize +133 018811 37 repetitions gritty imprint +134 018812 37 Antares firearm peeked +135 019101 37 ventilate minima swelling +136 019102 37 pityingly Selfridge interrelationships W +137 019103 37 interdependent disable riser +138 019201 37 Graves witchcraft Gandhian W +139 030501 37 neonatal betroth peacock A +140 030502 50 scribbled Manhattanize bee A +141 030503 37 chafe imprint kanji +142 030504 37 honoring peeked dental +143 031901 37 realtor swelling scarf FAS +144 036001 37 elite interrelationships chasm A +145 036002 37 funereal riser insolence A +146 036004 37 abrogating Gandhian syndicate +147 036005 37 sorters peacock alike +148 038001 37 Conley bee imperial A +149 038002 37 lectured kanji convulsion A +150 038003 37 Abraham dental railway A +151 038004 37 Hawaii scarf validate A +152 038005 37 cage chasm normalizes A +153 038006 37 hushes insolence 
comprehensive +154 038007 37 Simla syndicate chewing +155 038008 37 reporters alike denizen +156 038009 37 Dutchman imperial schemer +157 038010 37 descendants convulsion chronicle +158 038011 37 groupings railway Kline +159 038012 37 dissociate validate Anatole +160 038013 37 coexist normalizes partridges +161 038014 37 Beebe comprehensive brunch +162 038015 37 Taoism chewing recruited +163 038016 37 Connally denizen dimensions W +164 038017 37 fetched schemer Chicana W +165 038018 37 checkpoints chronicle announced +166 038101 37 rusting Kline praised FAS +167 038102 37 galling Anatole employing +168 038103 37 obliterates partridges linear +169 038104 37 traitor brunch quagmire +170 038201 37 resumes recruited western A +171 038202 37 analyzable dimensions relishing +172 038203 37 terminator Chicana serving A +173 038204 37 gritty announced scheduling +174 038205 37 firearm praised lore +175 038206 37 minima employing eventful +176 038208 37 Selfridge linear arteriole A +177 042801 37 disable quagmire disentangle +178 042802 37 witchcraft western cured A +179 046101 37 betroth relishing Fenton W +180 048001 37 Manhattanize serving avoidable A +181 048002 37 imprint scheduling drains A +182 048003 37 peeked lore detectably FAS +183 048004 37 swelling eventful husky +184 048005 37 interrelationships arteriole impelling +185 048006 37 riser disentangle undoes +186 048007 37 Gandhian cured evened +187 048008 37 peacock Fenton squeezes +188 048101 37 bee avoidable destroyer FAS +189 048102 37 kanji drains rudeness +190 048201 37 dental detectably beaner FAS +191 048202 37 scarf husky boorish +192 048203 37 chasm impelling Everhart +193 048204 37 insolence undoes encompass A +194 048205 37 syndicate evened mushrooms +195 048301 37 alike squeezes Alison A +196 048302 37 imperial destroyer externally FAS +197 048303 37 convulsion rudeness pellagra +198 048304 37 railway beaner cult +199 048305 37 validate boorish creek A +200 048401 37 normalizes Everhart Huffman +201 048402 37 comprehensive encompass Majorca FAS +202 048403 37 chewing mushrooms governing A +203 048404 37 denizen Alison gadfly FAS +204 048405 37 schemer externally reassigned FAS +205 048406 37 chronicle pellagra intentness W +206 048407 37 Kline cult craziness +207 048408 37 Anatole creek psychic +208 048409 37 partridges Huffman squabbled +209 048410 37 brunch Majorca burlesque +210 048411 37 recruited governing capped +211 048412 37 dimensions gadfly extracted A +212 048413 37 Chicana reassigned DiMaggio +213 048601 37 announced intentness exclamation FAS +214 048602 37 praised craziness subdirectory +215 048603 37 employing psychic fangs +216 048604 37 linear squabbled buyer A +217 048801 37 quagmire burlesque pithing A +218 050901 37 western capped transistorizing A +219 051201 37 relishing extracted nonbiodegradable +220 056002 37 serving DiMaggio dislocate +221 056003 37 scheduling exclamation monochromatic FAS +222 056004 37 lore subdirectory batting +223 056102 37 eventful fangs postcondition A +224 056203 37 arteriole buyer catalog FAS +225 056204 37 disentangle pithing Remus +226 058003 37 cured transistorizing devices A +227 058004 37 Fenton nonbiodegradable bike A +228 058005 37 avoidable dislocate qualify +229 058006 37 drains monochromatic detained +230 058007 37 detectably batting commended +231 058101 37 husky postcondition civilize +232 058102 37 impelling catalog Elmhurst +233 058103 37 undoes Remus anesthetizing +234 058105 37 evened devices deaf +235 058111 37 squeezes bike Brigham +236 058112 37 destroyer 
qualify title +237 058113 37 rudeness detained coarse +238 058114 37 beaner commended combinations +239 058115 37 boorish civilize grayness +240 058116 37 Everhart Elmhurst innumerable FAS +241 058117 37 encompass anesthetizing Caroline A +242 058118 37 mushrooms deaf fatty FAS +243 058119 37 Alison Brigham eastbound +244 058120 37 externally title inexperienced +245 058121 37 pellagra coarse hoarder A +246 058122 37 cult combinations scotch W +247 058123 37 creek grayness passport A +248 058124 37 Huffman innumerable strategic FAS +249 058125 37 Majorca Caroline gated +250 058126 37 governing fatty flog +251 058127 37 gadfly eastbound Pipestone +252 058128 37 reassigned inexperienced Dar +253 058201 37 intentness hoarder Corcoran +254 058202 37 craziness scotch flyers A +255 058303 37 psychic passport competitions W +256 058304 37 squabbled strategic suppliers FAS +257 058602 37 burlesque gated skips +258 058603 37 capped flog institutes +259 058604 37 extracted Pipestone troop A +260 058605 37 DiMaggio Dar connective W +261 058606 37 exclamation Corcoran denies +262 058607 37 subdirectory flyers polka +263 060401 36 fangs competitions observations FAS +264 061701 36 buyer suppliers askers +265 066201 36 pithing skips homeless FAS +266 066501 36 transistorizing institutes Anna +267 068001 36 nonbiodegradable troop subdirectories W +268 068002 36 dislocate connective decaying FAS +269 068005 36 monochromatic denies outwitting W +270 068006 36 batting polka Harpy W +271 068007 36 postcondition observations crazed +272 068008 36 catalog askers suffocate +273 068009 36 Remus homeless provers FAS +274 068010 36 devices Anna technically +275 068011 36 bike subdirectories Franklinizations +276 068202 36 qualify decaying considered +277 068302 36 detained outwitting tinnily +278 068303 36 commended Harpy uninterruptedly +279 068401 36 civilize crazed whistled A +280 068501 36 Elmhurst suffocate automate +281 068502 36 anesthetizing provers gutting W +282 068503 36 deaf technically surreptitious +283 068602 36 Brigham Franklinizations Choctaw +284 068603 36 title considered cooks +285 068701 36 coarse tinnily millivolt FAS +286 068702 36 combinations uninterruptedly counterpoise +287 068703 36 grayness whistled Gothicism +288 076001 36 innumerable automate feminine +289 076002 36 Caroline gutting metaphysically W +290 076101 36 fatty surreptitious sanding A +291 076102 36 eastbound Choctaw contributorily +292 076103 36 inexperienced cooks receivers FAS +293 076302 36 hoarder millivolt adjourn +294 076303 36 scotch counterpoise straggled A +295 076304 36 passport Gothicism druggists +296 076305 36 strategic feminine thanking FAS +297 076306 36 gated metaphysically ostrich +298 076307 36 flog sanding hopelessness FAS +299 076402 36 Pipestone contributorily Eurydice +300 076501 36 Dar receivers excitation W +301 076502 36 Corcoran adjourn presumes FAS +302 076701 36 flyers straggled imaginable FAS +303 078001 36 competitions druggists concoct W +304 078002 36 suppliers thanking peering W +305 078003 36 skips ostrich Phelps FAS +306 078004 36 institutes hopelessness ferociousness FAS +307 078005 36 troop Eurydice sentences +308 078006 36 connective excitation unlocks +309 078007 36 denies presumes engrossing W +310 078008 36 polka imaginable Ruth +311 078101 36 observations concoct tying +312 078103 36 askers peering exclaimers +313 078104 36 homeless Phelps synergy +314 078105 36 Anna ferociousness Huey W +315 082101 36 subdirectories sentences merging +316 083401 36 decaying unlocks judges A +317 
084001 36 outwitting engrossing Shylock W +318 084002 36 Harpy Ruth Miltonism +319 086001 36 crazed tying hen W +320 086102 36 suffocate exclaimers honeybee FAS +321 086201 36 provers synergy towers +322 088001 36 technically Huey dilutes W +323 088002 36 Franklinizations merging numerals FAS +324 088003 36 considered judges democracy FAS +325 088004 36 tinnily Shylock Ibero- +326 088101 36 uninterruptedly Miltonism invalids +327 088102 36 whistled hen behavior +328 088103 36 automate honeybee accruing +329 088104 36 gutting towers relics A +330 088105 36 surreptitious dilutes rackets +331 088106 36 Choctaw numerals Fischbein W +332 088201 36 cooks democracy phony W +333 088203 36 millivolt Ibero- cross FAS +334 088204 36 counterpoise invalids cleanup +335 088302 37 Gothicism behavior conspirator +336 088303 37 feminine accruing label FAS +337 088305 37 metaphysically relics university +338 088402 37 sanding rackets cleansed FAS +339 088501 36 contributorily Fischbein ballgown +340 088502 36 receivers phony starlet +341 088503 36 adjourn cross aqueous +342 098001 58 straggled cleanup portrayal A +343 098002 58 druggists conspirator despising W +344 098003 58 thanking label distort W +345 098004 58 ostrich university palmed +346 098005 58 hopelessness cleansed faced +347 098006 58 Eurydice ballgown silverware +348 141903 29 excitation starlet assessor +349 098008 58 presumes aqueous spiders +350 098009 58 imaginable portrayal artificially +351 098010 58 concoct despising reminiscence +352 098011 58 peering distort Mexican +353 098012 58 Phelps palmed obnoxious +354 098013 58 ferociousness faced fragile +355 098014 58 sentences silverware apprehensible +356 098015 58 unlocks assessor births +357 098016 58 engrossing spiders garages +358 098017 58 Ruth artificially panty +359 098018 58 tying reminiscence anteater +360 098019 58 exclaimers Mexican displacement A +361 098020 58 synergy obnoxious drovers A +362 098021 58 Huey fragile patenting A +363 098022 58 merging apprehensible far A +364 098023 58 judges births shrieks +365 098024 58 Shylock garages aligning W +366 098025 37 Miltonism panty pragmatism +367 106001 36 hen anteater fevers W +368 108001 36 honeybee displacement reexamines A +369 108002 36 towers drovers occupancies +370 108003 36 dilutes patenting sweats FAS +371 108004 36 numerals far modulators +372 108005 36 democracy shrieks demand W +373 108007 36 Ibero- aligning Madeira +374 108008 36 invalids pragmatism Viennese W +375 108009 36 behavior fevers chillier W +376 108010 36 accruing reexamines wildcats FAS +377 108011 36 relics occupancies gentle +378 108012 36 rackets sweats Angles W +379 108101 36 Fischbein modulators accuracies +380 108102 36 phony demand toggle +381 108103 36 cross Madeira Mendelssohn W +382 108111 50 cleanup Viennese behaviorally +383 108105 36 conspirator chillier Rochford +384 108106 36 label wildcats mirror W +385 108107 36 university gentle Modula +386 108108 50 cleansed Angles clobbering +387 108109 36 ballgown accuracies chronography +388 108110 36 starlet toggle Eskimoizeds +389 108201 36 aqueous Mendelssohn British W +390 108202 36 portrayal behaviorally pitfalls +391 108203 36 despising Rochford verify W +392 108204 36 distort mirror scatter FAS +393 108205 36 palmed Modula Aztecan +394 108301 36 faced clobbering acuity W +395 108302 36 silverware chronography sinking W +396 112101 36 assessor Eskimoizeds beasts FAS +397 112102 36 spiders British Witt W +398 113701 36 artificially pitfalls physicists FAS +399 116001 36 reminiscence verify 
folksong A +400 116201 36 Mexican scatter strokes FAS +401 116301 36 obnoxious Aztecan crowder +402 116302 36 fragile acuity merry +403 116601 36 apprehensible sinking cadenced +404 116602 36 births beasts alimony A +405 116603 36 garages Witt principled A +406 116701 36 panty physicists golfing +407 116702 36 anteater folksong undiscovered +408 118001 36 displacement strokes irritates +409 118002 36 drovers crowder patriots A +410 118003 36 patenting merry rooms FAS +411 118004 36 far cadenced towering W +412 118005 36 shrieks alimony displease +413 118006 36 aligning principled photosensitive +414 118007 36 pragmatism golfing inking +415 118008 36 fevers undiscovered gainers +416 118101 36 reexamines irritates leaning A +417 118102 36 occupancies patriots hydrant A +418 118103 36 sweats rooms preserve +419 118202 36 modulators towering blinded A +420 118203 36 demand displease interactions A +421 118204 36 Madeira photosensitive Barry +422 118302 36 Viennese inking whiteness A +423 118304 36 chillier gainers pastimes W +424 118305 36 wildcats leaning Edenization +425 118306 36 gentle hydrant Muscat +426 118307 36 Angles preserve assassinated +427 123101 36 accuracies blinded labeled +428 123102 36 toggle interactions glacial A +429 123301 36 Mendelssohn Barry implied W +430 126001 36 behaviorally whiteness bibliographies W +431 126002 36 Rochford pastimes Buchanan +432 126003 36 mirror Edenization forgivably FAS +433 126101 36 Modula Muscat innuendo A +434 126301 36 clobbering assassinated den FAS +435 126302 36 chronography labeled submarines W +436 126402 36 Eskimoizeds glacial mouthful A +437 126601 36 British implied expiring +438 126602 36 pitfalls bibliographies unfulfilled FAS +439 126702 36 verify Buchanan precession +440 128001 36 scatter forgivably nullified +441 128002 36 Aztecan innuendo affects +442 128003 36 acuity den Cynthia +443 128004 36 sinking submarines Chablis A +444 128005 36 beasts mouthful betterments FAS +445 128007 36 Witt expiring advertising +446 128008 36 physicists unfulfilled rubies A +447 128009 36 folksong precession southwest FAS +448 128010 36 strokes nullified superstitious A +449 128011 36 crowder affects tabernacle W +450 128012 36 merry Cynthia silk A +451 128013 36 cadenced Chablis handsomest A +452 128014 36 alimony betterments Persian A +453 128015 36 principled advertising analog W +454 128016 36 golfing rubies complex W +455 128017 36 undiscovered southwest Taoist +456 128018 36 irritates superstitious suspend +457 128019 36 patriots tabernacle relegated +458 128020 36 rooms silk awesome W +459 128021 36 towering handsomest Bruxelles +460 128022 36 displease Persian imprecisely A +461 128023 36 photosensitive analog televise +462 128101 36 inking complex braking +463 128102 36 gainers Taoist true FAS +464 128103 36 leaning suspend disappointing FAS +465 128104 36 hydrant relegated navally W +466 128106 36 preserve awesome circus +467 128107 36 blinded Bruxelles beetles +468 128108 36 interactions imprecisely trumps +469 128202 36 Barry televise fourscore W +470 128203 36 whiteness braking Blackfoots +471 128301 36 pastimes true Grady +472 128302 36 Edenization disappointing quiets FAS +473 128303 36 Muscat navally floundered FAS +474 128304 36 assassinated circus profundity W +475 128305 36 labeled beetles Garrisonian W +476 128307 36 glacial trumps Strauss +477 128401 36 implied fourscore cemented FAS +478 128502 36 bibliographies Blackfoots contrition A +479 128503 36 Buchanan Grady mutations +480 128504 36 forgivably quiets exhibits W +481 
128505 36 innuendo floundered tits +482 128601 36 den profundity mate A +483 128603 36 submarines Garrisonian arches +484 128604 36 mouthful Strauss Moll +485 128702 36 expiring cemented ropers +486 128703 36 unfulfilled contrition bombast +487 128704 36 precession mutations difficultly A +488 138001 36 nullified exhibits adsorption +489 138002 36 affects tits definiteness FAS +490 138003 36 Cynthia mate cultivation A +491 138004 36 Chablis arches heals A +492 138005 36 betterments Moll Heusen W +493 138006 36 advertising ropers target FAS +494 138007 36 rubies bombast cited A +495 138008 36 southwest difficultly congresswoman W +496 138009 36 superstitious adsorption Katherine +497 138102 36 tabernacle definiteness titter A +498 138103 36 silk cultivation aspire A +499 138104 36 handsomest heals Mardis +500 138105 36 Persian Heusen Nadia W +501 138201 36 analog target estimating FAS +502 138302 36 complex cited stuck A +503 138303 36 Taoist congresswoman fifteenth A +504 138304 36 suspend Katherine Colombo +505 138401 29 relegated titter survey A +506 140102 29 awesome aspire staffing +507 140103 29 Bruxelles Mardis obtain +508 140104 29 imprecisely Nadia loaded +509 140105 29 televise estimating slaughtered +510 140201 29 braking stuck lights A +511 140701 29 true fifteenth circumference +512 141501 29 disappointing Colombo dull A +513 141502 29 navally survey weekly A +514 141901 29 circus staffing wetness +515 141902 29 beetles obtain visualized +516 142101 29 trumps loaded Tannenbaum +517 142102 29 fourscore slaughtered moribund +518 142103 29 Blackfoots lights demultiplex +519 142701 29 Grady circumference lockings +520 143001 29 quiets dull thugs FAS +521 143501 29 floundered weekly unnerves +522 143502 29 profundity wetness abut +523 148001 29 Garrisonian visualized Chippewa A +524 148002 29 Strauss Tannenbaum stratifications A +525 148003 29 cemented moribund signaled +526 148004 29 contrition demultiplex Italianizes A +527 148005 29 mutations lockings algorithmic A +528 148006 29 exhibits thugs paranoid FAS +529 148007 29 tits unnerves camping A +530 148009 29 mate abut signifying A +531 148010 29 arches Chippewa Patrice W +532 148011 29 Moll stratifications search A +533 148012 29 ropers signaled Angeles A +534 148013 29 bombast Italianizes semblance +535 148023 36 difficultly algorithmic taxed +536 148015 29 adsorption paranoid Beatrice +537 148016 29 definiteness camping retrace +538 148017 29 cultivation signifying lockout +539 148018 29 heals Patrice grammatic +540 148019 29 Heusen search helmsman +541 148020 29 target Angeles uniform W +542 148021 29 cited semblance hamming +543 148022 29 congresswoman taxed disobedience +544 148101 29 Katherine Beatrice captivated A +545 148102 29 titter retrace transferals A +546 148201 29 aspire lockout cartographer A +547 148401 29 Mardis grammatic aims FAS +548 148402 29 Nadia helmsman Pakistani +549 148501 29 estimating uniform burglarized FAS +550 148502 29 stuck hamming saucepans A +551 148503 29 fifteenth disobedience lacerating A +552 148504 29 Colombo captivated corny +553 148601 29 survey transferals megabytes FAS +554 148602 29 staffing cartographer chancellor +555 150701 29 obtain aims bulk A +556 152101 29 loaded Pakistani commits A +557 152102 29 slaughtered burglarized meson W +558 155202 36 lights saucepans deputies +559 155203 29 circumference lacerating northeaster A +560 155204 29 dull corny dipole +561 155205 29 weekly megabytes machining 0 +562 156001 29 wetness chancellor therefore +563 156002 29 visualized bulk 
Telefunken +564 156102 29 Tannenbaum commits salvaging +565 156301 29 moribund meson Corinthianizes A +566 156302 29 demultiplex deputies restlessly A +567 156303 29 lockings northeaster bromides +568 156304 29 thugs dipole generalized A +569 156305 29 unnerves machining mishaps +570 156306 29 abut therefore quelling +571 156501 29 Chippewa Telefunken spiritual A +572 158001 29 stratifications salvaging beguiles FAS +573 158002 29 signaled Corinthianizes Trobriand FAS +574 158101 29 Italianizes restlessly fleeing A +575 158102 29 algorithmic bromides Armour A +576 158103 29 paranoid generalized chin A +577 158201 29 camping mishaps provers A +578 158202 29 signifying quelling aeronautic A +579 158203 29 Patrice spiritual voltage W +580 158204 29 search beguiles sash +581 158301 29 Angeles Trobriand anaerobic A +582 158302 29 semblance fleeing simultaneous A +583 158303 29 taxed Armour accumulating A +584 158304 29 Beatrice chin Medusan A +585 158305 29 retrace provers shouted A +586 158306 29 lockout aeronautic freakish +587 158501 29 grammatic voltage index FAS +588 160301 29 helmsman sash commercially +589 166101 50 uniform anaerobic mistiness A +590 166102 50 hamming simultaneous endpoint +591 168001 29 disobedience accumulating straight A +592 168002 29 captivated Medusan flurried +593 168003 29 transferals shouted denotative A +594 168101 29 cartographer freakish coming FAS +595 168102 29 aims index commencements FAS +596 168103 29 Pakistani commercially gentleman +597 168104 29 burglarized mistiness gifted +598 168202 29 saucepans endpoint Shanghais +599 168301 29 lacerating straight sportswriting A +600 168502 29 corny flurried sloping A +601 168503 29 megabytes denotative navies +602 168601 29 chancellor coming leaflet A +603 173001 40 bulk commencements shooter +604 173701 40 commits gentleman Joplin FAS +605 173702 40 meson gifted babies +606 176001 40 deputies Shanghais subdivision FAS +607 176101 40 northeaster sportswriting burstiness W +608 176201 40 dipole sloping belted FAS +609 176401 40 machining navies assails FAS +610 176501 40 therefore leaflet admiring W +611 176601 40 Telefunken shooter swaying 0 +612 176602 40 salvaging Joplin Goldstine FAS +613 176603 40 Corinthianizes babies fitting +614 178001 40 restlessly subdivision Norwalk W +615 178002 40 bromides burstiness weakening W +616 178003 40 generalized belted analogy FAS +617 178004 40 mishaps assails deludes +618 178005 40 quelling admiring cokes +619 178006 40 spiritual swaying Clayton +620 178007 40 beguiles Goldstine exhausts +621 178008 40 Trobriand fitting causality +622 178101 40 fleeing Norwalk sating FAS +623 178102 40 Armour weakening icon +624 178103 40 chin analogy throttles +625 178201 40 provers deludes communicants FAS +626 178202 40 aeronautic cokes dehydrate FAS +627 178301 40 voltage Clayton priceless FAS +628 178302 40 sash exhausts publicly +629 178401 40 anaerobic causality incidentals FAS +630 178402 40 simultaneous sating commonplace +631 178403 40 accumulating icon mumbles +632 178404 40 Medusan throttles furthermore W +633 178501 40 shouted communicants cautioned W +634 186002 37 freakish dehydrate parametrized A +635 186102 37 index priceless registration A +636 186201 40 commercially publicly sadly FAS +637 186202 40 mistiness incidentals positioning +638 186203 40 endpoint commonplace babysitting +639 186302 37 straight mumbles eternal A +640 188007 37 flurried furthermore hoarder +641 188008 37 denotative cautioned congregates +642 188009 37 coming parametrized rains +643 188010 37 
commencements registration workers W +644 188011 37 gentleman sadly sags A +645 188012 37 gifted positioning unplug W +646 188013 37 Shanghais babysitting garage A +647 188014 37 sportswriting eternal boulder A +648 188015 37 sloping hoarder hollowly A +649 188016 37 navies congregates specifics +650 188017 37 leaflet rains Teresa +651 188102 37 shooter workers Winsett +652 188103 37 Joplin sags convenient A +653 188202 37 babies unplug buckboards FAS +654 188301 40 subdivision garage amenities +655 188302 40 burstiness boulder resplendent FAS +656 188303 40 belted hollowly priding FAS +657 188401 37 assails specifics configurations +658 188402 37 admiring Teresa untidiness A +659 188503 37 swaying Winsett Brice W +660 188504 37 Goldstine convenient sews FAS +661 188505 37 fitting buckboards participated +662 190701 37 Norwalk amenities Simon FAS +663 190703 50 weakening resplendent certificates +664 191701 37 analogy priding Fitzpatrick +665 191702 37 deludes configurations Evanston A +666 191703 37 cokes untidiness misted +667 196001 37 Clayton Brice textures A +668 196002 37 exhausts sews save +669 196003 37 causality participated count +670 196101 37 sating Simon rightful A +671 196103 37 icon certificates chaperone +672 196104 37 throttles Fitzpatrick Lizzy A +673 196201 37 communicants Evanston clenched A +674 196202 37 dehydrate misted effortlessly +675 196203 37 priceless textures accessed +676 198001 37 publicly save beaters A +677 198003 37 incidentals count Hornblower FAS +678 198004 37 commonplace rightful vests A +679 198005 37 mumbles chaperone indulgences FAS +680 198006 37 furthermore Lizzy infallibly A +681 198007 37 cautioned clenched unwilling FAS +682 198008 37 parametrized effortlessly excrete FAS +683 198009 37 registration accessed spools A +684 198010 37 sadly beaters crunches FAS +685 198011 37 positioning Hornblower overestimating FAS +686 198012 37 babysitting vests ineffective +687 198013 37 eternal indulgences humiliation A +688 198014 37 hoarder infallibly sophomore +689 198015 37 congregates unwilling star +690 198017 37 rains excrete rifles +691 198018 37 workers spools dialysis +692 198019 37 sags crunches arriving +693 198020 37 unplug overestimating indulge +694 198021 37 garage ineffective clockers +695 198022 37 boulder humiliation languages +696 198023 50 hollowly sophomore Antarctica A +697 198024 37 specifics star percentage +698 198101 37 Teresa rifles ceiling A +699 198103 37 Winsett dialysis specification +700 198105 37 convenient arriving regimented A +701 198106 37 buckboards indulge ciphers +702 198201 37 amenities clockers pictures A +703 198204 37 resplendent languages serpents A +704 198301 53 priding Antarctica allot A +705 198302 53 configurations percentage realized A +706 198303 53 untidiness ceiling mayoral A +707 198304 53 Brice specification opaquely A +708 198401 37 sews regimented hostess FAS +709 198402 37 participated ciphers fiftieth +710 198403 37 Simon pictures incorrectly +711 202101 37 certificates serpents decomposition FAS +712 202301 37 Fitzpatrick allot stranglings +713 202302 37 Evanston realized mixture FAS +714 202303 37 misted mayoral electroencephalography FAS +715 202304 37 textures opaquely similarities FAS +716 202305 37 save hostess charges W +717 202601 37 count fiftieth freest FAS +718 202602 37 rightful incorrectly Greenberg FAS +719 202605 37 chaperone decomposition tinting +720 202606 37 Lizzy stranglings expelled W +721 202607 37 clenched mixture warm +722 202901 37 effortlessly electroencephalography 
smoothed +723 202902 37 accessed similarities deductions FAS +724 202903 37 beaters charges Romano W +725 202904 37 Hornblower freest bitterroot +726 202907 37 vests Greenberg corset +727 202908 37 indulgences tinting securing +728 203101 37 infallibly expelled environing FAS +729 203103 37 unwilling warm cute +730 203104 37 excrete smoothed Crays +731 203105 37 spools deductions heiress FAS +732 203401 37 crunches Romano inform FAS +733 203402 37 overestimating bitterroot avenge +734 203404 37 ineffective corset universals +735 203901 37 humiliation securing Kinsey W +736 203902 37 sophomore environing ravines FAS +737 203903 37 star cute bestseller +738 203906 37 rifles Crays equilibrium +739 203907 37 dialysis heiress extents 0 +740 203908 37 arriving inform relatively +741 203909 37 indulge avenge pressure FAS +742 206101 37 clockers universals critiques FAS +743 206201 37 languages Kinsey befouled +744 206202 37 Antarctica ravines rightfully FAS +745 206203 37 percentage bestseller mechanizing FAS +746 206206 37 ceiling equilibrium Latinizes +747 206207 37 specification extents timesharing +748 206208 37 regimented relatively Aden +749 208001 37 ciphers pressure embassies +750 208002 37 pictures critiques males FAS +751 208003 37 serpents befouled shapelessly FAS +752 208004 37 allot rightfully genres FAS +753 208008 37 realized mechanizing mastering +754 208009 37 mayoral Latinizes Newtonian +755 208010 37 opaquely timesharing finishers FAS +756 208011 37 hostess Aden abates +757 208101 37 fiftieth embassies teem +758 208102 37 incorrectly males kiting FAS +759 208103 37 decomposition shapelessly stodgy FAS +760 208104 37 stranglings genres scalps FAS +761 208105 37 mixture mastering feed FAS +762 208110 37 electroencephalography Newtonian guitars +763 208111 37 similarities finishers airships +764 208112 37 charges abates store +765 208113 37 freest teem denounces +766 208201 37 Greenberg kiting Pyle FAS +767 208203 37 tinting stodgy Saxony +768 208301 37 expelled scalps serializations FAS +769 208302 37 warm feed Peruvian FAS +770 208305 37 smoothed guitars taxonomically FAS +771 208401 37 deductions airships kingdom A +772 208402 37 Romano store stint A +773 208403 37 bitterroot denounces Sault A +774 208404 37 corset Pyle faithful +775 208501 37 securing Saxony Ganymede FAS +776 208502 37 environing serializations tidiness FAS +777 208503 37 cute Peruvian gainful FAS +778 208504 37 Crays taxonomically contrary FAS +779 208505 37 heiress kingdom Tipperary FAS +780 210101 37 inform stint tropics W +781 210102 37 avenge Sault theorizers +782 210103 37 universals faithful renew 0 +783 210104 37 Kinsey Ganymede already +784 210105 37 ravines tidiness terminal +785 210106 37 bestseller gainful Hegelian +786 210107 37 equilibrium contrary hypothesizer +787 210401 37 extents Tipperary warningly FAS +788 213201 37 relatively tropics journalizing FAS +789 213203 37 pressure theorizers nested +790 213204 37 critiques renew Lars +791 213205 37 befouled already saplings +792 213206 37 rightfully terminal foothill +793 213207 37 mechanizing Hegelian labeled +794 216101 37 Latinizes hypothesizer imperiously FAS +795 216103 37 timesharing warningly reporters FAS +796 218001 37 Aden journalizing furnishings FAS +797 218002 37 embassies nested precipitable FAS +798 218003 37 males Lars discounts FAS +799 218004 37 shapelessly saplings excises FAS +800 143503 50 genres foothill Stalin +801 218006 37 mastering labeled despot FAS +802 218007 37 Newtonian imperiously ripeness FAS +803 218008 37 
finishers reporters Arabia +804 218009 37 abates furnishings unruly +805 218010 37 teem precipitable mournfulness +806 218011 37 kiting discounts boom FAS +807 218020 37 stodgy excises slaughter A +808 218021 50 scalps Stalin Sabine +809 218022 37 feed despot handy FAS +810 218023 37 guitars ripeness rural +811 218024 37 airships Arabia organizer +812 218101 37 store unruly shipyard FAS +813 218102 37 denounces mournfulness civics FAS +814 218103 37 Pyle boom inaccuracy FAS +815 218201 37 Saxony slaughter rules FAS +816 218202 37 serializations Sabine juveniles FAS +817 218203 37 Peruvian handy comprised W +818 218204 37 taxonomically rural investigations +819 218205 37 kingdom organizer stabilizes A +820 218301 37 stint shipyard seminaries FAS +821 218302 37 Sault civics Hunter A +822 218401 37 faithful inaccuracy sporty FAS +823 218402 37 Ganymede rules test FAS +824 218403 37 tidiness juveniles weasels +825 218404 37 gainful comprised CERN +826 218407 37 contrary investigations tempering +827 218408 37 Tipperary stabilizes afore FAS +828 218409 37 tropics seminaries Galatean +829 218410 37 theorizers Hunter techniques W +830 226001 37 renew sporty error +831 226002 37 already test veranda +832 226003 37 terminal weasels severely +833 226004 37 Hegelian CERN Cassites FAS +834 226005 37 hypothesizer tempering forthcoming +835 226006 37 warningly afore guides +836 226007 37 journalizing Galatean vanish FAS +837 226008 37 nested techniques lied A +838 226203 37 Lars error sawtooth FAS +839 226204 37 saplings veranda fated FAS +840 226205 37 foothill severely gradually +841 226206 37 labeled Cassites widens +842 226207 37 imperiously forthcoming preclude +843 226208 37 reporters guides Jobrel +844 226209 37 furnishings vanish hooker +845 226210 37 precipitable lied rainstorm +846 226211 37 discounts sawtooth disconnects +847 228001 37 excises fated cruelty +848 228004 37 Stalin gradually exponentials A +849 228005 37 despot widens affective A +850 228006 37 ripeness preclude arteries +851 228007 37 Arabia Jobrel Crosby FAS +852 228008 37 unruly hooker acquaint +853 228009 37 mournfulness rainstorm evenhandedly +854 228101 37 boom disconnects percentage +855 228108 37 slaughter cruelty disobedience +856 228109 37 Sabine exponentials humility +857 228110 37 handy affective gleaning A +858 228111 37 rural arteries petted A +859 228112 37 organizer Crosby bloater A +860 228113 37 shipyard acquaint minion A +861 228114 37 civics evenhandedly marginal A +862 228115 37 inaccuracy percentage apiary A +863 228116 37 rules disobedience measures +864 228117 37 juveniles humility precaution +865 228118 37 comprised gleaning repelled +866 228119 37 investigations petted primary FAS +867 228120 37 stabilizes bloater coverings +868 228121 37 seminaries minion Artemia A +869 228122 37 Hunter marginal navigate +870 228201 37 sporty apiary spatial +871 228206 37 test measures Gurkha +872 228207 37 weasels precaution meanwhile A +873 228208 37 CERN repelled Melinda A +874 228209 37 tempering primary Butterfield +875 228210 37 afore coverings Aldrich A +876 228211 37 Galatean Artemia previewing A +877 228212 37 techniques navigate glut A +878 228213 37 error spatial unaffected +879 228214 37 veranda Gurkha inmate +880 228301 37 severely meanwhile mineral +881 228305 37 Cassites Melinda impending A +882 228306 37 forthcoming Butterfield meditation A +883 228307 37 guides Aldrich ideas +884 228308 37 vanish previewing miniaturizes W +885 228309 37 lied glut lewdly +886 228310 37 sawtooth unaffected title +887 
228311 37 fated inmate youthfulness +888 228312 37 gradually mineral creak FAS +889 228313 37 widens impending Chippewa +890 228314 37 preclude meditation clamored +891 228401 65 Jobrel ideas freezes +892 228402 65 hooker miniaturizes forgivably FAS +893 228403 65 rainstorm lewdly reduce FAS +894 228404 65 disconnects title McGovern W +895 228405 65 cruelty youthfulness Nazis W +896 228406 65 exponentials creak epistle W +897 228407 65 affective Chippewa socializes W +898 228408 65 arteries clamored conceptions +899 228409 65 Crosby freezes Kevin +900 228410 65 acquaint forgivably uncovering +901 230301 37 evenhandedly reduce chews FAS +902 230302 37 percentage McGovern appendixes FAS +903 230303 37 disobedience Nazis raining +904 018062 37 humility epistle infest +905 230501 37 gleaning socializes compartment +906 230502 37 petted conceptions minting +907 230503 37 bloater Kevin ducks +908 230504 37 minion uncovering roped A +909 230505 37 marginal chews waltz +910 230506 37 apiary appendixes Lillian +911 230507 37 measures raining repressions A +912 230508 37 precaution infest chillingly +913 230509 37 repelled compartment noncritical +914 230901 37 primary minting lithograph +915 230902 37 coverings ducks spongers +916 230903 37 Artemia roped parenthood +917 230904 37 navigate waltz posed +918 230905 37 spatial Lillian instruments +919 230906 37 Gurkha repressions filial +920 230907 37 meanwhile chillingly fixedly +921 230908 37 Melinda noncritical relives +922 230909 37 Butterfield lithograph Pandora +923 230910 37 Aldrich spongers watering A +924 230911 37 previewing parenthood ungrateful +925 230912 37 glut posed secures +926 230913 37 unaffected instruments chastisers +927 230914 37 inmate filial icon +928 231304 37 mineral fixedly reuniting A +929 231305 37 impending relives imagining A +930 231306 37 meditation Pandora abiding A +931 231307 37 ideas watering omnisciently +932 231308 37 miniaturizes ungrateful Britannic +933 231309 37 lewdly secures scholastics A +934 231310 37 title chastisers mechanics A +935 231311 37 youthfulness icon humidly A +936 231312 37 creak reuniting masterpiece +937 231313 37 Chippewa imagining however +938 231314 37 clamored abiding Mendelian +939 231315 37 freezes omnisciently jarred +940 232102 37 forgivably Britannic scolds +941 232103 37 reduce scholastics infatuate +942 232104 37 McGovern mechanics willed A +943 232105 37 Nazis humidly joyfully +944 232106 37 epistle masterpiece Microsoft +945 232107 37 socializes however fibrosities +946 232108 37 conceptions Mendelian Baltimorean +947 232601 37 Kevin jarred equestrian +948 232602 37 uncovering scolds Goodrich +949 232603 37 chews infatuate apish A +950 232605 37 appendixes willed Adlerian +951 232606 37 raining joyfully Tropez +952 232607 37 infest Microsoft nouns +953 232608 37 compartment fibrosities distracting +954 232609 37 minting Baltimorean mutton +955 236104 37 ducks equestrian bridgeable A +956 236105 37 roped Goodrich stickers A +957 236106 37 waltz apish transcontinental A +958 236107 37 Lillian Adlerian amateurish +959 236108 37 repressions Tropez Gandhian +960 236109 37 chillingly nouns stratified +961 236110 37 noncritical distracting chamberlains +962 236111 37 lithograph mutton creditably +963 236112 37 spongers bridgeable philosophic +964 236113 37 parenthood 
stickers ores +965 238005 37 posed transcontinental Carleton +966 238006 37 instruments amateurish tape A +967 238007 37 filial Gandhian afloat A +968 238008 37 fixedly stratified goodness A +969 238009 37 relives chamberlains welcoming +970 238010 37 Pandora creditably Pinsky FAS +971 238011 37 watering philosophic halting +972 238012 37 ungrateful ores bibliography +973 238013 37 secures Carleton decoding +974 240401 41 chastisers tape variance A +975 240402 41 icon afloat allowed A +976 240901 41 reuniting goodness dire A +977 240902 41 imagining welcoming dub A +978 241801 41 abiding Pinsky poisoning +979 242101 41 omnisciently halting Iraqis A +980 242102 41 Britannic bibliography heaving +981 242201 41 scholastics decoding population A +982 242202 41 mechanics variance bomb A +983 242501 41 humidly allowed Majorca A +984 242502 41 masterpiece dire Gershwins +985 246201 41 however dub explorers +986 246202 41 Mendelian poisoning libretto A +987 246203 41 jarred Iraqis occurred +988 246204 41 scolds heaving Lagos +989 246205 41 infatuate population rats +990 246301 41 willed bomb bankruptcies A +991 246302 41 joyfully Majorca crying +992 248001 41 Microsoft Gershwins unexpected +993 248002 41 fibrosities explorers accessed A +994 248003 41 Baltimorean libretto colorful A +995 248004 41 equestrian occurred versatility A +996 248005 41 Goodrich Lagos cosy +997 248006 41 apish rats Darius A +998 248007 41 Adlerian bankruptcies mastering A +999 248008 41 Tropez crying Asiaticizations A +1000 248009 41 nouns unexpected offerers A +1001 248010 41 distracting accessed uncles A +1002 248011 41 mutton colorful sleepwalk +1003 248012 41 bridgeable versatility Ernestine +1004 248013 41 stickers cosy checksumming +1005 248014 41 transcontinental Darius stopped +1006 248015 41 amateurish mastering sicker +1007 248016 41 Gandhian Asiaticizations Italianization +1008 248017 41 stratified offerers alphabetic +1009 248018 41 chamberlains uncles pharmaceutic +1010 248019 41 creditably sleepwalk creator +1011 248020 41 philosophic Ernestine chess +1012 248021 41 ores checksumming charcoal +1013 248101 41 Carleton stopped Epiphany A +1014 248102 41 tape sicker bulldozes A +1015 248201 41 afloat Italianization Pygmalion A +1016 248202 41 goodness alphabetic caressing A +1017 248203 41 welcoming pharmaceutic Palestine A +1018 248204 41 Pinsky creator regimented A +1019 248205 41 halting chess scars A +1020 248206 41 bibliography charcoal realest A +1021 248207 41 decoding Epiphany diffusing A +1022 248208 41 variance bulldozes clubroom A +1023 248209 41 allowed Pygmalion Blythe A +1024 248210 41 dire caressing ahead +1025 248211 50 dub Palestine reviver +1026 250501 34 poisoning regimented retransmitting A +1027 250502 34 Iraqis scars landslide +1028 250503 34 heaving realest Eiffel +1029 250504 34 population diffusing absentee +1030 250505 34 bomb clubroom aye +1031 250601 34 Majorca Blythe forked A +1032 250602 34 Gershwins ahead Peruvianizes +1033 250603 34 explorers reviver clerked +1034 250604 34 libretto retransmitting tutor +1035 250605 34 occurred landslide boulevard +1036 251001 34 Lagos Eiffel shuttered +1037 251002 34 rats absentee quotes A +1038 251003 34 bankruptcies aye Caltech +1039 251004 34 crying forked Mossberg +1040 251005 34 unexpected Peruvianizes kept +1041 251301 34 accessed clerked roundly +1042 251302 34 colorful tutor features A +1043 251303 34 versatility boulevard imaginable A +1044 251304 34 cosy shuttered controller +1045 251305 34 Darius quotes racial +1046 251401 34 
mastering Caltech uprisings A +1047 251402 34 Asiaticizations Mossberg narrowed A +1048 251403 34 offerers kept cannot A +1049 251404 34 uncles roundly vest +1050 251405 34 sleepwalk features famine +1051 251406 34 Ernestine imaginable sugars +1052 251801 34 checksumming controller exterminated A +1053 251802 34 stopped racial belays +1054 252101 34 sicker uprisings Hodges A +1055 252102 34 Italianization narrowed translatable +1056 252301 34 alphabetic cannot duality A +1057 252302 34 pharmaceutic vest recording A +1058 252303 34 creator famine rouses A +1059 252304 34 chess sugars poison +1060 252305 34 charcoal exterminated attitude +1061 252306 34 Epiphany belays dusted +1062 252307 34 bulldozes Hodges encompasses +1063 252308 34 Pygmalion translatable presentation +1064 252309 34 caressing duality Kantian +1065 256001 34 Palestine recording imprecision A +1066 256002 34 regimented rouses saving +1067 256003 34 scars poison maternal +1068 256004 34 realest attitude hewed +1069 256005 34 diffusing dusted kerosene +1070 258001 34 clubroom encompasses Cubans +1071 258002 34 Blythe presentation photographers +1072 258003 34 ahead Kantian nymph A +1073 258004 34 reviver imprecision bedlam A +1074 258005 34 retransmitting saving north A +1075 258006 34 landslide maternal Schoenberg A +1076 258007 34 Eiffel hewed botany A +1077 258008 34 absentee kerosene curs +1078 258009 34 aye Cubans solidification +1079 258010 34 forked photographers inheritresses +1080 258011 34 Peruvianizes nymph stiller +1081 258101 68 clerked bedlam t1 A +1082 258102 68 tutor north suite A +1083 258103 34 boulevard Schoenberg ransomer +1084 258104 68 shuttered botany Willy +1085 258105 68 quotes curs Rena A +1086 258106 68 Caltech solidification Seattle A +1087 258107 68 Mossberg inheritresses relaxes A +1088 258108 68 kept stiller exclaim +1089 258109 68 roundly t1 implicated A +1090 258110 68 features suite distinguish +1091 258111 68 imaginable ransomer assayed +1092 258112 68 controller Willy homeowner +1093 258113 68 racial Rena and +1094 258201 34 uprisings Seattle stealth +1095 258202 34 narrowed relaxes coinciding A +1096 258203 34 cannot exclaim founder A +1097 258204 34 vest implicated environing +1098 258205 34 famine distinguish jewelry +1099 258301 34 sugars assayed lemons A +1100 258401 34 exterminated homeowner brokenness A +1101 258402 34 belays and bedpost A +1102 258403 34 Hodges stealth assurers A +1103 258404 34 translatable coinciding annoyers +1104 258405 34 duality founder affixed +1105 258406 34 recording environing warbling +1106 258407 34 rouses jewelry seriously +1107 228123 37 poison lemons boasted +1108 250606 34 attitude brokenness Chantilly +1109 208405 37 dusted bedpost Iranizes +1110 212101 37 encompasses assurers violinist +1111 218206 37 presentation annoyers extramarital +1112 150401 37 Kantian affixed spates +1113 248212 41 imprecision warbling cloakroom +1114 128026 00 saving seriously gazer +1115 128024 00 maternal boasted hand +1116 128027 00 hewed Chantilly tucked +1117 128025 00 kerosene Iranizes gems +1118 128109 00 Cubans violinist clinker +1119 128705 00 photographers extramarital refiner +1120 126303 00 nymph spates callus +1121 128308 00 bedlam cloakroom leopards +1122 128204 00 north gazer comfortingly +1123 128205 00 Schoenberg hand generically +1124 128206 00 botany tucked getters +1125 128207 00 curs gems sexually +1126 118205 00 solidification clinker spear +1127 116801 00 inheritresses refiner serums +1128 116803 00 stiller callus Italianization +1129 116804 00 t1 
leopards attendants +1130 116802 00 suite comfortingly spies +1131 128605 00 ransomer generically Anthony +1132 118308 00 Willy getters planar +1133 113702 00 Rena sexually cupped +1134 113703 00 Seattle spear cleanser +1135 112103 00 relaxes serums commuters +1136 118009 00 exclaim Italianization honeysuckle +1137 138011 00 implicated attendants orphanage +1138 138010 00 distinguish spies skies +1139 138012 00 assayed Anthony crushers +1140 068304 00 homeowner planar Puritan +1141 078009 00 and cupped squeezer +1142 108013 00 stealth cleanser bruises +1143 084004 00 coinciding commuters bonfire +1144 083402 00 founder honeysuckle Colombo +1145 084003 00 environing orphanage nondecreasing +1146 088504 00 jewelry skies innocents +1147 088005 00 lemons crushers masked +1148 088007 00 brokenness Puritan file +1149 088006 00 bedpost squeezer brush +1150 148025 00 assurers bruises mutilate +1151 148024 00 annoyers bonfire mommy +1152 138305 00 affixed Colombo bulkheads +1153 138306 00 warbling nondecreasing undeclared +1154 152701 00 seriously innocents displacements +1155 148505 00 boasted masked nieces +1156 158003 00 Chantilly file coeducation +1157 156201 00 Iranizes brush brassy +1158 156202 00 violinist mutilate authenticator +1159 158307 00 extramarital mommy Washoe +1160 158402 00 spates bulkheads penny +1161 158401 00 cloakroom undeclared Flagler +1162 068013 00 gazer displacements stoned +1163 068012 00 hand nieces cranes +1164 068203 00 tucked coeducation masterful +1165 088205 00 gems brassy biracial +1166 068704 00 clinker authenticator steamships +1167 068604 00 refiner Washoe windmills +1168 158502 00 callus penny exploit +1169 123103 00 leopards Flagler riverfront +1170 148026 00 comfortingly stoned sisterly +1171 123302 00 generically cranes sharpshoot +1172 076503 00 getters masterful mittens +1173 126304 00 sexually biracial interdependency +1174 068306 00 spear steamships policy +1175 143504 00 serums windmills unleashing +1176 160201 00 Italianization exploit pretenders +1177 148028 00 attendants riverfront overstatements +1178 148027 00 spies sisterly birthed +1179 143505 00 Anthony sharpshoot opportunism +1180 108014 00 planar mittens showroom +1181 076104 00 cupped interdependency compromisingly +1182 078106 00 cleanser policy Medicare +1183 126102 00 commuters unleashing corresponds +1184 128029 00 honeysuckle pretenders hardware +1185 128028 00 orphanage overstatements implant +1186 018410 00 skies birthed Alicia +1187 128110 00 crushers opportunism requesting +1188 148506 00 Puritan showroom produced +1189 123303 00 squeezer compromisingly criticizes +1190 123304 00 bruises Medicare backer +1191 068504 00 bonfire corresponds positively +1192 068305 00 Colombo hardware colicky +1193 000000 00 nondecreasing implant thrillingly +1 000001 00 Omaha teethe neat +2 011401 37 breaking dreaded Steinberg W +3 011402 37 Romans scholastics jarring +4 011403 37 intercepted audiology tinily INSERT INTO t2 VALUES (1,000001,00,'Omaha','teethe','neat','') , (2,011401,37,'breaking','dreaded','Steinberg','W') , (3,011402,37,'Romans','scholastics','jarring','') , (4,011403,37,'intercepted','audiology','tinily',''); SELECT * FROM t2; auto fld1 companynr fld3 fld4 fld5 fld6 diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index 3570f74065e..337cc570298 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -1578,6 +1578,217 @@ f 10 g 10 h 10 i 10 +alter table t1 modify v varchar(300), drop key v, drop key v_2, 
add key v (v); +Warnings: +Warning 1071 Specified key was too long; max key length is 255 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(255)) +) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 258 const 10 Using where +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 258 NULL 11 Using where +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 258 NULL 10 Using where +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 258 NULL 10 Using where +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 258 const 10 Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 drop key v, add key v (v(30)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(30)) +) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const 10 Using where +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 11 Using where +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 10 Using where +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 10 Using where +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const 10 Using where +select v,count(*) from t1 group by v limit 10; 
+v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 modify v varchar(600), drop key v, add key v (v); +Warnings: +Warning 1071 Specified key was too long; max key length is 255 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(600) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(255)) +) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 drop table t1; create table t1 (a char(10), unique (a)); insert into t1 values ('a '); diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result new file mode 100644 index 00000000000..8cadb120c2e --- /dev/null +++ b/mysql-test/r/federated.result @@ -0,0 +1,566 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +stop slave; +drop database if exists federated; +create database federated; +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32) NOT NULL default '', `other` int(20) NOT NULL default '0', created datetime default '2004-04-04 04:04:04', PRIMARY KEY (`id`), KEY `name` (`name`), KEY `other_key` (`other`)) DEFAULT CHARSET=latin1; +drop database if exists federated; +create database federated; +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32) NOT NULL default '', `other` int(20) NOT NULL default '0', created datetime default '2004-04-04 04:04:04', PRIMARY KEY (`id`), KEY `name` (`name`), KEY `other_key` (`other`)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +insert into federated.t1 (name, other) values ('First Name', 11111); +insert into federated.t1 (name, other) values ('Second Name', 22222); +insert into federated.t1 (name, other) values ('Third Name', 33333); +insert into federated.t1 (name, other) values ('Fourth Name', 44444); +insert into federated.t1 (name, other) values ('Fifth Name', 55555); +insert into federated.t1 (name, other) values ('Sixth Name', 66666); +insert into federated.t1 (name, other) values ('Seventh Name', 77777); +insert into federated.t1 (name, other) values ('Eigth Name', 88888); +insert into federated.t1 (name, other) values ('Ninth Name', 99999); +insert into federated.t1 (name, other) values ('Tenth Name', 101010); +select * from federated.t1; +id name other created +1 First Name 11111 2004-04-04 04:04:04 +2 Second Name 22222 2004-04-04 04:04:04 +3 Third Name 33333 2004-04-04 04:04:04 +4 Fourth Name 44444 2004-04-04 04:04:04 +5 Fifth Name 55555 2004-04-04 04:04:04 +6 Sixth Name 66666 2004-04-04 04:04:04 +7 Seventh Name 77777 2004-04-04 04:04:04 +8 Eigth Name 88888 2004-04-04 04:04:04 +9 Ninth Name 99999 2004-04-04 04:04:04 +10 Tenth Name 101010 2004-04-04 04:04:04 +select * from federated.t1 where id = 5; +id name other created +5 Fifth Name 55555 
2004-04-04 04:04:04 +select * from federated.t1 where name = 'Sixth Name'; +id name other created +6 Sixth Name 66666 2004-04-04 04:04:04 +select * from federated.t1 where id = 6 and name = 'Sixth Name'; +id name other created +6 Sixth Name 66666 2004-04-04 04:04:04 +select * from federated.t1 where other = 44444; +id name other created +4 Fourth Name 44444 2004-04-04 04:04:04 +select * from federated.t1 where name like '%th%'; +id name other created +3 Third Name 33333 2004-04-04 04:04:04 +4 Fourth Name 44444 2004-04-04 04:04:04 +5 Fifth Name 55555 2004-04-04 04:04:04 +6 Sixth Name 66666 2004-04-04 04:04:04 +7 Seventh Name 77777 2004-04-04 04:04:04 +8 Eigth Name 88888 2004-04-04 04:04:04 +9 Ninth Name 99999 2004-04-04 04:04:04 +10 Tenth Name 101010 2004-04-04 04:04:04 +update federated.t1 set name = '3rd name' where id = 3; +select * from federated.t1 where name = '3rd name'; +id name other created +3 3rd name 33333 2004-04-04 04:04:04 +update federated.t1 set name = 'Third name' where name = '3rd name'; +select * from federated.t1 where name = 'Third name'; +id name other created +3 Third name 33333 2004-04-04 04:04:04 +select * from federated.t1 order by id DESC; +id name other created +10 Tenth Name 101010 2004-04-04 04:04:04 +9 Ninth Name 99999 2004-04-04 04:04:04 +8 Eigth Name 88888 2004-04-04 04:04:04 +7 Seventh Name 77777 2004-04-04 04:04:04 +6 Sixth Name 66666 2004-04-04 04:04:04 +5 Fifth Name 55555 2004-04-04 04:04:04 +4 Fourth Name 44444 2004-04-04 04:04:04 +3 Third name 33333 2004-04-04 04:04:04 +2 Second Name 22222 2004-04-04 04:04:04 +1 First Name 11111 2004-04-04 04:04:04 +select * from federated.t1 order by name; +id name other created +8 Eigth Name 88888 2004-04-04 04:04:04 +5 Fifth Name 55555 2004-04-04 04:04:04 +1 First Name 11111 2004-04-04 04:04:04 +4 Fourth Name 44444 2004-04-04 04:04:04 +9 Ninth Name 99999 2004-04-04 04:04:04 +2 Second Name 22222 2004-04-04 04:04:04 +7 Seventh Name 77777 2004-04-04 04:04:04 +6 Sixth Name 66666 2004-04-04 04:04:04 +10 Tenth Name 101010 2004-04-04 04:04:04 +3 Third name 33333 2004-04-04 04:04:04 +select * from federated.t1 order by name DESC; +id name other created +3 Third name 33333 2004-04-04 04:04:04 +10 Tenth Name 101010 2004-04-04 04:04:04 +6 Sixth Name 66666 2004-04-04 04:04:04 +7 Seventh Name 77777 2004-04-04 04:04:04 +2 Second Name 22222 2004-04-04 04:04:04 +9 Ninth Name 99999 2004-04-04 04:04:04 +4 Fourth Name 44444 2004-04-04 04:04:04 +1 First Name 11111 2004-04-04 04:04:04 +5 Fifth Name 55555 2004-04-04 04:04:04 +8 Eigth Name 88888 2004-04-04 04:04:04 +select * from federated.t1 order by name ASC; +id name other created +8 Eigth Name 88888 2004-04-04 04:04:04 +5 Fifth Name 55555 2004-04-04 04:04:04 +1 First Name 11111 2004-04-04 04:04:04 +4 Fourth Name 44444 2004-04-04 04:04:04 +9 Ninth Name 99999 2004-04-04 04:04:04 +2 Second Name 22222 2004-04-04 04:04:04 +7 Seventh Name 77777 2004-04-04 04:04:04 +6 Sixth Name 66666 2004-04-04 04:04:04 +10 Tenth Name 101010 2004-04-04 04:04:04 +3 Third name 33333 2004-04-04 04:04:04 +select * from federated.t1 group by other; +id name other created +1 First Name 11111 2004-04-04 04:04:04 +2 Second Name 22222 2004-04-04 04:04:04 +3 Third name 33333 2004-04-04 04:04:04 +4 Fourth Name 44444 2004-04-04 04:04:04 +5 Fifth Name 55555 2004-04-04 04:04:04 +6 Sixth Name 66666 2004-04-04 04:04:04 +7 Seventh Name 77777 2004-04-04 04:04:04 +8 Eigth Name 88888 2004-04-04 04:04:04 +9 Ninth Name 99999 2004-04-04 04:04:04 +10 Tenth Name 101010 2004-04-04 04:04:04 +delete from federated.t1 where id = 5; 
+select * from federated.t1 where id = 5; +id name other created +delete from federated.t1; +select * from federated.t1 where id = 5; +id name other created +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32), `other` varchar(20), PRIMARY KEY (`id`) ) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32), `other` varchar(20), PRIMARY KEY (`id`) ); +insert into federated.t1 (name, other) values ('First Name', 11111); +insert into federated.t1 (name, other) values ('Second Name', NULL); +insert into federated.t1 (name, other) values ('Third Name', 33333); +insert into federated.t1 (name, other) values (NULL, NULL); +insert into federated.t1 (name, other) values ('Fifth Name', 55555); +insert into federated.t1 (name, other) values ('Sixth Name', 66666); +insert into federated.t1 (name) values ('Seventh Name'); +insert into federated.t1 (name, other) values ('Eigth Name', 88888); +insert into federated.t1 (name, other) values ('Ninth Name', 99999); +insert into federated.t1 (other) values ('fee fie foe fum'); +select * from federated.t1 where other IS NULL; +id name other +2 Second Name NULL +4 NULL NULL +7 Seventh Name NULL +select * from federated.t1 where name IS NULL; +id name other +4 NULL NULL +10 NULL fee fie foe fum +select * from federated.t1 where name IS NULL and other IS NULL; +id name other +4 NULL NULL +select * from federated.t1 where name IS NULL or other IS NULL; +id name other +2 Second Name NULL +4 NULL NULL +7 Seventh Name NULL +10 NULL fee fie foe fum +update federated.t1 set name = 'Fourth Name', other = 'four four four' where name IS NULL and other IS NULL; +update federated.t1 set other = 'two two two two' where name = 'Secend Name'; +update federated.t1 set other = 'seven seven' where name like 'Sec%'; +update federated.t1 set other = 'seven seven' where name = 'Seventh Name'; +update federated.t1 set name = 'Tenth Name' where other like 'fee fie%'; +select * from federated.t1 where name IS NULL or other IS NULL ; +id name other +select * from federated.t1; +id name other +1 First Name 11111 +2 Second Name seven seven +3 Third Name 33333 +4 Fourth Name four four four +5 Fifth Name 55555 +6 Sixth Name 66666 +7 Seventh Name seven seven +8 Eigth Name 88888 +9 Ninth Name 99999 +10 Tenth Name fee fie foe fum +drop table if exists federated.t1; +CREATE TABLE federated.t1 (id int, name varchar(32), floatval float, other int) DEFAULT CHARSET=latin1; +drop table if exists federated.t1; +CREATE TABLE federated.t1 (id int, name varchar(32), floatval float, other int) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +insert into federated.t1 values (NULL, NULL, NULL, NULL); +insert into federated.t1 values (); +insert into federated.t1 (id) values (1); +insert into federated.t1 (name, floatval, other) values ('foo', 33.33333332, NULL); +insert into federated.t1 (name, floatval, other) values (0, 00.3333, NULL); +select * from federated.t1; +id name floatval other +NULL NULL NULL NULL +NULL NULL NULL NULL +1 NULL NULL NULL +NULL foo 33.3333 NULL +NULL 0 0.3333 NULL +select count(*) from federated.t1 where id IS NULL and name IS NULL and floatval IS NULL and other IS NULL; +count(*) +2 +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( blurb_id int NOT NULL DEFAULT 0, blurb text default '', 
primary key(blurb_id)) DEFAULT CHARSET=latin1; +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( blurb_id int NOT NULL DEFAULT 0, blurb text default '', primary key(blurb_id)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +INSERT INTO federated.t1 VALUES (1, " MySQL supports a number of column types in several categories: numeric types, date and time types, and string (character) types. This chapter first gives an overview of these column types, and then provides a more detailed description of the properties of the types in each category, and a summary of the column type storage requirements. The overview is intentionally brief. The more detailed descriptions should be consulted for additional information about particular column types, such as the allowable formats in which you can specify values."); +INSERT INTO federated.t1 VALUES (2, "All arithmetic is done using signed BIGINT or DOUBLE values, so you should not use unsigned big integers larger than 9223372036854775807 (63 bits) except with bit functions! If you do that, some of the last digits in the result may be wrong because of rounding errors when converting a BIGINT value to a DOUBLE."); +INSERT INTO federated.t1 VALUES (3, " A floating-point number. p represents the precision. It can be from 0 to 24 for a single-precision floating-point number and from 25 to 53 for a double-precision floating-point number. These types are like the FLOAT and DOUBLE types described immediately following. FLOAT(p) has the same range as the corresponding FLOAT and DOUBLE types, but the display size and number of decimals are undefined. "); +INSERT INTO federated.t1 VALUES(4, "Die Übersetzung einer so umfangreichen technischen Dokumentation wie des MySQL-Referenzhandbuchs ist schon eine besondere Herausforderung. Zumindest für jemanden, der seine Zielsprache ernst nimmt:"); +select * from federated.t1; +blurb_id blurb +1 MySQL supports a number of column types in several categories: numeric types, date and time types, and string (character) types. This chapter first gives an overview of these column types, and then provides a more detailed description of the properties of the types in each category, and a summary of the column type storage requirements. The overview is intentionally brief. The more detailed descriptions should be consulted for additional information about particular column types, such as the allowable formats in which you can specify values. +2 All arithmetic is done using signed BIGINT or DOUBLE values, so you should not use unsigned big integers larger than 9223372036854775807 (63 bits) except with bit functions! If you do that, some of the last digits in the result may be wrong because of rounding errors when converting a BIGINT value to a DOUBLE. +3 A floating-point number. p represents the precision. It can be from 0 to 24 for a single-precision floating-point number and from 25 to 53 for a double-precision floating-point number. These types are like the FLOAT and DOUBLE types described immediately following. FLOAT(p) has the same range as the corresponding FLOAT and DOUBLE types, but the display size and number of decimals are undefined. +4 Die Übersetzung einer so umfangreichen technischen Dokumentation wie des MySQL-Referenzhandbuchs ist schon eine besondere Herausforderung. 
Zumindest für jemanden, der seine Zielsprache ernst nimmt: +drop table if exists federated.t1; +create table federated.t1 (a int not null, b int not null, c int not null, primary key (a),key(b)); +drop table if exists federated.t1; +create table federated.t1 (a int not null, b int not null, c int not null, primary key (a),key(b)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +insert into federated.t1 values (3,3,3),(1,1,1),(2,2,2),(4,4,4); +explain select * from federated.t1 order by a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 Using filesort +explain select * from federated.t1 order by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 Using filesort +explain select * from federated.t1 order by c; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 Using filesort +explain select a from federated.t1 order by a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 Using filesort +explain select b from federated.t1 order by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 Using filesort +explain select a,b from federated.t1 order by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 Using filesort +explain select a,b from federated.t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 +explain select a,b,c from federated.t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 +drop table if exists federated.t1; +create table federated.t1 (i1 int, i2 int, i3 int, i4 int, i5 int, i6 int, i7 int, i8 +int, i9 int, i10 int, i11 int, i12 int, i13 int, i14 int, i15 int, i16 int, i17 +int, i18 int, i19 int, i20 int, i21 int, i22 int, i23 int, i24 int, i25 int, +i26 int, i27 int, i28 int, i29 int, i30 int, i31 int, i32 int, i33 int, i34 +int, i35 int, i36 int, i37 int, i38 int, i39 int, i40 int, i41 int, i42 int, +i43 int, i44 int, i45 int, i46 int, i47 int, i48 int, i49 int, i50 int, i51 +int, i52 int, i53 int, i54 int, i55 int, i56 int, i57 int, i58 int, i59 int, +i60 int, i61 int, i62 int, i63 int, i64 int, i65 int, i66 int, i67 int, i68 +int, i69 int, i70 int, i71 int, i72 int, i73 int, i74 int, i75 int, i76 int, +i77 int, i78 int, i79 int, i80 int, i81 int, i82 int, i83 int, i84 int, i85 +int, i86 int, i87 int, i88 int, i89 int, i90 int, i91 int, i92 int, i93 int, +i94 int, i95 int, i96 int, i97 int, i98 int, i99 int, i100 int, i101 int, i102 +int, i103 int, i104 int, i105 int, i106 int, i107 int, i108 int, i109 int, i110 +int, i111 int, i112 int, i113 int, i114 int, i115 int, i116 int, i117 int, i118 +int, i119 int, i120 int, i121 int, i122 int, i123 int, i124 int, i125 int, i126 +int, i127 int, i128 int, i129 int, i130 int, i131 int, i132 int, i133 int, i134 +int, i135 int, i136 int, i137 int, i138 int, i139 int, i140 int, i141 int, i142 +int, i143 int, i144 int, i145 int, i146 int, i147 int, i148 int, i149 int, i150 +int, i151 int, i152 int, i153 int, i154 int, i155 int, i156 int, i157 int, i158 +int, i159 int, i160 int, i161 int, i162 int, i163 int, i164 int, i165 int, i166 +int, i167 int, i168 int, i169 int, i170 int, i171 int, i172 int, i173 int, i174 
+int, i175 int, i176 int, i177 int, i178 int, i179 int, i180 int, i181 int, i182 +int, i183 int, i184 int, i185 int, i186 int, i187 int, i188 int, i189 int, i190 +int, i191 int, i192 int, i193 int, i194 int, i195 int, i196 int, i197 int, i198 +int, i199 int, i200 int, i201 int, i202 int, i203 int, i204 int, i205 int, i206 +int, i207 int, i208 int, i209 int, i210 int, i211 int, i212 int, i213 int, i214 +int, i215 int, i216 int, i217 int, i218 int, i219 int, i220 int, i221 int, i222 +int, i223 int, i224 int, i225 int, i226 int, i227 int, i228 int, i229 int, i230 +int, i231 int, i232 int, i233 int, i234 int, i235 int, i236 int, i237 int, i238 +int, i239 int, i240 int, i241 int, i242 int, i243 int, i244 int, i245 int, i246 +int, i247 int, i248 int, i249 int, i250 int, i251 int, i252 int, i253 int, i254 +int, i255 int, i256 int, i257 int, i258 int, i259 int, i260 int, i261 int, i262 +int, i263 int, i264 int, i265 int, i266 int, i267 int, i268 int, i269 int, i270 +int, i271 int, i272 int, i273 int, i274 int, i275 int, i276 int, i277 int, i278 +int, i279 int, i280 int, i281 int, i282 int, i283 int, i284 int, i285 int, i286 +int, i287 int, i288 int, i289 int, i290 int, i291 int, i292 int, i293 int, i294 +int, i295 int, i296 int, i297 int, i298 int, i299 int, i300 int, i301 int, i302 +int, i303 int, i304 int, i305 int, i306 int, i307 int, i308 int, i309 int, i310 +int, i311 int, i312 int, i313 int, i314 int, i315 int, i316 int, i317 int, i318 +int, i319 int, i320 int, i321 int, i322 int, i323 int, i324 int, i325 int, i326 +int, i327 int, i328 int, i329 int, i330 int, i331 int, i332 int, i333 int, i334 +int, i335 int, i336 int, i337 int, i338 int, i339 int, i340 int, i341 int, i342 +int, i343 int, i344 int, i345 int, i346 int, i347 int, i348 int, i349 int, i350 +int, i351 int, i352 int, i353 int, i354 int, i355 int, i356 int, i357 int, i358 +int, i359 int, i360 int, i361 int, i362 int, i363 int, i364 int, i365 int, i366 +int, i367 int, i368 int, i369 int, i370 int, i371 int, i372 int, i373 int, i374 +int, i375 int, i376 int, i377 int, i378 int, i379 int, i380 int, i381 int, i382 +int, i383 int, i384 int, i385 int, i386 int, i387 int, i388 int, i389 int, i390 +int, i391 int, i392 int, i393 int, i394 int, i395 int, i396 int, i397 int, i398 +int, i399 int, i400 int, i401 int, i402 int, i403 int, i404 int, i405 int, i406 +int, i407 int, i408 int, i409 int, i410 int, i411 int, i412 int, i413 int, i414 +int, i415 int, i416 int, i417 int, i418 int, i419 int, i420 int, i421 int, i422 +int, i423 int, i424 int, i425 int, i426 int, i427 int, i428 int, i429 int, i430 +int, i431 int, i432 int, i433 int, i434 int, i435 int, i436 int, i437 int, i438 +int, i439 int, i440 int, i441 int, i442 int, i443 int, i444 int, i445 int, i446 +int, i447 int, i448 int, i449 int, i450 int, i451 int, i452 int, i453 int, i454 +int, i455 int, i456 int, i457 int, i458 int, i459 int, i460 int, i461 int, i462 +int, i463 int, i464 int, i465 int, i466 int, i467 int, i468 int, i469 int, i470 +int, i471 int, i472 int, i473 int, i474 int, i475 int, i476 int, i477 int, i478 +int, i479 int, i480 int, i481 int, i482 int, i483 int, i484 int, i485 int, i486 +int, i487 int, i488 int, i489 int, i490 int, i491 int, i492 int, i493 int, i494 +int, i495 int, i496 int, i497 int, i498 int, i499 int, i500 int, i501 int, i502 +int, i503 int, i504 int, i505 int, i506 int, i507 int, i508 int, i509 int, i510 +int, i511 int, i512 int, i513 int, i514 int, i515 int, i516 int, i517 int, i518 +int, i519 int, i520 int, i521 int, i522 int, i523 int, i524 int, i525 
int, i526 +int, i527 int, i528 int, i529 int, i530 int, i531 int, i532 int, i533 int, i534 +int, i535 int, i536 int, i537 int, i538 int, i539 int, i540 int, i541 int, i542 +int, i543 int, i544 int, i545 int, i546 int, i547 int, i548 int, i549 int, i550 +int, i551 int, i552 int, i553 int, i554 int, i555 int, i556 int, i557 int, i558 +int, i559 int, i560 int, i561 int, i562 int, i563 int, i564 int, i565 int, i566 +int, i567 int, i568 int, i569 int, i570 int, i571 int, i572 int, i573 int, i574 +int, i575 int, i576 int, i577 int, i578 int, i579 int, i580 int, i581 int, i582 +int, i583 int, i584 int, i585 int, i586 int, i587 int, i588 int, i589 int, i590 +int, i591 int, i592 int, i593 int, i594 int, i595 int, i596 int, i597 int, i598 +int, i599 int, i600 int, i601 int, i602 int, i603 int, i604 int, i605 int, i606 +int, i607 int, i608 int, i609 int, i610 int, i611 int, i612 int, i613 int, i614 +int, i615 int, i616 int, i617 int, i618 int, i619 int, i620 int, i621 int, i622 +int, i623 int, i624 int, i625 int, i626 int, i627 int, i628 int, i629 int, i630 +int, i631 int, i632 int, i633 int, i634 int, i635 int, i636 int, i637 int, i638 +int, i639 int, i640 int, i641 int, i642 int, i643 int, i644 int, i645 int, i646 +int, i647 int, i648 int, i649 int, i650 int, i651 int, i652 int, i653 int, i654 +int, i655 int, i656 int, i657 int, i658 int, i659 int, i660 int, i661 int, i662 +int, i663 int, i664 int, i665 int, i666 int, i667 int, i668 int, i669 int, i670 +int, i671 int, i672 int, i673 int, i674 int, i675 int, i676 int, i677 int, i678 +int, i679 int, i680 int, i681 int, i682 int, i683 int, i684 int, i685 int, i686 +int, i687 int, i688 int, i689 int, i690 int, i691 int, i692 int, i693 int, i694 +int, i695 int, i696 int, i697 int, i698 int, i699 int, i700 int, i701 int, i702 +int, i703 int, i704 int, i705 int, i706 int, i707 int, i708 int, i709 int, i710 +int, i711 int, i712 int, i713 int, i714 int, i715 int, i716 int, i717 int, i718 +int, i719 int, i720 int, i721 int, i722 int, i723 int, i724 int, i725 int, i726 +int, i727 int, i728 int, i729 int, i730 int, i731 int, i732 int, i733 int, i734 +int, i735 int, i736 int, i737 int, i738 int, i739 int, i740 int, i741 int, i742 +int, i743 int, i744 int, i745 int, i746 int, i747 int, i748 int, i749 int, i750 +int, i751 int, i752 int, i753 int, i754 int, i755 int, i756 int, i757 int, i758 +int, i759 int, i760 int, i761 int, i762 int, i763 int, i764 int, i765 int, i766 +int, i767 int, i768 int, i769 int, i770 int, i771 int, i772 int, i773 int, i774 +int, i775 int, i776 int, i777 int, i778 int, i779 int, i780 int, i781 int, i782 +int, i783 int, i784 int, i785 int, i786 int, i787 int, i788 int, i789 int, i790 +int, i791 int, i792 int, i793 int, i794 int, i795 int, i796 int, i797 int, i798 +int, i799 int, i800 int, i801 int, i802 int, i803 int, i804 int, i805 int, i806 +int, i807 int, i808 int, i809 int, i810 int, i811 int, i812 int, i813 int, i814 +int, i815 int, i816 int, i817 int, i818 int, i819 int, i820 int, i821 int, i822 +int, i823 int, i824 int, i825 int, i826 int, i827 int, i828 int, i829 int, i830 +int, i831 int, i832 int, i833 int, i834 int, i835 int, i836 int, i837 int, i838 +int, i839 int, i840 int, i841 int, i842 int, i843 int, i844 int, i845 int, i846 +int, i847 int, i848 int, i849 int, i850 int, i851 int, i852 int, i853 int, i854 +int, i855 int, i856 int, i857 int, i858 int, i859 int, i860 int, i861 int, i862 +int, i863 int, i864 int, i865 int, i866 int, i867 int, i868 int, i869 int, i870 +int, i871 int, i872 int, i873 int, i874 int, i875 int, i876 
int, i877 int, i878 +int, i879 int, i880 int, i881 int, i882 int, i883 int, i884 int, i885 int, i886 +int, i887 int, i888 int, i889 int, i890 int, i891 int, i892 int, i893 int, i894 +int, i895 int, i896 int, i897 int, i898 int, i899 int, i900 int, i901 int, i902 +int, i903 int, i904 int, i905 int, i906 int, i907 int, i908 int, i909 int, i910 +int, i911 int, i912 int, i913 int, i914 int, i915 int, i916 int, i917 int, i918 +int, i919 int, i920 int, i921 int, i922 int, i923 int, i924 int, i925 int, i926 +int, i927 int, i928 int, i929 int, i930 int, i931 int, i932 int, i933 int, i934 +int, i935 int, i936 int, i937 int, i938 int, i939 int, i940 int, i941 int, i942 +int, i943 int, i944 int, i945 int, i946 int, i947 int, i948 int, i949 int, i950 +int, i951 int, i952 int, i953 int, i954 int, i955 int, i956 int, i957 int, i958 +int, i959 int, i960 int, i961 int, i962 int, i963 int, i964 int, i965 int, i966 +int, i967 int, i968 int, i969 int, i970 int, i971 int, i972 int, i973 int, i974 +int, i975 int, i976 int, i977 int, i978 int, i979 int, i980 int, i981 int, i982 +int, i983 int, i984 int, i985 int, i986 int, i987 int, i988 int, i989 int, i990 +int, i991 int, i992 int, i993 int, i994 int, i995 int, i996 int, i997 int, i998 +int, i999 int, i1000 int, b blob) row_format=dynamic; +drop table if exists federated.t1; +create table federated.t1 (i1 int, i2 int, i3 int, i4 int, i5 int, i6 int, i7 int, i8 +int, i9 int, i10 int, i11 int, i12 int, i13 int, i14 int, i15 int, i16 int, i17 +int, i18 int, i19 int, i20 int, i21 int, i22 int, i23 int, i24 int, i25 int, +i26 int, i27 int, i28 int, i29 int, i30 int, i31 int, i32 int, i33 int, i34 +int, i35 int, i36 int, i37 int, i38 int, i39 int, i40 int, i41 int, i42 int, +i43 int, i44 int, i45 int, i46 int, i47 int, i48 int, i49 int, i50 int, i51 +int, i52 int, i53 int, i54 int, i55 int, i56 int, i57 int, i58 int, i59 int, +i60 int, i61 int, i62 int, i63 int, i64 int, i65 int, i66 int, i67 int, i68 +int, i69 int, i70 int, i71 int, i72 int, i73 int, i74 int, i75 int, i76 int, +i77 int, i78 int, i79 int, i80 int, i81 int, i82 int, i83 int, i84 int, i85 +int, i86 int, i87 int, i88 int, i89 int, i90 int, i91 int, i92 int, i93 int, +i94 int, i95 int, i96 int, i97 int, i98 int, i99 int, i100 int, i101 int, i102 +int, i103 int, i104 int, i105 int, i106 int, i107 int, i108 int, i109 int, i110 +int, i111 int, i112 int, i113 int, i114 int, i115 int, i116 int, i117 int, i118 +int, i119 int, i120 int, i121 int, i122 int, i123 int, i124 int, i125 int, i126 +int, i127 int, i128 int, i129 int, i130 int, i131 int, i132 int, i133 int, i134 +int, i135 int, i136 int, i137 int, i138 int, i139 int, i140 int, i141 int, i142 +int, i143 int, i144 int, i145 int, i146 int, i147 int, i148 int, i149 int, i150 +int, i151 int, i152 int, i153 int, i154 int, i155 int, i156 int, i157 int, i158 +int, i159 int, i160 int, i161 int, i162 int, i163 int, i164 int, i165 int, i166 +int, i167 int, i168 int, i169 int, i170 int, i171 int, i172 int, i173 int, i174 +int, i175 int, i176 int, i177 int, i178 int, i179 int, i180 int, i181 int, i182 +int, i183 int, i184 int, i185 int, i186 int, i187 int, i188 int, i189 int, i190 +int, i191 int, i192 int, i193 int, i194 int, i195 int, i196 int, i197 int, i198 +int, i199 int, i200 int, i201 int, i202 int, i203 int, i204 int, i205 int, i206 +int, i207 int, i208 int, i209 int, i210 int, i211 int, i212 int, i213 int, i214 +int, i215 int, i216 int, i217 int, i218 int, i219 int, i220 int, i221 int, i222 +int, i223 int, i224 int, i225 int, i226 int, i227 int, i228 int, 
i229 int, i230 +int, i231 int, i232 int, i233 int, i234 int, i235 int, i236 int, i237 int, i238 +int, i239 int, i240 int, i241 int, i242 int, i243 int, i244 int, i245 int, i246 +int, i247 int, i248 int, i249 int, i250 int, i251 int, i252 int, i253 int, i254 +int, i255 int, i256 int, i257 int, i258 int, i259 int, i260 int, i261 int, i262 +int, i263 int, i264 int, i265 int, i266 int, i267 int, i268 int, i269 int, i270 +int, i271 int, i272 int, i273 int, i274 int, i275 int, i276 int, i277 int, i278 +int, i279 int, i280 int, i281 int, i282 int, i283 int, i284 int, i285 int, i286 +int, i287 int, i288 int, i289 int, i290 int, i291 int, i292 int, i293 int, i294 +int, i295 int, i296 int, i297 int, i298 int, i299 int, i300 int, i301 int, i302 +int, i303 int, i304 int, i305 int, i306 int, i307 int, i308 int, i309 int, i310 +int, i311 int, i312 int, i313 int, i314 int, i315 int, i316 int, i317 int, i318 +int, i319 int, i320 int, i321 int, i322 int, i323 int, i324 int, i325 int, i326 +int, i327 int, i328 int, i329 int, i330 int, i331 int, i332 int, i333 int, i334 +int, i335 int, i336 int, i337 int, i338 int, i339 int, i340 int, i341 int, i342 +int, i343 int, i344 int, i345 int, i346 int, i347 int, i348 int, i349 int, i350 +int, i351 int, i352 int, i353 int, i354 int, i355 int, i356 int, i357 int, i358 +int, i359 int, i360 int, i361 int, i362 int, i363 int, i364 int, i365 int, i366 +int, i367 int, i368 int, i369 int, i370 int, i371 int, i372 int, i373 int, i374 +int, i375 int, i376 int, i377 int, i378 int, i379 int, i380 int, i381 int, i382 +int, i383 int, i384 int, i385 int, i386 int, i387 int, i388 int, i389 int, i390 +int, i391 int, i392 int, i393 int, i394 int, i395 int, i396 int, i397 int, i398 +int, i399 int, i400 int, i401 int, i402 int, i403 int, i404 int, i405 int, i406 +int, i407 int, i408 int, i409 int, i410 int, i411 int, i412 int, i413 int, i414 +int, i415 int, i416 int, i417 int, i418 int, i419 int, i420 int, i421 int, i422 +int, i423 int, i424 int, i425 int, i426 int, i427 int, i428 int, i429 int, i430 +int, i431 int, i432 int, i433 int, i434 int, i435 int, i436 int, i437 int, i438 +int, i439 int, i440 int, i441 int, i442 int, i443 int, i444 int, i445 int, i446 +int, i447 int, i448 int, i449 int, i450 int, i451 int, i452 int, i453 int, i454 +int, i455 int, i456 int, i457 int, i458 int, i459 int, i460 int, i461 int, i462 +int, i463 int, i464 int, i465 int, i466 int, i467 int, i468 int, i469 int, i470 +int, i471 int, i472 int, i473 int, i474 int, i475 int, i476 int, i477 int, i478 +int, i479 int, i480 int, i481 int, i482 int, i483 int, i484 int, i485 int, i486 +int, i487 int, i488 int, i489 int, i490 int, i491 int, i492 int, i493 int, i494 +int, i495 int, i496 int, i497 int, i498 int, i499 int, i500 int, i501 int, i502 +int, i503 int, i504 int, i505 int, i506 int, i507 int, i508 int, i509 int, i510 +int, i511 int, i512 int, i513 int, i514 int, i515 int, i516 int, i517 int, i518 +int, i519 int, i520 int, i521 int, i522 int, i523 int, i524 int, i525 int, i526 +int, i527 int, i528 int, i529 int, i530 int, i531 int, i532 int, i533 int, i534 +int, i535 int, i536 int, i537 int, i538 int, i539 int, i540 int, i541 int, i542 +int, i543 int, i544 int, i545 int, i546 int, i547 int, i548 int, i549 int, i550 +int, i551 int, i552 int, i553 int, i554 int, i555 int, i556 int, i557 int, i558 +int, i559 int, i560 int, i561 int, i562 int, i563 int, i564 int, i565 int, i566 +int, i567 int, i568 int, i569 int, i570 int, i571 int, i572 int, i573 int, i574 +int, i575 int, i576 int, i577 int, i578 int, i579 int, 
i580 int, i581 int, i582 +int, i583 int, i584 int, i585 int, i586 int, i587 int, i588 int, i589 int, i590 +int, i591 int, i592 int, i593 int, i594 int, i595 int, i596 int, i597 int, i598 +int, i599 int, i600 int, i601 int, i602 int, i603 int, i604 int, i605 int, i606 +int, i607 int, i608 int, i609 int, i610 int, i611 int, i612 int, i613 int, i614 +int, i615 int, i616 int, i617 int, i618 int, i619 int, i620 int, i621 int, i622 +int, i623 int, i624 int, i625 int, i626 int, i627 int, i628 int, i629 int, i630 +int, i631 int, i632 int, i633 int, i634 int, i635 int, i636 int, i637 int, i638 +int, i639 int, i640 int, i641 int, i642 int, i643 int, i644 int, i645 int, i646 +int, i647 int, i648 int, i649 int, i650 int, i651 int, i652 int, i653 int, i654 +int, i655 int, i656 int, i657 int, i658 int, i659 int, i660 int, i661 int, i662 +int, i663 int, i664 int, i665 int, i666 int, i667 int, i668 int, i669 int, i670 +int, i671 int, i672 int, i673 int, i674 int, i675 int, i676 int, i677 int, i678 +int, i679 int, i680 int, i681 int, i682 int, i683 int, i684 int, i685 int, i686 +int, i687 int, i688 int, i689 int, i690 int, i691 int, i692 int, i693 int, i694 +int, i695 int, i696 int, i697 int, i698 int, i699 int, i700 int, i701 int, i702 +int, i703 int, i704 int, i705 int, i706 int, i707 int, i708 int, i709 int, i710 +int, i711 int, i712 int, i713 int, i714 int, i715 int, i716 int, i717 int, i718 +int, i719 int, i720 int, i721 int, i722 int, i723 int, i724 int, i725 int, i726 +int, i727 int, i728 int, i729 int, i730 int, i731 int, i732 int, i733 int, i734 +int, i735 int, i736 int, i737 int, i738 int, i739 int, i740 int, i741 int, i742 +int, i743 int, i744 int, i745 int, i746 int, i747 int, i748 int, i749 int, i750 +int, i751 int, i752 int, i753 int, i754 int, i755 int, i756 int, i757 int, i758 +int, i759 int, i760 int, i761 int, i762 int, i763 int, i764 int, i765 int, i766 +int, i767 int, i768 int, i769 int, i770 int, i771 int, i772 int, i773 int, i774 +int, i775 int, i776 int, i777 int, i778 int, i779 int, i780 int, i781 int, i782 +int, i783 int, i784 int, i785 int, i786 int, i787 int, i788 int, i789 int, i790 +int, i791 int, i792 int, i793 int, i794 int, i795 int, i796 int, i797 int, i798 +int, i799 int, i800 int, i801 int, i802 int, i803 int, i804 int, i805 int, i806 +int, i807 int, i808 int, i809 int, i810 int, i811 int, i812 int, i813 int, i814 +int, i815 int, i816 int, i817 int, i818 int, i819 int, i820 int, i821 int, i822 +int, i823 int, i824 int, i825 int, i826 int, i827 int, i828 int, i829 int, i830 +int, i831 int, i832 int, i833 int, i834 int, i835 int, i836 int, i837 int, i838 +int, i839 int, i840 int, i841 int, i842 int, i843 int, i844 int, i845 int, i846 +int, i847 int, i848 int, i849 int, i850 int, i851 int, i852 int, i853 int, i854 +int, i855 int, i856 int, i857 int, i858 int, i859 int, i860 int, i861 int, i862 +int, i863 int, i864 int, i865 int, i866 int, i867 int, i868 int, i869 int, i870 +int, i871 int, i872 int, i873 int, i874 int, i875 int, i876 int, i877 int, i878 +int, i879 int, i880 int, i881 int, i882 int, i883 int, i884 int, i885 int, i886 +int, i887 int, i888 int, i889 int, i890 int, i891 int, i892 int, i893 int, i894 +int, i895 int, i896 int, i897 int, i898 int, i899 int, i900 int, i901 int, i902 +int, i903 int, i904 int, i905 int, i906 int, i907 int, i908 int, i909 int, i910 +int, i911 int, i912 int, i913 int, i914 int, i915 int, i916 int, i917 int, i918 +int, i919 int, i920 int, i921 int, i922 int, i923 int, i924 int, i925 int, i926 +int, i927 int, i928 int, i929 int, i930 int, 
i931 int, i932 int, i933 int, i934 +int, i935 int, i936 int, i937 int, i938 int, i939 int, i940 int, i941 int, i942 +int, i943 int, i944 int, i945 int, i946 int, i947 int, i948 int, i949 int, i950 +int, i951 int, i952 int, i953 int, i954 int, i955 int, i956 int, i957 int, i958 +int, i959 int, i960 int, i961 int, i962 int, i963 int, i964 int, i965 int, i966 +int, i967 int, i968 int, i969 int, i970 int, i971 int, i972 int, i973 int, i974 +int, i975 int, i976 int, i977 int, i978 int, i979 int, i980 int, i981 int, i982 +int, i983 int, i984 int, i985 int, i986 int, i987 int, i988 int, i989 int, i990 +int, i991 int, i992 int, i993 int, i994 int, i995 int, i996 int, i997 int, i998 +int, i999 int, i1000 int, b blob) row_format=dynamic ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +insert into federated.t1 values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "PatrickG"); +update federated.t1 set b=repeat('a',256); +update federated.t1 set i1=0, i2=0, i3=0, i4=0, i5=0, i6=0, i7=0, i8=0, i9=0, i10=0; +select * from federated.t1 where i9=0 and i10=0; +i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 i16 i17 i18 i19 i20 i21 i22 i23 i24 i25 i26 i27 i28 i29 i30 i31 i32 i33 i34 i35 i36 i37 i38 i39 i40 i41 i42 i43 i44 i45 i46 i47 i48 i49 i50 i51 i52 i53 i54 i55 i56 i57 i58 i59 i60 i61 i62 i63 i64 i65 i66 i67 i68 i69 i70 i71 i72 i73 i74 i75 i76 i77 i78 i79 i80 i81 i82 i83 i84 i85 i86 i87 i88 i89 i90 i91 i92 i93 i94 i95 i96 i97 i98 i99 i100 i101 i102 i103 i104 i105 i106 i107 i108 i109 i110 i111 i112 i113 i114 i115 i116 i117 i118 i119 i120 i121 i122 i123 i124 i125 i126 i127 i128 i129 i130 i131 i132 i133 i134 i135 i136 i137 i138 i139 i140 i141 i142 i143 i144 i145 i146 i147 i148 i149 i150 i151 i152 i153 i154 i155 i156 i157 i158 i159 i160 i161 i162 i163 i164 i165 i166 i167 i168 i169 i170 i171 i172 i173 i174 i175 i176 i177 i178 i179 i180 i181 i182 i183 i184 i185 i186 i187 i188 i189 i190 i191 i192 i193 i194 i195 i196 i197 i198 i199 i200 i201 i202 i203 i204 i205 i206 i207 i208 i209 i210 i211 i212 i213 i214 i215 i216 i217 i218 i219 i220 i221 i222 i223 i224 i225 i226 i227 i228 i229 i230 i231 i232 i233 i234 i235 i236 i237 i238 i239 i240 i241 i242 i243 i244 i245 i246 i247 i248 i249 i250 i251 i252 i253 i254 i255 i256 i257 i258 i259 i260 i261 i262 i263 i264 i265 i266 i267 i268 i269 i270 i271 i272 i273 i274 i275 i276 i277 i278 i279 i280 i281 i282 i283 i284 i285 i286 i287 i288 i289 i290 i291 i292 i293 i294 i295 i296 i297 i298 i299 i300 i301 i302 i303 i304 i305 i306 i307 i308 i309 i310 i311 i312 i313 i314 i315 i316 i317 i318 i319 i320 i321 i322 i323 i324 i325 i326 i327 i328 i329 i330 i331 i332 i333 i334 i335 i336 i337 i338 i339 i340 i341 i342 i343 i344 i345 i346 i347 i348 i349 i350 i351 i352 i353 i354 i355 i356 i357 i358 i359 i360 i361 i362 i363 i364 i365 i366 i367 i368 i369 i370 i371 i372 i373 i374 i375 i376 i377 i378 i379 i380 i381 i382 i383 i384 i385 i386 i387 i388 i389 i390 i391 i392 i393 i394 i395 i396 i397 i398 i399 i400 i401 i402 i403 i404 i405 i406 i407 i408 i409 i410 i411 i412 i413 i414 i415 i416 i417 i418 i419 i420 i421 i422 i423 i424 i425 i426 i427 i428 i429 i430 i431 i432 i433 i434 i435 i436 i437 i438 i439 i440 i441 i442 i443 i444 i445 i446 i447 i448 i449 i450 i451 i452 i453 i454 i455 i456 i457 i458 i459 i460 i461 i462 i463 i464 i465 i466 i467 i468 i469 i470 i471 i472 i473 i474 i475 i476 i477 i478 i479 i480 i481 i482 i483 i484 i485 i486 i487 i488 i489 i490 i491 i492 i493 i494 i495 i496 i497 i498 i499 i500 i501 i502 i503 i504 i505 i506 i507 i508 i509 i510 i511 i512 i513 i514 i515 i516 i517 i518 i519 i520 i521 i522 i523 i524 i525 i526 i527 i528 i529 i530 i531 i532 i533 i534 i535 i536 i537 i538 i539 i540 i541 i542 i543 i544 i545 i546 i547 i548 i549 i550 i551 i552 i553 i554 i555 i556 i557 i558 i559 i560 i561 i562 i563 i564 i565 i566 i567 i568 i569 i570 i571 i572 i573 i574 i575 i576 i577 i578 i579 i580 i581 i582 i583 i584 i585 i586 i587 i588 i589 i590 i591 i592 i593 i594 i595 i596 i597 i598 i599 i600 i601 i602 i603 i604 i605 i606 i607 i608 i609 i610 i611 i612 i613 i614 i615 i616 i617 i618 i619 i620 i621 i622 i623 i624 
i625 i626 i627 i628 i629 i630 i631 i632 i633 i634 i635 i636 i637 i638 i639 i640 i641 i642 i643 i644 i645 i646 i647 i648 i649 i650 i651 i652 i653 i654 i655 i656 i657 i658 i659 i660 i661 i662 i663 i664 i665 i666 i667 i668 i669 i670 i671 i672 i673 i674 i675 i676 i677 i678 i679 i680 i681 i682 i683 i684 i685 i686 i687 i688 i689 i690 i691 i692 i693 i694 i695 i696 i697 i698 i699 i700 i701 i702 i703 i704 i705 i706 i707 i708 i709 i710 i711 i712 i713 i714 i715 i716 i717 i718 i719 i720 i721 i722 i723 i724 i725 i726 i727 i728 i729 i730 i731 i732 i733 i734 i735 i736 i737 i738 i739 i740 i741 i742 i743 i744 i745 i746 i747 i748 i749 i750 i751 i752 i753 i754 i755 i756 i757 i758 i759 i760 i761 i762 i763 i764 i765 i766 i767 i768 i769 i770 i771 i772 i773 i774 i775 i776 i777 i778 i779 i780 i781 i782 i783 i784 i785 i786 i787 i788 i789 i790 i791 i792 i793 i794 i795 i796 i797 i798 i799 i800 i801 i802 i803 i804 i805 i806 i807 i808 i809 i810 i811 i812 i813 i814 i815 i816 i817 i818 i819 i820 i821 i822 i823 i824 i825 i826 i827 i828 i829 i830 i831 i832 i833 i834 i835 i836 i837 i838 i839 i840 i841 i842 i843 i844 i845 i846 i847 i848 i849 i850 i851 i852 i853 i854 i855 i856 i857 i858 i859 i860 i861 i862 i863 i864 i865 i866 i867 i868 i869 i870 i871 i872 i873 i874 i875 i876 i877 i878 i879 i880 i881 i882 i883 i884 i885 i886 i887 i888 i889 i890 i891 i892 i893 i894 i895 i896 i897 i898 i899 i900 i901 i902 i903 i904 i905 i906 i907 i908 i909 i910 i911 i912 i913 i914 i915 i916 i917 i918 i919 i920 i921 i922 i923 i924 i925 i926 i927 i928 i929 i930 i931 i932 i933 i934 i935 i936 i937 i938 i939 i940 i941 i942 i943 i944 i945 i946 i947 i948 i949 i950 i951 i952 i953 i954 i955 i956 i957 i958 i959 i960 i961 i962 i963 i964 i965 i966 i967 i968 i969 i970 i971 i972 i973 i974 i975 i976 i977 i978 i979 i980 i981 i982 i983 i984 i985 i986 i987 i988 i989 i990 i991 i992 i993 i994 i995 i996 i997 i998 i999 i1000 b +0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 PatrickG +update federated.t1 set i50=20; +select * from federated.t1; +i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 i16 i17 i18 i19 i20 i21 i22 i23 i24 i25 i26 i27 i28 i29 i30 i31 i32 i33 i34 i35 i36 i37 i38 i39 i40 i41 i42 i43 i44 i45 i46 i47 i48 i49 i50 i51 i52 i53 i54 i55 i56 i57 i58 i59 i60 i61 i62 i63 i64 i65 i66 i67 i68 i69 i70 i71 i72 i73 i74 i75 i76 i77 i78 i79 i80 i81 i82 i83 i84 i85 i86 i87 i88 i89 i90 i91 i92 i93 i94 i95 i96 i97 i98 i99 i100 i101 i102 i103 i104 i105 i106 i107 i108 i109 i110 i111 i112 i113 i114 i115 i116 i117 i118 i119 i120 i121 i122 i123 i124 i125 i126 i127 i128 i129 i130 i131 i132 i133 i134 i135 i136 i137 i138 i139 i140 i141 i142 i143 i144 i145 i146 i147 i148 i149 i150 i151 i152 i153 i154 i155 i156 i157 i158 i159 i160 i161 i162 i163 i164 i165 i166 i167 i168 i169 i170 i171 i172 i173 i174 i175 i176 i177 i178 i179 i180 i181 i182 i183 i184 i185 i186 i187 i188 i189 i190 i191 i192 i193 i194 i195 i196 i197 i198 i199 i200 i201 i202 i203 i204 i205 i206 i207 i208 i209 i210 i211 i212 i213 i214 i215 i216 i217 i218 i219 i220 i221 i222 i223 i224 i225 i226 i227 i228 i229 i230 i231 i232 i233 i234 i235 i236 i237 i238 i239 i240 i241 i242 i243 i244 i245 i246 i247 i248 i249 i250 i251 i252 i253 i254 i255 i256 i257 i258 i259 i260 i261 i262 i263 i264 i265 i266 i267 i268 i269 i270 i271 i272 i273 i274 i275 i276 i277 i278 i279 i280 i281 i282 i283 i284 i285 i286 i287 i288 i289 i290 i291 i292 i293 i294 i295 i296 i297 i298 i299 i300 i301 i302 i303 i304 i305 i306 i307 i308 i309 i310 i311 i312 i313 i314 i315 i316 i317 i318 i319 i320 i321 i322 i323 i324 i325 i326 i327 i328 i329 i330 i331 i332 i333 i334 i335 i336 i337 i338 i339 i340 i341 i342 i343 i344 i345 i346 i347 i348 i349 i350 i351 i352 i353 i354 i355 i356 i357 i358 i359 i360 i361 i362 i363 i364 i365 i366 i367 i368 i369 i370 i371 i372 i373 i374 i375 i376 i377 i378 i379 i380 i381 i382 i383 i384 i385 i386 i387 i388 i389 i390 i391 i392 i393 i394 i395 i396 i397 i398 i399 i400 i401 i402 i403 i404 i405 i406 i407 i408 i409 i410 i411 i412 i413 i414 i415 i416 i417 i418 i419 i420 i421 i422 i423 i424 i425 i426 i427 i428 i429 i430 i431 i432 i433 i434 i435 i436 i437 i438 i439 i440 i441 i442 i443 i444 i445 i446 i447 i448 i449 i450 i451 i452 i453 i454 i455 i456 i457 i458 i459 i460 i461 i462 i463 i464 i465 i466 i467 i468 i469 i470 i471 i472 i473 i474 i475 i476 i477 i478 i479 i480 i481 i482 i483 i484 i485 i486 i487 i488 i489 i490 i491 i492 i493 i494 i495 i496 i497 i498 i499 i500 i501 i502 i503 i504 i505 i506 i507 i508 i509 i510 i511 i512 i513 i514 i515 i516 i517 i518 i519 i520 i521 i522 i523 i524 i525 i526 i527 i528 i529 i530 i531 i532 i533 i534 i535 i536 i537 i538 i539 i540 i541 i542 i543 i544 i545 i546 i547 i548 i549 i550 i551 i552 i553 i554 i555 i556 i557 i558 i559 i560 i561 i562 i563 i564 i565 i566 i567 i568 i569 i570 i571 i572 i573 i574 i575 i576 i577 i578 i579 i580 i581 i582 i583 i584 i585 i586 i587 i588 i589 i590 i591 i592 i593 i594 i595 i596 i597 i598 i599 i600 i601 i602 i603 i604 i605 i606 i607 i608 i609 i610 i611 i612 i613 i614 i615 i616 i617 i618 i619 i620 i621 i622 i623 i624 i625 i626 i627 i628 i629 i630 i631 i632 i633 i634 i635 i636 i637 i638 i639 i640 i641 i642 i643 i644 i645 i646 i647 i648 i649 i650 i651 i652 
i653 i654 i655 i656 i657 i658 i659 i660 i661 i662 i663 i664 i665 i666 i667 i668 i669 i670 i671 i672 i673 i674 i675 i676 i677 i678 i679 i680 i681 i682 i683 i684 i685 i686 i687 i688 i689 i690 i691 i692 i693 i694 i695 i696 i697 i698 i699 i700 i701 i702 i703 i704 i705 i706 i707 i708 i709 i710 i711 i712 i713 i714 i715 i716 i717 i718 i719 i720 i721 i722 i723 i724 i725 i726 i727 i728 i729 i730 i731 i732 i733 i734 i735 i736 i737 i738 i739 i740 i741 i742 i743 i744 i745 i746 i747 i748 i749 i750 i751 i752 i753 i754 i755 i756 i757 i758 i759 i760 i761 i762 i763 i764 i765 i766 i767 i768 i769 i770 i771 i772 i773 i774 i775 i776 i777 i778 i779 i780 i781 i782 i783 i784 i785 i786 i787 i788 i789 i790 i791 i792 i793 i794 i795 i796 i797 i798 i799 i800 i801 i802 i803 i804 i805 i806 i807 i808 i809 i810 i811 i812 i813 i814 i815 i816 i817 i818 i819 i820 i821 i822 i823 i824 i825 i826 i827 i828 i829 i830 i831 i832 i833 i834 i835 i836 i837 i838 i839 i840 i841 i842 i843 i844 i845 i846 i847 i848 i849 i850 i851 i852 i853 i854 i855 i856 i857 i858 i859 i860 i861 i862 i863 i864 i865 i866 i867 i868 i869 i870 i871 i872 i873 i874 i875 i876 i877 i878 i879 i880 i881 i882 i883 i884 i885 i886 i887 i888 i889 i890 i891 i892 i893 i894 i895 i896 i897 i898 i899 i900 i901 i902 i903 i904 i905 i906 i907 i908 i909 i910 i911 i912 i913 i914 i915 i916 i917 i918 i919 i920 i921 i922 i923 i924 i925 i926 i927 i928 i929 i930 i931 i932 i933 i934 i935 i936 i937 i938 i939 i940 i941 i942 i943 i944 i945 i946 i947 i948 i949 i950 i951 i952 i953 i954 i955 i956 i957 i958 i959 i960 i961 i962 i963 i964 i965 i966 i967 i968 i969 i970 i971 i972 i973 i974 i975 i976 i977 i978 i979 i980 i981 i982 i983 i984 i985 i986 i987 i988 i989 i990 i991 i992 i993 i994 i995 i996 i997 i998 i999 i1000 b +0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 20 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 PatrickG +delete from federated.t1 where i51=20; +select * from federated.t1; +i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 i16 i17 i18 i19 i20 i21 i22 i23 i24 i25 i26 i27 i28 i29 i30 i31 i32 i33 i34 i35 i36 i37 i38 i39 i40 i41 i42 i43 i44 i45 i46 i47 i48 i49 i50 i51 i52 i53 i54 i55 i56 i57 i58 i59 i60 i61 i62 i63 i64 i65 i66 i67 i68 i69 i70 i71 i72 i73 i74 i75 i76 i77 i78 i79 i80 i81 i82 i83 i84 i85 i86 i87 i88 i89 i90 i91 i92 i93 i94 i95 i96 i97 i98 i99 i100 i101 i102 i103 i104 i105 i106 i107 i108 i109 i110 i111 i112 i113 i114 i115 i116 i117 i118 i119 i120 i121 i122 i123 i124 i125 i126 i127 i128 i129 i130 i131 i132 i133 i134 i135 i136 i137 i138 i139 i140 i141 i142 i143 i144 i145 i146 i147 i148 i149 i150 i151 i152 i153 i154 i155 i156 i157 i158 i159 i160 i161 i162 i163 i164 i165 i166 i167 i168 i169 i170 i171 i172 i173 i174 i175 i176 i177 i178 i179 i180 i181 i182 i183 i184 i185 i186 i187 i188 i189 i190 i191 i192 i193 i194 i195 i196 i197 i198 i199 i200 i201 i202 i203 i204 i205 i206 i207 i208 i209 i210 i211 i212 i213 i214 i215 i216 i217 i218 i219 i220 i221 i222 i223 i224 i225 i226 i227 i228 i229 i230 i231 i232 i233 i234 i235 i236 i237 i238 i239 i240 i241 i242 i243 i244 i245 i246 i247 i248 i249 i250 i251 i252 i253 i254 i255 i256 i257 i258 i259 i260 i261 i262 i263 i264 i265 i266 i267 i268 i269 i270 i271 i272 i273 i274 i275 i276 i277 i278 i279 i280 i281 i282 i283 i284 i285 i286 i287 i288 i289 i290 i291 i292 i293 i294 i295 i296 i297 i298 i299 i300 i301 i302 i303 i304 i305 i306 i307 i308 i309 i310 i311 i312 i313 i314 i315 i316 i317 i318 i319 i320 i321 i322 i323 i324 i325 i326 i327 i328 i329 i330 i331 i332 i333 i334 i335 i336 i337 i338 i339 i340 i341 i342 i343 i344 i345 i346 i347 i348 i349 i350 i351 i352 i353 i354 i355 i356 i357 i358 i359 i360 i361 i362 i363 i364 i365 i366 i367 i368 i369 i370 i371 i372 i373 i374 i375 i376 i377 i378 i379 i380 i381 i382 i383 i384 i385 i386 i387 i388 i389 i390 i391 i392 i393 i394 i395 i396 i397 i398 i399 i400 i401 i402 i403 i404 i405 i406 i407 i408 i409 i410 i411 i412 i413 i414 i415 i416 i417 i418 i419 i420 i421 i422 i423 i424 i425 i426 i427 i428 i429 i430 i431 i432 i433 i434 i435 i436 i437 i438 i439 i440 i441 i442 i443 i444 i445 i446 i447 i448 i449 i450 i451 i452 i453 i454 i455 i456 i457 i458 i459 i460 i461 i462 i463 i464 i465 i466 i467 i468 i469 i470 i471 i472 i473 i474 i475 i476 i477 i478 i479 i480 i481 i482 i483 i484 i485 i486 i487 i488 i489 i490 i491 i492 i493 i494 i495 i496 i497 i498 i499 i500 i501 i502 i503 i504 i505 i506 i507 i508 i509 i510 i511 i512 i513 i514 i515 i516 i517 i518 i519 i520 i521 i522 i523 i524 i525 i526 i527 i528 i529 i530 i531 i532 i533 i534 i535 i536 i537 i538 i539 i540 i541 i542 i543 i544 i545 i546 i547 i548 i549 i550 i551 i552 i553 i554 i555 i556 i557 i558 i559 i560 i561 i562 i563 i564 i565 i566 i567 i568 i569 i570 i571 i572 i573 i574 i575 i576 i577 i578 i579 i580 i581 i582 i583 i584 i585 i586 i587 i588 i589 i590 i591 i592 i593 i594 i595 i596 i597 i598 i599 i600 i601 i602 i603 i604 i605 i606 i607 i608 i609 i610 i611 i612 i613 i614 i615 i616 i617 i618 i619 i620 i621 i622 i623 i624 i625 i626 i627 i628 i629 i630 i631 i632 i633 i634 i635 i636 i637 i638 i639 i640 i641 i642 i643 i644 i645 i646 i647 i648 i649 i650 i651 i652 i653 i654 i655 i656 i657 i658 i659 i660 i661 i662 i663 i664 i665 i666 i667 i668 i669 i670 i671 i672 i673 i674 i675 i676 i677 i678 
i679 i680 i681 i682 i683 i684 i685 i686 i687 i688 i689 i690 i691 i692 i693 i694 i695 i696 i697 i698 i699 i700 i701 i702 i703 i704 i705 i706 i707 i708 i709 i710 i711 i712 i713 i714 i715 i716 i717 i718 i719 i720 i721 i722 i723 i724 i725 i726 i727 i728 i729 i730 i731 i732 i733 i734 i735 i736 i737 i738 i739 i740 i741 i742 i743 i744 i745 i746 i747 i748 i749 i750 i751 i752 i753 i754 i755 i756 i757 i758 i759 i760 i761 i762 i763 i764 i765 i766 i767 i768 i769 i770 i771 i772 i773 i774 i775 i776 i777 i778 i779 i780 i781 i782 i783 i784 i785 i786 i787 i788 i789 i790 i791 i792 i793 i794 i795 i796 i797 i798 i799 i800 i801 i802 i803 i804 i805 i806 i807 i808 i809 i810 i811 i812 i813 i814 i815 i816 i817 i818 i819 i820 i821 i822 i823 i824 i825 i826 i827 i828 i829 i830 i831 i832 i833 i834 i835 i836 i837 i838 i839 i840 i841 i842 i843 i844 i845 i846 i847 i848 i849 i850 i851 i852 i853 i854 i855 i856 i857 i858 i859 i860 i861 i862 i863 i864 i865 i866 i867 i868 i869 i870 i871 i872 i873 i874 i875 i876 i877 i878 i879 i880 i881 i882 i883 i884 i885 i886 i887 i888 i889 i890 i891 i892 i893 i894 i895 i896 i897 i898 i899 i900 i901 i902 i903 i904 i905 i906 i907 i908 i909 i910 i911 i912 i913 i914 i915 i916 i917 i918 i919 i920 i921 i922 i923 i924 i925 i926 i927 i928 i929 i930 i931 i932 i933 i934 i935 i936 i937 i938 i939 i940 i941 i942 i943 i944 i945 i946 i947 i948 i949 i950 i951 i952 i953 i954 i955 i956 i957 i958 i959 i960 i961 i962 i963 i964 i965 i966 i967 i968 i969 i970 i971 i972 i973 i974 i975 i976 i977 i978 i979 i980 i981 i982 i983 i984 i985 i986 i987 i988 i989 i990 i991 i992 i993 i994 i995 i996 i997 i998 i999 i1000 b +0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 20 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 PatrickG +delete from federated.t1 where i50=20; +select * from federated.t1; +i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 i16 i17 i18 i19 i20 i21 i22 i23 i24 i25 i26 i27 i28 i29 i30 i31 i32 i33 i34 i35 i36 i37 i38 i39 i40 i41 i42 i43 i44 i45 i46 i47 i48 i49 i50 i51 i52 i53 i54 i55 i56 i57 i58 i59 i60 i61 i62 i63 i64 i65 i66 i67 i68 i69 i70 i71 i72 i73 i74 i75 i76 i77 i78 i79 i80 i81 i82 i83 i84 i85 i86 i87 i88 i89 i90 i91 i92 i93 i94 i95 i96 i97 i98 i99 i100 i101 i102 i103 i104 i105 i106 i107 i108 i109 i110 i111 i112 i113 i114 i115 i116 i117 i118 i119 i120 i121 i122 i123 i124 i125 i126 i127 i128 i129 i130 i131 i132 i133 i134 i135 i136 i137 i138 i139 i140 i141 i142 i143 i144 i145 i146 i147 i148 i149 i150 i151 i152 i153 i154 i155 i156 i157 i158 i159 i160 i161 i162 i163 i164 i165 i166 i167 i168 i169 i170 i171 i172 i173 i174 i175 i176 i177 i178 i179 i180 i181 i182 i183 i184 i185 i186 i187 i188 i189 i190 i191 i192 i193 i194 i195 i196 i197 i198 i199 i200 i201 i202 i203 i204 i205 i206 i207 i208 i209 i210 i211 i212 i213 i214 i215 i216 i217 i218 i219 i220 i221 i222 i223 i224 i225 i226 i227 i228 i229 i230 i231 i232 i233 i234 i235 i236 i237 i238 i239 i240 i241 i242 i243 i244 i245 i246 i247 i248 i249 i250 i251 i252 i253 i254 i255 i256 i257 i258 i259 i260 i261 i262 i263 i264 i265 i266 i267 i268 i269 i270 i271 i272 i273 i274 i275 i276 i277 i278 i279 i280 i281 i282 i283 i284 i285 i286 i287 i288 i289 i290 i291 i292 i293 i294 i295 i296 i297 i298 i299 i300 i301 i302 i303 i304 i305 i306 i307 i308 i309 i310 i311 i312 i313 i314 i315 i316 i317 i318 i319 i320 i321 i322 i323 i324 i325 i326 i327 i328 i329 i330 i331 i332 i333 i334 i335 i336 i337 i338 i339 i340 i341 i342 i343 i344 i345 i346 i347 i348 i349 i350 i351 i352 i353 i354 i355 i356 i357 i358 i359 i360 i361 i362 i363 i364 i365 i366 i367 i368 i369 i370 i371 i372 i373 i374 i375 i376 i377 i378 i379 i380 i381 i382 i383 i384 i385 i386 i387 i388 i389 i390 i391 i392 i393 i394 i395 i396 i397 i398 i399 i400 i401 i402 i403 i404 i405 i406 i407 i408 i409 i410 i411 i412 i413 i414 i415 i416 i417 i418 i419 i420 i421 i422 i423 i424 i425 i426 i427 i428 i429 i430 i431 i432 i433 i434 i435 i436 i437 i438 i439 i440 i441 i442 i443 i444 i445 i446 i447 i448 i449 i450 i451 i452 i453 i454 i455 i456 i457 i458 i459 i460 i461 i462 i463 i464 i465 i466 i467 i468 i469 i470 i471 i472 i473 i474 i475 i476 i477 i478 i479 i480 i481 i482 i483 i484 i485 i486 i487 i488 i489 i490 i491 i492 i493 i494 i495 i496 i497 i498 i499 i500 i501 i502 i503 i504 i505 i506 i507 i508 i509 i510 i511 i512 i513 i514 i515 i516 i517 i518 i519 i520 i521 i522 i523 i524 i525 i526 i527 i528 i529 i530 i531 i532 i533 i534 i535 i536 i537 i538 i539 i540 i541 i542 i543 i544 i545 i546 i547 i548 i549 i550 i551 i552 i553 i554 i555 i556 i557 i558 i559 i560 i561 i562 i563 i564 i565 i566 i567 i568 i569 i570 i571 i572 i573 i574 i575 i576 i577 i578 i579 i580 i581 i582 i583 i584 i585 i586 i587 i588 i589 i590 i591 i592 i593 i594 i595 i596 i597 i598 i599 i600 i601 i602 i603 i604 i605 i606 i607 i608 i609 i610 i611 i612 i613 i614 i615 i616 i617 i618 i619 i620 i621 i622 i623 i624 i625 i626 i627 i628 i629 i630 i631 i632 i633 i634 i635 i636 i637 i638 i639 i640 i641 i642 i643 i644 i645 i646 i647 i648 i649 i650 i651 i652 i653 i654 i655 i656 i657 i658 i659 i660 i661 i662 i663 i664 i665 i666 i667 i668 i669 i670 i671 i672 i673 i674 i675 i676 i677 i678 i679 i680 i681 i682 i683 i684 i685 i686 i687 i688 i689 i690 i691 i692 i693 i694 i695 i696 i697 i698 i699 i700 i701 i702 i703 i704 
i705 i706 i707 i708 i709 i710 i711 i712 i713 i714 i715 i716 i717 i718 i719 i720 i721 i722 i723 i724 i725 i726 i727 i728 i729 i730 i731 i732 i733 i734 i735 i736 i737 i738 i739 i740 i741 i742 i743 i744 i745 i746 i747 i748 i749 i750 i751 i752 i753 i754 i755 i756 i757 i758 i759 i760 i761 i762 i763 i764 i765 i766 i767 i768 i769 i770 i771 i772 i773 i774 i775 i776 i777 i778 i779 i780 i781 i782 i783 i784 i785 i786 i787 i788 i789 i790 i791 i792 i793 i794 i795 i796 i797 i798 i799 i800 i801 i802 i803 i804 i805 i806 i807 i808 i809 i810 i811 i812 i813 i814 i815 i816 i817 i818 i819 i820 i821 i822 i823 i824 i825 i826 i827 i828 i829 i830 i831 i832 i833 i834 i835 i836 i837 i838 i839 i840 i841 i842 i843 i844 i845 i846 i847 i848 i849 i850 i851 i852 i853 i854 i855 i856 i857 i858 i859 i860 i861 i862 i863 i864 i865 i866 i867 i868 i869 i870 i871 i872 i873 i874 i875 i876 i877 i878 i879 i880 i881 i882 i883 i884 i885 i886 i887 i888 i889 i890 i891 i892 i893 i894 i895 i896 i897 i898 i899 i900 i901 i902 i903 i904 i905 i906 i907 i908 i909 i910 i911 i912 i913 i914 i915 i916 i917 i918 i919 i920 i921 i922 i923 i924 i925 i926 i927 i928 i929 i930 i931 i932 i933 i934 i935 i936 i937 i938 i939 i940 i941 i942 i943 i944 i945 i946 i947 i948 i949 i950 i951 i952 i953 i954 i955 i956 i957 i958 i959 i960 i961 i962 i963 i964 i965 i966 i967 i968 i969 i970 i971 i972 i973 i974 i975 i976 i977 i978 i979 i980 i981 i982 i983 i984 i985 i986 i987 i988 i989 i990 i991 i992 i993 i994 i995 i996 i997 i998 i999 i1000 b +drop table if exists federated.t1; +create table federated.t1 (id int NOT NULL auto_increment, code char(20) NOT NULL, fileguts blob, creation_date datetime, entered_time datetime default '2004-04-04 04:04:04', primary key(id), index(code)) DEFAULT CHARSET=latin1; +drop table if exists federated.t1; +create table federated.t1 (id int NOT NULL auto_increment, code char(20) NOT NULL, fileguts blob, creation_date datetime, entered_time datetime default '2004-04-04 04:04:04', primary key(id), index(code)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +insert into federated.t1 (code, fileguts, creation_date) values ('ASDFWERQWETWETAWETA', '*()w*09*$()*#)(*09*^90*d)(*s()d8g)(s*ned)(*)(s*d)(*hn(d*)(*sbn)D((#$*(#*%%&#&^$#&#&#&#&^&#*&*#$*&^*(&#(&Q*&&(*!&!(*&*(#&*(%&#*###[[', '2003-03-03 03:03:03'); +insert into federated.t1 (code, fileguts, creation_date) values ('DEUEUEUEUEUEUEUEUEU', '*()w*09*$()*#)(*09*^90*d)(*s()d8g)(s*ned)(*)(s*d)(*hn(d*)(*sbn)D((#$*(#*%%&#&^$#&#&#&#&^&#*&*#$*&^*(&#(&Q*&&(*!&!(*&*(#&*(%&#*###[[', '2004-04-04 04:04:04'); +select * from federated.t1; +id code fileguts creation_date entered_time +1 ASDFWERQWETWETAWETA *()w*09*$()*#)(*09*^90*d)(*s()d8g)(s*ned)(*)(s*d)(*hn(d*)(*sbn)D((#$*(#*%%&#&^$#&#&#&#&^&#*&*#$*&^*(&#(&Q*&&(*!&!(*&*(#&*(%&#*###[[ 2003-03-03 03:03:03 2004-04-04 04:04:04 +2 DEUEUEUEUEUEUEUEUEU *()w*09*$()*#)(*09*^90*d)(*s()d8g)(s*ned)(*)(s*d)(*hn(d*)(*sbn)D((#$*(#*%%&#&^$#&#&#&#&^&#*&*#$*&^*(&#(&Q*&&(*!&!(*&*(#&*(%&#*###[[ 2004-04-04 04:04:04 2004-04-04 04:04:04 +drop table if exists federated.t1; +drop table if exists federated.t1; +drop database if exists federated; +drop table if exists federated.t1; +drop database if exists federated; diff --git a/mysql-test/r/have_federated_db.require b/mysql-test/r/have_federated_db.require new file mode 100644 index 00000000000..dda556a2974 --- /dev/null +++ b/mysql-test/r/have_federated_db.require @@ -0,0 +1,2 @@ +Variable_name Value +have_federated_db YES diff --git a/mysql-test/r/information_schema.result 
b/mysql-test/r/information_schema.result index d421ac1d184..0bd3ba14636 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -49,7 +49,6 @@ TABLE_PRIVILEGES COLUMN_PRIVILEGES TABLE_CONSTRAINTS KEY_COLUMN_USAGE -TABLE_NAMES columns_priv db func @@ -78,7 +77,6 @@ c table_name TABLES TABLES TABLE_PRIVILEGES TABLE_PRIVILEGES TABLE_CONSTRAINTS TABLE_CONSTRAINTS -TABLE_NAMES TABLE_NAMES tables_priv tables_priv time_zone time_zone time_zone_leap_second time_zone_leap_second @@ -96,7 +94,6 @@ c table_name TABLES TABLES TABLE_PRIVILEGES TABLE_PRIVILEGES TABLE_CONSTRAINTS TABLE_CONSTRAINTS -TABLE_NAMES TABLE_NAMES tables_priv tables_priv time_zone time_zone time_zone_leap_second time_zone_leap_second @@ -474,7 +471,7 @@ character_sets CREATE TEMPORARY TABLE `character_sets` ( `DEFAULT_COLLATE_NAME` varchar(60) NOT NULL default '', `DESCRIPTION` varchar(60) NOT NULL default '', `MAXLEN` bigint(3) NOT NULL default '0' -) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2252 +) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2267 set names latin2; SHOW CREATE TABLE INFORMATION_SCHEMA.character_sets; Table Create Table @@ -483,7 +480,7 @@ character_sets CREATE TEMPORARY TABLE `character_sets` ( `DEFAULT_COLLATE_NAME` varchar(60) NOT NULL default '', `DESCRIPTION` varchar(60) NOT NULL default '', `MAXLEN` bigint(3) NOT NULL default '0' -) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2252 +) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2267 set names latin1; create table t1 select * from information_schema.CHARACTER_SETS where CHARACTER_SET_NAME like "latin1"; @@ -577,7 +574,6 @@ Tables_in_information_schema (T%) Table_type TABLES TEMPORARY TABLE_PRIVILEGES TEMPORARY TABLE_CONSTRAINTS TEMPORARY -TABLE_NAMES TEMPORARY create table t1(a int); ERROR 42S02: Unknown table 't1' in information_schema use test; @@ -589,7 +585,23 @@ Tables_in_information_schema (T%) TABLES TABLE_PRIVILEGES TABLE_CONSTRAINTS -TABLE_NAMES +select table_name from tables where table_name='user'; +table_name +user +select column_name, privileges from columns +where table_name='user' and column_name like '%o%'; +column_name privileges +Host select,insert,update,references +Password select,insert,update,references +Drop_priv select,insert,update,references +Reload_priv select,insert,update,references +Shutdown_priv select,insert,update,references +Process_priv select,insert,update,references +Show_db_priv select,insert,update,references +Lock_tables_priv select,insert,update,references +Show_view_priv select,insert,update,references +max_questions select,insert,update,references +max_connections select,insert,update,references use test; create function sub1(i int) returns int return i+1; @@ -627,3 +639,5 @@ constraint_name drop view t2; drop view t3; drop table t4; +select * from information_schema.table_names; +ERROR 42S02: Unknown table 'table_names' in information_schema diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result index 9a6c69b7bea..fe99961f964 100644 --- a/mysql-test/r/innodb.result +++ b/mysql-test/r/innodb.result @@ -1421,19 +1421,19 @@ insert t2 select * from t1; insert t3 select * from t1; checksum table t1, t2, t3, t4 quick; Table Checksum -test.t1 272226711 +test.t1 2948697075 test.t2 NULL test.t3 NULL test.t4 NULL checksum table t1, t2, t3, t4; Table Checksum -test.t1 272226711 +test.t1 2948697075 test.t2 968604391 test.t3 968604391 test.t4 NULL checksum table t1, t2, t3, t4 extended; Table Checksum -test.t1 272226711 +test.t1 3092701434 test.t2 968604391 test.t3 968604391 
test.t4 NULL diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index 0074b2672fc..98020f26e37 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -513,18 +513,18 @@ insert t1 values (1, "aaa", "bbb"), (NULL, "", "ccccc"), (0, NULL, ""); insert t2 select * from t1; checksum table t1, t2, t3 quick; Table Checksum -test.t1 272226711 +test.t1 2948697075 test.t2 NULL test.t3 NULL checksum table t1, t2, t3; Table Checksum -test.t1 272226711 -test.t2 272226711 +test.t1 2948697075 +test.t2 3092701434 test.t3 NULL checksum table t1, t2, t3 extended; Table Checksum -test.t1 272226711 -test.t2 272226711 +test.t1 3092701434 +test.t2 3092701434 test.t3 NULL drop table t1,t2; create table t1 (a int, key (a)); @@ -849,6 +849,213 @@ f 10 g 10 h 10 i 10 +alter table t1 modify v varchar(300), drop key v, drop key v_2, add key v (v); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const 7 Using where; Using index +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 303 NULL 8 Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 303 NULL 7 Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 303 NULL 7 Using where; Using index +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const 7 Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 drop key v, add key v (v(30)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(30)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len 
ref rows Extra +1 SIMPLE t1 ref v v 33 const 7 Using where +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 8 Using where +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 7 Using where +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 7 Using where +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const 7 Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 modify v varchar(600), drop key v, add key v (v); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(600) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 drop table t1; create table t1 (a char(10), unique (a)); insert into t1 values ('a '); diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 50f904af750..1cf2a97a6b3 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -275,6 +275,115 @@ a b c 1 1 1 4 4 NULL drop table t1; +create table t1 ( +a int unsigned primary key, +b int unsigned, +c char(10), +key bc (b, c) +) engine=ndb; +insert into t1 values(1,1,'a'),(2,2,'b'),(3,3,'c'),(4,4,'d'),(5,5,'e'); +insert into t1 select a*7,10*b,'f' from t1; +insert into t1 select a*13,10*b,'g' from t1; +insert into t1 select a*17,10*b,'h' from t1; +insert into t1 select a*19,10*b,'i' from t1; +insert into t1 select a*23,10*b,'j' from t1; +insert into t1 select a*29,10*b,'k' from t1; +select b, c from t1 where b <= 10 and c <'f' order by b, c; +b c +1 a +2 b +3 c +4 d +5 e +select b, c from t1 where b <= 10 and c <'f' order by b desc, c desc; +b c +5 e +4 d +3 c +2 b +1 a +select b, c from t1 where b=4000 and c<'k' order by b, c; +b c +4000 h +4000 i +4000 i +4000 i +4000 j +4000 j +4000 j +4000 j +4000 j +4000 j +select b, c from t1 where b=4000 and c<'k' order by b desc, c desc; +b c +4000 j +4000 j +4000 j +4000 j +4000 j +4000 j +4000 i +4000 i +4000 i +4000 h +select b, c from t1 where 1000<=b and b<=100000 and c<'j' order by b, c; +b c +1000 h +1000 i +1000 i +1000 i +2000 h +2000 i +2000 i +2000 i +3000 h +3000 i +3000 i +3000 i +4000 h +4000 i +4000 i +4000 i +5000 h +5000 i +5000 i +5000 i +10000 i +20000 i +30000 i +40000 i +50000 i +select b, c from t1 where 1000<=b and b<=100000 and c<'j' order by b desc, c desc; +b c +50000 i +40000 i +30000 i +20000 i +10000 i +5000 i +5000 i +5000 i 
+5000 h +4000 i +4000 i +4000 i +4000 h +3000 i +3000 i +3000 i +3000 h +2000 i +2000 i +2000 i +2000 h +1000 i +1000 i +1000 i +1000 h +select min(b), max(b) from t1; +min(b) max(b) +1 5000000 +drop table t1; CREATE TABLE test1 ( SubscrID int(11) NOT NULL auto_increment, UsrID int(11) NOT NULL default '0', diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result index ef399b6662d..d963fd04109 100644 --- a/mysql-test/r/ps_1general.result +++ b/mysql-test/r/ps_1general.result @@ -14,7 +14,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -278,7 +278,7 @@ t2 MyISAM 9 Fixed 0 0 0 64424509439 1024 0 NULL # # # latin1_swedish_ci NULL prepare stmt4 from ' show table status from test like ''t9%'' '; execute stmt4; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t9 MyISAM 10 Dynamic 2 222 444 4294967295 2048 0 NULL # # # latin1_swedish_ci NULL +t9 MyISAM 10 Dynamic 2 220 440 4294967295 2048 0 NULL # # # latin1_swedish_ci NULL prepare stmt4 from ' show status like ''Threads_running'' '; execute stmt4; Variable_name Value @@ -325,6 +325,7 @@ NDB YES/NO Alias for NDBCLUSTER EXAMPLE YES/NO Example storage engine ARCHIVE YES/NO Archive storage engine CSV YES/NO CSV storage engine +FEDERATED YES/NO Federated MySQL storage engine drop table if exists t5; prepare stmt1 from ' drop table if exists t5 ' ; execute stmt1 ; diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result index 347e1c39eb2..d3882b51219 100644 --- a/mysql-test/r/ps_2myisam.result +++ b/mysql-test/r/ps_2myisam.result @@ -11,7 +11,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -66,7 +66,7 @@ def test t9 t9 c14 c14 12 19 19 Y 128 0 63 def test t9 t9 c15 c15 7 19 19 N 1249 0 63 def test t9 t9 c16 c16 11 8 8 Y 128 0 63 def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 1 1 Y 32768 0 63 +def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 def test t9 t9 c19 c19 1 1 1 Y 32768 0 63 def test t9 t9 c20 c20 254 1 1 Y 0 0 8 def test t9 t9 c21 c21 254 10 10 Y 0 0 8 diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result index 57ae4a3793c..6b7cf4f5c18 100644 --- a/mysql-test/r/ps_3innodb.result +++ b/mysql-test/r/ps_3innodb.result @@ -11,7 +11,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 
tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -66,7 +66,7 @@ def test t9 t9 c14 c14 12 19 19 Y 128 0 63 def test t9 t9 c15 c15 7 19 19 N 1249 0 63 def test t9 t9 c16 c16 11 8 8 Y 128 0 63 def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 1 1 Y 32768 0 63 +def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 def test t9 t9 c19 c19 1 1 1 Y 32768 0 63 def test t9 t9 c20 c20 254 1 1 Y 0 0 8 def test t9 t9 c21 c21 254 10 10 Y 0 0 8 diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result index 2f533f4c843..58ea213c7b6 100644 --- a/mysql-test/r/ps_4heap.result +++ b/mysql-test/r/ps_4heap.result @@ -12,7 +12,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 varchar(100), c24 varchar(100), c25 varchar(100), c26 varchar(100), c27 varchar(100), c28 varchar(100), c29 varchar(100), c30 varchar(100), c31 enum('one', 'two', 'three'), @@ -67,7 +67,7 @@ def test t9 t9 c14 c14 12 19 19 Y 128 0 63 def test t9 t9 c15 c15 7 19 19 N 1249 0 63 def test t9 t9 c16 c16 11 8 8 Y 128 0 63 def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 1 1 Y 32768 0 63 +def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 def test t9 t9 c19 c19 1 1 1 Y 32768 0 63 def test t9 t9 c20 c20 254 1 1 Y 0 0 8 def test t9 t9 c21 c21 254 10 10 Y 0 0 8 diff --git a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result index 1eaf747e8c6..8c90cdc588a 100644 --- a/mysql-test/r/ps_5merge.result +++ b/mysql-test/r/ps_5merge.result @@ -13,7 +13,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -33,7 +33,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -53,7 +53,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -109,7 +109,7 @@ def test t9 t9 c14 c14 12 19 19 Y 128 0 63 def test t9 t9 c15 c15 7 19 19 N 1249 0 63 def test t9 t9 c16 c16 11 8 8 Y 128 0 63 def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 1 1 Y 32768 0 63 +def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 def test t9 
t9 c19 c19 1 1 1 Y 32768 0 63 def test t9 t9 c20 c20 254 1 1 Y 0 0 8 def test t9 t9 c21 c21 254 10 10 Y 0 0 8 @@ -3062,7 +3062,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -3118,7 +3118,7 @@ def test t9 t9 c14 c14 12 19 19 Y 128 0 63 def test t9 t9 c15 c15 7 19 19 N 1249 0 63 def test t9 t9 c16 c16 11 8 8 Y 128 0 63 def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 1 1 Y 32768 0 63 +def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 def test t9 t9 c19 c19 1 1 1 Y 32768 0 63 def test t9 t9 c20 c20 254 1 1 Y 0 0 8 def test t9 t9 c21 c21 254 10 10 Y 0 0 8 diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result index 07034e01869..79bb5879d24 100644 --- a/mysql-test/r/ps_6bdb.result +++ b/mysql-test/r/ps_6bdb.result @@ -11,7 +11,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -66,7 +66,7 @@ def test t9 t9 c14 c14 12 19 19 Y 128 0 63 def test t9 t9 c15 c15 7 19 19 N 1249 0 63 def test t9 t9 c16 c16 11 8 8 Y 128 0 63 def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 1 1 Y 32768 0 63 +def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 def test t9 t9 c19 c19 1 1 1 Y 32768 0 63 def test t9 t9 c20 c20 254 1 1 Y 0 0 8 def test t9 t9 c21 c21 254 10 10 Y 0 0 8 diff --git a/mysql-test/r/ps_7ndb.result b/mysql-test/r/ps_7ndb.result index a0da0b5c7bb..bda79a7da97 100644 --- a/mysql-test/r/ps_7ndb.result +++ b/mysql-test/r/ps_7ndb.result @@ -11,7 +11,7 @@ c1 tinyint, c2 smallint, c3 mediumint, c4 int, c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, -c17 year, c18 bit, c19 bool, c20 char, +c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -66,11 +66,11 @@ def test t9 t9 c14 c14 12 19 19 Y 128 0 63 def test t9 t9 c15 c15 7 19 19 N 1249 0 63 def test t9 t9 c16 c16 11 8 8 Y 128 0 63 def test t9 t9 c17 c17 13 4 4 Y 32864 0 63 -def test t9 t9 c18 c18 1 1 1 Y 32768 0 63 +def test t9 t9 c18 c18 1 4 1 Y 32768 0 63 def test t9 t9 c19 c19 1 1 1 Y 32768 0 63 def test t9 t9 c20 c20 254 1 1 Y 0 0 8 -def test t9 t9 c21 c21 253 10 10 Y 0 0 8 -def test t9 t9 c22 c22 253 30 30 Y 0 0 8 +def test t9 t9 c21 c21 254 10 10 Y 0 0 8 +def test t9 t9 c22 c22 254 30 30 Y 0 0 8 def test t9 t9 c23 c23 252 255 8 Y 144 0 63 def test t9 t9 c24 c24 252 255 8 Y 16 0 8 def test t9 t9 c25 c25 252 65535 4 Y 144 0 63 @@ -1756,9 +1756,9 @@ t5 CREATE TABLE `t5` ( `param02` double default NULL, `const03` double NOT NULL default '0', `param03` double default NULL, - `const04` char(3) NOT 
NULL default '', + `const04` varchar(3) NOT NULL default '', `param04` longtext, - `const05` binary(3) NOT NULL default '', + `const05` varbinary(3) NOT NULL default '', `param05` longblob, `const06` varchar(10) NOT NULL default '', `param06` longtext, @@ -1786,9 +1786,9 @@ def test t5 t5 const02 const02 5 3 3 N 32769 1 63 def test t5 t5 param02 param02 5 20 1 Y 32768 31 63 def test t5 t5 const03 const03 5 23 1 N 32769 31 63 def test t5 t5 param03 param03 5 20 1 Y 32768 31 63 -def test t5 t5 const04 const04 254 3 3 N 1 0 8 +def test t5 t5 const04 const04 253 3 3 N 1 0 8 def test t5 t5 param04 param04 252 16777215 3 Y 16 0 8 -def test t5 t5 const05 const05 254 3 3 N 129 0 63 +def test t5 t5 const05 const05 253 3 3 N 129 0 63 def test t5 t5 param05 param05 252 16777215 3 Y 144 0 63 def test t5 t5 const06 const06 253 10 10 N 1 0 8 def test t5 t5 param06 param06 252 16777215 10 Y 16 0 8 @@ -1892,38 +1892,38 @@ from t9 where c1= 1 ; 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 1 Y 128 31 63 -def @arg03 254 20 1 Y 128 31 63 -def @arg04 254 20 1 Y 128 31 63 -def @arg05 254 20 1 Y 128 31 63 -def @arg06 254 20 1 Y 128 31 63 -def @arg07 254 20 1 Y 128 31 63 -def @arg08 254 20 1 Y 128 31 63 -def @arg09 254 20 1 Y 128 31 63 -def @arg10 254 20 1 Y 128 31 63 -def @arg11 254 20 1 Y 128 31 63 -def @arg12 254 20 1 Y 128 31 63 -def @arg13 254 8192 10 Y 128 31 63 -def @arg14 254 8192 19 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 8 Y 128 31 63 -def @arg17 254 20 4 Y 128 31 63 -def @arg18 254 20 1 Y 128 31 63 -def @arg19 254 20 1 Y 128 31 63 -def @arg20 254 8192 1 Y 0 31 8 -def @arg21 254 8192 10 Y 0 31 8 -def @arg22 254 8192 30 Y 0 31 8 -def @arg23 254 8192 8 Y 128 31 63 -def @arg24 254 8192 8 Y 0 31 8 -def @arg25 254 8192 4 Y 128 31 63 -def @arg26 254 8192 4 Y 0 31 8 -def @arg27 254 8192 10 Y 128 31 63 -def @arg28 254 8192 10 Y 0 31 8 -def @arg29 254 8192 8 Y 128 31 63 -def @arg30 254 8192 8 Y 0 31 8 -def @arg31 254 8192 3 Y 0 31 8 -def @arg32 254 8192 6 Y 128 31 63 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 1 Y 128 31 63 +def @arg03 253 20 1 Y 128 31 63 +def @arg04 253 20 1 Y 128 31 63 +def @arg05 253 20 1 Y 128 31 63 +def @arg06 253 20 1 Y 128 31 63 +def @arg07 253 20 1 Y 128 31 63 +def @arg08 253 20 1 Y 128 31 63 +def @arg09 253 20 1 Y 128 31 63 +def @arg10 253 20 1 Y 128 31 63 +def @arg11 253 20 1 Y 128 31 63 +def @arg12 253 20 1 Y 128 31 63 +def @arg13 253 8192 10 Y 128 31 63 +def @arg14 253 8192 19 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 8 Y 128 31 63 +def @arg17 253 20 4 Y 128 31 63 +def @arg18 253 20 1 Y 128 31 63 +def @arg19 253 20 1 Y 128 31 63 +def @arg20 253 8192 1 Y 0 31 8 +def @arg21 253 8192 10 Y 0 31 8 +def @arg22 253 8192 30 Y 0 31 8 +def @arg23 253 8192 8 Y 128 31 63 +def @arg24 253 8192 8 Y 0 31 8 +def @arg25 253 8192 4 Y 128 31 63 +def @arg26 253 8192 4 Y 0 31 8 +def @arg27 253 8192 10 Y 128 31 63 +def @arg28 253 8192 10 Y 0 31 8 +def @arg29 253 8192 8 Y 128 31 63 +def @arg30 253 8192 8 Y 0 31 8 +def @arg31 253 8192 3 Y 0 31 8 +def @arg32 253 8192 6 Y 128 31 63 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 
@arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, @@ -1939,38 +1939,38 @@ from t9 where c1= 0 ; 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 0 Y 128 31 63 -def @arg03 254 20 0 Y 128 31 63 -def @arg04 254 20 0 Y 128 31 63 -def @arg05 254 20 0 Y 128 31 63 -def @arg06 254 20 0 Y 128 31 63 -def @arg07 254 20 0 Y 128 31 63 -def @arg08 254 20 0 Y 128 31 63 -def @arg09 254 20 0 Y 128 31 63 -def @arg10 254 20 0 Y 128 31 63 -def @arg11 254 20 0 Y 128 31 63 -def @arg12 254 20 0 Y 128 31 63 -def @arg13 254 8192 0 Y 128 31 63 -def @arg14 254 8192 0 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 0 Y 128 31 63 -def @arg17 254 20 0 Y 128 31 63 -def @arg18 254 20 0 Y 128 31 63 -def @arg19 254 20 0 Y 128 31 63 -def @arg20 254 8192 0 Y 0 31 8 -def @arg21 254 8192 0 Y 0 31 8 -def @arg22 254 8192 0 Y 0 31 8 -def @arg23 254 8192 0 Y 128 31 63 -def @arg24 254 8192 0 Y 0 31 8 -def @arg25 254 8192 0 Y 128 31 63 -def @arg26 254 8192 0 Y 0 31 8 -def @arg27 254 8192 0 Y 128 31 63 -def @arg28 254 8192 0 Y 0 31 8 -def @arg29 254 8192 0 Y 128 31 63 -def @arg30 254 8192 0 Y 0 31 8 -def @arg31 254 8192 0 Y 0 31 8 -def @arg32 254 8192 0 Y 0 31 8 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 0 Y 128 31 63 +def @arg03 253 20 0 Y 128 31 63 +def @arg04 253 20 0 Y 128 31 63 +def @arg05 253 20 0 Y 128 31 63 +def @arg06 253 20 0 Y 128 31 63 +def @arg07 253 20 0 Y 128 31 63 +def @arg08 253 20 0 Y 128 31 63 +def @arg09 253 20 0 Y 128 31 63 +def @arg10 253 20 0 Y 128 31 63 +def @arg11 253 20 0 Y 128 31 63 +def @arg12 253 20 0 Y 128 31 63 +def @arg13 253 8192 0 Y 128 31 63 +def @arg14 253 8192 0 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 0 Y 128 31 63 +def @arg17 253 20 0 Y 128 31 63 +def @arg18 253 20 0 Y 128 31 63 +def @arg19 253 20 0 Y 128 31 63 +def @arg20 253 8192 0 Y 0 31 8 +def @arg21 253 8192 0 Y 0 31 8 +def @arg22 253 8192 0 Y 0 31 8 +def @arg23 253 8192 0 Y 128 31 63 +def @arg24 253 8192 0 Y 0 31 8 +def @arg25 253 8192 0 Y 128 31 63 +def @arg26 253 8192 0 Y 0 31 8 +def @arg27 253 8192 0 Y 128 31 63 +def @arg28 253 8192 0 Y 0 31 8 +def @arg29 253 8192 0 Y 128 31 63 +def @arg30 253 8192 0 Y 0 31 8 +def @arg31 253 8192 0 Y 0 31 8 +def @arg32 253 8192 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select @@ -1989,38 +1989,38 @@ execute stmt1 using @my_key ; 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext 
longblob longtext one monday execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 1 Y 128 31 63 -def @arg03 254 20 1 Y 128 31 63 -def @arg04 254 20 1 Y 128 31 63 -def @arg05 254 20 1 Y 128 31 63 -def @arg06 254 20 1 Y 128 31 63 -def @arg07 254 20 1 Y 128 31 63 -def @arg08 254 20 1 Y 128 31 63 -def @arg09 254 20 1 Y 128 31 63 -def @arg10 254 20 1 Y 128 31 63 -def @arg11 254 20 1 Y 128 31 63 -def @arg12 254 20 1 Y 128 31 63 -def @arg13 254 8192 10 Y 128 31 63 -def @arg14 254 8192 19 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 8 Y 128 31 63 -def @arg17 254 20 4 Y 128 31 63 -def @arg18 254 20 1 Y 128 31 63 -def @arg19 254 20 1 Y 128 31 63 -def @arg20 254 8192 1 Y 0 31 8 -def @arg21 254 8192 10 Y 0 31 8 -def @arg22 254 8192 30 Y 0 31 8 -def @arg23 254 8192 8 Y 128 31 63 -def @arg24 254 8192 8 Y 0 31 8 -def @arg25 254 8192 4 Y 128 31 63 -def @arg26 254 8192 4 Y 0 31 8 -def @arg27 254 8192 10 Y 128 31 63 -def @arg28 254 8192 10 Y 0 31 8 -def @arg29 254 8192 8 Y 128 31 63 -def @arg30 254 8192 8 Y 0 31 8 -def @arg31 254 8192 3 Y 0 31 8 -def @arg32 254 8192 6 Y 128 31 63 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 1 Y 128 31 63 +def @arg03 253 20 1 Y 128 31 63 +def @arg04 253 20 1 Y 128 31 63 +def @arg05 253 20 1 Y 128 31 63 +def @arg06 253 20 1 Y 128 31 63 +def @arg07 253 20 1 Y 128 31 63 +def @arg08 253 20 1 Y 128 31 63 +def @arg09 253 20 1 Y 128 31 63 +def @arg10 253 20 1 Y 128 31 63 +def @arg11 253 20 1 Y 128 31 63 +def @arg12 253 20 1 Y 128 31 63 +def @arg13 253 8192 10 Y 128 31 63 +def @arg14 253 8192 19 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 8 Y 128 31 63 +def @arg17 253 20 4 Y 128 31 63 +def @arg18 253 20 1 Y 128 31 63 +def @arg19 253 20 1 Y 128 31 63 +def @arg20 253 8192 1 Y 0 31 8 +def @arg21 253 8192 10 Y 0 31 8 +def @arg22 253 8192 30 Y 0 31 8 +def @arg23 253 8192 8 Y 128 31 63 +def @arg24 253 8192 8 Y 0 31 8 +def @arg25 253 8192 4 Y 128 31 63 +def @arg26 253 8192 4 Y 0 31 8 +def @arg27 253 8192 10 Y 128 31 63 +def @arg28 253 8192 10 Y 0 31 8 +def @arg29 253 8192 8 Y 128 31 63 +def @arg30 253 8192 8 Y 0 31 8 +def @arg31 253 8192 3 Y 0 31 8 +def @arg32 253 8192 6 Y 128 31 63 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2029,38 +2029,38 @@ execute stmt1 using @my_key ; 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 0 Y 128 31 63 -def @arg03 254 20 0 Y 128 31 63 -def @arg04 254 20 0 Y 128 31 63 -def @arg05 254 20 0 Y 128 31 63 -def @arg06 254 20 0 Y 128 31 63 -def @arg07 254 20 0 Y 128 31 63 -def @arg08 254 20 0 Y 128 31 63 -def @arg09 254 20 0 Y 128 31 63 -def @arg10 254 20 0 Y 128 31 63 -def @arg11 254 20 0 Y 128 31 63 -def @arg12 254 20 0 Y 128 31 63 -def @arg13 254 8192 0 Y 128 
31 63 -def @arg14 254 8192 0 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 0 Y 128 31 63 -def @arg17 254 20 0 Y 128 31 63 -def @arg18 254 20 0 Y 128 31 63 -def @arg19 254 20 0 Y 128 31 63 -def @arg20 254 8192 0 Y 0 31 8 -def @arg21 254 8192 0 Y 0 31 8 -def @arg22 254 8192 0 Y 0 31 8 -def @arg23 254 8192 0 Y 128 31 63 -def @arg24 254 8192 0 Y 0 31 8 -def @arg25 254 8192 0 Y 128 31 63 -def @arg26 254 8192 0 Y 0 31 8 -def @arg27 254 8192 0 Y 128 31 63 -def @arg28 254 8192 0 Y 0 31 8 -def @arg29 254 8192 0 Y 128 31 63 -def @arg30 254 8192 0 Y 0 31 8 -def @arg31 254 8192 0 Y 0 31 8 -def @arg32 254 8192 0 Y 0 31 8 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 0 Y 128 31 63 +def @arg03 253 20 0 Y 128 31 63 +def @arg04 253 20 0 Y 128 31 63 +def @arg05 253 20 0 Y 128 31 63 +def @arg06 253 20 0 Y 128 31 63 +def @arg07 253 20 0 Y 128 31 63 +def @arg08 253 20 0 Y 128 31 63 +def @arg09 253 20 0 Y 128 31 63 +def @arg10 253 20 0 Y 128 31 63 +def @arg11 253 20 0 Y 128 31 63 +def @arg12 253 20 0 Y 128 31 63 +def @arg13 253 8192 0 Y 128 31 63 +def @arg14 253 8192 0 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 0 Y 128 31 63 +def @arg17 253 20 0 Y 128 31 63 +def @arg18 253 20 0 Y 128 31 63 +def @arg19 253 20 0 Y 128 31 63 +def @arg20 253 8192 0 Y 0 31 8 +def @arg21 253 8192 0 Y 0 31 8 +def @arg22 253 8192 0 Y 0 31 8 +def @arg23 253 8192 0 Y 128 31 63 +def @arg24 253 8192 0 Y 0 31 8 +def @arg25 253 8192 0 Y 128 31 63 +def @arg26 253 8192 0 Y 0 31 8 +def @arg27 253 8192 0 Y 128 31 63 +def @arg28 253 8192 0 Y 0 31 8 +def @arg29 253 8192 0 Y 128 31 63 +def @arg30 253 8192 0 Y 0 31 8 +def @arg31 253 8192 0 Y 0 31 8 +def @arg32 253 8192 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select ? 
:= c1 from t9 where c1= 1" ; @@ -2077,38 +2077,38 @@ into @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, @arg07, @arg08, from t9 where c1= 1 ; execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 1 Y 128 31 63 -def @arg03 254 20 1 Y 128 31 63 -def @arg04 254 20 1 Y 128 31 63 -def @arg05 254 20 1 Y 128 31 63 -def @arg06 254 20 1 Y 128 31 63 -def @arg07 254 20 1 Y 128 31 63 -def @arg08 254 20 1 Y 128 31 63 -def @arg09 254 20 1 Y 128 31 63 -def @arg10 254 20 1 Y 128 31 63 -def @arg11 254 20 1 Y 128 31 63 -def @arg12 254 20 1 Y 128 31 63 -def @arg13 254 8192 10 Y 128 31 63 -def @arg14 254 8192 19 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 8 Y 128 31 63 -def @arg17 254 20 4 Y 128 31 63 -def @arg18 254 20 1 Y 128 31 63 -def @arg19 254 20 1 Y 128 31 63 -def @arg20 254 8192 1 Y 0 31 8 -def @arg21 254 8192 10 Y 0 31 8 -def @arg22 254 8192 30 Y 0 31 8 -def @arg23 254 8192 8 Y 128 31 63 -def @arg24 254 8192 8 Y 0 31 8 -def @arg25 254 8192 4 Y 128 31 63 -def @arg26 254 8192 4 Y 0 31 8 -def @arg27 254 8192 10 Y 128 31 63 -def @arg28 254 8192 10 Y 0 31 8 -def @arg29 254 8192 8 Y 128 31 63 -def @arg30 254 8192 8 Y 0 31 8 -def @arg31 254 8192 3 Y 0 31 8 -def @arg32 254 8192 6 Y 128 31 63 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 1 Y 128 31 63 +def @arg03 253 20 1 Y 128 31 63 +def @arg04 253 20 1 Y 128 31 63 +def @arg05 253 20 1 Y 128 31 63 +def @arg06 253 20 1 Y 128 31 63 +def @arg07 253 20 1 Y 128 31 63 +def @arg08 253 20 1 Y 128 31 63 +def @arg09 253 20 1 Y 128 31 63 +def @arg10 253 20 1 Y 128 31 63 +def @arg11 253 20 1 Y 128 31 63 +def @arg12 253 20 1 Y 128 31 63 +def @arg13 253 8192 10 Y 128 31 63 +def @arg14 253 8192 19 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 8 Y 128 31 63 +def @arg17 253 20 4 Y 128 31 63 +def @arg18 253 20 1 Y 128 31 63 +def @arg19 253 20 1 Y 128 31 63 +def @arg20 253 8192 1 Y 0 31 8 +def @arg21 253 8192 10 Y 0 31 8 +def @arg22 253 8192 30 Y 0 31 8 +def @arg23 253 8192 8 Y 128 31 63 +def @arg24 253 8192 8 Y 0 31 8 +def @arg25 253 8192 4 Y 128 31 63 +def @arg26 253 8192 4 Y 0 31 8 +def @arg27 253 8192 10 Y 128 31 63 +def @arg28 253 8192 10 Y 0 31 8 +def @arg29 253 8192 8 Y 128 31 63 +def @arg30 253 8192 8 Y 0 31 8 +def @arg31 253 8192 3 Y 0 31 8 +def @arg32 253 8192 6 Y 128 31 63 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2121,38 +2121,38 @@ into @arg01, @arg02, @arg03, @arg04, @arg05, @arg06, @arg07, @arg08, from t9 where c1= 0 ; execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 0 Y 128 31 63 -def @arg03 254 20 0 Y 128 31 63 -def @arg04 254 20 0 Y 128 31 63 -def @arg05 254 20 0 Y 128 31 63 -def @arg06 254 20 0 Y 128 31 63 -def @arg07 254 20 0 Y 128 31 63 -def @arg08 254 20 0 Y 128 31 63 -def @arg09 254 20 0 Y 128 31 63 -def @arg10 254 20 0 Y 128 31 63 -def @arg11 254 20 0 Y 128 31 63 -def @arg12 254 
20 0 Y 128 31 63 -def @arg13 254 8192 0 Y 128 31 63 -def @arg14 254 8192 0 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 0 Y 128 31 63 -def @arg17 254 20 0 Y 128 31 63 -def @arg18 254 20 0 Y 128 31 63 -def @arg19 254 20 0 Y 128 31 63 -def @arg20 254 8192 0 Y 0 31 8 -def @arg21 254 8192 0 Y 0 31 8 -def @arg22 254 8192 0 Y 0 31 8 -def @arg23 254 8192 0 Y 128 31 63 -def @arg24 254 8192 0 Y 0 31 8 -def @arg25 254 8192 0 Y 128 31 63 -def @arg26 254 8192 0 Y 0 31 8 -def @arg27 254 8192 0 Y 128 31 63 -def @arg28 254 8192 0 Y 0 31 8 -def @arg29 254 8192 0 Y 128 31 63 -def @arg30 254 8192 0 Y 0 31 8 -def @arg31 254 8192 0 Y 0 31 8 -def @arg32 254 8192 0 Y 0 31 8 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 0 Y 128 31 63 +def @arg03 253 20 0 Y 128 31 63 +def @arg04 253 20 0 Y 128 31 63 +def @arg05 253 20 0 Y 128 31 63 +def @arg06 253 20 0 Y 128 31 63 +def @arg07 253 20 0 Y 128 31 63 +def @arg08 253 20 0 Y 128 31 63 +def @arg09 253 20 0 Y 128 31 63 +def @arg10 253 20 0 Y 128 31 63 +def @arg11 253 20 0 Y 128 31 63 +def @arg12 253 20 0 Y 128 31 63 +def @arg13 253 8192 0 Y 128 31 63 +def @arg14 253 8192 0 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 0 Y 128 31 63 +def @arg17 253 20 0 Y 128 31 63 +def @arg18 253 20 0 Y 128 31 63 +def @arg19 253 20 0 Y 128 31 63 +def @arg20 253 8192 0 Y 0 31 8 +def @arg21 253 8192 0 Y 0 31 8 +def @arg22 253 8192 0 Y 0 31 8 +def @arg23 253 8192 0 Y 128 31 63 +def @arg24 253 8192 0 Y 0 31 8 +def @arg25 253 8192 0 Y 128 31 63 +def @arg26 253 8192 0 Y 0 31 8 +def @arg27 253 8192 0 Y 128 31 63 +def @arg28 253 8192 0 Y 0 31 8 +def @arg29 253 8192 0 Y 128 31 63 +def @arg30 253 8192 0 Y 0 31 8 +def @arg31 253 8192 0 Y 0 31 8 +def @arg32 253 8192 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2167,76 +2167,76 @@ set @my_key= 1 ; execute stmt1 using @my_key ; execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 1 Y 128 31 63 -def @arg03 254 20 1 Y 128 31 63 -def @arg04 254 20 1 Y 128 31 63 -def @arg05 254 20 1 Y 128 31 63 -def @arg06 254 20 1 Y 128 31 63 -def @arg07 254 20 1 Y 128 31 63 -def @arg08 254 20 1 Y 128 31 63 -def @arg09 254 20 1 Y 128 31 63 -def @arg10 254 20 1 Y 128 31 63 -def @arg11 254 20 1 Y 128 31 63 -def @arg12 254 20 1 Y 128 31 63 -def @arg13 254 8192 10 Y 128 31 63 -def @arg14 254 8192 19 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 8 Y 128 31 63 -def @arg17 254 20 4 Y 128 31 63 -def @arg18 254 20 1 Y 128 31 63 -def @arg19 254 20 1 Y 128 31 63 -def @arg20 254 8192 1 Y 0 31 8 -def @arg21 254 8192 10 Y 0 31 8 -def @arg22 254 8192 30 Y 0 31 8 -def @arg23 254 8192 8 Y 128 31 63 -def @arg24 254 8192 8 Y 0 31 8 -def @arg25 254 8192 4 Y 128 31 63 -def @arg26 254 8192 4 Y 0 31 8 -def @arg27 254 8192 10 Y 128 31 63 -def @arg28 254 8192 10 Y 0 31 8 -def @arg29 254 8192 8 Y 128 31 63 -def @arg30 254 8192 8 Y 0 31 8 -def @arg31 254 8192 3 Y 0 31 8 -def @arg32 254 8192 6 Y 128 31 63 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 
1 Y 128 31 63 +def @arg03 253 20 1 Y 128 31 63 +def @arg04 253 20 1 Y 128 31 63 +def @arg05 253 20 1 Y 128 31 63 +def @arg06 253 20 1 Y 128 31 63 +def @arg07 253 20 1 Y 128 31 63 +def @arg08 253 20 1 Y 128 31 63 +def @arg09 253 20 1 Y 128 31 63 +def @arg10 253 20 1 Y 128 31 63 +def @arg11 253 20 1 Y 128 31 63 +def @arg12 253 20 1 Y 128 31 63 +def @arg13 253 8192 10 Y 128 31 63 +def @arg14 253 8192 19 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 8 Y 128 31 63 +def @arg17 253 20 4 Y 128 31 63 +def @arg18 253 20 1 Y 128 31 63 +def @arg19 253 20 1 Y 128 31 63 +def @arg20 253 8192 1 Y 0 31 8 +def @arg21 253 8192 10 Y 0 31 8 +def @arg22 253 8192 30 Y 0 31 8 +def @arg23 253 8192 8 Y 128 31 63 +def @arg24 253 8192 8 Y 0 31 8 +def @arg25 253 8192 4 Y 128 31 63 +def @arg26 253 8192 4 Y 0 31 8 +def @arg27 253 8192 10 Y 128 31 63 +def @arg28 253 8192 10 Y 0 31 8 +def @arg29 253 8192 8 Y 128 31 63 +def @arg30 253 8192 8 Y 0 31 8 +def @arg31 253 8192 3 Y 0 31 8 +def @arg32 253 8192 6 Y 128 31 63 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; execute stmt1 using @my_key ; execute full_info ; Catalog Database Table Table_alias Column Column_alias Name Type Length Max length Is_null Flags Decimals Charsetnr -def @arg01 254 20 1 Y 128 31 63 -def @arg02 254 20 0 Y 128 31 63 -def @arg03 254 20 0 Y 128 31 63 -def @arg04 254 20 0 Y 128 31 63 -def @arg05 254 20 0 Y 128 31 63 -def @arg06 254 20 0 Y 128 31 63 -def @arg07 254 20 0 Y 128 31 63 -def @arg08 254 20 0 Y 128 31 63 -def @arg09 254 20 0 Y 128 31 63 -def @arg10 254 20 0 Y 128 31 63 -def @arg11 254 20 0 Y 128 31 63 -def @arg12 254 20 0 Y 128 31 63 -def @arg13 254 8192 0 Y 128 31 63 -def @arg14 254 8192 0 Y 128 31 63 -def @arg15 254 8192 19 Y 128 31 63 -def @arg16 254 8192 0 Y 128 31 63 -def @arg17 254 20 0 Y 128 31 63 -def @arg18 254 20 0 Y 128 31 63 -def @arg19 254 20 0 Y 128 31 63 -def @arg20 254 8192 0 Y 0 31 8 -def @arg21 254 8192 0 Y 0 31 8 -def @arg22 254 8192 0 Y 0 31 8 -def @arg23 254 8192 0 Y 128 31 63 -def @arg24 254 8192 0 Y 0 31 8 -def @arg25 254 8192 0 Y 128 31 63 -def @arg26 254 8192 0 Y 0 31 8 -def @arg27 254 8192 0 Y 128 31 63 -def @arg28 254 8192 0 Y 0 31 8 -def @arg29 254 8192 0 Y 128 31 63 -def @arg30 254 8192 0 Y 0 31 8 -def @arg31 254 8192 0 Y 0 31 8 -def @arg32 254 8192 0 Y 0 31 8 +def @arg01 253 20 1 Y 128 31 63 +def @arg02 253 20 0 Y 128 31 63 +def @arg03 253 20 0 Y 128 31 63 +def @arg04 253 20 0 Y 128 31 63 +def @arg05 253 20 0 Y 128 31 63 +def @arg06 253 20 0 Y 128 31 63 +def @arg07 253 20 0 Y 128 31 63 +def @arg08 253 20 0 Y 128 31 63 +def @arg09 253 20 0 Y 128 31 63 +def @arg10 253 20 0 Y 128 31 63 +def @arg11 253 20 0 Y 128 31 63 +def @arg12 253 20 0 Y 128 31 63 +def @arg13 253 8192 0 Y 128 31 63 +def @arg14 253 8192 0 Y 128 31 63 +def @arg15 253 8192 19 Y 128 31 63 +def @arg16 253 8192 0 Y 128 31 63 +def @arg17 253 20 0 Y 128 31 63 +def @arg18 253 20 0 Y 128 31 63 +def @arg19 253 20 0 Y 128 31 63 +def @arg20 253 8192 0 Y 0 31 8 +def @arg21 253 8192 0 Y 0 31 8 +def @arg22 253 8192 0 Y 0 31 8 +def @arg23 253 8192 0 Y 128 31 63 +def @arg24 253 8192 0 Y 0 31 8 +def @arg25 253 8192 0 Y 128 31 63 +def @arg26 
253 8192 0 Y 0 31 8 +def @arg27 253 8192 0 Y 128 31 63 +def @arg28 253 8192 0 Y 0 31 8 +def @arg29 253 8192 0 Y 128 31 63 +def @arg30 253 8192 0 Y 0 31 8 +def @arg31 253 8192 0 Y 0 31 8 +def @arg32 253 8192 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1 into ? from t9 where c1= 1" ; diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index dab35262e0a..ec9bd33d301 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -314,57 +314,57 @@ insert into t2 values (1),(2); insert into t3 values (1,1),(2,2); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 2 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 2 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 2 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL insert into t1 values (3),(4); insert into t2 values (3),(4); insert into t3 values (3,3),(4,4); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 4 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 4 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 4 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 5 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 5 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 5 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL delete from t1 where a=3; delete from t2 where b=3; delete from t3 where a=3; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 4 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 4 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 4 9 # # # 9 NULL NULL NULL NULL 
latin1_swedish_ci NULL +t1 HEAP 9 Fixed 4 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 4 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 4 # # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL delete from t1; delete from t2; delete from t3; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 0 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 0 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 0 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 1 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 1 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 1 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL delete from t1 where a=5; delete from t2 where b=5; delete from t3 where a=5; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 0 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 0 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 0 9 # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 0 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 0 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 0 # # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL drop table t1, t2, t3; create database mysqltest; show create database mysqltest; diff --git a/mysql-test/r/type_bit.result b/mysql-test/r/type_bit.result new file mode 100644 index 00000000000..45f887461e7 --- /dev/null +++ b/mysql-test/r/type_bit.result @@ -0,0 +1,370 @@ +select 0 + b'1'; +0 + b'1' +1 +select 0 + b'0'; +0 + b'0' +0 +select 0 + b'000001'; +0 + b'000001' +1 +select 0 + b'000011'; +0 + b'000011' +3 +select 0 + b'000101'; +0 + b'000101' +5 +select 0 + b'000000'; +0 + b'000000' +0 +select 0 + b'10000000'; +0 + b'10000000' +128 +select 0 + b'11111111'; +0 + b'11111111' +255 +select 0 + b'10000001'; +0 + b'10000001' +129 +select 0 + b'1000000000000000'; +0 + b'1000000000000000' +32768 +select 0 + b'1111111111111111'; +0 + b'1111111111111111' +65535 +select 0 + b'1000000000000001'; +0 + b'1000000000000001' +32769 +drop table if exists t1; +create table t1 (a bit(65)); +ERROR 42000: Column length too big for column 'a' (max = 64); use BLOB instead +create table t1 (a bit(0)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bit(1) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; +create table t1 (a 
bit, key(a)) engine=innodb; +ERROR 42000: The storage engine for the table doesn't support BIT FIELD +create table t1 (a bit(64)); +insert into t1 values +(b'1111111111111111111111111111111111111111111111111111111111111111'), +(b'1000000000000000000000000000000000000000000000000000000000000000'), +(b'0000000000000000000000000000000000000000000000000000000000000001'), +(b'1010101010101010101010101010101010101010101010101010101010101010'), +(b'0101010101010101010101010101010101010101010101010101010101010101'); +select hex(a) from t1; +hex(a) +FFFFFFFFFFFFFFFF +8000000000000000 +1 +AAAAAAAAAAAAAAAA +5555555555555555 +drop table t1; +create table t1 (a bit); +insert into t1 values (b'0'), (b'1'), (b'000'), (b'100'), (b'001'); +Warnings: +Warning 1264 Out of range value adjusted for column 'a' at row 4 +select hex(a) from t1; +hex(a) +0 +1 +0 +1 +1 +alter table t1 add unique (a); +ERROR 23000: Duplicate entry '' for key 1 +drop table t1; +create table t1 (a bit(2)); +insert into t1 values (b'00'), (b'01'), (b'10'), (b'100'); +Warnings: +Warning 1264 Out of range value adjusted for column 'a' at row 4 +select a+0 from t1; +a+0 +0 +1 +2 +3 +alter table t1 add key (a); +explain select a+0 from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 2 NULL 4 Using index +select a+0 from t1; +a+0 +0 +1 +2 +3 +drop table t1; +create table t1 (a bit(7), b bit(9), key(a, b)); +insert into t1 values +(94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177), +(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380), +(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36), +(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499), +(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403), +(44, 307), (68, 454), (57, 135); +explain select a+0 from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 5 NULL 38 Using index +select a+0 from t1; +a+0 +0 +4 +5 +9 +23 +24 +28 +29 +30 +31 +34 +44 +49 +56 +57 +59 +60 +61 +68 +68 +75 +77 +78 +79 +87 +88 +94 +94 +104 +106 +108 +111 +116 +118 +119 +122 +123 +127 +explain select b+0 from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 5 NULL 38 Using index +select b+0 from t1; +b+0 +177 +245 +178 +363 +36 +398 +499 +399 +83 +438 +202 +307 +345 +379 +135 +188 +343 +152 +206 +454 +42 +133 +123 +349 +351 +411 +46 +468 +280 +446 +67 +368 +390 +380 +368 +118 +411 +403 +explain select a+0, b+0 from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 5 NULL 38 Using index +select a+0, b+0 from t1; +a+0 b+0 +0 177 +4 245 +5 178 +9 363 +23 36 +24 398 +28 499 +29 399 +30 83 +31 438 +34 202 +44 307 +49 345 +56 379 +57 135 +59 188 +60 343 +61 152 +68 206 +68 454 +75 42 +77 133 +78 123 +79 349 +87 351 +88 411 +94 46 +94 468 +104 280 +106 446 +108 67 +111 368 +116 390 +118 380 +119 368 +122 118 +123 411 +127 403 +explain select a+0, b+0 from t1 where a > 40 and b > 200 order by 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range a a 2 NULL 27 Using where; Using index; Using filesort +select a+0, b+0 from t1 where a > 40 and b > 200 order by 1; +a+0 b+0 +44 307 +49 345 +56 379 +60 343 +68 206 +68 454 +79 349 +87 351 +88 411 +94 468 +104 280 +106 446 +111 368 +116 390 +118 380 +119 368 +123 411 +127 403 +explain select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; 
+id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range a a 2 NULL 8 Using where; Using index; Using filesort +select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; +a+0 b+0 +57 135 +61 152 +59 188 +68 206 +44 307 +60 343 +49 345 +56 379 +68 454 +set @@max_length_for_sort_data=0; +select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; +a+0 b+0 +57 135 +61 152 +59 188 +68 206 +44 307 +60 343 +49 345 +56 379 +68 454 +select hex(min(a)) from t1; +hex(min(a)) +0 +select hex(min(b)) from t1; +hex(min(b)) +24 +select hex(min(a)), hex(max(a)), hex(min(b)), hex(max(b)) from t1; +hex(min(a)) hex(max(a)) hex(min(b)) hex(max(b)) +0 7F 24 1F3 +drop table t1; +create table t1 (a int not null, b bit, c bit(9), key(a, b, c)); +insert into t1 values +(4, NULL, 1), (4, 0, 3), (2, 1, 4), (1, 1, 100), (4, 0, 23), (4, 0, 54), +(56, 0, 22), (4, 1, 100), (23, 0, 1), (4, 0, 34); +select a+0, b+0, c+0 from t1; +a+0 b+0 c+0 +1 1 100 +2 1 4 +4 NULL 1 +4 0 3 +4 0 23 +4 0 34 +4 0 54 +4 1 100 +23 0 1 +56 0 22 +select hex(min(b)) from t1 where a = 4; +hex(min(b)) +0 +select hex(min(c)) from t1 where a = 4 and b = 0; +hex(min(c)) +3 +select hex(max(b)) from t1; +hex(max(b)) +1 +select a+0, b+0, c+0 from t1 where a = 4 and b = 0 limit 2; +a+0 b+0 c+0 +4 0 3 +4 0 23 +select a+0, b+0, c+0 from t1 where a = 4 and b = 1; +a+0 b+0 c+0 +4 1 100 +select a+0, b+0, c+0 from t1 where a = 4 and b = 1 and c=100; +a+0 b+0 c+0 +4 1 100 +select a+0, b+0, c+0 from t1 order by b desc; +a+0 b+0 c+0 +2 1 4 +1 1 100 +4 1 100 +4 0 3 +4 0 23 +4 0 54 +56 0 22 +23 0 1 +4 0 34 +4 NULL 1 +select a+0, b+0, c+0 from t1 order by c; +a+0 b+0 c+0 +4 NULL 1 +23 0 1 +4 0 3 +2 1 4 +56 0 22 +4 0 23 +4 0 34 +4 0 54 +1 1 100 +4 1 100 +drop table t1; +create table t1(a bit(2), b bit(2)); +insert into t1 (a) values (0x01), (0x03), (0x02); +update t1 set b= concat(a); +select a+0, b+0 from t1; +a+0 b+0 +1 1 +3 3 +2 2 +drop table t1; +create table t1 (a bit(7), key(a)); +insert into t1 values (44), (57); +select a+0 from t1; +a+0 +44 +57 +drop table t1; diff --git a/mysql-test/r/type_varchar.result b/mysql-test/r/type_varchar.result index 31fbe7b7b5d..1c2653bd225 100644 --- a/mysql-test/r/type_varchar.result +++ b/mysql-test/r/type_varchar.result @@ -68,3 +68,311 @@ create table t1 (v varbinary(20)); insert into t1 values('a'); insert into t1 values('a '); alter table t1 add primary key (v); +drop table t1; +create table t1 (v varchar(254), index (v)); +insert into t1 values ("This is a test "); +insert into t1 values ("Some sample data"); +insert into t1 values (" garbage "); +insert into t1 values (" This is a test "); +insert into t1 values ("This is a test"); +insert into t1 values ("Hello world"); +insert into t1 values ("Foo bar"); +insert into t1 values ("This is a test"); +insert into t1 values ("MySQL varchar test"); +insert into t1 values ("test MySQL varchar"); +insert into t1 values ("This is a long string to have some random length data included"); +insert into t1 values ("Short string"); +insert into t1 values ("VSS"); +insert into t1 values ("Some samples"); +insert into t1 values ("Bar foo"); +insert into t1 values ("Bye"); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table 
type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using index +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where; Using index +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using index +alter table t1 change v v varchar(255); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(256); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(257); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(258); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 
'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(259); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(258); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(257); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(256); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some 
samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(255); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(254); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using index +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where; Using index +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using index +alter table t1 change v v varchar(253); +alter table t1 change v v varchar(254), drop key v; +alter table t1 change v v varchar(300), add key (v(10)); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 13 NULL 4 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 13 const 4 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 13 NULL 2 Using where; Using filesort +drop table t1; diff --git a/mysql-test/t/archive.test b/mysql-test/t/archive.test index e710de7b35e..8cf54610914 100644 --- a/mysql-test/t/archive.test +++ 
b/mysql-test/t/archive.test @@ -1299,6 +1299,8 @@ INSERT INTO t2 VALUES (4,011403,37,'intercepted','audiology','tinily',''); SELECT * FROM t2; OPTIMIZE TABLE t2; SELECT * FROM t2; +REPAIR TABLE t2; +SELECT * FROM t2; # # Test bulk inserts diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test index 01d82c0ba16..b319cf79ec2 100644 --- a/mysql-test/t/bdb.test +++ b/mysql-test/t/bdb.test @@ -824,7 +824,7 @@ select a from t1; drop table t1; # -# bug#2686 - index_merge select on BerkeleyDB table with varchar PK causes mysqld to crash +# bug#2686 - index_merge select on BerkeleyDB table with varchar PK crashes # create table t1( @@ -842,7 +842,8 @@ select substring(pk1, 1, 4), substring(pk1, 4001), drop table t1; # -# bug#2688 - Wrong index_merge query results for BDB table with variable length primary key +# bug#2688 - Wrong index_merge query results for BDB table with +# variable length primary key # create table t1 ( diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test new file mode 100644 index 00000000000..aa62377be64 --- /dev/null +++ b/mysql-test/t/federated.test @@ -0,0 +1,505 @@ +--source include/have_federated_db.inc + +source include/master-slave.inc; + +# remote table creation + +connection slave; +--replicate-ignore-db=federated +stop slave; + +--disable_warnings +# at this point, we are connected to master +drop database if exists federated; +--enable_warnings +create database federated; + +# I wanted to use timestamp, but results will fail if so!!! +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32) NOT NULL default '', `other` int(20) NOT NULL default '0', created datetime default '2004-04-04 04:04:04', PRIMARY KEY (`id`), KEY `name` (`name`), KEY `other_key` (`other`)) DEFAULT CHARSET=latin1; + +connection master; +--disable_warnings +drop database if exists federated; +--enable_warnings +create database federated; + +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32) NOT NULL default '', `other` int(20) NOT NULL default '0', created datetime default '2004-04-04 04:04:04', PRIMARY KEY (`id`), KEY `name` (`name`), KEY `other_key` (`other`)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; + +insert into federated.t1 (name, other) values ('First Name', 11111); +insert into federated.t1 (name, other) values ('Second Name', 22222); +insert into federated.t1 (name, other) values ('Third Name', 33333); +insert into federated.t1 (name, other) values ('Fourth Name', 44444); +insert into federated.t1 (name, other) values ('Fifth Name', 55555); +insert into federated.t1 (name, other) values ('Sixth Name', 66666); +insert into federated.t1 (name, other) values ('Seventh Name', 77777); +insert into federated.t1 (name, other) values ('Eigth Name', 88888); +insert into federated.t1 (name, other) values ('Ninth Name', 99999); +insert into federated.t1 (name, other) values ('Tenth Name', 101010); + +# basic select +select * from federated.t1; +# with primary key index_read_idx +select * from federated.t1 where id = 5; +# with regular key index_read -> index_read_idx +select * from federated.t1 where name = 'Sixth Name'; +# regular and primary key index_read_idx +select * from federated.t1 where id = 6 and name = 'Sixth Name'; +# with regular key index_read -> index_read_idx +select * from federated.t1 where other = 44444; +select * from federated.t1 where name like '%th%'; +# update - update_row, index_read_idx +update federated.t1 set name = '3rd name' where id 
= 3; +select * from federated.t1 where name = '3rd name'; +# update - update_row, index_read -> index_read_idx +update federated.t1 set name = 'Third name' where name = '3rd name'; +select * from federated.t1 where name = 'Third name'; +# rnd_post, ::position +select * from federated.t1 order by id DESC; +select * from federated.t1 order by name; +select * from federated.t1 order by name DESC; +select * from federated.t1 order by name ASC; +select * from federated.t1 group by other; + +# ::delete_row +delete from federated.t1 where id = 5; +select * from federated.t1 where id = 5; + +# ::delete_all_rows +delete from federated.t1; +select * from federated.t1 where id = 5; + +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32), `other` varchar(20), PRIMARY KEY (`id`) ) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; + +connection slave; +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( `id` int(20) NOT NULL auto_increment, `name` varchar(32), `other` varchar(20), PRIMARY KEY (`id`) ); +insert into federated.t1 (name, other) values ('First Name', 11111); +insert into federated.t1 (name, other) values ('Second Name', NULL); +insert into federated.t1 (name, other) values ('Third Name', 33333); +insert into federated.t1 (name, other) values (NULL, NULL); +insert into federated.t1 (name, other) values ('Fifth Name', 55555); +insert into federated.t1 (name, other) values ('Sixth Name', 66666); +insert into federated.t1 (name) values ('Seventh Name'); +insert into federated.t1 (name, other) values ('Eigth Name', 88888); +insert into federated.t1 (name, other) values ('Ninth Name', 99999); +insert into federated.t1 (other) values ('fee fie foe fum'); + +select * from federated.t1 where other IS NULL; +select * from federated.t1 where name IS NULL; +select * from federated.t1 where name IS NULL and other IS NULL; +select * from federated.t1 where name IS NULL or other IS NULL; +update federated.t1 set name = 'Fourth Name', other = 'four four four' where name IS NULL and other IS NULL; +update federated.t1 set other = 'two two two two' where name = 'Secend Name'; +update federated.t1 set other = 'seven seven' where name like 'Sec%'; +update federated.t1 set other = 'seven seven' where name = 'Seventh Name'; +update federated.t1 set name = 'Tenth Name' where other like 'fee fie%'; +select * from federated.t1 where name IS NULL or other IS NULL ; +select * from federated.t1; + +connection slave; +drop table if exists federated.t1; +CREATE TABLE federated.t1 (id int, name varchar(32), floatval float, other int) DEFAULT CHARSET=latin1; + +connection master; +# test NULLs +drop table if exists federated.t1; +CREATE TABLE federated.t1 (id int, name varchar(32), floatval float, other int) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +# these both should be the same +insert into federated.t1 values (NULL, NULL, NULL, NULL); +insert into federated.t1 values (); +insert into federated.t1 (id) values (1); +insert into federated.t1 (name, floatval, other) values ('foo', 33.33333332, NULL); +insert into federated.t1 (name, floatval, other) values (0, 00.3333, NULL); +select * from federated.t1; +select count(*) from federated.t1 where id IS NULL and name IS NULL and floatval IS NULL and other IS NULL; + +connection slave; +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( blurb_id int NOT NULL DEFAULT 0, blurb text default 
'', primary key(blurb_id)) DEFAULT CHARSET=latin1; + +connection master; +drop table if exists federated.t1; +CREATE TABLE federated.t1 ( blurb_id int NOT NULL DEFAULT 0, blurb text default '', primary key(blurb_id)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; + +INSERT INTO federated.t1 VALUES (1, " MySQL supports a number of column types in several categories: numeric types, date and time types, and string (character) types. This chapter first gives an overview of these column types, and then provides a more detailed description of the properties of the types in each category, and a summary of the column type storage requirements. The overview is intentionally brief. The more detailed descriptions should be consulted for additional information about particular column types, such as the allowable formats in which you can specify values."); +INSERT INTO federated.t1 VALUES (2, "All arithmetic is done using signed BIGINT or DOUBLE values, so you should not use unsigned big integers larger than 9223372036854775807 (63 bits) except with bit functions! If you do that, some of the last digits in the result may be wrong because of rounding errors when converting a BIGINT value to a DOUBLE."); +INSERT INTO federated.t1 VALUES (3, " A floating-point number. p represents the precision. It can be from 0 to 24 for a single-precision floating-point number and from 25 to 53 for a double-precision floating-point number. These types are like the FLOAT and DOUBLE types described immediately following. FLOAT(p) has the same range as the corresponding FLOAT and DOUBLE types, but the display size and number of decimals are undefined. "); +INSERT INTO federated.t1 VALUES(4, "Die Übersetzung einer so umfangreichen technischen Dokumentation wie des MySQL-Referenzhandbuchs ist schon eine besondere Herausforderung. 
Zumindest für jemanden, der seine Zielsprache ernst nimmt:"); +select * from federated.t1; + +connection slave; +drop table if exists federated.t1; +create table federated.t1 (a int not null, b int not null, c int not null, primary key (a),key(b)); + +connection master; +drop table if exists federated.t1; +create table federated.t1 (a int not null, b int not null, c int not null, primary key (a),key(b)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; + +insert into federated.t1 values (3,3,3),(1,1,1),(2,2,2),(4,4,4); +explain select * from federated.t1 order by a; +explain select * from federated.t1 order by b; +explain select * from federated.t1 order by c; +explain select a from federated.t1 order by a; +explain select b from federated.t1 order by b; +explain select a,b from federated.t1 order by b; +explain select a,b from federated.t1; +explain select a,b,c from federated.t1; + +connection slave; +drop table if exists federated.t1; +create table federated.t1 (i1 int, i2 int, i3 int, i4 int, i5 int, i6 int, i7 int, i8 +int, i9 int, i10 int, i11 int, i12 int, i13 int, i14 int, i15 int, i16 int, i17 +int, i18 int, i19 int, i20 int, i21 int, i22 int, i23 int, i24 int, i25 int, +i26 int, i27 int, i28 int, i29 int, i30 int, i31 int, i32 int, i33 int, i34 +int, i35 int, i36 int, i37 int, i38 int, i39 int, i40 int, i41 int, i42 int, +i43 int, i44 int, i45 int, i46 int, i47 int, i48 int, i49 int, i50 int, i51 +int, i52 int, i53 int, i54 int, i55 int, i56 int, i57 int, i58 int, i59 int, +i60 int, i61 int, i62 int, i63 int, i64 int, i65 int, i66 int, i67 int, i68 +int, i69 int, i70 int, i71 int, i72 int, i73 int, i74 int, i75 int, i76 int, +i77 int, i78 int, i79 int, i80 int, i81 int, i82 int, i83 int, i84 int, i85 +int, i86 int, i87 int, i88 int, i89 int, i90 int, i91 int, i92 int, i93 int, +i94 int, i95 int, i96 int, i97 int, i98 int, i99 int, i100 int, i101 int, i102 +int, i103 int, i104 int, i105 int, i106 int, i107 int, i108 int, i109 int, i110 +int, i111 int, i112 int, i113 int, i114 int, i115 int, i116 int, i117 int, i118 +int, i119 int, i120 int, i121 int, i122 int, i123 int, i124 int, i125 int, i126 +int, i127 int, i128 int, i129 int, i130 int, i131 int, i132 int, i133 int, i134 +int, i135 int, i136 int, i137 int, i138 int, i139 int, i140 int, i141 int, i142 +int, i143 int, i144 int, i145 int, i146 int, i147 int, i148 int, i149 int, i150 +int, i151 int, i152 int, i153 int, i154 int, i155 int, i156 int, i157 int, i158 +int, i159 int, i160 int, i161 int, i162 int, i163 int, i164 int, i165 int, i166 +int, i167 int, i168 int, i169 int, i170 int, i171 int, i172 int, i173 int, i174 +int, i175 int, i176 int, i177 int, i178 int, i179 int, i180 int, i181 int, i182 +int, i183 int, i184 int, i185 int, i186 int, i187 int, i188 int, i189 int, i190 +int, i191 int, i192 int, i193 int, i194 int, i195 int, i196 int, i197 int, i198 +int, i199 int, i200 int, i201 int, i202 int, i203 int, i204 int, i205 int, i206 +int, i207 int, i208 int, i209 int, i210 int, i211 int, i212 int, i213 int, i214 +int, i215 int, i216 int, i217 int, i218 int, i219 int, i220 int, i221 int, i222 +int, i223 int, i224 int, i225 int, i226 int, i227 int, i228 int, i229 int, i230 +int, i231 int, i232 int, i233 int, i234 int, i235 int, i236 int, i237 int, i238 +int, i239 int, i240 int, i241 int, i242 int, i243 int, i244 int, i245 int, i246 +int, i247 int, i248 int, i249 int, i250 int, i251 int, i252 int, i253 int, i254 +int, i255 int, i256 int, i257 int, i258 int, i259 int, i260 int, i261 
int, i262 +int, i263 int, i264 int, i265 int, i266 int, i267 int, i268 int, i269 int, i270 +int, i271 int, i272 int, i273 int, i274 int, i275 int, i276 int, i277 int, i278 +int, i279 int, i280 int, i281 int, i282 int, i283 int, i284 int, i285 int, i286 +int, i287 int, i288 int, i289 int, i290 int, i291 int, i292 int, i293 int, i294 +int, i295 int, i296 int, i297 int, i298 int, i299 int, i300 int, i301 int, i302 +int, i303 int, i304 int, i305 int, i306 int, i307 int, i308 int, i309 int, i310 +int, i311 int, i312 int, i313 int, i314 int, i315 int, i316 int, i317 int, i318 +int, i319 int, i320 int, i321 int, i322 int, i323 int, i324 int, i325 int, i326 +int, i327 int, i328 int, i329 int, i330 int, i331 int, i332 int, i333 int, i334 +int, i335 int, i336 int, i337 int, i338 int, i339 int, i340 int, i341 int, i342 +int, i343 int, i344 int, i345 int, i346 int, i347 int, i348 int, i349 int, i350 +int, i351 int, i352 int, i353 int, i354 int, i355 int, i356 int, i357 int, i358 +int, i359 int, i360 int, i361 int, i362 int, i363 int, i364 int, i365 int, i366 +int, i367 int, i368 int, i369 int, i370 int, i371 int, i372 int, i373 int, i374 +int, i375 int, i376 int, i377 int, i378 int, i379 int, i380 int, i381 int, i382 +int, i383 int, i384 int, i385 int, i386 int, i387 int, i388 int, i389 int, i390 +int, i391 int, i392 int, i393 int, i394 int, i395 int, i396 int, i397 int, i398 +int, i399 int, i400 int, i401 int, i402 int, i403 int, i404 int, i405 int, i406 +int, i407 int, i408 int, i409 int, i410 int, i411 int, i412 int, i413 int, i414 +int, i415 int, i416 int, i417 int, i418 int, i419 int, i420 int, i421 int, i422 +int, i423 int, i424 int, i425 int, i426 int, i427 int, i428 int, i429 int, i430 +int, i431 int, i432 int, i433 int, i434 int, i435 int, i436 int, i437 int, i438 +int, i439 int, i440 int, i441 int, i442 int, i443 int, i444 int, i445 int, i446 +int, i447 int, i448 int, i449 int, i450 int, i451 int, i452 int, i453 int, i454 +int, i455 int, i456 int, i457 int, i458 int, i459 int, i460 int, i461 int, i462 +int, i463 int, i464 int, i465 int, i466 int, i467 int, i468 int, i469 int, i470 +int, i471 int, i472 int, i473 int, i474 int, i475 int, i476 int, i477 int, i478 +int, i479 int, i480 int, i481 int, i482 int, i483 int, i484 int, i485 int, i486 +int, i487 int, i488 int, i489 int, i490 int, i491 int, i492 int, i493 int, i494 +int, i495 int, i496 int, i497 int, i498 int, i499 int, i500 int, i501 int, i502 +int, i503 int, i504 int, i505 int, i506 int, i507 int, i508 int, i509 int, i510 +int, i511 int, i512 int, i513 int, i514 int, i515 int, i516 int, i517 int, i518 +int, i519 int, i520 int, i521 int, i522 int, i523 int, i524 int, i525 int, i526 +int, i527 int, i528 int, i529 int, i530 int, i531 int, i532 int, i533 int, i534 +int, i535 int, i536 int, i537 int, i538 int, i539 int, i540 int, i541 int, i542 +int, i543 int, i544 int, i545 int, i546 int, i547 int, i548 int, i549 int, i550 +int, i551 int, i552 int, i553 int, i554 int, i555 int, i556 int, i557 int, i558 +int, i559 int, i560 int, i561 int, i562 int, i563 int, i564 int, i565 int, i566 +int, i567 int, i568 int, i569 int, i570 int, i571 int, i572 int, i573 int, i574 +int, i575 int, i576 int, i577 int, i578 int, i579 int, i580 int, i581 int, i582 +int, i583 int, i584 int, i585 int, i586 int, i587 int, i588 int, i589 int, i590 +int, i591 int, i592 int, i593 int, i594 int, i595 int, i596 int, i597 int, i598 +int, i599 int, i600 int, i601 int, i602 int, i603 int, i604 int, i605 int, i606 +int, i607 int, i608 int, i609 int, i610 int, i611 int, i612 
int, i613 int, i614 +int, i615 int, i616 int, i617 int, i618 int, i619 int, i620 int, i621 int, i622 +int, i623 int, i624 int, i625 int, i626 int, i627 int, i628 int, i629 int, i630 +int, i631 int, i632 int, i633 int, i634 int, i635 int, i636 int, i637 int, i638 +int, i639 int, i640 int, i641 int, i642 int, i643 int, i644 int, i645 int, i646 +int, i647 int, i648 int, i649 int, i650 int, i651 int, i652 int, i653 int, i654 +int, i655 int, i656 int, i657 int, i658 int, i659 int, i660 int, i661 int, i662 +int, i663 int, i664 int, i665 int, i666 int, i667 int, i668 int, i669 int, i670 +int, i671 int, i672 int, i673 int, i674 int, i675 int, i676 int, i677 int, i678 +int, i679 int, i680 int, i681 int, i682 int, i683 int, i684 int, i685 int, i686 +int, i687 int, i688 int, i689 int, i690 int, i691 int, i692 int, i693 int, i694 +int, i695 int, i696 int, i697 int, i698 int, i699 int, i700 int, i701 int, i702 +int, i703 int, i704 int, i705 int, i706 int, i707 int, i708 int, i709 int, i710 +int, i711 int, i712 int, i713 int, i714 int, i715 int, i716 int, i717 int, i718 +int, i719 int, i720 int, i721 int, i722 int, i723 int, i724 int, i725 int, i726 +int, i727 int, i728 int, i729 int, i730 int, i731 int, i732 int, i733 int, i734 +int, i735 int, i736 int, i737 int, i738 int, i739 int, i740 int, i741 int, i742 +int, i743 int, i744 int, i745 int, i746 int, i747 int, i748 int, i749 int, i750 +int, i751 int, i752 int, i753 int, i754 int, i755 int, i756 int, i757 int, i758 +int, i759 int, i760 int, i761 int, i762 int, i763 int, i764 int, i765 int, i766 +int, i767 int, i768 int, i769 int, i770 int, i771 int, i772 int, i773 int, i774 +int, i775 int, i776 int, i777 int, i778 int, i779 int, i780 int, i781 int, i782 +int, i783 int, i784 int, i785 int, i786 int, i787 int, i788 int, i789 int, i790 +int, i791 int, i792 int, i793 int, i794 int, i795 int, i796 int, i797 int, i798 +int, i799 int, i800 int, i801 int, i802 int, i803 int, i804 int, i805 int, i806 +int, i807 int, i808 int, i809 int, i810 int, i811 int, i812 int, i813 int, i814 +int, i815 int, i816 int, i817 int, i818 int, i819 int, i820 int, i821 int, i822 +int, i823 int, i824 int, i825 int, i826 int, i827 int, i828 int, i829 int, i830 +int, i831 int, i832 int, i833 int, i834 int, i835 int, i836 int, i837 int, i838 +int, i839 int, i840 int, i841 int, i842 int, i843 int, i844 int, i845 int, i846 +int, i847 int, i848 int, i849 int, i850 int, i851 int, i852 int, i853 int, i854 +int, i855 int, i856 int, i857 int, i858 int, i859 int, i860 int, i861 int, i862 +int, i863 int, i864 int, i865 int, i866 int, i867 int, i868 int, i869 int, i870 +int, i871 int, i872 int, i873 int, i874 int, i875 int, i876 int, i877 int, i878 +int, i879 int, i880 int, i881 int, i882 int, i883 int, i884 int, i885 int, i886 +int, i887 int, i888 int, i889 int, i890 int, i891 int, i892 int, i893 int, i894 +int, i895 int, i896 int, i897 int, i898 int, i899 int, i900 int, i901 int, i902 +int, i903 int, i904 int, i905 int, i906 int, i907 int, i908 int, i909 int, i910 +int, i911 int, i912 int, i913 int, i914 int, i915 int, i916 int, i917 int, i918 +int, i919 int, i920 int, i921 int, i922 int, i923 int, i924 int, i925 int, i926 +int, i927 int, i928 int, i929 int, i930 int, i931 int, i932 int, i933 int, i934 +int, i935 int, i936 int, i937 int, i938 int, i939 int, i940 int, i941 int, i942 +int, i943 int, i944 int, i945 int, i946 int, i947 int, i948 int, i949 int, i950 +int, i951 int, i952 int, i953 int, i954 int, i955 int, i956 int, i957 int, i958 +int, i959 int, i960 int, i961 int, i962 int, i963 
int, i964 int, i965 int, i966 +int, i967 int, i968 int, i969 int, i970 int, i971 int, i972 int, i973 int, i974 +int, i975 int, i976 int, i977 int, i978 int, i979 int, i980 int, i981 int, i982 +int, i983 int, i984 int, i985 int, i986 int, i987 int, i988 int, i989 int, i990 +int, i991 int, i992 int, i993 int, i994 int, i995 int, i996 int, i997 int, i998 +int, i999 int, i1000 int, b blob) row_format=dynamic; + +connection master; +drop table if exists federated.t1; +create table federated.t1 (i1 int, i2 int, i3 int, i4 int, i5 int, i6 int, i7 int, i8 +int, i9 int, i10 int, i11 int, i12 int, i13 int, i14 int, i15 int, i16 int, i17 +int, i18 int, i19 int, i20 int, i21 int, i22 int, i23 int, i24 int, i25 int, +i26 int, i27 int, i28 int, i29 int, i30 int, i31 int, i32 int, i33 int, i34 +int, i35 int, i36 int, i37 int, i38 int, i39 int, i40 int, i41 int, i42 int, +i43 int, i44 int, i45 int, i46 int, i47 int, i48 int, i49 int, i50 int, i51 +int, i52 int, i53 int, i54 int, i55 int, i56 int, i57 int, i58 int, i59 int, +i60 int, i61 int, i62 int, i63 int, i64 int, i65 int, i66 int, i67 int, i68 +int, i69 int, i70 int, i71 int, i72 int, i73 int, i74 int, i75 int, i76 int, +i77 int, i78 int, i79 int, i80 int, i81 int, i82 int, i83 int, i84 int, i85 +int, i86 int, i87 int, i88 int, i89 int, i90 int, i91 int, i92 int, i93 int, +i94 int, i95 int, i96 int, i97 int, i98 int, i99 int, i100 int, i101 int, i102 +int, i103 int, i104 int, i105 int, i106 int, i107 int, i108 int, i109 int, i110 +int, i111 int, i112 int, i113 int, i114 int, i115 int, i116 int, i117 int, i118 +int, i119 int, i120 int, i121 int, i122 int, i123 int, i124 int, i125 int, i126 +int, i127 int, i128 int, i129 int, i130 int, i131 int, i132 int, i133 int, i134 +int, i135 int, i136 int, i137 int, i138 int, i139 int, i140 int, i141 int, i142 +int, i143 int, i144 int, i145 int, i146 int, i147 int, i148 int, i149 int, i150 +int, i151 int, i152 int, i153 int, i154 int, i155 int, i156 int, i157 int, i158 +int, i159 int, i160 int, i161 int, i162 int, i163 int, i164 int, i165 int, i166 +int, i167 int, i168 int, i169 int, i170 int, i171 int, i172 int, i173 int, i174 +int, i175 int, i176 int, i177 int, i178 int, i179 int, i180 int, i181 int, i182 +int, i183 int, i184 int, i185 int, i186 int, i187 int, i188 int, i189 int, i190 +int, i191 int, i192 int, i193 int, i194 int, i195 int, i196 int, i197 int, i198 +int, i199 int, i200 int, i201 int, i202 int, i203 int, i204 int, i205 int, i206 +int, i207 int, i208 int, i209 int, i210 int, i211 int, i212 int, i213 int, i214 +int, i215 int, i216 int, i217 int, i218 int, i219 int, i220 int, i221 int, i222 +int, i223 int, i224 int, i225 int, i226 int, i227 int, i228 int, i229 int, i230 +int, i231 int, i232 int, i233 int, i234 int, i235 int, i236 int, i237 int, i238 +int, i239 int, i240 int, i241 int, i242 int, i243 int, i244 int, i245 int, i246 +int, i247 int, i248 int, i249 int, i250 int, i251 int, i252 int, i253 int, i254 +int, i255 int, i256 int, i257 int, i258 int, i259 int, i260 int, i261 int, i262 +int, i263 int, i264 int, i265 int, i266 int, i267 int, i268 int, i269 int, i270 +int, i271 int, i272 int, i273 int, i274 int, i275 int, i276 int, i277 int, i278 +int, i279 int, i280 int, i281 int, i282 int, i283 int, i284 int, i285 int, i286 +int, i287 int, i288 int, i289 int, i290 int, i291 int, i292 int, i293 int, i294 +int, i295 int, i296 int, i297 int, i298 int, i299 int, i300 int, i301 int, i302 +int, i303 int, i304 int, i305 int, i306 int, i307 int, i308 int, i309 int, i310 +int, i311 int, i312 int, i313 int, 
i314 int, i315 int, i316 int, i317 int, i318 +int, i319 int, i320 int, i321 int, i322 int, i323 int, i324 int, i325 int, i326 +int, i327 int, i328 int, i329 int, i330 int, i331 int, i332 int, i333 int, i334 +int, i335 int, i336 int, i337 int, i338 int, i339 int, i340 int, i341 int, i342 +int, i343 int, i344 int, i345 int, i346 int, i347 int, i348 int, i349 int, i350 +int, i351 int, i352 int, i353 int, i354 int, i355 int, i356 int, i357 int, i358 +int, i359 int, i360 int, i361 int, i362 int, i363 int, i364 int, i365 int, i366 +int, i367 int, i368 int, i369 int, i370 int, i371 int, i372 int, i373 int, i374 +int, i375 int, i376 int, i377 int, i378 int, i379 int, i380 int, i381 int, i382 +int, i383 int, i384 int, i385 int, i386 int, i387 int, i388 int, i389 int, i390 +int, i391 int, i392 int, i393 int, i394 int, i395 int, i396 int, i397 int, i398 +int, i399 int, i400 int, i401 int, i402 int, i403 int, i404 int, i405 int, i406 +int, i407 int, i408 int, i409 int, i410 int, i411 int, i412 int, i413 int, i414 +int, i415 int, i416 int, i417 int, i418 int, i419 int, i420 int, i421 int, i422 +int, i423 int, i424 int, i425 int, i426 int, i427 int, i428 int, i429 int, i430 +int, i431 int, i432 int, i433 int, i434 int, i435 int, i436 int, i437 int, i438 +int, i439 int, i440 int, i441 int, i442 int, i443 int, i444 int, i445 int, i446 +int, i447 int, i448 int, i449 int, i450 int, i451 int, i452 int, i453 int, i454 +int, i455 int, i456 int, i457 int, i458 int, i459 int, i460 int, i461 int, i462 +int, i463 int, i464 int, i465 int, i466 int, i467 int, i468 int, i469 int, i470 +int, i471 int, i472 int, i473 int, i474 int, i475 int, i476 int, i477 int, i478 +int, i479 int, i480 int, i481 int, i482 int, i483 int, i484 int, i485 int, i486 +int, i487 int, i488 int, i489 int, i490 int, i491 int, i492 int, i493 int, i494 +int, i495 int, i496 int, i497 int, i498 int, i499 int, i500 int, i501 int, i502 +int, i503 int, i504 int, i505 int, i506 int, i507 int, i508 int, i509 int, i510 +int, i511 int, i512 int, i513 int, i514 int, i515 int, i516 int, i517 int, i518 +int, i519 int, i520 int, i521 int, i522 int, i523 int, i524 int, i525 int, i526 +int, i527 int, i528 int, i529 int, i530 int, i531 int, i532 int, i533 int, i534 +int, i535 int, i536 int, i537 int, i538 int, i539 int, i540 int, i541 int, i542 +int, i543 int, i544 int, i545 int, i546 int, i547 int, i548 int, i549 int, i550 +int, i551 int, i552 int, i553 int, i554 int, i555 int, i556 int, i557 int, i558 +int, i559 int, i560 int, i561 int, i562 int, i563 int, i564 int, i565 int, i566 +int, i567 int, i568 int, i569 int, i570 int, i571 int, i572 int, i573 int, i574 +int, i575 int, i576 int, i577 int, i578 int, i579 int, i580 int, i581 int, i582 +int, i583 int, i584 int, i585 int, i586 int, i587 int, i588 int, i589 int, i590 +int, i591 int, i592 int, i593 int, i594 int, i595 int, i596 int, i597 int, i598 +int, i599 int, i600 int, i601 int, i602 int, i603 int, i604 int, i605 int, i606 +int, i607 int, i608 int, i609 int, i610 int, i611 int, i612 int, i613 int, i614 +int, i615 int, i616 int, i617 int, i618 int, i619 int, i620 int, i621 int, i622 +int, i623 int, i624 int, i625 int, i626 int, i627 int, i628 int, i629 int, i630 +int, i631 int, i632 int, i633 int, i634 int, i635 int, i636 int, i637 int, i638 +int, i639 int, i640 int, i641 int, i642 int, i643 int, i644 int, i645 int, i646 +int, i647 int, i648 int, i649 int, i650 int, i651 int, i652 int, i653 int, i654 +int, i655 int, i656 int, i657 int, i658 int, i659 int, i660 int, i661 int, i662 +int, i663 int, i664 int, 
i665 int, i666 int, i667 int, i668 int, i669 int, i670 +int, i671 int, i672 int, i673 int, i674 int, i675 int, i676 int, i677 int, i678 +int, i679 int, i680 int, i681 int, i682 int, i683 int, i684 int, i685 int, i686 +int, i687 int, i688 int, i689 int, i690 int, i691 int, i692 int, i693 int, i694 +int, i695 int, i696 int, i697 int, i698 int, i699 int, i700 int, i701 int, i702 +int, i703 int, i704 int, i705 int, i706 int, i707 int, i708 int, i709 int, i710 +int, i711 int, i712 int, i713 int, i714 int, i715 int, i716 int, i717 int, i718 +int, i719 int, i720 int, i721 int, i722 int, i723 int, i724 int, i725 int, i726 +int, i727 int, i728 int, i729 int, i730 int, i731 int, i732 int, i733 int, i734 +int, i735 int, i736 int, i737 int, i738 int, i739 int, i740 int, i741 int, i742 +int, i743 int, i744 int, i745 int, i746 int, i747 int, i748 int, i749 int, i750 +int, i751 int, i752 int, i753 int, i754 int, i755 int, i756 int, i757 int, i758 +int, i759 int, i760 int, i761 int, i762 int, i763 int, i764 int, i765 int, i766 +int, i767 int, i768 int, i769 int, i770 int, i771 int, i772 int, i773 int, i774 +int, i775 int, i776 int, i777 int, i778 int, i779 int, i780 int, i781 int, i782 +int, i783 int, i784 int, i785 int, i786 int, i787 int, i788 int, i789 int, i790 +int, i791 int, i792 int, i793 int, i794 int, i795 int, i796 int, i797 int, i798 +int, i799 int, i800 int, i801 int, i802 int, i803 int, i804 int, i805 int, i806 +int, i807 int, i808 int, i809 int, i810 int, i811 int, i812 int, i813 int, i814 +int, i815 int, i816 int, i817 int, i818 int, i819 int, i820 int, i821 int, i822 +int, i823 int, i824 int, i825 int, i826 int, i827 int, i828 int, i829 int, i830 +int, i831 int, i832 int, i833 int, i834 int, i835 int, i836 int, i837 int, i838 +int, i839 int, i840 int, i841 int, i842 int, i843 int, i844 int, i845 int, i846 +int, i847 int, i848 int, i849 int, i850 int, i851 int, i852 int, i853 int, i854 +int, i855 int, i856 int, i857 int, i858 int, i859 int, i860 int, i861 int, i862 +int, i863 int, i864 int, i865 int, i866 int, i867 int, i868 int, i869 int, i870 +int, i871 int, i872 int, i873 int, i874 int, i875 int, i876 int, i877 int, i878 +int, i879 int, i880 int, i881 int, i882 int, i883 int, i884 int, i885 int, i886 +int, i887 int, i888 int, i889 int, i890 int, i891 int, i892 int, i893 int, i894 +int, i895 int, i896 int, i897 int, i898 int, i899 int, i900 int, i901 int, i902 +int, i903 int, i904 int, i905 int, i906 int, i907 int, i908 int, i909 int, i910 +int, i911 int, i912 int, i913 int, i914 int, i915 int, i916 int, i917 int, i918 +int, i919 int, i920 int, i921 int, i922 int, i923 int, i924 int, i925 int, i926 +int, i927 int, i928 int, i929 int, i930 int, i931 int, i932 int, i933 int, i934 +int, i935 int, i936 int, i937 int, i938 int, i939 int, i940 int, i941 int, i942 +int, i943 int, i944 int, i945 int, i946 int, i947 int, i948 int, i949 int, i950 +int, i951 int, i952 int, i953 int, i954 int, i955 int, i956 int, i957 int, i958 +int, i959 int, i960 int, i961 int, i962 int, i963 int, i964 int, i965 int, i966 +int, i967 int, i968 int, i969 int, i970 int, i971 int, i972 int, i973 int, i974 +int, i975 int, i976 int, i977 int, i978 int, i979 int, i980 int, i981 int, i982 +int, i983 int, i984 int, i985 int, i986 int, i987 int, i988 int, i989 int, i990 +int, i991 int, i992 int, i993 int, i994 int, i995 int, i996 int, i997 int, i998 +int, i999 int, i1000 int, b blob) row_format=dynamic ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +insert into federated.t1 
values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "PatrickG"); +update federated.t1 set b=repeat('a',256); +update federated.t1 set i1=0, i2=0, i3=0, i4=0, i5=0, i6=0, i7=0, i8=0, i9=0, i10=0; +select * from federated.t1 where i9=0 and i10=0; +update federated.t1 set i50=20; +select * from federated.t1; +delete from federated.t1 where i51=20; +select * from federated.t1; +delete from federated.t1 where i50=20; +select * from federated.t1; + +connection slave; +drop table if exists federated.t1; +create table federated.t1 (id int NOT NULL 
auto_increment, code char(20) NOT NULL, fileguts blob, creation_date datetime, entered_time datetime default '2004-04-04 04:04:04', primary key(id), index(code)) DEFAULT CHARSET=latin1; + +connection master; +drop table if exists federated.t1; +create table federated.t1 (id int NOT NULL auto_increment, code char(20) NOT NULL, fileguts blob, creation_date datetime, entered_time datetime default '2004-04-04 04:04:04', primary key(id), index(code)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 COMMENT='mysql://root@127.0.0.1:9308/federated/t1'; +insert into federated.t1 (code, fileguts, creation_date) values ('ASDFWERQWETWETAWETA', '*()w*09*$()*#)(*09*^90*d)(*s()d8g)(s*ned)(*)(s*d)(*hn(d*)(*sbn)D((#$*(#*%%&#&^$#&#&#&#&^&#*&*#$*&^*(&#(&Q*&&(*!&!(*&*(#&*(%&#*###[[', '2003-03-03 03:03:03'); +insert into federated.t1 (code, fileguts, creation_date) values ('DEUEUEUEUEUEUEUEUEU', '*()w*09*$()*#)(*09*^90*d)(*s()d8g)(s*ned)(*)(s*d)(*hn(d*)(*sbn)D((#$*(#*%%&#&^$#&#&#&#&^&#*&*#$*&^*(&#(&Q*&&(*!&!(*&*(#&*(%&#*###[[', '2004-04-04 04:04:04'); +select * from federated.t1; +drop table if exists federated.t1; + +# TODO +# +# create table federated.t1 (a char(20)) charset=cp1251 ENGINE="FEDERATED" COMMENT="mysql://root@127.0.0.1:9308/federated/t1"; +# +# connection slave; +# drop table if exists federated.t1; +# create table federated.t1 (a char(20)) charset=cp1251; +# +# connection master; +# insert into federated.t1 values (_cp1251'À-ÁÂÃ-1'); +# insert into federated.t1 values (_cp1251'Á-ÂÃÄ-2'); +# set names cp1251; +# insert into federated.t1 values ('Â-ÃÄÅ-3'); +# insert into federated.t1 values ('Ã-ŨÆ-4'); +# select * from federated.t1; +# select hex(a) from federated.t1; +# select hex(a) from federated.t1 order by a desc; +# update federated.t1 set a='À-ÁÂÃ-1íîâûé' where a='À-ÁÂÃ-1'; +# select * from federated.t1; +# delete from federated.t1 where a='Ã-ŨÆ-4'; +# select * from federated.t1; +# delete from federated.t1 where a>'Â-'; +# select * from federated.t1; +# set names default; +# +# drop table if exists federated.t1; +# + +connection slave; +drop table if exists federated.t1; + +connection master; +--disable_warnings +drop database if exists federated; +--enable_warnings + +connection slave; +--disable_warnings +drop table if exists federated.t1; +drop database if exists federated; +--enable_warnings diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 0fe7636e500..f7d4cdd43b2 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -293,6 +293,13 @@ show tables; use information_schema; show tables like "T%"; +# +# Bug#7210: information_schema: can't access when table-name = reserved word +# +select table_name from tables where table_name='user'; +select column_name, privileges from columns +where table_name='user' and column_name like '%o%'; + # # Bug#7212: information_schema: "Can't find file" errors if storage engine gone # Bug#7211: information_schema: crash if bad view @@ -318,3 +325,9 @@ where table_schema='test'; drop view t2; drop view t3; drop table t4; + +# +# Bug#7213: information_schema: redundant non-standard TABLE_NAMES table +# +--error 1109 +select * from information_schema.table_names; diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index 53177511bc6..42325e25ea3 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -147,6 +147,37 @@ select * from t1 use index (bc) where b < 4 order by a; select * from t1 use index (bc) where 
b IS NOT NULL order by a; drop table t1; +# +# Order by again, including descending. +# + +create table t1 ( + a int unsigned primary key, + b int unsigned, + c char(10), + key bc (b, c) +) engine=ndb; + +insert into t1 values(1,1,'a'),(2,2,'b'),(3,3,'c'),(4,4,'d'),(5,5,'e'); +insert into t1 select a*7,10*b,'f' from t1; +insert into t1 select a*13,10*b,'g' from t1; +insert into t1 select a*17,10*b,'h' from t1; +insert into t1 select a*19,10*b,'i' from t1; +insert into t1 select a*23,10*b,'j' from t1; +insert into t1 select a*29,10*b,'k' from t1; +# +select b, c from t1 where b <= 10 and c <'f' order by b, c; +select b, c from t1 where b <= 10 and c <'f' order by b desc, c desc; +# +select b, c from t1 where b=4000 and c<'k' order by b, c; +select b, c from t1 where b=4000 and c<'k' order by b desc, c desc; +select b, c from t1 where 1000<=b and b<=100000 and c<'j' order by b, c; +select b, c from t1 where 1000<=b and b<=100000 and c<'j' order by b desc, c desc; +# +select min(b), max(b) from t1; +# +drop table t1; + # # Bug #6435 CREATE TABLE test1 ( diff --git a/mysql-test/t/ps_4heap.test b/mysql-test/t/ps_4heap.test index 04d995dacb9..1c9346721ab 100644 --- a/mysql-test/t/ps_4heap.test +++ b/mysql-test/t/ps_4heap.test @@ -32,7 +32,7 @@ eval create table t9 c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, - c17 year, c18 bit, c19 bool, c20 char, + c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 varchar(100), c24 varchar(100), c25 varchar(100), c26 varchar(100), c27 varchar(100), c28 varchar(100), c29 varchar(100), c30 varchar(100), c31 enum('one', 'two', 'three'), diff --git a/mysql-test/t/ps_5merge.test b/mysql-test/t/ps_5merge.test index 9a79842709c..891d1be2c57 100644 --- a/mysql-test/t/ps_5merge.test +++ b/mysql-test/t/ps_5merge.test @@ -32,7 +32,7 @@ create table t9 c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, - c17 year, c18 bit, c19 bool, c20 char, + c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), @@ -63,7 +63,7 @@ create table t9 c5 integer, c6 bigint, c7 float, c8 double, c9 double precision, c10 real, c11 decimal(7, 4), c12 numeric(8, 4), c13 date, c14 datetime, c15 timestamp(14), c16 time, - c17 year, c18 bit, c19 bool, c20 char, + c17 year, c18 tinyint, c19 bool, c20 char, c21 char(10), c22 varchar(30), c23 tinyblob, c24 tinytext, c25 blob, c26 text, c27 mediumblob, c28 mediumtext, c29 longblob, c30 longtext, c31 enum('one', 'two', 'three'), diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index b09d7240721..8680da9b31a 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -236,37 +236,37 @@ CREATE TABLE t3 ( insert into t1 values (1),(2); insert into t2 values (1),(2); insert into t3 values (1,1),(2,2); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; insert into t1 values (3),(4); insert into t2 values (3),(4); insert into t3 values (3,3),(4,4); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 
# 9 # show table status; delete from t1 where a=3; delete from t2 where b=3; delete from t3 where a=3; ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; delete from t1; delete from t2; delete from t3; ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; delete from t1 where a=5; delete from t2 where b=5; delete from t3 where a=5; ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; drop table t1, t2, t3; diff --git a/mysql-test/t/type_bit.test b/mysql-test/t/type_bit.test new file mode 100644 index 00000000000..0c1c22099f9 --- /dev/null +++ b/mysql-test/t/type_bit.test @@ -0,0 +1,108 @@ +# +# testing of the BIT column type +# + +select 0 + b'1'; +select 0 + b'0'; +select 0 + b'000001'; +select 0 + b'000011'; +select 0 + b'000101'; +select 0 + b'000000'; +select 0 + b'10000000'; +select 0 + b'11111111'; +select 0 + b'10000001'; +select 0 + b'1000000000000000'; +select 0 + b'1111111111111111'; +select 0 + b'1000000000000001'; + +--disable_warnings +drop table if exists t1; +--enable_warnings + +--error 1074 +create table t1 (a bit(65)); + +create table t1 (a bit(0)); +show create table t1; +drop table t1; + +--error 1178 +create table t1 (a bit, key(a)) engine=innodb; + +create table t1 (a bit(64)); +insert into t1 values +(b'1111111111111111111111111111111111111111111111111111111111111111'), +(b'1000000000000000000000000000000000000000000000000000000000000000'), +(b'0000000000000000000000000000000000000000000000000000000000000001'), +(b'1010101010101010101010101010101010101010101010101010101010101010'), +(b'0101010101010101010101010101010101010101010101010101010101010101'); +select hex(a) from t1; +drop table t1; + +create table t1 (a bit); +insert into t1 values (b'0'), (b'1'), (b'000'), (b'100'), (b'001'); +select hex(a) from t1; +--error 1062 +alter table t1 add unique (a); +drop table t1; + +create table t1 (a bit(2)); +insert into t1 values (b'00'), (b'01'), (b'10'), (b'100'); +select a+0 from t1; +alter table t1 add key (a); +explain select a+0 from t1; +select a+0 from t1; +drop table t1; + +create table t1 (a bit(7), b bit(9), key(a, b)); +insert into t1 values +(94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177), +(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380), +(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36), +(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499), +(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403), +(44, 307), (68, 454), (57, 135); +explain select a+0 from t1; +select a+0 from t1; +explain select b+0 from t1; +select b+0 from t1; +explain select a+0, b+0 from t1; +select a+0, b+0 from t1; +explain select a+0, b+0 from t1 where a > 40 and b > 200 order by 1; +select a+0, b+0 from t1 where a > 40 and b > 200 order by 1; +explain select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; +select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; +set @@max_length_for_sort_data=0; +select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; +select hex(min(a)) from t1; +select hex(min(b)) from t1; +select hex(min(a)), hex(max(a)), hex(min(b)), hex(max(b)) from t1; +drop table t1; + +create table t1 (a int not null, b bit, c bit(9), key(a, b, c)); +insert into t1 values +(4, NULL, 1), 
(4, 0, 3), (2, 1, 4), (1, 1, 100), (4, 0, 23), (4, 0, 54), +(56, 0, 22), (4, 1, 100), (23, 0, 1), (4, 0, 34); +select a+0, b+0, c+0 from t1; +select hex(min(b)) from t1 where a = 4; +select hex(min(c)) from t1 where a = 4 and b = 0; +select hex(max(b)) from t1; +select a+0, b+0, c+0 from t1 where a = 4 and b = 0 limit 2; +select a+0, b+0, c+0 from t1 where a = 4 and b = 1; +select a+0, b+0, c+0 from t1 where a = 4 and b = 1 and c=100; +select a+0, b+0, c+0 from t1 order by b desc; +select a+0, b+0, c+0 from t1 order by c; +drop table t1; + +create table t1(a bit(2), b bit(2)); +insert into t1 (a) values (0x01), (0x03), (0x02); +update t1 set b= concat(a); +select a+0, b+0 from t1; +drop table t1; + +# Some magic numbers + +create table t1 (a bit(7), key(a)); +insert into t1 values (44), (57); +select a+0 from t1; +drop table t1; diff --git a/mysql-test/t/type_varchar.test b/mysql-test/t/type_varchar.test index f6e9bb24087..0168128d513 100644 --- a/mysql-test/t/type_varchar.test +++ b/mysql-test/t/type_varchar.test @@ -32,3 +32,68 @@ create table t1 (v varbinary(20)); insert into t1 values('a'); insert into t1 values('a '); alter table t1 add primary key (v); +drop table t1; + +# +# Test with varchar of lengths 254,255,256,258 & 258 to ensure we don't +# have any problems with varchar with one or two byte length_bytes +# + +create table t1 (v varchar(254), index (v)); +insert into t1 values ("This is a test "); +insert into t1 values ("Some sample data"); +insert into t1 values (" garbage "); +insert into t1 values (" This is a test "); +insert into t1 values ("This is a test"); +insert into t1 values ("Hello world"); +insert into t1 values ("Foo bar"); +insert into t1 values ("This is a test"); +insert into t1 values ("MySQL varchar test"); +insert into t1 values ("test MySQL varchar"); +insert into t1 values ("This is a long string to have some random length data included"); +insert into t1 values ("Short string"); +insert into t1 values ("VSS"); +insert into t1 values ("Some samples"); +insert into t1 values ("Bar foo"); +insert into t1 values ("Bye"); +let $i= 255; +let $j= 5; +while ($j) +{ + select * from t1 where v like 'This is a test' order by v; + select * from t1 where v='This is a test' order by v; + select * from t1 where v like 'S%' order by v; + explain select * from t1 where v like 'This is a test' order by v; + explain select * from t1 where v='This is a test' order by v; + explain select * from t1 where v like 'S%' order by v; + eval alter table t1 change v v varchar($i); + inc $i; + dec $j; +} +let $i= 258; +let $j= 6; +while ($j) +{ + select * from t1 where v like 'This is a test' order by v; + select * from t1 where v='This is a test' order by v; + select * from t1 where v like 'S%' order by v; + explain select * from t1 where v like 'This is a test' order by v; + explain select * from t1 where v='This is a test' order by v; + explain select * from t1 where v like 'S%' order by v; + eval alter table t1 change v v varchar($i); + dec $i; + dec $j; +} +alter table t1 change v v varchar(254), drop key v; + +# Test with length(varchar) > 256 and key < 256 (to ensure things works with +# different kind of packing + +alter table t1 change v v varchar(300), add key (v(10)); +select * from t1 where v like 'This is a test' order by v; +select * from t1 where v='This is a test' order by v; +select * from t1 where v like 'S%' order by v; +explain select * from t1 where v like 'This is a test' order by v; +explain select * from t1 where v='This is a test' order by v; +explain select 
* from t1 where v like 'S%' order by v; +drop table t1; diff --git a/mysys/Makefile.am b/mysys/Makefile.am index 6a118df03cc..b0ca1b402ee 100644 --- a/mysys/Makefile.am +++ b/mysys/Makefile.am @@ -53,7 +53,7 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \ my_net.c my_semaphore.c my_port.c my_sleep.c \ charset.c charset-def.c my_bitmap.c my_bit.c md5.c \ my_gethostbyname.c rijndael.c my_aes.c sha1.c \ - my_handler.c my_netware.c + my_handler.c my_netware.c my_largepage.c EXTRA_DIST = thr_alarm.c thr_lock.c my_pthread.c my_thr_init.c \ thr_mutex.c thr_rwlock.c libmysys_a_LIBADD = @THREAD_LOBJECTS@ diff --git a/mysys/list.c b/mysys/list.c index 64fca10dc0b..c3cd6c94b9f 100644 --- a/mysys/list.c +++ b/mysys/list.c @@ -28,7 +28,7 @@ LIST *list_add(LIST *root, LIST *element) { DBUG_ENTER("list_add"); - DBUG_PRINT("enter",("root: 0x%lx element: %lx", root, element)); + DBUG_PRINT("enter",("root: 0x%lx element: 0x%lx", root, element)); if (root) { if (root->prev) /* If add in mid of list */ diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index 052d6c79ab9..bf7ed7ab6b6 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -341,8 +341,8 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, blocks--; /* Allocate memory for cache page buffers */ if ((keycache->block_mem= - my_malloc_lock((ulong) blocks * keycache->key_cache_block_size, - MYF(0)))) + my_large_malloc((ulong) blocks * keycache->key_cache_block_size, + MYF(MY_WME)))) { /* Allocate memory for blocks, hash_links and hash entries; @@ -351,7 +351,7 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, if ((keycache->block_root= (BLOCK_LINK*) my_malloc((uint) length, MYF(0)))) break; - my_free_lock(keycache->block_mem, MYF(0)); + my_large_free(keycache->block_mem, MYF(0)); } if (blocks < 8) { @@ -421,7 +421,7 @@ err: keycache->blocks= 0; if (keycache->block_mem) { - my_free_lock((gptr) keycache->block_mem, MYF(0)); + my_large_free((gptr) keycache->block_mem, MYF(0)); keycache->block_mem= NULL; } if (keycache->block_root) @@ -605,7 +605,7 @@ void end_key_cache(KEY_CACHE *keycache, my_bool cleanup) { if (keycache->block_mem) { - my_free_lock((gptr) keycache->block_mem, MYF(0)); + my_large_free((gptr) keycache->block_mem, MYF(0)); keycache->block_mem= NULL; my_free((gptr) keycache->block_root, MYF(0)); keycache->block_root= NULL; diff --git a/mysys/my_handler.c b/mysys/my_handler.c index cf8bde31e73..7c22f02fa2a 100644 --- a/mysys/my_handler.c +++ b/mysys/my_handler.c @@ -178,6 +178,7 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, } break; case HA_KEYTYPE_BINARY: + case HA_KEYTYPE_BIT: if (keyseg->flag & HA_SPACE_PACK) { int a_length,b_length,pack_length; @@ -206,7 +207,8 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, b+=length; } break; - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: { int a_length,b_length,pack_length; get_key_length(a_length,a); @@ -228,7 +230,8 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, break; } break; - case HA_KEYTYPE_VARBINARY: + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: { int a_length,b_length,pack_length; get_key_length(a_length,a); diff --git a/mysys/my_largepage.c b/mysys/my_largepage.c new file mode 100644 index 00000000000..0639c360b46 --- /dev/null +++ b/mysys/my_largepage.c @@ -0,0 +1,167 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public 
License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysys_priv.h" + +#ifdef HAVE_LARGE_PAGES + +#ifdef HAVE_SYS_IPC_H +#include <sys/ipc.h> +#endif + +#ifdef HAVE_SYS_SHM_H +#include <sys/shm.h> +#endif + +static uint my_get_large_page_size_int(void); +static gptr my_large_malloc_int(uint size, myf my_flags); +static my_bool my_large_free_int(gptr ptr, myf my_flags); + +/* Gets the size of large pages from the OS */ + +uint my_get_large_page_size(void) +{ + uint size; + DBUG_ENTER("my_get_large_page_size"); + + if (!(size = my_get_large_page_size_int())) + fprintf(stderr, "Warning: Failed to determine large page size\n"); + + DBUG_RETURN(size); +} + +/* + General large pages allocator. + Tries to allocate memory from large pages pool and falls back to + my_malloc_lock() in case of failure +*/ + +gptr my_large_malloc(uint size, myf my_flags) +{ + gptr ptr; + DBUG_ENTER("my_large_malloc"); + + if (my_use_large_pages && my_large_page_size) + { + if ((ptr = my_large_malloc_int(size, my_flags)) != NULL) + DBUG_RETURN(ptr); + if (my_flags & MY_WME) + fprintf(stderr, "Warning: Using conventional memory pool\n"); + } + + DBUG_RETURN(my_malloc_lock(size, my_flags)); +} + +/* + General large pages deallocator. + Tries to deallocate memory as if it was from large pages pool and falls back + to my_free_lock() in case of failure + */ + +void my_large_free(gptr ptr, myf my_flags __attribute__((unused))) +{ + DBUG_ENTER("my_large_free"); + + /* + my_large_free_int() can only fail if ptr was not allocated with + my_large_malloc_int(), i.e. my_malloc_lock() was used so we should free it + with my_free_lock() + */ + if (!my_use_large_pages || !my_large_page_size || + !my_large_free_int(ptr, my_flags)) + my_free_lock(ptr, my_flags); + + DBUG_VOID_RETURN; +} + +#ifdef HUGETLB_USE_PROC_MEMINFO +/* Linux-specific function to determine the size of large pages */ + +uint my_get_large_page_size_int(void) +{ + FILE *f; + uint size = 0; + char buf[256]; + DBUG_ENTER("my_get_large_page_size_int"); + + if (!(f = my_fopen("/proc/meminfo", O_RDONLY, MYF(MY_WME)))) + goto finish; + + while (fgets(buf, sizeof(buf), f)) + if (sscanf(buf, "Hugepagesize: %u kB", &size)) + break; + + my_fclose(f, MYF(MY_WME)); + +finish: + DBUG_RETURN(size * 1024); +} +#endif /* HUGETLB_USE_PROC_MEMINFO */ + +#if HAVE_DECL_SHM_HUGETLB +/* Linux-specific large pages allocator */ + +gptr my_large_malloc_int(uint size, myf my_flags) +{ + int shmid; + gptr ptr; + struct shmid_ds buf; + DBUG_ENTER("my_large_malloc_int"); + + /* Align block size to my_large_page_size */ + size = ((size - 1) & ~(my_large_page_size - 1)) + my_large_page_size; + + shmid = shmget(IPC_PRIVATE, (size_t)size, SHM_HUGETLB | SHM_R | SHM_W); + if (shmid < 0) + { + if (my_flags & MY_WME) + fprintf(stderr, + "Warning: Failed to allocate %d bytes from HugeTLB memory."
+ " errno %d\n", size, errno); + + DBUG_RETURN(NULL); + } + + ptr = shmat(shmid, NULL, 0); + if (ptr == (void *)-1) + { + if (my_flags& MY_WME) + fprintf(stderr, "Warning: Failed to attach shared memory segment," + " errno %d\n", errno); + shmctl(shmid, IPC_RMID, &buf); + + DBUG_RETURN(NULL); + } + + /* + Remove the shared memory segment so that it will be automatically freed + after memory is detached or process exits + */ + shmctl(shmid, IPC_RMID, &buf); + + DBUG_RETURN(ptr); +} + +/* Linux-specific large pages deallocator */ + +my_bool my_large_free_int(byte *ptr, myf my_flags __attribute__((unused))) +{ + DBUG_ENTER("my_large_free_int"); + DBUG_RETURN(shmdt(ptr) == 0); +} +#endif /* HAVE_DECL_SHM_HUGETLB */ + +#endif /* HAVE_LARGE_PAGES */ diff --git a/mysys/my_static.c b/mysys/my_static.c index 5f034555156..57d41676390 100644 --- a/mysys/my_static.c +++ b/mysys/my_static.c @@ -61,6 +61,12 @@ const char *soundex_map= "01230120022455012623010202"; USED_MEM* my_once_root_block=0; /* pointer to first block */ uint my_once_extra=ONCE_ALLOC_INIT; /* Memory to alloc / block */ + /* from my_largepage.c */ +#ifdef HAVE_LARGE_PAGES +my_bool my_use_large_pages= 0; +uint my_large_page_size= 0; +#endif + /* from my_tempnam */ #if !defined(HAVE_TEMPNAM) || defined(HPUX11) int _my_tempnam_used=0; diff --git a/mysys/raid.cc b/mysys/raid.cc index 0b688464fb3..1d2e0cb01f0 100644 --- a/mysys/raid.cc +++ b/mysys/raid.cc @@ -185,7 +185,7 @@ extern "C" { uint my_raid_write(File fd,const byte *Buffer, uint Count, myf MyFlags) { DBUG_ENTER("my_raid_write"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("enter",("Fd: %d Buffer: 0x%lx Count: %u MyFlags: %d", fd, Buffer, Count, MyFlags)); if (is_raid(fd)) { @@ -198,7 +198,7 @@ extern "C" { uint my_raid_read(File fd, byte *Buffer, uint Count, myf MyFlags) { DBUG_ENTER("my_raid_read"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("enter",("Fd: %d Buffer: 0x%lx Count: %u MyFlags: %d", fd, Buffer, Count, MyFlags)); if (is_raid(fd)) { @@ -212,8 +212,9 @@ extern "C" { myf MyFlags) { DBUG_ENTER("my_raid_pread"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u offset: %u MyFlags: %d", - Filedes, Buffer, Count, offset, MyFlags)); + DBUG_PRINT("enter", + ("Fd: %d Buffer: 0x%lx Count: %u offset: %u MyFlags: %d", + Filedes, Buffer, Count, offset, MyFlags)); if (is_raid(Filedes)) { assert(offset != MY_FILEPOS_ERROR); @@ -231,8 +232,9 @@ extern "C" { my_off_t offset, myf MyFlags) { DBUG_ENTER("my_raid_pwrite"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u offset: %u MyFlags: %d", - Filedes, Buffer, Count, offset, MyFlags)); + DBUG_PRINT("enter", + ("Fd: %d Buffer: 0x %lx Count: %u offset: %u MyFlags: %d", + Filedes, Buffer, Count, offset, MyFlags)); if (is_raid(Filedes)) { assert(offset != MY_FILEPOS_ERROR); diff --git a/ndb/docs/doxygen/Doxyfile.mgmapi b/ndb/docs/doxygen/Doxyfile.mgmapi index 4287b37fd97..9db58393ffc 100644 --- a/ndb/docs/doxygen/Doxyfile.mgmapi +++ b/ndb/docs/doxygen/Doxyfile.mgmapi @@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. -EXTRACT_STATIC = NO +EXTRACT_STATIC = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. @@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. 
-VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.ndb b/ndb/docs/doxygen/Doxyfile.ndb index d43a66323f8..3db42ee78af 100644 --- a/ndb/docs/doxygen/Doxyfile.ndb +++ b/ndb/docs/doxygen/Doxyfile.ndb @@ -52,7 +52,7 @@ EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. -EXTRACT_PRIVATE = YES +EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. @@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. -VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.ndbapi b/ndb/docs/doxygen/Doxyfile.ndbapi index 61d58d4fea3..5ca09e4851d 100644 --- a/ndb/docs/doxygen/Doxyfile.ndbapi +++ b/ndb/docs/doxygen/Doxyfile.ndbapi @@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. -EXTRACT_STATIC = NO +EXTRACT_STATIC = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. @@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. -VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.odbc b/ndb/docs/doxygen/Doxyfile.odbc index 93e052d5b9d..88c70b2ccf6 100644 --- a/ndb/docs/doxygen/Doxyfile.odbc +++ b/ndb/docs/doxygen/Doxyfile.odbc @@ -52,7 +52,7 @@ EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. -EXTRACT_PRIVATE = YES +EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. @@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. -VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.test b/ndb/docs/doxygen/Doxyfile.test index 34ee21873ff..762013cc1cf 100644 --- a/ndb/docs/doxygen/Doxyfile.test +++ b/ndb/docs/doxygen/Doxyfile.test @@ -52,7 +52,7 @@ EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. -EXTRACT_PRIVATE = YES +EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. @@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. 
-VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/ndb/include/kernel/signaldata/CreateTable.hpp index 67e510d2ed0..481b323fdb0 100644 --- a/ndb/include/kernel/signaldata/CreateTable.hpp +++ b/ndb/include/kernel/signaldata/CreateTable.hpp @@ -86,6 +86,7 @@ public: NoMoreAttributeRecords = 708, AttributeNameTwice = 720, TableAlreadyExist = 721, + InvalidArraySize = 736, ArraySizeTooBig = 737, RecordTooBig = 738, InvalidPrimaryKeySize = 739, diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp index 5ec09269695..b3475841c87 100644 --- a/ndb/include/ndbapi/Ndb.hpp +++ b/ndb/include/ndbapi/Ndb.hpp @@ -17,30 +17,37 @@ /** @mainpage NDB API Programmers' Guide - This guide assumes a basic familiarity with NDB Cluster concepts. + This guide assumes a basic familiarity with MySQL Cluster concepts. Some of the fundamental ones are described in section @ref secConcepts. - The NDB API is an NDB Cluster application interface - that implements both synchronous and asynchronous transactions. + The NDB API is an MySQL Cluster application interface + that implements transactions. The NDB API consists of the following fundamental classes: + - Ndb_cluster_connection class representing a connection to a cluster, - Ndb is the main class representing the database, - NdbConnection represents a transaction, - - NdbOperation represents a transaction operation using primary key, - - NdbIndexOperation represents a transaction operation using a secondary - index, + - NdbOperation represents a operation using primary key, + - NdbScanOperation represents a operation performing a full table scan. + - NdbIndexOperation represents a operation using a unique hash index, + - NdbIndexScanOperation represents a operation performing a scan using + an ordered index, - NdbRecAttr represents the value of an attribute, and - NdbDictionary represents meta information about tables and attributes. - - NdbError represents an error condition + - NdbError contains a specification of an error. There are also some auxiliary classes. The main structure of an application program is as follows: + -# Construct and connect to a cluster using the Ndb_cluster_connection + object. -# Construct and initialize Ndb object(s). - -# Define and execute (synchronous or asynchronous) transactions. + -# Define and execute transactions using NdbConnection and Ndb*Operation. -# Delete Ndb objects + -# Delete connection to cluster The main structure of a transaction is as follows: - -# Start transaction - -# Add and define operations (associated with the transaction) + -# Start transaction, a NdbConnection + -# Add and define operations (associated with the transaction), + Ndb*Operation -# Execute transaction The execute can be of two different types, @@ -71,8 +78,8 @@ At this step the transaction is being defined. It is not yet sent to the NDB kernel. -# Add and define operations to the transaction - (using NdbConnection::getNdbOperation and - methods from class NdbOperation). + (using NdbConnection::getNdb*Operation and + methods from class Ndb*Operation). The transaction is still not sent to the NDB kernel. -# Execute the transaction (using NdbConnection::execute). -# Close the transaction (using Ndb::closeTransaction). 
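As an illustration of the application and transaction structure just listed, a minimal sketch might look as follows. This is a sketch only, with error handling excluded; "TEST_DB", "MYTABLENAME", "ATTR1" and "ATTR2" are placeholder names, and the connect()/wait_until_ready() calls on Ndb_cluster_connection are assumed here and may differ between versions:

  @code
  Ndb_cluster_connection cluster_connection;      // 1. connection to the cluster
  cluster_connection.connect();                   //    (assumed call)
  cluster_connection.wait_until_ready(30, 0);     //    (assumed call)

  Ndb* myNdb= new Ndb(&cluster_connection, "TEST_DB");  // 2. construct Ndb object
  myNdb->init();                                        //    and initialize it

  NdbConnection* myTrans= myNdb->startTransaction();    // 3. start a transaction

  NdbOperation* myOp= myTrans->getNdbOperation("MYTABLENAME");
  myOp->readTuple(NdbOperation::LM_Read);         //    define operation and lock mode
  myOp->equal("ATTR1", 1);                        //    search condition on the key
  NdbRecAttr* myRecAttr= myOp->getValue("ATTR2", NULL);

  myTrans->execute(Commit);                       //    execute the transaction

  myNdb->closeTransaction(myTrans);               //    always close the transaction
  delete myNdb;                                   // 4. delete the Ndb object
  @endcode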
@@ -82,20 +89,21 @@ To execute several parallel synchronous transactions, one can either use multiple Ndb objects in several threads or start multiple applications programs. + +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL Another way to execute several parallel transactions is to use asynchronous transactions. - +#endif @section secNdbOperations Operations Each transaction (NdbConnection object) consist of a list of - operations (NdbOperation or NdbIndexOperation objects. - NdbIndexOperation is used for accessing tables through secondary indexes). + operations (Ndb*Operation objects). Operations are of two different kinds: -# standard operations, and -# interpreted program operations. -

Standard Operations
+  Single row operations
After the operation is created using NdbConnection::getNdbOperation (or NdbConnection::getNdbIndexOperation), it is defined in the following three steps: @@ -106,36 +114,42 @@ -# Specify attribute actions (e.g. using NdbOperation::getValue) - Example code (using an NdbOperation): + Example code (using an NdbOperation and excluding error handling): @code - MyOperation = MyConnection->getNdbOperation("MYTABLENAME"); // 1. Create - if (MyOperation == NULL) APIERROR(MyConnection->getNdbError()); + // 1. Create + MyOperation= MyConnection->getNdbOperation("MYTABLENAME"); - MyOperation->readTuple(); // 2. Define type of operation - MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions + // 2. Define type of operation and lock mode + MyOperation->readTuple(NdbOperation::LM_Read); + + // 3. Specify Search Conditions + MyOperation->equal("ATTR1", i); - MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions - if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError()); + // 4. Attribute Actions + MyRecAttr= MyOperation->getValue("ATTR2", NULL); @endcode - For more examples, see @ref ndbapi_example1.cpp and @ref ndbapi_example2.cpp. + For more examples, see @ref ndbapi_example1.cpp and + @ref ndbapi_example2.cpp. - Example code using an NdbIndexOperation: + Example code (using an NdbIndexOperation and excluding error handling): @code - MyOperation = // 1. Create - MyConnection->getNdbIndexOperation("MYINDEX", "MYTABLENAME"); - if (MyOperation == NULL) APIERROR(MyConnection->getNdbError()); + // 1. Create + MyOperation= MyConnection->getNdbIndexOperation("MYINDEX", "MYTABLENAME"); - MyOperation->readTuple(); // 2. Define type of operation - MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions + // 2. Define type of operation and lock mode + MyOperation->readTuple(NdbOperation::LM_Read); - MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions - if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError()); + // 3. Specify Search Conditions + MyOperation->equal("ATTR1", i); + + // 4. Attribute Actions + MyRecAttr = MyOperation->getValue("ATTR2", NULL); @endcode For more examples, see @ref ndbapi_example4.cpp. -
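An update of an existing tuple follows the same four steps. As a sketch (error handling again excluded; the table and attribute names are the same placeholders as above, and aValue stands for the new attribute value):

  @code
  // 1. Create
  MyOperation= MyConnection->getNdbOperation("MYTABLENAME");

  // 2. Define type of operation
  MyOperation->updateTuple();

  // 3. Specify Search Conditions
  MyOperation->equal("ATTR1", i);

  // 4. Attribute Actions: supply the new value
  MyOperation->setValue("ATTR2", aValue);
  @endcode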

-  Step 1: Define Standard Operation Type
-  The following types of standard operations exist:
+  Step 1: Define single row operation type
+ The following types of operations exist: -# NdbOperation::insertTuple : inserts a non-existing tuple -# NdbOperation::writeTuple : @@ -146,79 +160,26 @@ -# NdbOperation::deleteTuple : deletes an existing tuple -# NdbOperation::readTuple : - reads an existing tuple - -# NdbOperation::readTupleExclusive : - reads an existing tuple using an exclusive lock - -# NdbOperation::simpleRead : - reads an existing tuple (using shared read lock), - but releases lock immediately after read - -# NdbOperation::committedRead : - reads committed tuple - -# NdbOperation::dirtyUpdate : - updates an existing tuple, but releases lock immediately - after read (uses dirty lock) - -# NdbOperation::dirtyWrite : - updates or writes a tuple, but releases lock immediately - after read (uses dirty lock) + reads an existing tuple with specified lock mode All of these operations operate on the unique tuple key. (When NdbIndexOperation is used then all of these operations - operate on a defined secondary index.) - - - Some comments: - - NdbOperation::simpleRead and - NdbOperation::committedRead can execute on the same transaction - as the above operations but will release its locks immediately - after reading the tuple. - NdbOperation::simpleRead will always read the latest version - of the tuple. - Thus it will wait until it can acquire a shared read lock on - the tuple. - NdbOperation::committedRead will read the latest committed - version of the tuple. -
- Both NdbOperation::simpleRead and NdbOperation::committedRead - are examples of consistent reads which are not repeatable. - All reads read the latest version if updates were made by the same - transaction. - Errors on simple read are only reported by the NdbOperation object. - These error codes are not transferred to the NdbConnection object. - - NdbOperation::dirtyUpdate and NdbOperation::dirtyWrite - will execute in the same transaction - but will release the lock immediately after updating the - tuple. - It will wait on the lock until it can acquire an exclusive - write lock. - In a replicated version of NDB Cluster NdbOperation::dirtyUpdate - can lead to inconsistency between the replicas. - Examples of when it could be used is - to update statistical counters on tuples which are "hot-spots". + operate on a defined unique hash index.) @note If you want to define multiple operations within the same transaction, - then you need to call NdbConnection::getNdbOperation - (or NdbConnection::getNdbIndexOperation) for each + then you need to call NdbConnection::getNdb*Operation for each operation. -
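For example, a transaction that reads two rows needs two operation objects, each obtained with its own call (a sketch, error handling excluded, placeholder names as before):

  @code
  NdbOperation* MyOperation1= MyConnection->getNdbOperation("MYTABLENAME");
  MyOperation1->readTuple(NdbOperation::LM_Read);
  MyOperation1->equal("ATTR1", 1);
  NdbRecAttr* MyRecAttr1= MyOperation1->getValue("ATTR2", NULL);

  NdbOperation* MyOperation2= MyConnection->getNdbOperation("MYTABLENAME");
  MyOperation2->readTuple(NdbOperation::LM_Read);
  MyOperation2->equal("ATTR1", 2);
  NdbRecAttr* MyRecAttr2= MyOperation2->getValue("ATTR2", NULL);

  // both operations are executed as part of the same transaction
  MyConnection->execute(Commit);
  @endcode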

Step 2: Specify Search Conditions

The search condition is used to select tuples. - (In the current NdbIndexOperation implementation - this means setting the value of - the secondary index attributes of the wanted tuple.) - If a tuple identity is used, then NdbOperation::setTupleId - is used to define the search key when inserting new tuples. - Otherwise, NdbOperation::equal is used. - - For NdbOperation::insertTuple it is also allowed to define the + For NdbOperation::insertTuple it is also allowed to define the search key by using NdbOperation::setValue. The NDB API will automatically detect that it is supposed to use NdbOperation::equal instead. For NdbOperation::insertTuple it is not necessary to use NdbOperation::setValue on key attributes before other attributes. -
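As a sketch of the two equivalent ways of defining the key for an insert (placeholder names, error handling excluded):

  @code
  // key attribute defined with equal()
  MyOperation->insertTuple();
  MyOperation->equal("ATTR1", i);
  MyOperation->setValue("ATTR2", aValue);

  // equivalent: key attribute defined with setValue(); the NDB API
  // detects that equal() should be used for the key attribute
  MyOperation->insertTuple();
  MyOperation->setValue("ATTR1", i);
  MyOperation->setValue("ATTR2", aValue);
  @endcode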

Step 3: Specify Attribute Actions

Now it is time to define which attributes should be read or updated. Deletes can neither read nor set values, read can only read values and @@ -495,7 +456,7 @@ should match the automatic numbering to make it easier to debug the interpreted program. - +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL @section secAsync Asynchronous Transactions The asynchronous interface is used to increase the speed of transaction executing by better utilizing the connection @@ -583,7 +544,7 @@ The poll method returns the number of transactions that have finished processing and executed their callback methods. - + @note When an asynchronous transaction has been started and sent to the NDB kernel, it is not allowed to execute any methods on objects belonging to this transaction until the transaction @@ -595,7 +556,7 @@ More about how transactions are send the NDB Kernel is available in section @ref secAdapt. - +#endif @section secError Error Handling @@ -671,6 +632,7 @@ * @include ndbapi_example4.cpp */ +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** * @page select_all.cpp select_all.cpp * @include select_all.cpp @@ -680,6 +642,7 @@ * @page ndbapi_async.cpp ndbapi_async.cpp * @include ndbapi_async.cpp */ +#endif /** * @page ndbapi_scan.cpp ndbapi_scan.cpp @@ -691,8 +654,7 @@ @page secAdapt Adaptive Send Algorithm At the time of "sending" the transaction - (using NdbConnection::execute, NdbConnection::executeAsynch, - Ndb::sendPreparedTransactions, or Ndb::sendPollNdb), the transactions + (using NdbConnection::execute), the transactions are in reality not immediately transfered to the NDB Kernel. Instead, the "sent" transactions are only kept in a special send list (buffer) in the Ndb object to which they belong. @@ -847,12 +809,56 @@ then a timeout error occurs. Concurrent transactions (parallel application programs, thread-based - applications, or applications with asynchronous transactions) + applications) sometimes deadlock when they try to access the same information. Applications need to be programmed so that timeout errors occurring due to deadlocks are handled. This generally means that the transaction encountering timeout should be rolled back and restarted. + + @section secHint Hints and performance + + NDB API can be hinted to select a particular transaction coordinator. + The default method is round robin where each set of new transactions + is placed on the next NDB kernel node. + By providing a distribution key (usually the primary key + of the mostly used table of the transaction) for a record + the transaction will be placed on the node where the primary replica + of that record resides. + Note that this is only a hint, the system can + be under reconfiguration and then the NDB API + will use select the transaction coordinator without using + this hint. + + Placing the transaction coordinator close + to the actual data used in the transaction can in many cases + improve performance significantly. This is particularly true for + systems using TCP/IP. A system using Solaris and a 500 MHz processor + has a cost model for TCP/IP communication which is: + + 30 microseconds + (100 nanoseconds * no of Bytes) + + This means that if we can ensure that we use "popular" links we increase + buffering and thus drastically reduce the communication cost. + Systems using SCI has a different cost model which is: + + 5 microseconds + (10 nanoseconds * no of Bytes) + + Thus SCI systems are much less dependent on selection of + transaction coordinators. 
+ Typically TCP/IP systems spend 30-60% of the time during communication, + whereas SCI systems typically spend 5-10% of the time during + communication. + Thus SCI means that less care from the NDB API programmer is + needed and great scalability can be achieved even for applications using + data from many parts of the database. + + A simple example is an application that uses many simple updates where + a transaction needs to update one record. + This record has a 32 bit primary key, + which is also the distribution key. + Then the keyData will be the address of the integer + of the primary key and keyLen will be 4. */ #ifndef Ndb_H @@ -945,6 +951,11 @@ public: * Semaphores, mutexes and so forth are easy ways of issuing memory * barriers without having to bother about the memory barrier concept. * + */ + +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL +// to be documented later +/* * If one Ndb object is used to handle parallel transactions through the * asynchronous programming interface, please read the notes regarding * asynchronous transactions (Section @ref secAsync). @@ -955,6 +966,8 @@ public: * asynchronous transaction or the methods for * synchronous transactions but not both. */ +#endif + class Ndb { friend class NdbReceiver; @@ -976,29 +989,30 @@ public: * @{ */ /** - * The starting point of your application code is to create an - * Ndb object. - * This object represents the NDB kernel and is the main - * object used in interaction with the NDB kernel. + * The Ndb object represents a connection to a database. * + * @note the init() method must be called before it may be used + * + * @param ndb_cluster_connection is a connection to a cluster containing + * the database to be used * @param aCatalogName is the name of the catalog you want to use. * @note The catalog name provides a name space for the tables and * indexes created in any connection from the Ndb object. * @param aSchemaName is the name of the schema you - * want to use. It is optional and defaults to the "def" schema. + * want to use. * @note The schema name provides an additional name space * for the tables and indexes created in a given catalog. - * @note The methods get/setDatabaseName and get/setDatabaseSchemaName - * are equivalent to get/setCatalogName and get/setSchemaName. - * The get/setDatabaseName and get/setDatabaseSchemaName are - * deprecated. */ - Ndb(const char* aCatalogName = "", const char* aSchemaName = "def"); Ndb(Ndb_cluster_connection *ndb_cluster_connection, const char* aCatalogName = "", const char* aSchemaName = "def"); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // depricated + Ndb(const char* aCatalogName = "", const char* aSchemaName = "def"); +#endif ~Ndb(); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** * The current catalog name can be fetched by getCatalogName. * @@ -1026,7 +1040,7 @@ public: * @param aSchemaName is the new name of the current schema */ void setSchemaName(const char * aSchemaName); - +#endif /** * The current database name can be fetched by getDatabaseName. @@ -1057,22 +1071,22 @@ public: void setDatabaseSchemaName(const char * aDatabaseSchemaName); /** - * Before anything else it is necessary to initialize (start) - * the Ndb object. + * Initializes the Ndb object * * @param maxNoOfTransactions * Maximum number of parallel - * NdbConnection objects that should be handled by the Ndb object. - * A value larger than 1024 will be downgraded to 1024. - * This means that one Ndb object can handle at most 1024 parallel - * transactions. - * @return 0 if successful, -1 otherwise. 
+ * NdbConnection objects that can be handled by the Ndb object. + * Maximum value is 1024. * - * @note The internal implementation multiplies this value - * with 3. + * @note each scan or index scan operation uses one extra + * NdbConnection object + * + * @return 0 if successful, -1 otherwise. */ int init(int maxNoOfTransactions = 4); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // depricated /** * Wait for Ndb object to successfully set-up connections to * the NDB kernel. @@ -1085,8 +1099,8 @@ public: * @return 0: Ndb is ready and timeout has not occurred.
* -1: Timeout has expired */ - int waitUntilReady(int timeout = 60); +#endif /** @} *********************************************************************/ @@ -1096,30 +1110,55 @@ public: */ /** - * Query the database for schema information - * (without performing any transaction). + * Get an object for retrieving or manipulating database schema information + * + * @note this object operates outside any transaction * * @return Object containing meta information about all tables * in NDB Cluster. */ class NdbDictionary::Dictionary* getDictionary() const; - NdbEventOperation* createEventOperation(const char* eventName, - const int bufferLength); - int dropEventOperation(NdbEventOperation*); - void monitorEvent(NdbEventOperation *, NdbEventCallback, void*); - int pollEvents(int aMillisecondNumber); + + /** @} *********************************************************************/ + + /** + * @name Event subscriptions + * @{ + */ /** - * Get the application node identity. + * Create a subcription to an event defined in the database * - * Each node (DB nodes, Applications, and Management Servers) - * has its own node identity in the NDB Cluster. - * See documentation for the management server configuration file. + * @param eventName + * unique identifier of the event + * @param bufferLength + * buffer size for storing event data * - * @return Node id of this application. + * @return Object representing an event, NULL on failure */ - int getNodeId(); + NdbEventOperation* createEventOperation(const char* eventName, + const int bufferLength); + /** + * Drop a subscription to an event + * + * @param eventName + * unique identifier of the event + * + * @return 0 on success + */ + int dropEventOperation(NdbEventOperation* eventName); + + /** + * Wait for an event to occur. Will return as soon as an event + * is detected on any of the created events. + * + * @param aMillisecondNumber + * maximum time to wait + * + * @return the number of events that has occured, -1 on failure + */ + int pollEvents(int aMillisecondNumber); /** @} *********************************************************************/ @@ -1129,71 +1168,19 @@ public: */ /** - * This method returns an NdbConnection which caters for the transaction. - * When the transaction is completed it must be closed. - * The Ndb::closeTransaction also return the NdbConnection object - * and all other memory related to the transaction. - * Failure to close the transaction will lead to memory leakage. - * The transaction must be closed independent of its outcome, i.e. - * even if there is an error. + * Start a transaction + * + * @note When the transaction is completed it must be closed using + * Ndb::closeTransaction or NdbConnection::close. + * The transaction must be closed independent of its outcome, i.e. + * even if there is an error. + * + * @param prio Not implemented + * @param keyData Pointer to partition key to be used for deciding + * which node to run the Transaction Coordinator on + * @param keyLen Length of partition key expressed in bytes * - * NDB API can be hinted to select a particular transaction coordinator. - * The default method is round robin where each set of new transactions - * is placed on the next NDB kernel node. - * By providing a distribution key (usually the primary key - * of the mostly used table of the transaction) for a record - * the transaction will be placed on the node where the primary replica - * of that record resides. 
- * Note that this is only a hint, the system can - * be under reconfiguration and then the NDB API - * will use select the transaction coordinator without using - * this hint. - * - * Placing the transaction coordinator close - * to the actual data used in the transaction can in many cases - * improve performance significantly. This is particularly true for - * systems using TCP/IP. A system using Solaris and a 500 MHz processor - * has a cost model for TCP/IP communication which is: - * - * 30 microseconds + (100 nanoseconds * no of Bytes) - * - * This means that if we can ensure that we use "popular" links we increase - * buffering and thus drastically reduce the communication cost. - * Systems using SCI has a different cost model which is: - * - * 5 microseconds + (10 nanoseconds * no of Bytes) - * - * Thus SCI systems are much less dependent on selection of - * transaction coordinators. - * Typically TCP/IP systems spend 30-60% of the time during communication, - * whereas SCI systems typically spend 5-10% of the time during - * communication. - * Thus SCI means that less care from the NDB API programmer is - * needed and great scalability can be achieved even for applications using - * data from many parts of the database. - * - * A simple example is an application that uses many simple updates where - * a transaction needs to update one record. - * This record has a 32 bit primary key, - * which is also the distribution key. - * Then the keyData will be the address of the integer - * of the primary key and keyLen will be 4. - * - * @note Transaction priorities are not yet supported. - * - * @param prio The priority of the transaction.
- * Priority 0 is the highest priority and is used - * for short transactions with requirements on low delay.
- * Priority 1 is a medium priority for short transactions. - *
- * Priority 2 is a medium priority for long transactions.
- * Priority 3 is a low priority for long transactions.
- * This parameter is not currently used, - * and can be set to any value - * @param keyData Pointer to distribution key - * @param keyLen Length of distribution key expressed in bytes - * - * @return NdbConnection object, or NULL if method failed. + * @return NdbConnection object, or NULL on failure. */ NdbConnection* startTransaction(Uint32 prio = 0, const char * keyData = 0, @@ -1233,7 +1220,10 @@ public: #endif /** - * When a transactions is completed, the transaction has to be closed. + * Close a transaction. + * + * @note should be called after the transaction has completed, irrespective + * of success or failure * * @note It is not allowed to call Ndb::closeTransaction after sending the * transaction asynchronously with either @@ -1245,10 +1235,11 @@ public: * If the transaction is not committed it will be aborted. */ void closeTransaction(NdbConnection* aConnection); - /** @} *********************************************************************/ +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // to be documented later /** * @name Asynchronous Transactions * @{ @@ -1259,11 +1250,10 @@ public: * Will return as soon as at least 'minNoOfEventsToWakeUp' * of them have completed, or the maximum time given as timeout has passed. * - * @param aMillisecondNumber Maximum time to wait for transactions - * to complete. - * Polling without wait is achieved by setting the - * timer to zero. - * Time is expressed in milliseconds. + * @param aMillisecondNumber + * Maximum time to wait for transactions to complete. Polling + * without wait is achieved by setting the timer to zero. + * Time is expressed in milliseconds. * @param minNoOfEventsToWakeup Minimum number of transactions * which has to wake up before the poll-call will return. * If minNoOfEventsToWakeup is @@ -1325,6 +1315,7 @@ public: int sendPollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT, int minNoOfEventsToWakeup = 1, int forceSend = 0); +#endif /** @} *********************************************************************/ @@ -1336,7 +1327,7 @@ public: /** * Get the NdbError object * - * The NdbError object is valid until you call a new NDB API method. + * @note The NdbError object is valid until a new NDB API method is called. */ const NdbError & getNdbError() const; @@ -1348,37 +1339,36 @@ public: const NdbError & getNdbError(int errorCode); + /** @} *********************************************************************/ + +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + /** + * Get the application node identity. + * + * @return Node id of this application. + */ + int getNodeId(); + /** * setConnectString - * @param connectString - the connectString has the following format: - * @code - * "nodeid=;host=host://:; - * host=host://:;..." - * @endcode - * or - * @code - * "nodeid=;host=:;host=:;..." - * @endcode + * + * @param connectString - see MySQL ref manual for format */ static void setConnectString(const char * connectString); bool usingFullyQualifiedNames(); - /** @} *********************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** * Different types of tampering with the NDB Cluster. * Only for debugging purposes only. 
*/ enum TamperType { - LockGlbChp = 1, ///< Lock GCP - UnlockGlbChp, ///< Unlock GCP - CrashNode, ///< Crash an NDB node - ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster - InsertError ///< Execute an error in NDB Cluster - ///< (may crash system) + LockGlbChp = 1, ///< Lock GCP + UnlockGlbChp, ///< Unlock GCP + CrashNode, ///< Crash an NDB node + ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster + InsertError ///< Execute an error in NDB Cluster + ///< (may crash system) }; /** @@ -1397,9 +1387,7 @@ public: * on type of tampering. */ int NdbTamper(TamperType aAction, int aNode); -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** * Return a unique tuple id for a table. The id sequence is * ascending but may contain gaps. @@ -1429,9 +1417,7 @@ public: bool increase); bool setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase); Uint64 opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op); -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** */ NdbConnection* hupp( NdbConnection* ); diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp index 7f67ba9d68a..2a9a39a1a49 100644 --- a/ndb/include/ndbapi/NdbConnection.hpp +++ b/ndb/include/ndbapi/NdbConnection.hpp @@ -31,6 +31,8 @@ class Ndb; class NdbBlob; +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL +// to be documented later /** * NdbAsynchCallback functions are used when executing asynchronous * transactions (using NdbConnection::executeAsynchPrepare, or @@ -39,6 +41,7 @@ class NdbBlob; * See @ref secAsync for more information. */ typedef void (* NdbAsynchCallback)(int, NdbConnection*, void*); +#endif /** * Commit type of transaction @@ -184,7 +187,8 @@ public: * @note All operations within the same transaction need to * be initialized with this method. * - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable) + * @param aTable + * A table object (fetched by NdbDictionary::Dictionary::getTable) * @return Pointer to an NdbOperation object if successful, otherwise NULL. */ NdbOperation* getNdbOperation(const NdbDictionary::Table * aTable); @@ -204,7 +208,8 @@ public: * get the NdbConnection object which * was fetched by startTransaction pointing to this operation. * - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable) + * @param aTable + * A table object (fetched by NdbDictionary::Dictionary::getTable) * @return pointer to an NdbOperation object if successful, otherwise NULL */ NdbScanOperation* getNdbScanOperation(const NdbDictionary::Table * aTable); @@ -226,12 +231,15 @@ public: * get the NdbConnection object which * was fetched by startTransaction pointing to this operation. * - * @param anIndex An index object (fetched by NdbDictionary::Dictionary::getIndex). - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable). + * @param anIndex + An index object (fetched by NdbDictionary::Dictionary::getIndex). + * @param aTable + A table object (fetched by NdbDictionary::Dictionary::getTable). * @return pointer to an NdbOperation object if successful, otherwise NULL */ - NdbIndexScanOperation* getNdbIndexScanOperation(const NdbDictionary::Index * anIndex, - const NdbDictionary::Table * aTable); + NdbIndexScanOperation* getNdbIndexScanOperation + (const NdbDictionary::Index * anIndex, + const NdbDictionary::Table * aTable); /** * Get an operation from NdbIndexOperation idlelist and @@ -251,8 +259,10 @@ public: * get the NdbConnection object that * was fetched by startTransaction pointing to this operation. 
* - * @param anIndex An index object (fetched by NdbDictionary::Dictionary::getIndex). - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable). + * @param anIndex + * An index object (fetched by NdbDictionary::Dictionary::getIndex). + * @param aTable + * A table object (fetched by NdbDictionary::Dictionary::getTable). * @return Pointer to an NdbIndexOperation object if * successful, otherwise NULL */ @@ -289,6 +299,8 @@ public: AbortOption abortOption = AbortOnError, int force = 0 ); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // to be documented later /** * Prepare an asynchronous transaction. * @@ -334,7 +346,7 @@ public: NdbAsynchCallback aCallback, void* anyObject, AbortOption abortOption = AbortOnError); - +#endif /** * Refresh * Update timeout counter of this transaction @@ -397,14 +409,14 @@ public: * (Note that there has to be an NdbConnection::execute call * with Ndb::Commit for the GCI to be available.) */ - int getGCI(); + int getGCI(); /** * Get transaction identity. * * @return Transaction id. */ - Uint64 getTransactionId(); + Uint64 getTransactionId(); /** * Returns the commit status of the transaction. diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index c1312c9b3af..f12cff8584f 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -166,7 +166,7 @@ public: * The builtin column types */ enum Type { - Undefined=0,///< Undefined + Undefined=0, ///< Undefined Tinyint, ///< 8 bit. 1 byte signed integer, can be used in array Tinyunsigned, ///< 8 bit. 1 byte unsigned integer, can be used in array Smallint, ///< 16 bit. 2 byte signed integer, can be used in array @@ -374,16 +374,11 @@ public: #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL const Table * getBlobTable() const; - /** - * @name ODBC Specific methods - * @{ - */ - void setAutoIncrement(bool); + void setAutoIncrement(bool); bool getAutoIncrement() const; void setAutoIncrementInitialValue(Uint64 val); void setDefaultValue(const char*); const char* getDefaultValue() const; - /** @} *******************************************************************/ static const Column * FRAGMENT; static const Column * ROW_COUNT; diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 458d567ccf8..fa13b359ca6 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -4871,6 +4871,15 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, nullBits += attrDesc.AttributeArraySize; } + if(attrDesc.AttributeArraySize == 0) + { + parseP->errorCode = CreateTableRef::InvalidArraySize; + parseP->status = status; + parseP->errorKey = it.getKey(); + parseP->errorLine = __LINE__; + return; + } + recordLength += sz; if(attrDesc.AttributeKeyFlag){ keyLength += sz; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index 8e3ca6528c2..cb5477a1551 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -858,6 +858,8 @@ void Dbtup::sendTUPKEYCONF(Signal* signal, return; }//Dbtup::sendTUPKEYCONF() +#define MAX_READ (sizeof(signal->theData) > MAX_MESSAGE_SIZE ? 
MAX_MESSAGE_SIZE : sizeof(signal->theData)) + /* ---------------------------------------------------------------- */ /* ----------------------------- READ ---------------------------- */ /* ---------------------------------------------------------------- */ @@ -878,7 +880,7 @@ int Dbtup::handleReadReq(Signal* signal, }//if Uint32 * dst = &signal->theData[25]; - Uint32 dstLen = (sizeof(signal->theData) / 4) - 25; + Uint32 dstLen = (MAX_READ / 4) - 25; const Uint32 node = refToNode(sendBref); if(node != 0 && node != getOwnNodeId()) { ; @@ -888,7 +890,7 @@ int Dbtup::handleReadReq(Signal* signal, * execute direct */ dst = &signal->theData[3]; - dstLen = (sizeof(signal->theData) / 4) - 3; + dstLen = (MAX_READ / 4) - 3; } if (regOperPtr->interpretedExec != 1) { @@ -1228,7 +1230,7 @@ int Dbtup::interpreterStartLab(Signal* signal, const BlockReference sendBref = regOperPtr->recBlockref; Uint32 * dst = &signal->theData[25]; - Uint32 dstLen = (sizeof(signal->theData) / 4) - 25; + Uint32 dstLen = (MAX_READ / 4) - 25; const Uint32 node = refToNode(sendBref); if(node != 0 && node != getOwnNodeId()) { ; @@ -1238,7 +1240,7 @@ int Dbtup::interpreterStartLab(Signal* signal, * execute direct */ dst = &signal->theData[3]; - dstLen = (sizeof(signal->theData) / 4) - 3; + dstLen = (MAX_READ / 4) - 3; } RtotalLen = RinitReadLen; diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp index e10b2e1d82c..ef6e35e0702 100644 --- a/ndb/src/ndbapi/ClusterMgr.cpp +++ b/ndb/src/ndbapi/ClusterMgr.cpp @@ -429,7 +429,6 @@ ClusterMgr::reportDisconnected(NodeId nodeId){ noOfConnectedNodes--; theNodes[nodeId].connected = false; - theNodes[nodeId].m_info.m_connectCount ++; reportNodeFailed(nodeId); } @@ -439,18 +438,22 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){ Node & theNode = theNodes[nodeId]; theNode.m_alive = false; - if(theNode.connected) - theFacade.doDisconnect(nodeId); + theNode.m_info.m_connectCount ++; + if(theNode.connected) + { + theFacade.doDisconnect(nodeId); + } const bool report = (theNode.m_state.startLevel != NodeState::SL_NOTHING); theNode.m_state.startLevel = NodeState::SL_NOTHING; - if(report){ + if(report) + { theFacade.ReportNodeDead(nodeId); - } - + } + theNode.nfCompleteRep = false; - + if(noOfConnectedNodes == 0){ NFCompleteRep rep; for(Uint32 i = 1; ierrorCode); theCommitStatus = Aborted; theCompletionStatus = CompletedFailure; + theReturnStatus = ReturnFailure; return 0; } else { #ifdef NDB_NO_DROPPED_SIGNAL @@ -1496,6 +1497,7 @@ NdbConnection::receiveTCROLLBACKREF(NdbApiSignal* aSignal) setOperationErrorCodeAbort(aSignal->readData(4)); theCommitStatus = Aborted; theCompletionStatus = CompletedFailure; + theReturnStatus = ReturnFailure; return 0; } else { #ifdef NDB_NO_DROPPED_SIGNAL @@ -1584,6 +1586,7 @@ from other transactions. done = 1; tOp->setErrorCode(4119); theCompletionStatus = CompletedFailure; + theReturnStatus = NdbConnection::ReturnFailure; } } tNoComp += done; @@ -1613,6 +1616,7 @@ from other transactions. 
/**********************************************************************/ theError.code = 4011; theCompletionStatus = CompletedFailure; + theReturnStatus = NdbConnection::ReturnFailure; theCommitStatus = Aborted; return 0; }//if @@ -1672,6 +1676,7 @@ NdbConnection::receiveTCKEY_FAILCONF(const TcKeyFailConf * failConf) case NdbOperation::OpenScanRequest: case NdbOperation::OpenRangeScanRequest: theCompletionStatus = CompletedFailure; + theReturnStatus = NdbConnection::ReturnFailure; setOperationErrorCodeAbort(4115); tOp = NULL; break; @@ -1720,6 +1725,7 @@ NdbConnection::receiveTCKEY_FAILREF(NdbApiSignal* aSignal) */ theCompletionStatus = NdbConnection::CompletedSuccess; } else { + theReturnStatus = NdbConnection::ReturnFailure; theCompletionStatus = NdbConnection::CompletedFailure; theError.code = 4031; }//if @@ -1779,6 +1785,7 @@ NdbConnection::receiveTCINDXCONF(const TcIndxConf * indxConf, theError.code = 4011; theCompletionStatus = NdbConnection::CompletedFailure; theCommitStatus = NdbConnection::Aborted; + theReturnStatus = NdbConnection::ReturnFailure; return 0; }//if if (tNoComp >= tNoSent) { @@ -1818,6 +1825,7 @@ NdbConnection::receiveTCINDXREF( NdbApiSignal* aSignal) /**********************************************************************/ theCompletionStatus = NdbConnection::CompletedFailure; theCommitStatus = NdbConnection::Aborted; + theReturnStatus = NdbConnection::ReturnFailure; return 0; } else { #ifdef NDB_NO_DROPPED_SIGNAL @@ -1891,6 +1899,7 @@ NdbConnection::OpCompleteSuccess() setOperationErrorCodeAbort(4113); // Too many operations, // stop waiting for more theCompletionStatus = NdbConnection::CompletedFailure; + theReturnStatus = NdbConnection::ReturnFailure; return 0; }//if }//NdbConnection::OpCompleteSuccess() @@ -2020,22 +2029,28 @@ NdbConnection::report_node_failure(Uint32 id){ const Uint32 len = TcKeyConf::SimpleReadBit | id; Uint32 tNoComp = theNoOfOpCompleted; Uint32 tNoSent = theNoOfOpSent; + Uint32 count = 0; while(tmp != 0) { if(tmp->theReceiver.m_expected_result_length == len && tmp->theReceiver.m_received_result_length == 0) { - tNoComp++; + count++; tmp->theError.code = 4119; } tmp = tmp->next(); } + tNoComp += count; theNoOfOpCompleted = tNoComp; - if(tNoComp == tNoSent) + if(count) { - theError.code = 4119; - theCompletionStatus = NdbConnection::CompletedFailure; - return 1; + theReturnStatus = NdbConnection::ReturnFailure; + if(tNoComp == tNoSent) + { + theError.code = 4119; + theCompletionStatus = NdbConnection::CompletedFailure; + return 1; + } } return 0; } diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp index c011c1a6a26..12ab85d2895 100644 --- a/ndb/src/ndbapi/Ndbif.cpp +++ b/ndb/src/ndbapi/Ndbif.cpp @@ -301,6 +301,7 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId) intact since the node was failing and they were aborted. Thus we set commit state to Aborted and set state to release on close. 
*/ + localCon->theReturnStatus = NdbConnection::ReturnFailure; localCon->theCommitStatus = NdbConnection::Aborted; localCon->theReleaseOnClose = true; completedTransaction(localCon); @@ -1128,6 +1129,7 @@ Ndb::sendPrepTrans(int forceSend) a_con->theCommitStatus = NdbConnection::Aborted; }//if }//if + a_con->theReturnStatus = NdbConnection::ReturnFailure; a_con->theCompletionStatus = NdbConnection::CompletedFailure; a_con->handleExecuteCompletion(); insert_completed_list(a_con); diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 7e4243d56fb..dd4a1cf0b9e 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -308,7 +308,7 @@ ErrorBundle ErrorCodes[] = { { 709, SE, "No such table existed" }, { 721, SE, "Table or index with given name already exists" }, { 723, SE, "No such table existed" }, - { 736, SE, "Wrong attribute size" }, + { 736, SE, "Unsupported array size" }, { 737, SE, "Attribute array size too big" }, { 738, SE, "Record too big" }, { 739, SE, "Unsupported primary key length" }, diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index 0a43bb02fff..221c035e368 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -125,6 +125,16 @@ int runCreateTheTable(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runDropTheTable(NDBT_Context* ctx, NDBT_Step* step){ + Ndb* pNdb = GETNDB(step); + const NdbDictionary::Table* pTab = ctx->getTab(); + + // Try to create table in db + pNdb->getDictionary()->dropTable(pTab->getName()); + + return NDBT_OK; +} + int runCreateTableWhenDbIsFull(NDBT_Context* ctx, NDBT_Step* step){ Ndb* pNdb = GETNDB(step); int result = NDBT_OK; @@ -547,6 +557,7 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){ if (newTab.createTableInDb(pNdb) != 0){ ndbout << newTab.getName() << " could not be created" << ", fragmentType = "<getDictionary()->getNdbError() << endl; return NDBT_FAILED; } @@ -1583,7 +1594,7 @@ TESTCASE("CreateTableWhenDbIsFull", INITIALIZER(runFillTable); INITIALIZER(runCreateTableWhenDbIsFull); INITIALIZER(runDropTableWhenDbIsFull); - FINALIZER(runClearTable); + FINALIZER(runDropTheTable); } TESTCASE("FragmentTypeSingle", "Create the table with fragment type Single\n"){ diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp index 25991ac375f..3526326b680 100644 --- a/ndb/test/ndbapi/testIndex.cpp +++ b/ndb/test/ndbapi/testIndex.cpp @@ -1277,7 +1277,7 @@ TESTCASE("CreateLoadDrop_O", TESTCASE("NFNR1", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); - //TC_PROPERTY("Threads", 2); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(runLoadTable); @@ -1292,6 +1292,7 @@ TESTCASE("NFNR1_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(runLoadTable); @@ -1305,6 +1306,7 @@ TESTCASE("NFNR1_O", TESTCASE("NFNR2", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1321,6 +1323,7 @@ TESTCASE("NFNR2_O", "Test that indexes are correctly maintained during node fail and node restart"){ 
TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1336,6 +1339,7 @@ TESTCASE("NFNR2_O", TESTCASE("NFNR3", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1351,6 +1355,7 @@ TESTCASE("NFNR3_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1365,6 +1370,7 @@ TESTCASE("NFNR3_O", TESTCASE("NFNR4", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 4); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1383,6 +1389,7 @@ TESTCASE("NFNR4_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 4); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp index a1ebac609b6..c0393403760 100644 --- a/ndb/test/ndbapi/testNdbApi.cpp +++ b/ndb/test/ndbapi/testNdbApi.cpp @@ -799,13 +799,13 @@ int runUpdateWithoutValues(NDBT_Context* ctx, NDBT_Step* step){ // Dont' call any setValues - // Execute should not work + // Execute should work int check = pCon->execute(Commit); if (check == 0){ ndbout << "execute worked" << endl; - result = NDBT_FAILED; } else { ERR(pCon->getNdbError()); + result = NDBT_FAILED; } pNdb->closeTransaction(pCon); diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index e844f227034..7913b4b240e 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -311,7 +311,7 @@ int runDirtyRead(NDBT_Context* ctx, NDBT_Step* step){ int id = i % restarter.getNumDbNodes(); int nodeId = restarter.getDbNodeId(id); ndbout << "Restart node " << nodeId << endl; - restarter.insertErrorInAllNodes(5041); + restarter.insertErrorInNode(nodeId, 5041); restarter.insertErrorInAllNodes(8048 + (i & 1)); for(int j = 0; jmbminlen + urandom(m_cs->mbmaxlen - m_cs->mbminlen + 1); + unsigned& size = m_chr[i].m_size; + bool ok; + size = m_cs->mbminlen + urandom(m_cs->mbmaxlen - m_cs->mbminlen + 1); assert(m_cs->mbminlen <= size && size <= m_cs->mbmaxlen); // prefer longer chars if (size == m_cs->mbminlen && m_cs->mbminlen < m_cs->mbmaxlen && urandom(5) != 0) @@ -466,33 +478,57 @@ Chs::Chs(CHARSET_INFO* cs) : for (unsigned j = 0; j < size; j++) { bytes[j] = urandom(256); } + // check wellformed const char* sbytes = (const char*)bytes; if ((*cs->cset->well_formed_len)(cs, sbytes, sbytes + size, 1) != size) { miss1++; continue; } - // do not trust well_formed_len currently + // check no proper prefix wellformed + ok = true; + for (unsigned j = 1; j < size; j++) { + if ((*cs->cset->well_formed_len)(cs, sbytes, sbytes + j, 1) == j) { + ok = false; + break; + } + } + if (! 
ok) { + miss2++; + continue; + } + // normalize memset(xbytes, 0, sizeof(xbytes)); // currently returns buffer size always int xlen = (*cs->coll->strnxfrm)(cs, xbytes, m_xmul * size, bytes, size); // check we got something - bool xok = false; + ok = false; for (unsigned j = 0; j < xlen; j++) { if (xbytes[j] != 0) { - xok = true; + ok = true; break; } } - if (! xok) { - miss2++; + if (! ok) { + miss3++; + continue; + } + // check for duplicate (before normalize) + ok = true; + for (unsigned j = 0; j < i; j++) { + const Chr& chr = m_chr[j]; + if (chr.m_size == size && memcmp(chr.m_bytes, bytes, size) == 0) { + ok = false; + break; + } + } + if (! ok) { + miss4++; continue; } - // occasional duplicate char is ok - m_chr[i].m_size = size; i++; } bool disorder = true; - unsigned bubbels = 0; + unsigned bubbles = 0; while (disorder) { disorder = false; for (unsigned i = 1; i < maxcharcount; i++) { @@ -502,11 +538,11 @@ Chs::Chs(CHARSET_INFO* cs) : m_chr[i] = m_chr[i-1]; m_chr[i-1] = chr; disorder = true; - bubbels++; + bubbles++; } } } - LL3("inited charset " << cs->name << " miss1=" << miss1 << " miss2=" << miss2 << " bubbels=" << bubbels); + LL3("inited charset " << *this << " miss=" << miss1 << "," << miss2 << "," << miss3 << "," << miss4 << " bubbles=" << bubbles); } Chs::~Chs() @@ -514,6 +550,14 @@ Chs::~Chs() delete [] m_chr; } +static NdbOut& +operator<<(NdbOut& out, const Chs& chs) +{ + CHARSET_INFO* cs = chs.m_cs; + out << cs->name << "[" << cs->mbminlen << "-" << cs->mbmaxlen << "," << chs.m_xmul << "]"; + return out; +} + static Chs* cslist[maxcsnumber]; static void @@ -552,22 +596,26 @@ getcs(Par par) // Col - table column struct Col { + enum Type { + Unsigned = NdbDictionary::Column::Unsigned, + Char = NdbDictionary::Column::Char + }; const class Tab& m_tab; unsigned m_num; const char* m_name; bool m_pk; - NdbDictionary::Column::Type m_type; + Type m_type; unsigned m_length; unsigned m_bytelength; bool m_nullable; const Chs* m_chs; - Col(const class Tab& tab, unsigned num, const char* name, bool pk, NdbDictionary::Column::Type type, unsigned length, bool nullable, const Chs* chs); + Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs); ~Col(); bool equal(const Col& col2) const; void verify(const void* addr) const; }; -Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, NdbDictionary::Column::Type type, unsigned length, bool nullable, const Chs* chs) : +Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs) : m_tab(tab), m_num(num), m_name(strcpy(new char [strlen(name) + 1], name)), @@ -595,9 +643,9 @@ void Col::verify(const void* addr) const { switch (m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: break; - case NdbDictionary::Column::Char: + case Col::Char: { CHARSET_INFO* cs = m_chs->m_cs; const char* src = (const char*)addr; @@ -616,10 +664,10 @@ operator<<(NdbOut& out, const Col& col) { out << "col[" << col.m_num << "] " << col.m_name; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: out << " unsigned"; break; - case NdbDictionary::Column::Char: + case Col::Char: { CHARSET_INFO* cs = col.m_chs->m_cs; out << " char(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")"; @@ -656,25 +704,41 @@ ICol::~ICol() { } +static NdbOut& +operator<<(NdbOut& out, const ICol& icol) +{ + out << "icol[" << icol.m_num << "] " << icol.m_col; + return out; +} + // 
ITab - index struct ITab { + enum Type { + OrderedIndex = NdbDictionary::Index::OrderedIndex, + UniqueHashIndex = NdbDictionary::Index::UniqueHashIndex + }; const class Tab& m_tab; const char* m_name; + Type m_type; unsigned m_icols; const ICol** m_icol; - ITab(const class Tab& tab, const char* name, unsigned icols); + unsigned m_colmask; + ITab(const class Tab& tab, const char* name, Type type, unsigned icols); ~ITab(); + void icoladd(unsigned k, const ICol* icolptr); }; -ITab::ITab(const class Tab& tab, const char* name, unsigned icols) : +ITab::ITab(const class Tab& tab, const char* name, Type type, unsigned icols) : m_tab(tab), m_name(strcpy(new char [strlen(name) + 1], name)), + m_type(type), m_icols(icols), - m_icol(new const ICol* [icols + 1]) + m_icol(new const ICol* [icols + 1]), + m_colmask(0) { - for (unsigned i = 0; i <= m_icols; i++) - m_icol[0] = 0; + for (unsigned k = 0; k <= m_icols; k++) + m_icol[k] = 0; } ITab::~ITab() @@ -685,13 +749,21 @@ ITab::~ITab() delete [] m_icol; } +void +ITab::icoladd(unsigned k, const ICol* icolptr) +{ + assert(k == icolptr->m_num && k < m_icols && m_icol[k] == 0); + m_icol[k] = icolptr; + m_colmask |= (1 << icolptr->m_col.m_num); +} + static NdbOut& operator<<(NdbOut& out, const ITab& itab) { out << "itab " << itab.m_name << " icols=" << itab.m_icols; for (unsigned k = 0; k < itab.m_icols; k++) { - out << endl; - out << "icol[" << k << "] " << itab.m_icol[k]->m_col; + const ICol& icol = *itab.m_icol[k]; + out << endl << icol; } return out; } @@ -706,6 +778,8 @@ struct Tab { const ITab** m_itab; // pk must contain an Unsigned column unsigned m_keycol; + void coladd(unsigned k, Col* colptr); + void itabadd(unsigned j, ITab* itab); Tab(const char* name, unsigned cols, unsigned itabs, unsigned keycol); ~Tab(); }; @@ -718,10 +792,10 @@ Tab::Tab(const char* name, unsigned cols, unsigned itabs, unsigned keycol) : m_itab(new const ITab* [itabs + 1]), m_keycol(keycol) { - for (unsigned i = 0; i <= cols; i++) - m_col[i] = 0; - for (unsigned i = 0; i <= itabs; i++) - m_itab[i] = 0; + for (unsigned k = 0; k <= cols; k++) + m_col[k] = 0; + for (unsigned j = 0; j <= itabs; j++) + m_itab[j] = 0; } Tab::~Tab() @@ -735,19 +809,33 @@ Tab::~Tab() delete [] m_itab; } +void +Tab::coladd(unsigned k, Col* colptr) +{ + assert(k == colptr->m_num && k < m_cols && m_col[k] == 0); + m_col[k] = colptr; +} + +void +Tab::itabadd(unsigned j, ITab* itabptr) +{ + assert(j < m_itabs && m_itab[j] == 0); + m_itab[j] = itabptr; +} + static NdbOut& operator<<(NdbOut& out, const Tab& tab) { out << "tab " << tab.m_name << " cols=" << tab.m_cols; for (unsigned k = 0; k < tab.m_cols; k++) { - out << endl; - out << *tab.m_col[k]; + const Col& col = *tab.m_col[k]; + out << endl << col; } for (unsigned i = 0; i < tab.m_itabs; i++) { if (tab.m_itab[i] == 0) continue; - out << endl; - out << *tab.m_itab[i]; + const ITab& itab = *tab.m_itab[i]; + out << endl << itab; } return out; } @@ -774,7 +862,7 @@ verifytables() { assert(t->m_keycol < t->m_cols); const Col* c = t->m_col[t->m_keycol]; - assert(c->m_pk && c->m_type == NdbDictionary::Column::Unsigned); + assert(c->m_pk && c->m_type == Col::Unsigned); } assert(t->m_itabs != 0 && t->m_itab != 0); for (unsigned i = 0; i < t->m_itabs; i++) { @@ -785,6 +873,9 @@ verifytables() for (unsigned k = 0; k < x->m_icols; k++) { const ICol* c = x->m_icol[k]; assert(c != 0 && c->m_num == k && c->m_col.m_num < t->m_cols); + if (x->m_type == ITab::UniqueHashIndex) { + assert(! 
c->m_col.m_nullable); + } } } assert(t->m_itab[t->m_itabs] == 0); @@ -810,127 +901,186 @@ makebuiltintables(Par par) } // ti0 - basic if (usetable(par, 0)) { - const Tab* t = new Tab("ti0", 5, 5, 0); + Tab* t = new Tab("ti0", 5, 7, 0); // name - pk - type - length - nullable - cs - t->m_col[0] = new Col(*t, 0, "a", 1, NdbDictionary::Column::Unsigned, 1, 0, 0); - t->m_col[1] = new Col(*t, 1, "b", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[2] = new Col(*t, 2, "c", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[3] = new Col(*t, 3, "d", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[4] = new Col(*t, 4, "e", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); + t->coladd(0, new Col(*t, 0, "a", 1, Col::Unsigned, 1, 0, 0)); + t->coladd(1, new Col(*t, 1, "b", 0, Col::Unsigned, 1, 1, 0)); + t->coladd(2, new Col(*t, 2, "c", 0, Col::Unsigned, 1, 0, 0)); + t->coladd(3, new Col(*t, 3, "d", 0, Col::Unsigned, 1, 1, 0)); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Unsigned, 1, 0, 0)); if (useindex(par, 0)) { // a - const ITab* x = t->m_itab[0] = new ITab(*t, "ti0x0", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); + ITab* x = new ITab(*t, "ti0x0", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + t->itabadd(0, x); } if (useindex(par, 1)) { // b - const ITab* x = t->m_itab[1] = new ITab(*t, "ti0x1", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); + ITab* x = new ITab(*t, "ti0x1", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + t->itabadd(1, x); } if (useindex(par, 2)) { // b, c - const ITab* x = t->m_itab[2] = new ITab(*t, "ti0x2", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); + ITab* x = new ITab(*t, "ti0x2", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(2, x); } if (useindex(par, 3)) { // d, c, b - const ITab* x = t->m_itab[3] = new ITab(*t, "ti0x3", 3); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[3]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[1]); + ITab* x = new ITab(*t, "ti0x3", ITab::OrderedIndex, 3); + x->icoladd(0, new ICol(*x, 0, *t->m_col[3])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[1])); + t->itabadd(3, x); } if (useindex(par, 4)) { // b, e, c, d - const ITab* x = t->m_itab[4] = new ITab(*t, "ti0x4", 4); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[4]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[2]); - x->m_icol[3] = new ICol(*x, 3, *t->m_col[3]); + ITab* x = new ITab(*t, "ti0x4", ITab::OrderedIndex, 4); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[3])); + t->itabadd(4, x); + } + if (useindex(par, 5)) { + // a, c + ITab* x = new ITab(*t, "ti0z5", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(5, x); + } + if (useindex(par, 6)) { + // a, e + ITab* x = new ITab(*t, "ti0z6", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); + t->itabadd(6, x); } tablist[0] = t; } // ti1 - simple char fields if (usetable(par, 1)) { - const Tab* t = new Tab("ti1", 5, 5, 1); + Tab* t = new Tab("ti1", 5, 7, 1); // name - pk - type - length - nullable - cs - 
t->m_col[0] = new Col(*t, 0, "a", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[1] = new Col(*t, 1, "b", 1, NdbDictionary::Column::Unsigned, 1, 0, 0); - t->m_col[2] = new Col(*t, 2, "c", 0, NdbDictionary::Column::Char, 20, 1, getcs(par)); - t->m_col[3] = new Col(*t, 3, "d", 0, NdbDictionary::Column::Char, 5, 1, getcs(par)); - t->m_col[4] = new Col(*t, 4, "e", 0, NdbDictionary::Column::Char, 5, 1, getcs(par)); + t->coladd(0, new Col(*t, 0, "a", 0, Col::Unsigned, 1, 0, 0)); + t->coladd(1, new Col(*t, 1, "b", 1, Col::Unsigned, 1, 0, 0)); + t->coladd(2, new Col(*t, 2, "c", 0, Col::Char, 20, 1, getcs(par))); + t->coladd(3, new Col(*t, 3, "d", 0, Col::Char, 5, 0, getcs(par))); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Char, 5, 1, getcs(par))); if (useindex(par, 0)) { // b - const ITab* x = t->m_itab[0] = new ITab(*t, "ti1x0", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); + ITab* x = new ITab(*t, "ti1x0", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); } if (useindex(par, 1)) { // a, c - const ITab* x = t->m_itab[1] = new ITab(*t, "ti1x1", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); + ITab* x = new ITab(*t, "ti1x1", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(1, x); } if (useindex(par, 2)) { // c, a - const ITab* x = t->m_itab[2] = new ITab(*t, "ti1x2", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[2]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[0]); + ITab* x = new ITab(*t, "ti1x2", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[2])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[0])); + t->itabadd(2, x); } if (useindex(par, 3)) { // e - const ITab* x = t->m_itab[3] = new ITab(*t, "ti1x3", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[4]); + ITab* x = new ITab(*t, "ti1x3", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); + t->itabadd(3, x); } if (useindex(par, 4)) { // e, d, c, b - const ITab* x = t->m_itab[4] = new ITab(*t, "ti1x4", 4); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[4]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[3]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[2]); - x->m_icol[3] = new ICol(*x, 3, *t->m_col[1]); + ITab* x = new ITab(*t, "ti1x4", ITab::OrderedIndex, 4); + x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[3])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[1])); + t->itabadd(4, x); + } + if (useindex(par, 5)) { + // a, b + ITab* x = new ITab(*t, "ti1z5", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[1])); + t->itabadd(5, x); + } + if (useindex(par, 6)) { + // a, b, d + ITab* x = new ITab(*t, "ti1z6", ITab::UniqueHashIndex, 3); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[1])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); + t->itabadd(6, x); } tablist[1] = t; } // ti2 - complex char fields if (usetable(par, 2)) { - const Tab* t = new Tab("ti2", 5, 5, 2); + Tab* t = new Tab("ti2", 5, 7, 2); // name - pk - type - length - nullable - cs - t->m_col[0] = new Col(*t, 0, "a", 1, NdbDictionary::Column::Char, 101, 0, getcs(par)); - t->m_col[1] = new Col(*t, 1, "b", 0, NdbDictionary::Column::Char, 4, 1, getcs(par)); - t->m_col[2] = new Col(*t, 2, "c", 1, NdbDictionary::Column::Unsigned, 1, 0, 0); - t->m_col[3] = new Col(*t, 3, "d", 1, 
NdbDictionary::Column::Char, 3, 0, getcs(par)); - t->m_col[4] = new Col(*t, 4, "e", 0, NdbDictionary::Column::Char, 101, 0, getcs(par)); + t->coladd(0, new Col(*t, 0, "a", 1, Col::Char, 31, 0, getcs(par))); + t->coladd(1, new Col(*t, 1, "b", 0, Col::Char, 4, 1, getcs(par))); + t->coladd(2, new Col(*t, 2, "c", 1, Col::Unsigned, 1, 0, 0)); + t->coladd(3, new Col(*t, 3, "d", 1, Col::Char, 3, 0, getcs(par))); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Char, 17, 0, getcs(par))); if (useindex(par, 0)) { // a, c, d - const ITab* x = t->m_itab[0] = new ITab(*t, "ti2x0", 3); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[3]); + ITab* x = new ITab(*t, "ti2x0", ITab::OrderedIndex, 3); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); + t->itabadd(0, x); } if (useindex(par, 1)) { // e, d, c, b, a - const ITab* x = t->m_itab[1] = new ITab(*t, "ti2x1", 5); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[4]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[3]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[2]); - x->m_icol[3] = new ICol(*x, 3, *t->m_col[1]); - x->m_icol[4] = new ICol(*x, 4, *t->m_col[0]); + ITab* x = new ITab(*t, "ti2x1", ITab::OrderedIndex, 5); + x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[3])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[1])); + x->icoladd(4, new ICol(*x, 4, *t->m_col[0])); + t->itabadd(1, x); } if (useindex(par, 2)) { // d - const ITab* x = t->m_itab[2] = new ITab(*t, "ti2x2", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[3]); + ITab* x = new ITab(*t, "ti2x2", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[3])); + t->itabadd(2, x); } if (useindex(par, 3)) { // b - const ITab* x = t->m_itab[3] = new ITab(*t, "ti2x3", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); + ITab* x = new ITab(*t, "ti2x3", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + t->itabadd(3, x); } if (useindex(par, 4)) { // a, e - const ITab* x = t->m_itab[4] = new ITab(*t, "ti2x4", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[4]); + ITab* x = new ITab(*t, "ti2x4", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); + t->itabadd(4, x); + } + if (useindex(par, 5)) { + // a, c + ITab* x = new ITab(*t, "ti2z5", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(5, x); + } + if (useindex(par, 6)) { + // a, c, d, e + ITab* x = new ITab(*t, "ti2z6", ITab::UniqueHashIndex, 4); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[4])); + t->itabadd(6, x); } tablist[2] = t; } @@ -944,6 +1094,7 @@ struct Con { NdbDictionary::Dictionary* m_dic; NdbConnection* m_tx; NdbOperation* m_op; + NdbIndexOperation* m_indexop; NdbScanOperation* m_scanop; NdbIndexScanOperation* m_indexscanop; enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive }; @@ -951,7 +1102,7 @@ struct Con { enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther }; ErrType m_errtype; Con() : - m_ndb(0), m_dic(0), m_tx(0), m_op(0), + m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_indexop(0), m_scanop(0), m_indexscanop(0), 
m_scanmode(ScanNo), m_errtype(ErrNone) {} ~Con() { if (m_tx != 0) @@ -962,6 +1113,7 @@ struct Con { void disconnect(); int startTransaction(); int getNdbOperation(const Tab& tab); + int getNdbIndexOperation(const ITab& itab, const Tab& tab); int getNdbScanOperation(const Tab& tab); int getNdbScanOperation(const ITab& itab, const Tab& tab); int equal(int num, const char* addr); @@ -972,6 +1124,8 @@ struct Con { int execute(ExecType t, bool& deadlock); int openScanRead(unsigned scanbat, unsigned scanpar); int openScanExclusive(unsigned scanbat, unsigned scanpar); + int openScanOrdered(unsigned scanbat, unsigned scanpar, bool descending); + int openScanOrderedExclusive(unsigned scanbat, unsigned scanpar, bool descending); int executeScan(); int nextScanResult(bool fetchAllowed); int nextScanResult(bool fetchAllowed, bool& deadlock); @@ -1025,6 +1179,14 @@ Con::getNdbOperation(const Tab& tab) return 0; } +int +Con::getNdbIndexOperation(const ITab& itab, const Tab& tab) +{ + assert(m_tx != 0); + CHKCON((m_op = m_indexop = m_tx->getNdbIndexOperation(itab.m_name, tab.m_name)) != 0, *this); + return 0; +} + int Con::getNdbScanOperation(const Tab& tab) { @@ -1115,6 +1277,25 @@ Con::openScanExclusive(unsigned scanbat, unsigned scanpar) return 0; } +int +Con::openScanOrdered(unsigned scanbat, unsigned scanpar, bool descending) +{ + assert(m_tx != 0 && m_indexscanop != 0); + NdbOperation::LockMode lm = NdbOperation::LM_Read; + CHKCON(m_indexscanop->readTuples(lm, scanbat, scanpar, true, descending) == 0, *this); + return 0; +} + +int +Con::openScanOrderedExclusive(unsigned scanbat, unsigned scanpar, bool descending) +{ + assert(m_tx != 0 && m_indexscanop != 0); + NdbOperation::LockMode lm = NdbOperation::LM_Exclusive; + CHKCON(m_indexscanop->readTuples(lm, scanbat, scanpar, true, descending) == 0, *this); + return 0; +} + + int Con::executeScan() { @@ -1202,6 +1383,7 @@ Con::printerror(NdbOut& out) if ((code = m_tx->getNdbError().code) != 0) { LL0(++any << " con: error " << m_tx->getNdbError()); die += (code == g_opt.m_die); + // 631 is new, occurs only on 4 db nodes, needs to be checked out if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499 || code == 631) m_errtype = ErrDeadlock; } @@ -1290,7 +1472,7 @@ createtable(Par par) for (unsigned k = 0; k < tab.m_cols; k++) { const Col& col = *tab.m_col[k]; NdbDictionary::Column c(col.m_name); - c.setType(col.m_type); + c.setType((NdbDictionary::Column::Type)col.m_type); c.setLength(col.m_bytelength); // NDB API uses length in bytes c.setPrimaryKey(col.m_pk); c.setNullable(col.m_nullable); @@ -1343,8 +1525,10 @@ createindex(Par par, const ITab& itab) LL4(itab); NdbDictionary::Index x(itab.m_name); x.setTable(tab.m_name); - x.setType(NdbDictionary::Index::OrderedIndex); - x.setLogging(false); + x.setType((NdbDictionary::Index::Type)itab.m_type); + if (par.m_nologging || itab.m_type == ITab::OrderedIndex) { + x.setLogging(false); + } for (unsigned k = 0; k < itab.m_icols; k++) { const ICol& icol = *itab.m_icol[k]; const Col& col = icol.m_col; @@ -1385,6 +1569,8 @@ struct Val { void copy(const void* addr); const void* dataaddr() const; bool m_null; + int equal(Par par) const; + int equal(Par par, const ICol& icol) const; int setval(Par par) const; void calc(Par par, unsigned i); void calckey(Par par, unsigned i); @@ -1402,9 +1588,9 @@ Val::Val(const Col& col) : m_col(col) { switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: break; - case NdbDictionary::Column::Char: + case Col::Char: m_char = new 
unsigned char [col.m_bytelength]; break; default: @@ -1417,9 +1603,9 @@ Val::~Val() { const Col& col = m_col; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: break; - case NdbDictionary::Column::Char: + case Col::Char: delete [] m_char; break; default: @@ -1446,10 +1632,10 @@ Val::copy(const void* addr) { const Col& col = m_col; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: m_uint32 = *(const Uint32*)addr; break; - case NdbDictionary::Column::Char: + case Col::Char: memcpy(m_char, addr, col.m_bytelength); break; default: @@ -1464,9 +1650,9 @@ Val::dataaddr() const { const Col& col = m_col; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: return &m_uint32; - case NdbDictionary::Column::Char: + case Col::Char: return m_char; default: break; @@ -1475,19 +1661,38 @@ Val::dataaddr() const return 0; } +int +Val::equal(Par par) const +{ + Con& con = par.con(); + const Col& col = m_col; + assert(col.m_pk && ! m_null); + const char* addr = (const char*)dataaddr(); + LL5("equal [" << col << "] " << *this); + CHK(con.equal(col.m_num, addr) == 0); + return 0; +} + +int +Val::equal(Par par, const ICol& icol) const +{ + Con& con = par.con(); + assert(! m_null); + const char* addr = (const char*)dataaddr(); + LL5("equal [" << icol << "] " << *this); + CHK(con.equal(icol.m_num, addr) == 0); + return 0; +} + int Val::setval(Par par) const { Con& con = par.con(); const Col& col = m_col; - const char* addr = (const char*)dataaddr(); - if (m_null) - addr = 0; - LL5("setval [" << m_col << "] " << *this); - if (col.m_pk) - CHK(con.equal(col.m_num, addr) == 0); - else - CHK(con.setValue(col.m_num, addr) == 0); + assert(! col.m_pk); + const char* addr = ! m_null ? (const char*)dataaddr() : 0; + LL5("setval [" << col << "] " << *this); + CHK(con.setValue(col.m_num, addr) == 0); return 0; } @@ -1506,10 +1711,10 @@ Val::calckey(Par par, unsigned i) const Col& col = m_col; m_null = false; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: m_uint32 = i; break; - case NdbDictionary::Column::Char: + case Col::Char: { const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; @@ -1549,10 +1754,10 @@ Val::calcnokey(Par par) } unsigned v = par.m_range + r; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: m_uint32 = v; break; - case NdbDictionary::Column::Char: + case Col::Char: { const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; @@ -1609,7 +1814,7 @@ Val::cmp(const Val& val2) const col.verify(val2.dataaddr()); // compare switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: { if (m_uint32 < val2.m_uint32) return -1; @@ -1618,7 +1823,7 @@ Val::cmp(const Val& val2) const return 0; } break; - case NdbDictionary::Column::Char: + case Col::Char: { const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; @@ -1657,10 +1862,10 @@ operator<<(NdbOut& out, const Val& val) return out; } switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: out << val.m_uint32; break; - case NdbDictionary::Column::Char: + case Col::Char: { char buf[4 * 8000]; char *p = buf; @@ -1697,19 +1902,25 @@ struct Row { const Tab& m_tab; Val** m_val; bool m_exist; - enum Op { NoOp = 0, ReadOp, InsOp, UpdOp, DelOp }; + enum Op { NoOp = 0, ReadOp = 1, InsOp = 2, UpdOp = 4, DelOp = 8, AnyOp = 15 }; Op m_pending; + Row* m_dbrow; // copy of db row before update Row(const Tab& tab); ~Row(); void copy(const Row& row2); - void 
calc(Par par, unsigned i); + void calc(Par par, unsigned i, unsigned mask = 0); + const Row& dbrow() const; int verify(const Row& row2) const; int insrow(Par par); int updrow(Par par); + int updrow(Par par, const ITab& itab); int delrow(Par par); + int delrow(Par par, const ITab& itab); int selrow(Par par); + int selrow(Par par, const ITab& itab); int setrow(Par par); int cmp(const Row& row2) const; + int cmp(const Row& row2, const ITab& itab) const; private: Row& operator=(const Row& row2); }; @@ -1724,6 +1935,7 @@ Row::Row(const Tab& tab) : } m_exist = false; m_pending = NoOp; + m_dbrow = 0; } Row::~Row() @@ -1733,6 +1945,7 @@ Row::~Row() delete m_val[k]; } delete [] m_val; + delete m_dbrow; } void @@ -1745,27 +1958,49 @@ Row::copy(const Row& row2) const Val& val2 = *row2.m_val[k]; val.copy(val2); } + m_exist = row2.m_exist; + m_pending = row2.m_pending; + if (row2.m_dbrow == 0) { + m_dbrow = 0; + } else { + assert(row2.m_dbrow->m_dbrow == 0); + if (m_dbrow == 0) + m_dbrow = new Row(tab); + m_dbrow->copy(*row2.m_dbrow); + } } void -Row::calc(Par par, unsigned i) +Row::calc(Par par, unsigned i, unsigned mask) { const Tab& tab = m_tab; for (unsigned k = 0; k < tab.m_cols; k++) { - Val& val = *m_val[k]; - val.calc(par, i); + if (! (mask & (1 << k))) { + Val& val = *m_val[k]; + val.calc(par, i); + } } } +const Row& +Row::dbrow() const +{ + if (m_dbrow == 0) + return *this; + assert(m_pending == Row::UpdOp || m_pending == Row::DelOp); + return *m_dbrow; +} + int Row::verify(const Row& row2) const { const Tab& tab = m_tab; - assert(&tab == &row2.m_tab && m_exist && row2.m_exist); + const Row& row1 = *this; + assert(&row1.m_tab == &row2.m_tab && row1.m_exist && row2.m_exist); for (unsigned k = 0; k < tab.m_cols; k++) { - const Val& val = *m_val[k]; + const Val& val1 = *row1.m_val[k]; const Val& val2 = *row2.m_val[k]; - CHK(val.verify(val2) == 0); + CHK(val1.verify(val2) == 0); } return 0; } @@ -1780,7 +2015,15 @@ Row::insrow(Par par) CHKCON(con.m_op->insertTuple() == 0, con); for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; - CHK(val.setval(par) == 0); + const Col& col = val.m_col; + if (col.m_pk) + CHK(val.equal(par) == 0); + } + for (unsigned k = 0; k < tab.m_cols; k++) { + const Val& val = *m_val[k]; + const Col& col = val.m_col; + if (! col.m_pk) + CHK(val.setval(par) == 0); } m_pending = InsOp; return 0; @@ -1797,16 +2040,40 @@ Row::updrow(Par par) for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; const Col& col = val.m_col; - if (! col.m_pk) - continue; - CHK(val.setval(par) == 0); + if (col.m_pk) + CHK(val.equal(par) == 0); } for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; const Col& col = val.m_col; - if (col.m_pk) - continue; - CHK(val.setval(par) == 0); + if (! col.m_pk) + CHK(val.setval(par) == 0); + } + m_pending = UpdOp; + return 0; +} + +int +Row::updrow(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = m_tab; + assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab); + assert(m_exist); + CHK(con.getNdbIndexOperation(itab, tab) == 0); + CHKCON(con.m_op->updateTuple() == 0, con); + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + const Val& val = *m_val[m]; + CHK(val.equal(par, icol) == 0); + } + for (unsigned k = 0; k < tab.m_cols; k++) { + const Val& val = *m_val[k]; + const Col& col = val.m_col; + if (! 
col.m_pk) + CHK(val.setval(par) == 0); } m_pending = UpdOp; return 0; @@ -1824,7 +2091,27 @@ Row::delrow(Par par) const Val& val = *m_val[k]; const Col& col = val.m_col; if (col.m_pk) - CHK(val.setval(par) == 0); + CHK(val.equal(par) == 0); + } + m_pending = DelOp; + return 0; +} + +int +Row::delrow(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = m_tab; + assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab); + assert(m_exist); + CHK(con.getNdbIndexOperation(itab, tab) == 0); + CHKCON(con.m_op->deleteTuple() == 0, con); + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + const Val& val = *m_val[m]; + CHK(val.equal(par, icol) == 0); } m_pending = DelOp; return 0; @@ -1841,7 +2128,25 @@ Row::selrow(Par par) const Val& val = *m_val[k]; const Col& col = val.m_col; if (col.m_pk) - CHK(val.setval(par) == 0); + CHK(val.equal(par) == 0); + } + return 0; +} + +int +Row::selrow(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = m_tab; + assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab); + CHK(con.getNdbIndexOperation(itab, tab) == 0); + CHKCON(con.m_op->readTuple() == 0, con); + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + const Val& val = *m_val[m]; + CHK(val.equal(par, icol) == 0); } return 0; } @@ -1876,6 +2181,40 @@ Row::cmp(const Row& row2) const return c; } +int +Row::cmp(const Row& row2, const ITab& itab) const +{ + const Tab& tab = m_tab; + int c = 0; + for (unsigned i = 0; i < itab.m_icols; i++) { + const ICol& icol = *itab.m_icol[i]; + const Col& col = icol.m_col; + unsigned k = col.m_num; + assert(k < tab.m_cols); + const Val& val = *m_val[k]; + const Val& val2 = *row2.m_val[k]; + if ((c = val.cmp(val2)) != 0) + break; + } + return c; +} + +static NdbOut& +operator<<(NdbOut& out, const Row::Op op) +{ + if (op == Row::NoOp) + out << "NoOp"; + else if (op == Row::InsOp) + out << "InsOp"; + else if (op == Row::UpdOp) + out << "UpdOp"; + else if (op == Row::DelOp) + out << "DelOp"; + else + out << op; + return out; +} + static NdbOut& operator<<(NdbOut& out, const Row& row) { @@ -1885,10 +2224,21 @@ operator<<(NdbOut& out, const Row& row) out << " "; out << *row.m_val[i]; } - out << " [exist=" << row.m_exist; + out << " exist=" << row.m_exist; if (row.m_pending) out << " pending=" << row.m_pending; - out << "]"; + if (row.m_dbrow != 0) + out << " [dbrow=" << *row.m_dbrow << "]"; + return out; +} + +static NdbOut& +operator<<(NdbOut& out, const Row* rowptr) +{ + if (rowptr == 0) + out << "null"; + else + out << *rowptr; return out; } @@ -1898,38 +2248,47 @@ struct Set { const Tab& m_tab; unsigned m_rows; Row** m_row; - Row** m_saverow; + unsigned* m_rowkey; // maps row number (from 0) in scan to tuple key Row* m_keyrow; NdbRecAttr** m_rec; Set(const Tab& tab, unsigned rows); ~Set(); void reset(); unsigned count() const; - // row methods + // old and new values bool exist(unsigned i) const; - Row::Op pending(unsigned i) const; + void dbsave(unsigned i); + void calc(Par par, unsigned i, unsigned mask = 0); + bool pending(unsigned i, unsigned mask) const; void notpending(unsigned i); void notpending(const Lst& lst); - void calc(Par par, unsigned i); + void dbdiscard(unsigned i); + void dbdiscard(const Lst& lst); + const Row& dbrow(unsigned i) const; + // operations int insrow(Par par, unsigned i); int updrow(Par par, unsigned i); + 
int updrow(Par par, const ITab& itab, unsigned i); int delrow(Par par, unsigned i); - int selrow(Par par, unsigned i); + int delrow(Par par, const ITab& itab, unsigned i); + int selrow(Par par, const Row& keyrow); + int selrow(Par par, const ITab& itab, const Row& keyrow); + // set and get + void setkey(Par par, const Row& keyrow); + void setkey(Par par, const ITab& itab, const Row& keyrow); int setrow(Par par, unsigned i); int getval(Par par); int getkey(Par par, unsigned* i); - int putval(unsigned i, bool force); - // set methods + int putval(unsigned i, bool force, unsigned n = ~0); + // verify int verify(const Set& set2) const; - void savepoint(); - void commit(); - void rollback(); + int verifyorder(const ITab& itab, bool descending) const; // protect structure NdbMutex* m_mutex; - void lock() { + void lock() const { NdbMutex_Lock(m_mutex); } - void unlock() { + void unlock() const { NdbMutex_Unlock(m_mutex); } private: @@ -1945,7 +2304,11 @@ Set::Set(const Tab& tab, unsigned rows) : // allocate on need to save space m_row[i] = 0; } - m_saverow = 0; + m_rowkey = new unsigned [m_rows]; + for (unsigned n = 0; n < m_rows; n++) { + // initialize to null + m_rowkey[n] = ~0; + } m_keyrow = new Row(tab); m_rec = new NdbRecAttr* [tab.m_cols]; for (unsigned k = 0; k < tab.m_cols; k++) { @@ -1959,11 +2322,9 @@ Set::~Set() { for (unsigned i = 0; i < m_rows; i++) { delete m_row[i]; - if (m_saverow != 0) - delete m_saverow[i]; } delete [] m_row; - delete [] m_saverow; + delete [] m_rowkey; delete m_keyrow; delete [] m_rec; NdbMutex_Destroy(m_mutex); @@ -1994,6 +2355,8 @@ Set::count() const return count; } +// old and new values + bool Set::exist(unsigned i) const { @@ -2003,13 +2366,37 @@ Set::exist(unsigned i) const return m_row[i]->m_exist; } -Row::Op -Set::pending(unsigned i) const +void +Set::dbsave(unsigned i) +{ + const Tab& tab = m_tab; + assert(i < m_rows && m_row[i] != 0); + Row& row = *m_row[i]; + LL5("dbsave " << i << ": " << row); + assert(row.m_exist && ! 
row.m_pending && row.m_dbrow == 0); + // could swap pointers but making copy is safer + Row* rowptr = new Row(tab); + rowptr->copy(row); + row.m_dbrow = rowptr; +} + +void +Set::calc(Par par, unsigned i, unsigned mask) +{ + const Tab& tab = m_tab; + if (m_row[i] == 0) + m_row[i] = new Row(tab); + Row& row = *m_row[i]; + row.calc(par, i, mask); +} + +bool +Set::pending(unsigned i, unsigned mask) const { assert(i < m_rows); if (m_row[i] == 0) // not allocated => not pending return Row::NoOp; - return m_row[i]->m_pending; + return m_row[i]->m_pending & mask; } void @@ -2017,10 +2404,13 @@ Set::notpending(unsigned i) { assert(m_row[i] != 0); Row& row = *m_row[i]; - if (row.m_pending == Row::InsOp) + if (row.m_pending == Row::InsOp) { row.m_exist = true; - if (row.m_pending == Row::DelOp) + } else if (row.m_pending == Row::UpdOp) { + ; + } else if (row.m_pending == Row::DelOp) { row.m_exist = false; + } row.m_pending = Row::NoOp; } @@ -2034,15 +2424,35 @@ Set::notpending(const Lst& lst) } void -Set::calc(Par par, unsigned i) +Set::dbdiscard(unsigned i) { - const Tab& tab = m_tab; - if (m_row[i] == 0) - m_row[i] = new Row(tab); + assert(m_row[i] != 0); Row& row = *m_row[i]; - row.calc(par, i); + LL5("dbdiscard " << i << ": " << row); + assert(row.m_dbrow != 0); + delete row.m_dbrow; + row.m_dbrow = 0; } +const Row& +Set::dbrow(unsigned i) const +{ + assert(m_row[i] != 0); + Row& row = *m_row[i]; + return row.dbrow(); +} + +void +Set::dbdiscard(const Lst& lst) +{ + for (unsigned j = 0; j < lst.m_cnt; j++) { + unsigned i = lst.m_arr[j]; + dbdiscard(i); + } +} + +// operations + int Set::insrow(Par par, unsigned i) { @@ -2061,6 +2471,15 @@ Set::updrow(Par par, unsigned i) return 0; } +int +Set::updrow(Par par, const ITab& itab, unsigned i) +{ + assert(m_row[i] != 0); + Row& row = *m_row[i]; + CHK(row.updrow(par, itab) == 0); + return 0; +} + int Set::delrow(Par par, unsigned i) { @@ -2071,15 +2490,67 @@ Set::delrow(Par par, unsigned i) } int -Set::selrow(Par par, unsigned i) +Set::delrow(Par par, const ITab& itab, unsigned i) +{ + assert(m_row[i] != 0); + Row& row = *m_row[i]; + CHK(row.delrow(par, itab) == 0); + return 0; +} + +int +Set::selrow(Par par, const Row& keyrow) { Con& con = par.con(); - m_keyrow->calc(par, i); + const Tab& tab = par.tab(); + setkey(par, keyrow); + LL5("selrow " << tab.m_name << ": keyrow: " << keyrow); CHK(m_keyrow->selrow(par) == 0); CHK(getval(par) == 0); return 0; } +int +Set::selrow(Par par, const ITab& itab, const Row& keyrow) +{ + Con& con = par.con(); + setkey(par, itab, keyrow); + LL5("selrow " << itab.m_name << ": keyrow: " << keyrow); + CHK(m_keyrow->selrow(par, itab) == 0); + CHK(getval(par) == 0); + return 0; +} + +// set and get + +void +Set::setkey(Par par, const Row& keyrow) +{ + const Tab& tab = m_tab; + for (unsigned k = 0; k < tab.m_cols; k++) { + const Col& col = *tab.m_col[k]; + if (col.m_pk) { + Val& val1 = *m_keyrow->m_val[k]; + const Val& val2 = *keyrow.dbrow().m_val[k]; + val1.copy(val2); + } + } +} + +void +Set::setkey(Par par, const ITab& itab, const Row& keyrow) +{ + const Tab& tab = m_tab; + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + Val& val1 = *m_keyrow->m_val[m]; + const Val& val2 = *keyrow.dbrow().m_val[m]; + val1.copy(val2); + } +} + int Set::setrow(Par par, unsigned i) { @@ -2114,7 +2585,7 @@ Set::getkey(Par par, unsigned* i) } int -Set::putval(unsigned i, bool force) +Set::putval(unsigned i, bool force, unsigned n) { const Tab& tab = 
m_tab; if (m_row[i] == 0) @@ -2135,55 +2606,55 @@ Set::putval(unsigned i, bool force) } if (! row.m_exist) row.m_exist = true; + if (n != ~0) + m_rowkey[n] = i; return 0; } +// verify + int Set::verify(const Set& set2) const { - const Tab& tab = m_tab; - assert(&tab == &set2.m_tab && m_rows == set2.m_rows); - LL3("verify set1 count=" << count() << " vs set2 count=" << set2.count()); + assert(&m_tab == &set2.m_tab && m_rows == set2.m_rows); + LL4("verify set1 count=" << count() << " vs set2 count=" << set2.count()); for (unsigned i = 0; i < m_rows; i++) { - CHK(exist(i) == set2.exist(i)); - if (! exist(i)) - continue; - Row& row = *m_row[i]; - Row& row2 = *set2.m_row[i]; - CHK(row.verify(row2) == 0); + bool ok = true; + if (exist(i) != set2.exist(i)) { + ok = false; + } else if (exist(i)) { + if (dbrow(i).verify(set2.dbrow(i)) != 0) + ok = false; + } + if (! ok) { + LL1("verify failed: key=" << i << " row1=" << m_row[i] << " row2=" << set2.m_row[i]); + CHK(0 == 1); + } } return 0; } -void -Set::savepoint() +int +Set::verifyorder(const ITab& itab, bool descending) const { const Tab& tab = m_tab; - assert(m_saverow == 0); - m_saverow = new Row* [m_rows]; - for (unsigned i = 0; i < m_rows; i++) { - if (m_row[i] == 0) - m_saverow[i] = 0; - else { - m_saverow[i] = new Row(tab); - m_saverow[i]->copy(*m_row[i]); - } + for (unsigned n = 0; n < m_rows; n++) { + unsigned i2 = m_rowkey[n]; + if (i2 == ~0) + break; + if (n == 0) + continue; + unsigned i1 = m_rowkey[n - 1]; + assert(i1 < m_rows && i2 < m_rows); + const Row& row1 = *m_row[i1]; + const Row& row2 = *m_row[i2]; + assert(row1.m_exist && row2.m_exist); + if (! descending) + CHK(row1.cmp(row2, itab) <= 0); + else + CHK(row1.cmp(row2, itab) >= 0); } -} - -void -Set::commit() -{ - delete [] m_saverow; - m_saverow = 0; -} - -void -Set::rollback() -{ - assert(m_saverow != 0); - m_row = m_saverow; - m_saverow = 0; + return 0; } static NdbOut& @@ -2384,7 +2855,9 @@ BSet::filter(const Set& set, Set& set2) const for (unsigned i = 0; i < set.m_rows; i++) { if (! set.exist(i)) continue; - const Row& row = *set.m_row[i]; + set.lock(); + const Row& row = set.dbrow(i); + set.unlock(); if (! g_store_null_key) { bool ok1 = false; for (unsigned k = 0; k < itab.m_icols; k++) { @@ -2430,7 +2903,6 @@ BSet::filter(const Set& set, Set& set2) const Row& row2 = *set2.m_row[i]; assert(! row2.m_exist); row2.copy(row); - row2.m_exist = true; } } @@ -2451,15 +2923,16 @@ static int pkinsert(Par par) { Con& con = par.con(); + const Tab& tab = par.tab(); Set& set = par.set(); - LL3("pkinsert"); + LL3("pkinsert " << tab.m_name); CHK(con.startTransaction() == 0); Lst lst; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? 
j : urandom(par.m_rows); unsigned i = thrrow(par, j2); set.lock(); - if (set.exist(i) || set.pending(i)) { + if (set.exist(i) || set.pending(i, Row::AnyOp)) { set.unlock(); continue; } @@ -2473,7 +2946,7 @@ pkinsert(Par par) CHK(con.execute(Commit, deadlock) == 0); con.closeTransaction(); if (deadlock) { - LL1("pkinsert: stop on deadlock"); + LL1("pkinsert: stop on deadlock [at 1]"); return 0; } set.lock(); @@ -2488,7 +2961,7 @@ pkinsert(Par par) CHK(con.execute(Commit, deadlock) == 0); con.closeTransaction(); if (deadlock) { - LL1("pkinsert: stop on deadlock"); + LL1("pkinsert: stop on deadlock [at 2]"); return 0; } set.lock(); @@ -2504,8 +2977,9 @@ static int pkupdate(Par par) { Con& con = par.con(); + const Tab& tab = par.tab(); Set& set = par.set(); - LL3("pkupdate"); + LL3("pkupdate " << tab.m_name); CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; @@ -2513,10 +2987,11 @@ pkupdate(Par par) unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { set.unlock(); continue; } + set.dbsave(i); set.calc(par, i); CHK(set.updrow(par, i) == 0); set.unlock(); @@ -2526,12 +3001,13 @@ pkupdate(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkupdate: stop on deadlock"); + LL1("pkupdate: stop on deadlock [at 1]"); break; } con.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); @@ -2541,10 +3017,11 @@ pkupdate(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkupdate: stop on deadlock"); + LL1("pkupdate: stop on deadlock [at 1]"); } else { set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); } } @@ -2556,8 +3033,9 @@ static int pkdelete(Par par) { Con& con = par.con(); + const Tab& tab = par.tab(); Set& set = par.set(); - LL3("pkdelete"); + LL3("pkdelete " << tab.m_name); CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; @@ -2565,7 +3043,7 @@ pkdelete(Par par) unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { set.unlock(); continue; } @@ -2577,7 +3055,7 @@ pkdelete(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkdelete: stop on deadlock"); + LL1("pkdelete: stop on deadlock [at 1]"); break; } con.closeTransaction(); @@ -2592,7 +3070,7 @@ pkdelete(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkdelete: stop on deadlock"); + LL1("pkdelete: stop on deadlock [at 2]"); } else { set.lock(); set.notpending(lst); @@ -2609,19 +3087,19 @@ pkread(Par par) Con& con = par.con(); const Tab& tab = par.tab(); Set& set = par.set(); - LL3((par.m_verify ? "pkverify " : "pkread ") << tab.m_name); + LL3("pkread " << tab.m_name << " verify=" << par.m_verify); // expected const Set& set1 = set; Set set2(tab, set.m_rows); for (unsigned i = 0; i < set.m_rows; i++) { set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! 
set.exist(i)) { set.unlock(); continue; } set.unlock(); CHK(con.startTransaction() == 0); - CHK(set2.selrow(par, i) == 0); + CHK(set2.selrow(par, *set1.m_row[i]) == 0); CHK(con.execute(Commit) == 0); unsigned i2 = (unsigned)-1; CHK(set2.getkey(par, &i2) == 0 && i == i2); @@ -2659,6 +3137,146 @@ pkreadfast(Par par, unsigned count) return 0; } +// hash index operations + +static int +hashindexupdate(Par par, const ITab& itab) +{ + Con& con = par.con(); + Set& set = par.set(); + LL3("hashindexupdate " << itab.m_name); + CHK(con.startTransaction() == 0); + Lst lst; + bool deadlock = false; + for (unsigned j = 0; j < par.m_rows; j++) { + unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); + unsigned i = thrrow(par, j2); + set.lock(); + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { + set.unlock(); + continue; + } + set.dbsave(i); + // index key columns are not re-calculated + set.calc(par, i, itab.m_colmask); + CHK(set.updrow(par, itab, i) == 0); + set.unlock(); + LL4("hashindexupdate " << i << ": " << *set.m_row[i]); + lst.push(i); + if (lst.cnt() == par.m_batch) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexupdate: stop on deadlock [at 1]"); + break; + } + con.closeTransaction(); + set.lock(); + set.notpending(lst); + set.dbdiscard(lst); + set.unlock(); + lst.reset(); + CHK(con.startTransaction() == 0); + } + } + if (! deadlock && lst.cnt() != 0) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexupdate: stop on deadlock [at 1]"); + } else { + set.lock(); + set.notpending(lst); + set.dbdiscard(lst); + set.unlock(); + } + } + con.closeTransaction(); + return 0; +}; + +static int +hashindexdelete(Par par, const ITab& itab) +{ + Con& con = par.con(); + Set& set = par.set(); + LL3("hashindexdelete " << itab.m_name); + CHK(con.startTransaction() == 0); + Lst lst; + bool deadlock = false; + for (unsigned j = 0; j < par.m_rows; j++) { + unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); + unsigned i = thrrow(par, j2); + set.lock(); + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { + set.unlock(); + continue; + } + CHK(set.delrow(par, itab, i) == 0); + set.unlock(); + LL4("hashindexdelete " << i << ": " << *set.m_row[i]); + lst.push(i); + if (lst.cnt() == par.m_batch) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexdelete: stop on deadlock [at 1]"); + break; + } + con.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + lst.reset(); + CHK(con.startTransaction() == 0); + } + } + if (! deadlock && lst.cnt() != 0) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexdelete: stop on deadlock [at 2]"); + } else { + set.lock(); + set.notpending(lst); + set.unlock(); + } + } + con.closeTransaction(); + return 0; +}; + +static int +hashindexread(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = par.tab(); + Set& set = par.set(); + LL3("hashindexread " << itab.m_name << " verify=" << par.m_verify); + // expected + const Set& set1 = set; + Set set2(tab, set.m_rows); + for (unsigned i = 0; i < set.m_rows; i++) { + set.lock(); + if (! 
set.exist(i)) { + set.unlock(); + continue; + } + set.unlock(); + CHK(con.startTransaction() == 0); + CHK(set2.selrow(par, itab, *set1.m_row[i]) == 0); + CHK(con.execute(Commit) == 0); + unsigned i2 = (unsigned)-1; + CHK(set2.getkey(par, &i2) == 0 && i == i2); + CHK(set2.putval(i, false) == 0); + LL4("row " << set2.count() << ": " << *set2.m_row[i]); + con.closeTransaction(); + } + if (par.m_verify) + CHK(set1.verify(set2) == 0); + return 0; +} + // scan read static int @@ -2691,14 +3309,14 @@ scanreadtable(Par par) } unsigned i = (unsigned)-1; CHK(set2.getkey(par, &i) == 0); - CHK(set2.putval(i, false) == 0); + CHK(set2.putval(i, false, n) == 0); LL4("row " << n << ": " << *set2.m_row[i]); n++; } con.closeTransaction(); if (par.m_verify) CHK(set1.verify(set2) == 0); - LL3("scanread " << tab.m_name << " rows=" << n); + LL3("scanread " << tab.m_name << " done rows=" << n); return 0; } @@ -2745,19 +3363,22 @@ scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc) // prefer proper subset if (0 < n && n < set.m_rows) break; - if (urandom(5) == 0) + if (urandom(3) == 0) break; set1.reset(); } } else { bset.filter(set, set1); } - LL3("scanread " << itab.m_name << " bounds=" << bset.m_bvals << " verify=" << par.m_verify); + LL3("scanread " << itab.m_name << " bounds=" << bset << " verify=" << par.m_verify << " ordered=" << par.m_ordered << " descending=" << par.m_descending); LL4("expect " << set1.count() << " rows"); Set set2(tab, set.m_rows); CHK(con.startTransaction() == 0); CHK(con.getNdbScanOperation(itab, tab) == 0); - CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); + if (! par.m_ordered) + CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); + else + CHK(con.openScanOrdered(par.m_scanbat, par.m_scanpar, par.m_descending) == 0); CHK(bset.setbnd(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); @@ -2775,15 +3396,17 @@ scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc) } unsigned i = (unsigned)-1; CHK(set2.getkey(par, &i) == 0); - LL4("key " << i); - CHK(set2.putval(i, par.m_dups) == 0); - LL4("row " << n << ": " << *set2.m_row[i]); + CHK(set2.putval(i, par.m_dups, n) == 0); + LL4("key " << i << " row " << n << ": " << *set2.m_row[i]); n++; } con.closeTransaction(); - if (par.m_verify) + if (par.m_verify) { CHK(set1.verify(set2) == 0); - LL3("scanread " << itab.m_name << " rows=" << n); + if (par.m_ordered) + CHK(set2.verifyorder(itab, par.m_descending) == 0); + } + LL3("scanread " << itab.m_name << " done rows=" << n); return 0; } @@ -2821,8 +3444,10 @@ scanreadindex(Par par, const ITab& itab) { const Tab& tab = par.tab(); for (unsigned i = 0; i < par.m_subsubloop; i++) { - BSet bset(tab, itab, par.m_rows); - CHK(scanreadindex(par, itab, bset, true) == 0); + if (itab.m_type == ITab::OrderedIndex) { + BSet bset(tab, itab, par.m_rows); + CHK(scanreadindex(par, itab, bset, true) == 0); + } } return 0; } @@ -2835,7 +3460,11 @@ scanreadindex(Par par) if (tab.m_itab[i] == 0) continue; const ITab& itab = *tab.m_itab[i]; - CHK(scanreadindex(par, itab) == 0); + if (itab.m_type == ITab::OrderedIndex) { + CHK(scanreadindex(par, itab) == 0); + } else { + CHK(hashindexread(par, itab) == 0); + } } return 0; } @@ -2932,7 +3561,7 @@ scanupdatetable(Par par) if (ret == 1) break; if (deadlock) { - LL1("scanupdatetable: stop on deadlock"); + LL1("scanupdatetable: stop on deadlock [at 1]"); break; } if (par.m_scanstop != 0 && urandom(par.m_scanstop) == 0) { @@ -2944,13 +3573,14 @@ scanupdatetable(Par par) CHK(set2.getkey(par, &i) == 0); const Row& row = 
*set.m_row[i]; set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { LL4("scan update " << tab.m_name << ": skip: " << row); } else { CHKTRY(set2.putval(i, false) == 0, set.unlock()); CHKTRY(con.updateScanTuple(con2) == 0, set.unlock()); Par par2 = par; par2.m_con = &con2; + set.dbsave(i); set.calc(par, i); CHKTRY(set.setrow(par2, i) == 0, set.unlock()); LL4("scan update " << tab.m_name << ": " << row); @@ -2961,12 +3591,13 @@ scanupdatetable(Par par) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdatetable: stop on deadlock [at 2]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -2977,12 +3608,13 @@ scanupdatetable(Par par) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdatetable: stop on deadlock [at 3]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -3009,7 +3641,10 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) Set set2(tab, set.m_rows); CHK(con.startTransaction() == 0); CHK(con.getNdbScanOperation(itab, tab) == 0); - CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0); + if (! par.m_ordered) + CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0); + else + CHK(con.openScanOrderedExclusive(par.m_scanbat, par.m_scanpar, par.m_descending) == 0); CHK(bset.setbnd(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); @@ -3027,7 +3662,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) if (ret == 1) break; if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdateindex: stop on deadlock [at 1]"); break; } if (par.m_scanstop != 0 && urandom(par.m_scanstop) == 0) { @@ -3039,13 +3674,14 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) CHK(set2.getkey(par, &i) == 0); const Row& row = *set.m_row[i]; set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! 
set.exist(i) || set.pending(i, Row::AnyOp)) { LL4("scan update " << itab.m_name << ": skip: " << row); } else { CHKTRY(set2.putval(i, par.m_dups) == 0, set.unlock()); CHKTRY(con.updateScanTuple(con2) == 0, set.unlock()); Par par2 = par; par2.m_con = &con2; + set.dbsave(i); set.calc(par, i); CHKTRY(set.setrow(par2, i) == 0, set.unlock()); LL4("scan update " << itab.m_name << ": " << row); @@ -3056,12 +3692,13 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdateindex: stop on deadlock [at 2]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -3072,12 +3709,13 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdateindex: stop on deadlock [at 3]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -3097,9 +3735,13 @@ scanupdateindex(Par par, const ITab& itab) { const Tab& tab = par.tab(); for (unsigned i = 0; i < par.m_subsubloop; i++) { - BSet bset(tab, itab, par.m_rows); - bset.calc(par); - CHK(scanupdateindex(par, itab, bset) == 0); + if (itab.m_type == ITab::OrderedIndex) { + BSet bset(tab, itab, par.m_rows); + bset.calc(par); + CHK(scanupdateindex(par, itab, bset) == 0); + } else { + CHK(hashindexupdate(par, itab) == 0); + } } return 0; } @@ -3151,8 +3793,12 @@ readverifyfull(Par par) unsigned i = par.m_no - 1; if (i < tab.m_itabs && tab.m_itab[i] != 0) { const ITab& itab = *tab.m_itab[i]; - BSet bset(tab, itab, par.m_rows); - CHK(scanreadindex(par, itab, bset, false) == 0); + if (itab.m_type == ITab::OrderedIndex) { + BSet bset(tab, itab, par.m_rows); + CHK(scanreadindex(par, itab, bset, false) == 0); + } else { + CHK(hashindexread(par, itab) == 0); + } } } return 0; @@ -3162,6 +3808,11 @@ static int readverifyindex(Par par) { par.m_verify = true; + unsigned sel = urandom(10); + if (sel < 9) { + par.m_ordered = true; + par.m_descending = (sel < 5); + } CHK(scanreadindex(par) == 0); return 0; } @@ -3169,26 +3820,56 @@ readverifyindex(Par par) static int pkops(Par par) { + const Tab& tab = par.tab(); par.m_randomkey = true; for (unsigned i = 0; i < par.m_subsubloop; i++) { + unsigned j = 0; + while (j < tab.m_itabs) { + if (tab.m_itab[j] != 0) { + const ITab& itab = *tab.m_itab[j]; + if (itab.m_type == ITab::UniqueHashIndex && urandom(5) == 0) + break; + } + j++; + } unsigned sel = urandom(10); if (par.m_slno % 2 == 0) { // favor insert if (sel < 8) { CHK(pkinsert(par) == 0); } else if (sel < 9) { - CHK(pkupdate(par) == 0); + if (j == tab.m_itabs) + CHK(pkupdate(par) == 0); + else { + const ITab& itab = *tab.m_itab[j]; + CHK(hashindexupdate(par, itab) == 0); + } } else { - CHK(pkdelete(par) == 0); + if (j == tab.m_itabs) + CHK(pkdelete(par) == 0); + else { + const ITab& itab = *tab.m_itab[j]; + CHK(hashindexdelete(par, itab) == 0); + } } } else { // favor delete if (sel < 1) { CHK(pkinsert(par) == 0); } else if (sel < 2) { - CHK(pkupdate(par) == 0); + if (j == tab.m_itabs) + CHK(pkupdate(par) == 0); + else { + const ITab& itab = *tab.m_itab[j]; + CHK(hashindexupdate(par, itab) == 0); + } } else { - CHK(pkdelete(par) == 0); + if (j == tab.m_itabs) + CHK(pkdelete(par) == 0); + else { + const ITab& itab 
= *tab.m_itab[j]; + CHK(hashindexdelete(par, itab) == 0); + } } } } @@ -3208,6 +3889,10 @@ pkupdatescanread(Par par) CHK(scanreadtable(par) == 0); } else { par.m_verify = false; + if (sel < 8) { + par.m_ordered = true; + par.m_descending = (sel < 7); + } CHK(scanreadindex(par) == 0); } return 0; @@ -3227,6 +3912,10 @@ mixedoperations(Par par) } else if (sel < 6) { CHK(scanupdatetable(par) == 0); } else { + if (sel < 8) { + par.m_ordered = true; + par.m_descending = (sel < 7); + } CHK(scanupdateindex(par) == 0); } return 0; @@ -3720,7 +4409,7 @@ printtables() { Par par(g_opt); makebuiltintables(par); - ndbout << "builtin tables (index x0 is on table pk):" << endl; + ndbout << "builtin tables (x0 on pk, x=ordered z=hash):" << endl; for (unsigned j = 0; j < tabcount; j++) { if (tablist[j] == 0) continue; @@ -3744,6 +4433,7 @@ runtest(Par par) LL1("random seed: " << seed); srandom((unsigned)seed); } else if (par.m_seed != 0) + LL1("random seed: " << par.m_seed); srandom(par.m_seed); // cs assert(par.m_csname != 0); @@ -3953,7 +4643,8 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) if (strcmp(arg, "-threads") == 0) { if (++argv, --argc > 0) { g_opt.m_threads = atoi(argv[0]); - continue; + if (1 <= g_opt.m_threads) + continue; } } if (strcmp(arg, "-v") == 0) { @@ -3970,7 +4661,7 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) printhelp(); goto wrongargs; } - ndbout << "testOIBasic: unknown option " << arg; + ndbout << "testOIBasic: bad or unknown option " << arg; goto usage; } { diff --git a/ndb/test/ndbapi/testTimeout.cpp b/ndb/test/ndbapi/testTimeout.cpp index e310e12df81..ac4f257f12c 100644 --- a/ndb/test/ndbapi/testTimeout.cpp +++ b/ndb/test/ndbapi/testTimeout.cpp @@ -87,45 +87,6 @@ int runClearTable(NDBT_Context* ctx, NDBT_Step* step){ result = NDBT_FAILED; \ break; } -int runTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){ - int result = NDBT_OK; - int loops = ctx->getNumLoops(); - NdbConfig conf(GETNDB(step)->getNodeId()+1); - unsigned int nodeId = conf.getMasterNodeId(); - int stepNo = step->getStepNo(); - - int minSleep = (int)(TIMEOUT * 1.5); - int maxSleep = TIMEOUT * 2; - ndbout << "TransactionInactiveTimeout="<< TIMEOUT - << ", minSleep="<getNumLoops(); @@ -345,27 +306,6 @@ TESTCASE("DontTimeoutTransaction5", FINALIZER(resetTransactionTimeout); FINALIZER(runClearTable); } -TESTCASE("TimeoutTransaction", - "Test that the transaction does timeout "\ - "if we sleep during the transaction. Use a sleep "\ - "value which is larger than TransactionInactiveTimeout"){ - INITIALIZER(runLoadTable); - INITIALIZER(setTransactionTimeout); - STEPS(runTimeoutTrans, 1); - FINALIZER(resetTransactionTimeout); - FINALIZER(runClearTable); -} -TESTCASE("TimeoutTransaction5", - "Test that the transaction does timeout " \ - "if we sleep during the transaction. Use a sleep " \ - "value which is larger than TransactionInactiveTimeout" \ - "Five simultaneous threads"){ - INITIALIZER(runLoadTable); - INITIALIZER(setTransactionTimeout); - STEPS(runTimeoutTrans, 5); - FINALIZER(resetTransactionTimeout); - FINALIZER(runClearTable); -} TESTCASE("TimeoutRandTransaction", "Test that the transaction does timeout "\ "if we sleep during the transaction. 
Use a sleep "\ diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index dce89766319..fed7b49cec7 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -147,34 +147,9 @@ max-time: 500 cmd: testBasic args: -n MassiveRollback2 T1 T6 T13 -#-m 500 1: testBasic -n ReadConsistency T6 max-time: 500 cmd: testTimeout -args: -n DontTimeoutTransaction T1 - -max-time: 500 -cmd: testTimeout -args: -n DontTimeoutTransaction5 T1 - -max-time: 500 -cmd: testTimeout -args: -n TimeoutTransaction T1 - -max-time: 500 -cmd: testTimeout -args: -n TimeoutTransaction5 T1 - -max-time: 500 -cmd: testTimeout -args: -n BuddyTransNoTimeout T1 - -max-time: 500 -cmd: testTimeout -args: -n BuddyTransNoTimeout5 T1 - -max-time: 500 -cmd: testTimeout -args: -n TimeoutRandTransaction T1 +args: T1 # SCAN TESTS # @@ -490,9 +465,73 @@ max-time: 150000 cmd: testOperations args: -max-time: 150000 +max-time: 1500 cmd: testTransactions -args: +args: T1 + +max-time: 1500 +cmd: testTransactions +args: T2 + +max-time: 1500 +cmd: testTransactions +args: T3 + +max-time: 1500 +cmd: testTransactions +args: T4 + +max-time: 1500 +cmd: testTransactions +args: T5 + +max-time: 1500 +cmd: testTransactions +args: T6 + +max-time: 1500 +cmd: testTransactions +args: T7 + +max-time: 1500 +cmd: testTransactions +args: T8 + +max-time: 1500 +cmd: testTransactions +args: T9 + +max-time: 1500 +cmd: testTransactions +args: T10 + +max-time: 1500 +cmd: testTransactions +args: T11 + +max-time: 1500 +cmd: testTransactions +args: T12 + +max-time: 1500 +cmd: testTransactions +args: T13 + +max-time: 1500 +cmd: testTransactions +args: T14 + +max-time: 1500 +cmd: testTransactions +args: I1 + +max-time: 1500 +cmd: testTransactions +args: I2 + +max-time: 1500 +cmd: testTransactions +args: I3 max-time: 1500 cmd: testRestartGci @@ -502,7 +541,7 @@ max-time: 600 cmd: testBlobs args: -max-time: 2500 +max-time: 5000 cmd: testOIBasic args: @@ -510,6 +549,10 @@ max-time: 2500 cmd: testBitfield args: +max-time: 2500 +cmd: testPartitioning +args: + # # # SYSTEM RESTARTS diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt index 5c0b2821d85..9527df600f0 100644 --- a/ndb/test/run-test/daily-devel-tests.txt +++ b/ndb/test/run-test/daily-devel-tests.txt @@ -1,6 +1,6 @@ -max-time: 2500 +max-time: 25000 cmd: atrt-mysql-test-run -args: --do-test=ndb --force +args: --force # # INDEX diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 7a641afd8f3..d9207386bf0 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -1097,7 +1097,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, pNdb->closeTransaction(pTrans); return NDBT_FAILED; } - } else{ + } else { if(pIndexScanOp) { int rows_found = 0; @@ -1759,7 +1759,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, pNdb->closeTransaction(pTrans); return NDBT_FAILED; } - check = 0; + check = sOp->readTuples(); } if( check == -1 ) { @@ -1948,7 +1948,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } if(ordered && check != 0){ - g_err << "Row: " << r << " not found!!" << endl; + g_err << check << " - Row: " << r << " not found!!" 
<< endl; pNdb->closeTransaction(pTrans); return NDBT_FAILED; } diff --git a/ndb/test/src/UtilTransactions.cpp b/ndb/test/src/UtilTransactions.cpp index f4ac466820f..a7c9751ed09 100644 --- a/ndb/test/src/UtilTransactions.cpp +++ b/ndb/test/src/UtilTransactions.cpp @@ -629,7 +629,7 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, parallelism = 1; while (true){ - +restart: if (retryAttempt >= retryMax){ g_info << "ERROR: has retried this operation " << retryAttempt << " times, failing!" << endl; @@ -719,11 +719,26 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, // ndbout << row.c_str().c_str() << endl; - if (readRowFromTableAndIndex(pNdb, pTrans, pIndex, row) != NDBT_OK){ + + while((eof= pOp->nextResult(false)) == 0); + if(eof == 2) + eof = pOp->nextResult(true); // this should give -1 + if(eof == -1) + { + const NdbError err = pTrans->getNdbError(); + + if (err.status == NdbError::TemporaryError){ + ERR(err); + pNdb->closeTransaction(pTrans); + NdbSleep_MilliSleep(50); + retryAttempt++; + goto restart; + } + } pNdb->closeTransaction(pTrans); return NDBT_FAILED; } @@ -736,7 +751,6 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, pNdb->closeTransaction(pTrans); NdbSleep_MilliSleep(50); retryAttempt++; - rows--; continue; } ERR(err); @@ -811,7 +825,6 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, check = pOp->readTuple(); if( check == -1 ) { ERR(pTrans1->getNdbError()); - pNdb->closeTransaction(pTrans1); goto close_all; } @@ -943,7 +956,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, #if VERBOSE printf("\n"); #endif - + scanTrans->refresh(); check = pTrans1->execute(Commit); if( check == -1 ) { const NdbError err = pTrans1->getNdbError(); diff --git a/server-tools/instance-manager/Makefile.am b/server-tools/instance-manager/Makefile.am index cc376fe2d18..a15ff9321cb 100644 --- a/server-tools/instance-manager/Makefile.am +++ b/server-tools/instance-manager/Makefile.am @@ -59,7 +59,7 @@ client_settings.h: Makefile rm -f $(srcdir)/client_settings.h @LN_CP_F@ $(top_srcdir)/sql/client_settings.h $(srcdir)/client_settings.h -EXTRA_PROGRAMS= mysqlmanager +bin_PROGRAMS= mysqlmanager mysqlmanager_SOURCES= command.cc command.h mysqlmanager.cc \ manager.h manager.cc log.h log.cc \ diff --git a/server-tools/instance-manager/mysql_connection.cc b/server-tools/instance-manager/mysql_connection.cc index 0ebcb0eea8d..83b046c1e5b 100644 --- a/server-tools/instance-manager/mysql_connection.cc +++ b/server-tools/instance-manager/mysql_connection.cc @@ -38,6 +38,18 @@ Command *parse_command(Command_factory * factory, const char *text); +Mysql_connection_thread_args::Mysql_connection_thread_args( + struct st_vio *vio_arg, + Thread_registry &thread_registry_arg, + const User_map &user_map_arg, + ulong connection_id_arg, + Instance_map &instance_map_arg) : + vio(vio_arg) + ,thread_registry(thread_registry_arg) + ,user_map(user_map_arg) + ,connection_id(connection_id_arg) + ,instance_map(instance_map_arg) + {} /* MySQL connection - handle one connection with mysql command line client @@ -179,9 +191,11 @@ void Mysql_connection_thread::run() int Mysql_connection_thread::check_connection() { ulong pkt_len=0; // to hold client reply length + /* maximum size of the version string */ + enum { MAX_VERSION_LENGTH= 80 }; /* buffer for the first packet */ /* packet contains: */ - char buff[mysqlmanager_version_length + 1 + // server version, 0-ended + char buff[MAX_VERSION_LENGTH + 1 + // server version, 0-ended 4 + // connection id SCRAMBLE_LENGTH + 2 + // scramble (in 2 pieces) 
18]; // server variables: flags, diff --git a/server-tools/instance-manager/mysql_connection.h b/server-tools/instance-manager/mysql_connection.h index 225f4a352ce..e0109ce234f 100644 --- a/server-tools/instance-manager/mysql_connection.h +++ b/server-tools/instance-manager/mysql_connection.h @@ -48,13 +48,7 @@ struct Mysql_connection_thread_args Thread_registry &thread_registry_arg, const User_map &user_map_arg, ulong connection_id_arg, - Instance_map &instance_map_arg) : - vio(vio_arg) - ,thread_registry(thread_registry_arg) - ,user_map(user_map_arg) - ,connection_id(connection_id_arg) - ,instance_map(instance_map_arg) - {} + Instance_map &instance_map_arg); }; #endif // INCLUDES_MYSQL_INSTANCE_MANAGER_MYSQL_CONNECTION_H diff --git a/server-tools/instance-manager/priv.cc b/server-tools/instance-manager/priv.cc index 8112ebd41d8..4b47fe5b593 100644 --- a/server-tools/instance-manager/priv.cc +++ b/server-tools/instance-manager/priv.cc @@ -16,6 +16,10 @@ #include "priv.h" +/* + The following string must be less then 80 characters, as + mysql_connection.cc relies on it +*/ const char mysqlmanager_version[] = "0.2-alpha"; const int mysqlmanager_version_length= sizeof(mysqlmanager_version) - 1; diff --git a/sql-common/client.c b/sql-common/client.c index fd65ed01462..a014e398d8b 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -98,9 +98,6 @@ my_bool net_flush(NET *net); # include #endif -#ifndef INADDR_NONE -#define INADDR_NONE -1 -#endif #if defined(MSDOS) || defined(__WIN__) #define perror(A) #else @@ -874,6 +871,7 @@ static const char *default_options[]= "replication-probe", "enable-reads-from-master", "repl-parse-query", "ssl-cipher", "max-allowed-packet", "protocol", "shared-memory-base-name", "multi-results", "multi-queries", "secure-auth", + "report-data-truncation", NullS }; @@ -1084,6 +1082,9 @@ void mysql_read_default_options(struct st_mysql_options *options, case 32: /* secure-auth */ options->secure_auth= TRUE; break; + case 33: /* report-data-truncation */ + options->report_data_truncation= opt_arg ? 
test(atoi(opt_arg)) : 1; + break; default: DBUG_PRINT("warning",("unknown option: %s",option[0])); } @@ -1427,6 +1428,7 @@ mysql_init(MYSQL *mysql) #endif mysql->options.methods_to_use= MYSQL_OPT_GUESS_CONNECTION; + mysql->options.report_data_truncation= TRUE; /* default */ return mysql; } @@ -2666,6 +2668,9 @@ mysql_options(MYSQL *mysql,enum mysql_option option, const char *arg) case MYSQL_SECURE_AUTH: mysql->options.secure_auth= *(my_bool *) arg; break; + case MYSQL_REPORT_DATA_TRUNCATION: + mysql->options.report_data_truncation= test(*(my_bool *) arg); + break; default: DBUG_RETURN(1); } diff --git a/sql/Makefile.am b/sql/Makefile.am index 501f9b03e0d..8ff55898ba4 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -62,7 +62,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \ parse_file.h sql_view.h sql_trigger.h \ examples/ha_example.h examples/ha_archive.h \ - examples/ha_tina.h + examples/ha_tina.h \ + ha_federated.h mysqld_SOURCES = sql_lex.cc sql_handler.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ @@ -98,7 +99,9 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \ sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \ sp_cache.cc parse_file.cc sql_trigger.cc \ examples/ha_example.cc examples/ha_archive.cc \ - examples/ha_tina.cc + examples/ha_tina.cc \ + ha_federated.cc + gen_lex_hash_SOURCES = gen_lex_hash.cc gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc index 3f176da1c7f..f754793e319 100644 --- a/sql/examples/ha_archive.cc +++ b/sql/examples/ha_archive.cc @@ -22,6 +22,7 @@ #ifdef HAVE_ARCHIVE_DB #include "ha_archive.h" +#include /* First, if you want to understand storage engines you should look at @@ -227,8 +228,7 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows) /* This method writes out the header of a meta file and returns whether or not it was successful. By setting dirty you say whether or not the file represents the actual state of the data file. - Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during - a read that the file was dirty we will force a rebuild of this file. + Upon ::open() we set to dirty, and upon ::close() we set to clean. */ int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty) { @@ -305,6 +305,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) share->use_count= 0; share->table_name_length= length; share->table_name= tmp_name; + share->crashed= FALSE; fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); strmov(share->table_name,table_name); @@ -315,24 +316,15 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1) goto error; - if (read_meta_file(share->meta_file, &share->rows_recorded)) - { - /* - The problem here is that for some reason, probably a crash, the meta - file has been corrupted. So what do we do? Well we try to rebuild it - ourself. Once that happens, we reread it, but if that fails we just - call it quits and return an error. - */ - if (rebuild_meta_file(share->table_name, share->meta_file)) - goto error; - if (read_meta_file(share->meta_file, &share->rows_recorded)) - goto error; - } /* After we read, we set the file to dirty. 
When we close, we will do the - opposite. + opposite. If the meta file will not open we assume it is crashed and + leave it up to the user to fix. */ - (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); + if (read_meta_file(share->meta_file, &share->rows_recorded)) + share->crashed= TRUE; + else + (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); /* It is expensive to open and close the data files and since you can't have a gzip file that can be both read and written we keep a writer open @@ -408,7 +400,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked) DBUG_ENTER("ha_archive::open"); if (!(share= get_share(name, table))) - DBUG_RETURN(1); + DBUG_RETURN(-1); thr_lock_data_init(&share->lock,&lock,NULL); if ((archive= gzopen(share->data_file_name, "rb")) == NULL) @@ -530,6 +522,9 @@ int ha_archive::write_row(byte * buf) z_off_t written; DBUG_ENTER("ha_archive::write_row"); + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -578,6 +573,9 @@ int ha_archive::rnd_init(bool scan) { DBUG_ENTER("ha_archive::rnd_init"); int read; // gzread() returns int, and we use this to check the header + + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* We rewind the file so that we can read from the beginning if scan */ if (scan) @@ -672,6 +670,9 @@ int ha_archive::rnd_next(byte *buf) int rc; DBUG_ENTER("ha_archive::rnd_next"); + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + if (!scan_rows) DBUG_RETURN(HA_ERR_END_OF_FILE); scan_rows--; @@ -722,22 +723,23 @@ int ha_archive::rnd_pos(byte * buf, byte *pos) } /* - This method rebuilds the meta file. It does this by walking the datafile and + This method repairs the meta file. It does this by walking the datafile and rewriting the meta file. */ -int ha_archive::rebuild_meta_file(char *table_name, File meta_file) +int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt) { int rc; byte *buf; ulonglong rows_recorded= 0; - gzFile rebuild_file; /* Archive file we are working with */ + gzFile rebuild_file; // Archive file we are working with + File meta_file; // Meta file we use char data_file_name[FN_REFLEN]; - DBUG_ENTER("ha_archive::rebuild_meta_file"); + DBUG_ENTER("ha_archive::repair"); /* Open up the meta file to recreate it. */ - fn_format(data_file_name, table_name, "", ARZ, + fn_format(data_file_name, share->table_name, "", ARZ, MY_REPLACE_EXT|MY_UNPACK_FILENAME); if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL) DBUG_RETURN(errno ? 
errno : -1); @@ -767,11 +769,18 @@ int ha_archive::rebuild_meta_file(char *table_name, File meta_file) */ if (rc == HA_ERR_END_OF_FILE) { - (void)write_meta_file(meta_file, rows_recorded, FALSE); + fn_format(data_file_name,share->table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); + if ((meta_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1) + { + rc= HA_ERR_CRASHED_ON_USAGE; + goto error; + } + (void)write_meta_file(meta_file, rows_recorded, TRUE); rc= 0; } my_free((gptr) buf, MYF(0)); + share->crashed= FALSE; error: gzclose(rebuild_file); @@ -790,13 +799,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) char block[IO_SIZE]; char writer_filename[FN_REFLEN]; + /* Closing will cause all data waiting to be flushed */ + gzclose(share->archive_write); + share->archive_write= NULL; + /* Lets create a file to contain the new data */ fn_format(writer_filename, share->table_name, "", ARN, MY_REPLACE_EXT|MY_UNPACK_FILENAME); - /* Closing will cause all data waiting to be flushed, to be flushed */ - gzclose(share->archive_write); - if ((reader= gzopen(share->data_file_name, "rb")) == NULL) DBUG_RETURN(-1); @@ -814,16 +824,6 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) my_rename(writer_filename,share->data_file_name,MYF(0)); - /* - We reopen the file in case some IO is waiting to go through. - In theory the table is closed right after this operation, - but it is possible for IO to still happen. - I may be being a bit too paranoid right here. - */ - if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) - DBUG_RETURN(errno ? errno : -1); - share->dirty= FALSE; - DBUG_RETURN(0); } @@ -880,13 +880,27 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, void ha_archive::info(uint flag) { DBUG_ENTER("ha_archive::info"); - /* This should be an accurate number now, though bulk and delayed inserts can cause the number to be inaccurate. 
*/ records= share->rows_recorded; deleted= 0; + /* Costs quite a bit more to get all information */ + if (flag & HA_STATUS_TIME) + { + MY_STAT file_stat; // Stat information for the data file + + VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME))); + + mean_rec_length= table->reclength + buffer.alloced_length(); + data_file_length= file_stat.st_size; + create_time= file_stat.st_ctime; + update_time= file_stat.st_mtime; + max_data_file_length= share->rows_recorded * mean_rec_length; + } + delete_length= 0; + index_file_length=0; DBUG_VOID_RETURN; } @@ -900,7 +914,7 @@ void ha_archive::info(uint flag) */ void ha_archive::start_bulk_insert(ha_rows rows) { - DBUG_ENTER("ha_archive::info"); + DBUG_ENTER("ha_archive::start_bulk_insert"); bulk_insert= TRUE; DBUG_VOID_RETURN; } @@ -912,6 +926,7 @@ void ha_archive::start_bulk_insert(ha_rows rows) */ int ha_archive::end_bulk_insert() { + DBUG_ENTER("ha_archive::end_bulk_insert"); bulk_insert= FALSE; share->dirty= TRUE; DBUG_RETURN(0); diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h index 809f52a883b..07bc7baa400 100644 --- a/sql/examples/ha_archive.h +++ b/sql/examples/ha_archive.h @@ -35,6 +35,7 @@ typedef struct st_archive_share { File meta_file; /* Meta file we use */ gzFile archive_write; /* Archive file we are working with */ bool dirty; /* Flag for if a flush should occur */ + bool crashed; /* Meta file is crashed */ ulonglong rows_recorded; /* Number of rows in tables */ } ARCHIVE_SHARE; @@ -91,13 +92,14 @@ public: int write_meta_file(File meta_file, ulonglong rows, bool dirty); ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table); int free_share(ARCHIVE_SHARE *share); - int rebuild_meta_file(char *table_name, File meta_file); + bool auto_repair() const { return 1; } // For the moment we just do this int read_data_header(gzFile file_to_read); int write_data_header(gzFile file_to_write); void position(const byte *record); void info(uint); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); int optimize(THD* thd, HA_CHECK_OPT* check_opt); + int repair(THD* thd, HA_CHECK_OPT* check_opt); void start_bulk_insert(ha_rows rows); int end_bulk_insert(); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, diff --git a/sql/field.cc b/sql/field.cc index 6f38bd3c85a..ebeee476985 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -329,6 +329,27 @@ bool Field::field_cast_compatible(Field::field_cast_enum type) } +/* + Interpret field value as an integer but return the result as a string. + + This is used for printing bit_fields as numbers while debugging +*/ + +String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_flag) +{ + CHARSET_INFO *cs= &my_charset_bin; + uint length= 21; + longlong value= val_int(); + if (val_buffer->alloc(length)) + return 0; + length= (uint) cs->cset->longlong10_to_str(cs, (char*) val_buffer->ptr(), + length, unsigned_flag ? 10 : -10, + value); + val_buffer->length(length); + return val_buffer; +} + + /**************************************************************************** ** Functions for the base classes ** This is an unpacked number. 
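The archive handler hunks above boil down to one pattern: get_share() no longer rebuilds a bad meta file on the spot, it just marks the share as crashed; the row-level calls then return HA_ERR_CRASHED_ON_USAGE, and auto_repair() returning true lets the server drive the rebuild through the new repair() method. A minimal, self-contained sketch of that control flow (illustrative pseudo-handler code, not MySQL's handler API; the error constant and all names below are stand-ins):

  // Hypothetical sketch of the "crashed flag + auto repair" pattern used by
  // ha_archive above. The error value and struct names are made up.
  #include <cstdio>

  static const int HA_ERR_CRASHED_ON_USAGE_SKETCH = 126;   // stand-in value

  struct ShareSketch {
    bool crashed;
    unsigned long long rows_recorded;
  };

  struct ArchiveHandlerSketch {
    ShareSketch *share;

    // open(): if the meta file cannot be read, only mark the share as crashed
    bool open(bool meta_file_ok, unsigned long long rows_from_meta) {
      share->crashed= !meta_file_ok;
      share->rows_recorded= meta_file_ok ? rows_from_meta : 0;
      return true;                        // the table still opens
    }

    // every data access checks the flag first, mirroring write_row()/rnd_init()
    int write_row() {
      if (share->crashed)
        return HA_ERR_CRASHED_ON_USAGE_SKETCH;
      ++share->rows_recorded;
      return 0;
    }

    // auto_repair() == true lets the server call repair() instead of failing
    bool auto_repair() const { return true; }

    // repair(): walk the data file, recount rows, rewrite the meta file
    int repair(unsigned long long rows_found_in_data_file) {
      share->rows_recorded= rows_found_in_data_file;
      share->crashed= false;              // table is usable again
      return 0;
    }
  };

  int main() {
    ShareSketch share= ShareSketch();
    ArchiveHandlerSketch h= { &share };
    h.open(false, 0);                                  // simulate a bad meta file
    std::printf("write_row -> %d\n", h.write_row());   // crashed error
    h.repair(42);                                      // server-driven repair
    std::printf("write_row -> %d\n", h.write_row());   // 0 == success
  }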
@@ -500,6 +521,22 @@ Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table) return tmp; } + +Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field *tmp; + if ((tmp= new_field(root, new_table))) + { + tmp->ptr= new_ptr; + tmp->null_ptr= new_null_ptr; + tmp->null_bit= new_null_bit; + } + return tmp; +} + + /**************************************************************************** Field_null, a field that always return NULL ****************************************************************************/ @@ -4657,7 +4694,19 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table) } /**************************************************************************** -** VARCHAR type (Not available for the end user yet) + VARCHAR type + Data in field->ptr is stored as: + 1 or 2 bytes length-prefix-header (from Field_varstring::length_bytes) + data + + NOTE: + When VARCHAR is stored in a key (for handler::index_read() etc) it's always + stored with a 2 byte prefix. (Just like blob keys). + + Normally length_bytes is calculated as (field_length < 256 : 1 ? 2) + The exception is if there is a prefix key field that is part of a long + VARCHAR, in which case field_length for this may be 1 but the length_bytes + is 2. ****************************************************************************/ @@ -4686,8 +4735,11 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) from,from+length, field_length/ field_charset->mbmaxlen); - memcpy(ptr + HA_KEY_BLOB_LENGTH, from, copy_length); - int2store(ptr, copy_length); + memcpy(ptr + length_bytes, from, copy_length); + if (length_bytes == 1) + *ptr= (uchar) copy_length; + else + int2store(ptr, copy_length); if (copy_length < length) error= 1; @@ -4700,91 +4752,117 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) int Field_varstring::store(longlong nr) { char buff[64]; - int l; - CHARSET_INFO *cs=charset(); - l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr); - return Field_varstring::store(buff,(uint)l,cs); + uint length; + length= (uint) (field_charset->cset->longlong10_to_str)(field_charset, + buff, + sizeof(buff), + -10,nr); + return Field_varstring::store(buff, length, field_charset); } double Field_varstring::val_real(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); - return my_strntod(cs, ptr+HA_KEY_BLOB_LENGTH, length, (char**)0, ¬_used); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntod(field_charset, ptr+length_bytes, length, (char**) 0, + ¬_used); } longlong Field_varstring::val_int(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); - return my_strntoll(cs,ptr+HA_KEY_BLOB_LENGTH,length,10,NULL, ¬_used); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntoll(field_charset, ptr+length_bytes, length, 10, NULL, + ¬_used); } String *Field_varstring::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { - uint length=uint2korr(ptr); - val_ptr->set((const char*) ptr+HA_KEY_BLOB_LENGTH,length,field_charset); + uint length= length_bytes == 1 ? 
(uint) (uchar) *ptr : uint2korr(ptr); + val_ptr->set((const char*) ptr+length_bytes, length, field_charset); return val_ptr; } int Field_varstring::cmp(const char *a_ptr, const char *b_ptr) { - uint a_length=uint2korr(a_ptr); - uint b_length=uint2korr(b_ptr); + uint a_length, b_length; int diff; + + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } diff= field_charset->coll->strnncollsp(field_charset, (const uchar*) a_ptr+ - HA_KEY_BLOB_LENGTH, + length_bytes, a_length, (const uchar*) b_ptr+ - HA_KEY_BLOB_LENGTH, + length_bytes, b_length,0); return diff; } +/* + NOTE: varstring and blob keys are ALWAYS stored with a 2 byte length prefix +*/ + int Field_varstring::key_cmp(const byte *key_ptr, uint max_key_length) { char *blob1; - uint length= uint2korr(ptr); - CHARSET_INFO *cs= charset(); - uint char_length= max_key_length / cs->mbmaxlen; + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + uint char_length= max_key_length / field_charset->mbmaxlen; - char_length= my_charpos(cs, ptr + HA_KEY_BLOB_LENGTH, - ptr + HA_KEY_BLOB_LENGTH + length, char_length); + char_length= my_charpos(field_charset, ptr + length_bytes, + ptr + length_bytes + length, char_length); set_if_smaller(length, char_length); - return cs->coll->strnncollsp(cs, - (const uchar*) ptr+2, length, - (const uchar*) key_ptr+HA_KEY_BLOB_LENGTH, - uint2korr(key_ptr), 0); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) ptr + length_bytes, + length, + (const uchar*) key_ptr+ + HA_KEY_BLOB_LENGTH, + uint2korr(key_ptr), 0); } +/* + Compare to key segments (always 2 byte length prefix) + + NOTE + This is used only to compare key segments created for index_read(). + (keys are created and compared in key.cc) +*/ + int Field_varstring::key_cmp(const byte *a,const byte *b) { - CHARSET_INFO *cs= charset(); - return cs->coll->strnncollsp(cs, - (const uchar*) a + HA_KEY_BLOB_LENGTH, - uint2korr(a), - (const uchar*) b + HA_KEY_BLOB_LENGTH, - uint2korr(b), - 0); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a + + HA_KEY_BLOB_LENGTH, + uint2korr(a), + (const uchar*) b + + HA_KEY_BLOB_LENGTH, + uint2korr(b), + 0); } void Field_varstring::sort_string(char *to,uint length) { - uint tot_length= uint2korr(ptr); + uint tot_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); tot_length= my_strnxfrm(field_charset, (uchar*) to, length, - (uchar*) ptr+HA_KEY_BLOB_LENGTH, + (uchar*) ptr + length_bytes, tot_length); if (tot_length < length) field_charset->cset->fill(field_charset, to+tot_length,length-tot_length, @@ -4792,6 +4870,18 @@ void Field_varstring::sort_string(char *to,uint length) } +enum ha_base_keytype Field_varstring::key_type() const +{ + enum ha_base_keytype res; + + if (binary()) + res= length_bytes == 1 ? HA_KEYTYPE_VARBINARY1 : HA_KEYTYPE_VARBINARY2; + else + res= length_bytes == 1 ? HA_KEYTYPE_VARTEXT1 : HA_KEYTYPE_VARTEXT2; + return res; +} + + void Field_varstring::sql_type(String &res) const { THD *thd= table->in_use; @@ -4809,13 +4899,102 @@ void Field_varstring::sql_type(String &res) const } +/* + Functions to create a packed row. + Here the number of length bytes are depending on the given max_length +*/ + char *Field_varstring::pack(char *to, const char *from, uint max_length) { - uint length=uint2korr(from); + uint length= length_bytes == 1 ? 
(uint) (uchar) *from : uint2korr(from); set_if_smaller(max_length, field_length); if (length > max_length) length=max_length; *to++= (char) (length & 255); + if (max_length > 255) + *to++= (char) (length >> 8); + if (length) + memcpy(to, from+length_bytes, length); + return to+length; +} + + +char *Field_varstring::pack_key(char *to, const char *key, uint max_length) +{ + uint length= length_bytes == 1 ? (uint) (uchar) *key : uint2korr(key); + uint char_length= ((field_charset->mbmaxlen > 1) ? + max_length/field_charset->mbmaxlen : max_length); + key+= length_bytes; + if (length > char_length) + { + char_length= my_charpos(field_charset, key, key+length, char_length); + set_if_smaller(length, char_length); + } + *to++= (char) (length & 255); + if (max_length > 255) + *to++= (char) (length >> 8); + if (length) + memcpy(to, key, length); + return to+length; +} + + +/* + Unpack a key into a record buffer. + + SYNOPSIS + unpack_key() + to Pointer into the record buffer. + key Pointer to the packed key. + max_length Key length limit from key description. + + DESCRIPTION + A VARCHAR key has a maximum size of 64K-1. + In its packed form, the length field is one or two bytes long, + depending on 'max_length'. + + RETURN + Pointer to end of 'key' (To the next key part if multi-segment key) +*/ + +const char *Field_varstring::unpack_key(char *to, const char *key, + uint max_length) +{ + /* get length of the blob key */ + uint32 length= *((uchar*) key++); + if (max_length > 255) + length+= (*((uchar*) key++)) << 8; + + /* put the length into the record buffer */ + if (length_bytes == 1) + *ptr= (uchar) length; + else + int2store(ptr, length); + memcpy(ptr + length_bytes, key, length); + return key + length; +} + +/* + Create a packed key that will be used for storage in the index tree + + SYNOPSIS + pack_key_from_key_image() + to Store packed key segment here + from Key segment (as given to index_read()) + max_length Max length of key + + RETURN + end of key storage +*/ + +char *Field_varstring::pack_key_from_key_image(char *to, const char *from, + uint max_length) +{ + /* Key length is always stored as 2 bytes */ + uint length= uint2korr(from); + if (length > max_length) + length= max_length; + *to++= (char) (length & 255); if (max_length > 255) *to++= (char) (length >> 8); if (length) @@ -4824,34 +5003,15 @@ char *Field_varstring::pack(char *to, const char *from, uint max_length) } -char *Field_varstring::pack_key(char *to, const char *from, uint max_length) -{ - uint length=uint2korr(from); - uint char_length= ((field_charset->mbmaxlen > 1) ? 
- max_length/field_charset->mbmaxlen : max_length); - from+= HA_KEY_BLOB_LENGTH; - if (length > char_length) - { - char_length= my_charpos(field_charset, from, from+length, char_length); - set_if_smaller(length, char_length); - } - *to++= (char) (length & 255); - if (max_length > 255) - *to++= (char) (length >> 8); - if (length) - memcpy(to, from, length); - return to+length; -} - +/* + unpack field packed with Field_varstring::pack() +*/ const char *Field_varstring::unpack(char *to, const char *from) { uint length; - if (field_length <= 255) - { + if (length_bytes == 1) length= (uint) (uchar) (*to= *from++); - to[1]=0; - } else { length= uint2korr(from); @@ -4859,7 +5019,7 @@ const char *Field_varstring::unpack(char *to, const char *from) to[1]= *from++; } if (length) - memcpy(to+HA_KEY_BLOB_LENGTH, from, length); + memcpy(to+ length_bytes, from, length); return from+length; } @@ -4867,12 +5027,11 @@ const char *Field_varstring::unpack(char *to, const char *from) int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update) { - uint a_length; - uint b_length; + uint a_length, b_length; if (key_length > 255) { - a_length=uint2korr(a); a+= HA_KEY_BLOB_LENGTH; - b_length=uint2korr(b); b+= HA_KEY_BLOB_LENGTH; + a_length=uint2korr(a); a+= 2; + b_length=uint2korr(b); b+= 2; } else { @@ -4889,8 +5048,8 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length, int Field_varstring::pack_cmp(const char *b, uint key_length, my_bool insert_or_update) { - char *a= ptr+HA_KEY_BLOB_LENGTH; - uint a_length= uint2korr(ptr); + char *a= ptr+ length_bytes; + uint a_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); uint b_length; uint char_length= ((field_charset->mbmaxlen > 1) ? key_length / field_charset->mbmaxlen : key_length); @@ -4919,7 +5078,7 @@ int Field_varstring::pack_cmp(const char *b, uint key_length, uint Field_varstring::packed_col_length(const char *data_ptr, uint length) { if (length > 255) - return uint2korr(data_ptr)+HA_KEY_BLOB_LENGTH; + return uint2korr(data_ptr)+2; return (uint) ((uchar) *data_ptr)+1; } @@ -4932,13 +5091,14 @@ uint Field_varstring::max_packed_col_length(uint max_length) void Field_varstring::get_key_image(char *buff, uint length, imagetype type) { - uint f_length= uint2korr(ptr); + uint f_length= length_bytes == 1 ? 
(uint) (uchar) *ptr : uint2korr(ptr); uint char_length= length / field_charset->mbmaxlen; - char_length= my_charpos(field_charset, ptr, ptr + HA_KEY_BLOB_LENGTH, + char_length= my_charpos(field_charset, ptr, ptr + length_bytes, char_length); set_if_smaller(f_length, char_length); + /* Key is always stored with 2 bytes */ int2store(buff,f_length); - memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+HA_KEY_BLOB_LENGTH, f_length); + memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+length_bytes, f_length); if (f_length < length) { /* @@ -4952,18 +5112,12 @@ void Field_varstring::get_key_image(char *buff, uint length, imagetype type) void Field_varstring::set_key_image(char *buff,uint length) { - length=uint2korr(buff); // Real length is here + length= uint2korr(buff); // Real length is here (void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length, field_charset); } -int Field_varstring::cmp_binary_offset(uint row_offset) -{ - return cmp_binary(ptr, ptr+row_offset); -} - - int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, uint32 max_length) { @@ -4971,13 +5125,49 @@ int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, uint diff; uint32 a_length,b_length; - a_length= uint2korr(a_ptr); - b_length= uint2korr(b_ptr); + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } set_if_smaller(a_length, max_length); set_if_smaller(b_length, max_length); if (a_length != b_length) return 1; - return memcmp(a_ptr+2, b_ptr+2, a_length); + return memcmp(a_ptr+length_bytes, b_ptr+length_bytes, a_length); +} + + +Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table) +{ + Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table); + if (res) + res->length_bytes= length_bytes; + return res; +} + + +Field *Field_varstring::new_key_field(MEM_ROOT *root, + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field_varstring *res; + if ((res= (Field_varstring*) Field::new_key_field(root, + new_table, + new_ptr, + new_null_ptr, + new_null_bit))) + { + /* Keys length prefixes are always packed with 2 bytes */ + res->length_bytes= 2; + } + return res; } @@ -5234,18 +5424,6 @@ int Field_blob::cmp(const char *a_ptr, const char *b_ptr) } -int Field_blob::cmp_offset(uint row_offset) -{ - return Field_blob::cmp(ptr,ptr+row_offset); -} - - -int Field_blob::cmp_binary_offset(uint row_offset) -{ - return cmp_binary(ptr, ptr+row_offset); -} - - int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr, uint32 max_length) { @@ -5432,8 +5610,7 @@ const char *Field_blob::unpack(char *to, const char *from) int Field_blob::pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update) { - uint a_length; - uint b_length; + uint a_length, b_length; if (key_length > 255) { a_length=uint2korr(a); a+=2; @@ -5539,6 +5716,7 @@ const char *Field_blob::unpack_key(char *to, const char *from, uint max_length) return from + length; } + /* Create a packed key that will be used for storage from a MySQL key */ char *Field_blob::pack_key_from_key_image(char *to, const char *from, @@ -6048,6 +6226,264 @@ bool Field_num::eq_def(Field *field) } +/* + Bit field. + + We store the first 0 - 6 uneven bits among the null bits + at the start of the record. The rest bytes are stored in + the record itself. 
+ + For example: + + CREATE TABLE t1 (a int, b bit(17), c bit(21) not null, d bit(8)); + We would store data as follows in the record: + + Byte Bit + 1 7 - reserve for delete + 6 - null bit for 'a' + 5 - null bit for 'b' + 4 - first (high) bit of 'b' + 3 - first (high) bit of 'c' + 2 - second bit of 'c' + 1 - third bit of 'c' + 0 - forth bit of 'c' + 2 7 - firth bit of 'c' + 6 - null bit for 'd' + 3 - 6 four bytes for 'a' + 7 - 8 two bytes for 'b' + 9 - 10 two bytes for 'c' + 11 one byte for 'd' +*/ + +Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg) + : Field(ptr_arg, len_arg >> 3, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg), + bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7) +{ + /* + Ensure that Field::eq() can distinguish between two different bit fields. + (two bit fields that are not null, may have same ptr and null_ptr) + */ + if (!null_ptr_arg) + null_bit= bit_ofs_arg; +} + + +Field *Field_bit::new_key_field(MEM_ROOT *root, + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field_bit *res; + if ((res= (Field_bit*) Field::new_key_field(root, new_table, + new_ptr, new_null_ptr, + new_null_bit))) + { + /* Move bits normally stored in null_pointer to new_ptr */ + res->bit_ptr= (uchar*) new_ptr; + res->bit_ofs= 0; + if (bit_len) + res->ptr++; // Store rest of data here + } + return res; +} + + +void Field_bit::make_field(Send_field *field) +{ + /* table_cache_key is not set for temp tables */ + field->db_name= (orig_table->table_cache_key ? orig_table->table_cache_key : + ""); + field->org_table_name= orig_table->real_name; + field->table_name= orig_table->table_name; + field->col_name= field->org_col_name= field_name; + field->charsetnr= charset()->number; + field->length= field_length; + field->type= type(); + field->flags= table->maybe_null ? 
(flags & ~NOT_NULL_FLAG) : flags; + field->decimals= 0; +} + + +int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs) +{ + int delta; + + for (; !*from && length; from++, length--); // skip left 0's + delta= field_length - length; + + if (delta < -1 || + (delta == -1 && (uchar) *from > ((1 << bit_len) - 1)) || + (!bit_len && delta < 0)) + { + set_rec_bits(0xff, bit_ptr, bit_ofs, bit_len); + memset(ptr, 0xff, field_length); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + return 1; + } + /* delta is >= -1 here */ + if (delta > 0) + { + if (bit_len) + clr_rec_bits(bit_ptr, bit_ofs, bit_len); + bzero(ptr, delta); + memcpy(ptr + delta, from, length); + } + else if (delta == 0) + { + if (bit_len) + clr_rec_bits(bit_ptr, bit_ofs, bit_len); + memcpy(ptr, from, length); + } + else + { + if (bit_len) + { + set_rec_bits((uchar) *from, bit_ptr, bit_ofs, bit_len); + from++; + } + memcpy(ptr, from, field_length); + } + return 0; +} + + +int Field_bit::store(double nr) +{ + return (Field_bit::store((longlong) nr)); +} + + +int Field_bit::store(longlong nr) +{ + char buf[8]; + + mi_int8store(buf, nr); + return store(buf, 8, NULL); +} + + +double Field_bit::val_real(void) +{ + return (double) Field_bit::val_int(); +} + + +longlong Field_bit::val_int(void) +{ + ulonglong bits= 0; + if (bit_len) + bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + bits<<= (field_length * 8); + + switch (field_length) { + case 0: return bits; + case 1: return bits | (ulonglong) (uchar) ptr[0]; + case 2: return bits | mi_uint2korr(ptr); + case 3: return bits | mi_uint3korr(ptr); + case 4: return bits | mi_uint4korr(ptr); + case 5: return bits | mi_uint5korr(ptr); + case 6: return bits | mi_uint6korr(ptr); + case 7: return bits | mi_uint7korr(ptr); + default: return mi_uint8korr(ptr + field_length - sizeof(longlong)); + } +} + + +String *Field_bit::val_str(String *val_buffer, + String *val_ptr __attribute__((unused))) +{ + uint length= min(pack_length(), sizeof(longlong)); + ulonglong bits= val_int(); + + val_buffer->alloc(length); + memcpy_fixed((char*) val_buffer->ptr(), (char*) &bits, length); + val_buffer->length(length); + val_buffer->set_charset(&my_charset_bin); + return val_buffer; +} + + +int Field_bit::key_cmp(const byte *str, uint length) +{ + if (bit_len) + { + int flag; + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + if ((flag= (int) (bits - *(uchar*) str))) + return flag; + str++; + length--; + } + return bcmp(ptr, str, length); +} + + +int Field_bit::cmp_offset(uint row_offset) +{ + if (bit_len) + { + int flag; + uchar bits_a= get_rec_bits(bit_ptr, bit_ofs, bit_len); + uchar bits_b= get_rec_bits(bit_ptr + row_offset, bit_ofs, bit_len); + if ((flag= (int) (bits_a - bits_b))) + return flag; + } + return bcmp(ptr, ptr + row_offset, field_length); +} + + +void Field_bit::get_key_image(char *buff, uint length, imagetype type) +{ + if (bit_len) + { + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + *buff++= bits; + length--; + } + memcpy(buff, ptr, min(length, field_length)); +} + + +void Field_bit::sql_type(String &res) const +{ + CHARSET_INFO *cs= res.charset(); + ulong length= cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(), + "bit(%d)", + (int) field_length * 8 + bit_len); + res.length((uint) length); +} + + +char *Field_bit::pack(char *to, const char *from, uint max_length) +{ + uint length= min(field_length + (bit_len > 0), max_length); + if (bit_len) + { + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + *to++= bits; + length--; + } + 
memcpy(to, from, length); + return to + length; +} + + +const char *Field_bit::unpack(char *to, const char *from) +{ + if (bit_len) + { + set_rec_bits(*from, bit_ptr, bit_ofs, bit_len); + from++; + } + memcpy(to, from, field_length); + return from + field_length; +} + + /***************************************************************************** Handling of field and create_field *****************************************************************************/ @@ -6063,15 +6499,22 @@ void create_field::create_length_to_internal_length(void) case MYSQL_TYPE_STRING: case MYSQL_TYPE_VARCHAR: length*= charset->mbmaxlen; - key_length*= charset->mbmaxlen; + key_length= length; pack_length= calc_pack_length(sql_type, length); break; case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: + /* Pack_length already calculated in sql_parse.cc */ length*= charset->mbmaxlen; + key_length= pack_length; + break; + case MYSQL_TYPE_BIT: + pack_length= calc_pack_length(sql_type, length); + /* We need one extra byte to store the bits we save among the null bits */ + key_length= pack_length+ test(length & 7); break; default: - /* do nothing */ + key_length= pack_length= calc_pack_length(sql_type, length); break; } } @@ -6102,7 +6545,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: case FIELD_TYPE_DECIMAL: return (length); - case MYSQL_TYPE_VARCHAR: return (length+HA_KEY_BLOB_LENGTH); + case MYSQL_TYPE_VARCHAR: return (length + (length < 256 ? 1: 2)); case FIELD_TYPE_YEAR: case FIELD_TYPE_TINY : return 1; case FIELD_TYPE_SHORT : return 2; @@ -6124,6 +6567,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) case FIELD_TYPE_GEOMETRY: return 4+portable_sizeof_char_ptr; case FIELD_TYPE_SET: case FIELD_TYPE_ENUM: abort(); return 0; // This shouldn't happen + case FIELD_TYPE_BIT: return length / 8; default: return 0; } return 0; // Keep compiler happy @@ -6154,11 +6598,30 @@ Field *make_field(char *ptr, uint32 field_length, const char *field_name, struct st_table *table) { + uchar *bit_ptr; + uchar bit_offset; + LINT_INIT(bit_ptr); + LINT_INIT(bit_offset); + if (field_type == FIELD_TYPE_BIT) + { + bit_ptr= null_pos; + bit_offset= null_bit; + if (f_maybe_null(pack_flag)) // if null field + { + bit_ptr+= (null_bit == 7); // shift bit_ptr and bit_offset + bit_offset= (bit_offset + 1) & 7; + } + } + if (!f_maybe_null(pack_flag)) { null_pos=0; null_bit=0; } + else + { + null_bit= ((uchar) 1) << null_bit; + } switch (field_type) { @@ -6182,7 +6645,9 @@ Field *make_field(char *ptr, uint32 field_length, unireg_check, field_name, table, field_charset); if (field_type == MYSQL_TYPE_VARCHAR) - return new Field_varstring(ptr,field_length,null_pos,null_bit, + return new Field_varstring(ptr,field_length, + HA_VARCHAR_PACKLENGTH(field_length), + null_pos,null_bit, unireg_check, field_name, table, field_charset); return 0; // Error @@ -6280,6 +6745,9 @@ Field *make_field(char *ptr, uint32 field_length, unireg_check, field_name, table, field_charset); case FIELD_TYPE_NULL: return new Field_null(ptr,field_length,unireg_check,field_name,table, field_charset); + case FIELD_TYPE_BIT: + return new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr, + bit_offset, unireg_check, field_name, table); default: // Impossible (Wrong version) break; } @@ -6329,15 +6797,17 @@ create_field::create_field(Field *old_field,Field *orig_field) case MYSQL_TYPE_SET: case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: - /* These are corrected in create_length_to_internal_length */ 
+ /* This is corrected in create_length_to_internal_length */ length= (length+charset->mbmaxlen-1) / charset->mbmaxlen; - key_length/= charset->mbmaxlen; break; #ifdef HAVE_SPATIAL case FIELD_TYPE_GEOMETRY: geom_type= ((Field_geom*)old_field)->geom_type; break; #endif + case FIELD_TYPE_BIT: + length= ((Field_bit *) old_field)->bit_len + length * 8; + break; default: break; } diff --git a/sql/field.h b/sql/field.h index e2411fb9400..6ce5cf2a526 100644 --- a/sql/field.h +++ b/sql/field.h @@ -80,7 +80,7 @@ public: FIELD_CAST_TIMESTAMP, FIELD_CAST_YEAR, FIELD_CAST_DATE, FIELD_CAST_NEWDATE, FIELD_CAST_TIME, FIELD_CAST_DATETIME, FIELD_CAST_STRING, FIELD_CAST_VARSTRING, FIELD_CAST_BLOB, - FIELD_CAST_GEOM, FIELD_CAST_ENUM, FIELD_CAST_SET + FIELD_CAST_GEOM, FIELD_CAST_ENUM, FIELD_CAST_SET, FIELD_CAST_BIT }; utype unireg_check; @@ -113,9 +113,14 @@ public: This trickery is used to decrease a number of malloc calls. */ virtual String *val_str(String*,String *)=0; + String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_flag); virtual Item_result result_type () const=0; virtual Item_result cmp_type () const { return result_type(); } - bool eq(Field *field) { return ptr == field->ptr && null_ptr == field->null_ptr; } + bool eq(Field *field) + { + return (ptr == field->ptr && null_ptr == field->null_ptr && + null_bit == field->null_bit); + } virtual bool eq_def(Field *field); virtual uint32 pack_length() const { return (uint32) field_length; } virtual void reset(void) { bzero(ptr,pack_length()); } @@ -139,10 +144,9 @@ public: virtual int cmp(const char *,const char *)=0; virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L) { return memcmp(a,b,pack_length()); } - virtual int cmp_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } - virtual int cmp_binary_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } + int cmp_offset(uint row_offset) { return cmp(ptr,ptr+row_offset); } + int cmp_binary_offset(uint row_offset) + { return cmp_binary(ptr, ptr+row_offset); }; virtual int key_cmp(const byte *a,const byte *b) { return cmp((char*) a,(char*) b); } virtual int key_cmp(const byte *str, uint length) @@ -185,7 +189,10 @@ public: virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table); - inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) + virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); + virtual void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) { ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg; } @@ -211,6 +218,15 @@ public: ptr-=row_offset; return tmp; } + + inline String *val_str(String *str, char *new_ptr) + { + char *old_ptr= ptr; + ptr= new_ptr; + val_str(str); + ptr= old_ptr; + return str; + } virtual bool send_binary(Protocol *protocol); virtual char *pack(char* to, const char *from, uint max_length=~(uint) 0) { @@ -926,26 +942,31 @@ public: class Field_varstring :public Field_str { public: - Field_varstring(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, + /* Store number of bytes used to store length (1 or 2) */ + uint32 length_bytes; + Field_varstring(char *ptr_arg, + uint32 len_arg, uint length_bytes_arg, + uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, 
len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, cs) + unireg_check_arg, field_name_arg, table_arg, cs), + length_bytes(length_bytes_arg) {} Field_varstring(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg, cs) + NONE, field_name_arg, table_arg, cs), + length_bytes(len_arg < 256 ? 1 :2) {} enum_field_types type() const { return MYSQL_TYPE_VARCHAR; } - enum ha_base_keytype key_type() const - { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + enum ha_base_keytype key_type() const; bool zero_pack() const { return 0; } - void reset(void) { bzero(ptr,field_length+2); } - uint32 pack_length() const { return (uint32) field_length+2; } + void reset(void) { bzero(ptr,field_length+length_bytes); } + uint32 pack_length() const { return (uint32) field_length+length_bytes; } uint32 key_length() const { return (uint32) field_length; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(longlong nr); @@ -960,12 +981,13 @@ public: void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); char *pack_key(char *to, const char *from, uint max_length); + char *pack_key_from_key_image(char* to, const char *from, uint max_length); const char *unpack(char* to, const char *from); + const char *unpack_key(char* to, const char *from, uint max_length); int pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update); int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); - int cmp_binary_offset(uint row_offset); int key_cmp(const byte *,const byte*); int key_cmp(const byte *str, uint length); uint packed_col_length(const char *to, uint length); @@ -975,6 +997,10 @@ public: bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } field_cast_enum field_cast_type() { return FIELD_CAST_VARSTRING; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); }; @@ -997,7 +1023,7 @@ public: } enum_field_types type() const { return FIELD_TYPE_BLOB;} enum ha_base_keytype key_type() const - { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + { return binary() ? 
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); @@ -1006,9 +1032,7 @@ public: String *val_str(String*,String *); int cmp(const char *,const char*); int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length); - int cmp_offset(uint offset); int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); - int cmp_binary_offset(uint row_offset); int key_cmp(const byte *,const byte*); int key_cmp(const byte *str, uint length); uint32 key_length() const { return 0; } @@ -1055,9 +1079,9 @@ public: return 0; } char *pack(char *to, const char *from, uint max_length= ~(uint) 0); - const char *unpack(char *to, const char *from); char *pack_key(char *to, const char *from, uint max_length); char *pack_key_from_key_image(char* to, const char *from, uint max_length); + const char *unpack(char *to, const char *from); const char *unpack_key(char* to, const char *from, uint max_length); int pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update); @@ -1092,7 +1116,7 @@ public: :Field_blob(len_arg, maybe_null_arg, field_name_arg, table_arg, &my_charset_bin) { geom_type= geom_type_arg; } - enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; } enum_field_types type() const { return FIELD_TYPE_GEOMETRY; } void sql_type(String &str) const; int store(const char *to, uint length, CHARSET_INFO *charset); @@ -1173,6 +1197,52 @@ public: }; +class Field_bit :public Field { +public: + uchar *bit_ptr; // position in record where 'uneven' bits store + uchar bit_ofs; // offset to 'uneven' high bits + uint bit_len; // number of 'uneven' high bits + Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg); + enum_field_types type() const { return FIELD_TYPE_BIT; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; } + uint32 key_length() const { return (uint32) field_length + (bit_len > 0); } + uint32 max_length() { return (uint32) field_length + (bit_len > 0); } + uint size_of() const { return sizeof(*this); } + Item_result result_type () const { return INT_RESULT; } + void make_field(Send_field *); + void reset(void) { bzero(ptr, field_length); } + int store(const char *to, uint length, CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); + double val_real(void); + longlong val_int(void); + String *val_str(String*, String *); + int cmp(const char *a, const char *b) + { return cmp_binary(a, b); } + int key_cmp(const byte *a, const byte *b) + { return cmp_binary(a, b); } + int key_cmp(const byte *str, uint length); + int cmp_offset(uint row_offset); + void get_key_image(char *buff, uint length, imagetype type); + void set_key_image(char *buff, uint length) + { Field_bit::store(buff, length, &my_charset_bin); } + void sort_string(char *buff, uint length) + { get_key_image(buff, length, itRAW); } + uint32 pack_length() const + { return (uint32) field_length + (bit_len > 0); } + void sql_type(String &str) const; + field_cast_enum field_cast_type() { return FIELD_CAST_BIT; } + char *pack(char *to, const char *from, uint max_length=~(uint) 0); + const char *unpack(char* to, const char *from); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint 
new_null_bit); +}; + + /* Create field class for CREATE TABLE */ diff --git a/sql/field_conv.cc b/sql/field_conv.cc index f6cc851639a..9fd4f0228b3 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -305,7 +305,8 @@ static void do_field_string(Copy_field *copy) char buff[MAX_FIELD_WIDTH]; copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset()); copy->from_field->val_str(©->tmp); - copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(),copy->tmp.charset()); + copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(), + copy->tmp.charset()); } @@ -350,7 +351,23 @@ static void do_expand_string(Copy_field *copy) copy->to_length-copy->from_length, ' '); } -static void do_varstring(Copy_field *copy) + +static void do_varstring1(Copy_field *copy) +{ + uint length= (uint) *(uchar*) copy->from_ptr; + if (length > copy->to_length- 1) + { + length=copy->to_length - 1; + if (current_thd->count_cuted_fields) + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); + } + *(uchar*) copy->to_ptr= (uchar) length; + memcpy(copy->to_ptr+1, copy->from_ptr + 1, length); +} + + +static void do_varstring2(Copy_field *copy) { uint length=uint2korr(copy->from_ptr); if (length > copy->to_length- HA_KEY_BLOB_LENGTH) @@ -485,6 +502,9 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else { + if (to->real_type() == FIELD_TYPE_BIT || + from->real_type() == FIELD_TYPE_BIT) + return do_field_int; // Check if identical fields if (from->result_type() == STRING_RESULT) { @@ -505,9 +525,15 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else if (to->charset() != from->charset()) return do_field_string; - else if (to->real_type() == MYSQL_TYPE_VARCHAR && to_length != - from_length) - return do_varstring; + else if (to->real_type() == MYSQL_TYPE_VARCHAR) + { + if (((Field_varstring*) to)->length_bytes != + ((Field_varstring*) from)->length_bytes) + return do_field_string; + if (to_length != from_length) + return (((Field_varstring*) to)->length_bytes == 1 ? + do_varstring1 : do_varstring2); + } else if (to_length < from_length) return do_cut_string; else if (to_length > from_length) @@ -587,6 +613,12 @@ void field_conv(Field *to,Field *from) char buff[MAX_FIELD_WIDTH]; String result(buff,sizeof(buff),from->charset()); from->val_str(&result); + /* + We use c_ptr_quick() here to make it easier if to is a float/double + as the conversion routines will do a copy of the result doesn't + end with \0. Can be replaced with .ptr() when we have our own + string->double conversion. + */ to->store(result.c_ptr_quick(),result.length(),from->charset()); } else if (from->result_type() == REAL_RESULT) diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 6cb83624eff..322126ff47b 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -356,7 +356,8 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const } switch (table->key_info[idx].key_part[i].field->key_type()) { case HA_KEYTYPE_TEXT: - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: /* As BDB stores only one copy of equal strings, we can't use key read on these. Binary collations do support key read though. 
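do_varstring1() and do_varstring2() above differ only in the width of the length prefix they read and write: one byte for VARCHARs shorter than 256 bytes, two bytes otherwise, matching Field_varstring::length_bytes. Here is a self-contained sketch of the same copy-with-truncation idea, with the uint2korr/int2store macros replaced by plain little-endian helpers (illustrative code only, not the server's implementation):

  #include <cstring>
  #include <cstdio>

  // stand-ins for MySQL's uint2korr / int2store macros
  static unsigned read2(const unsigned char *p) { return p[0] | (p[1] << 8); }
  static void store2(unsigned char *p, unsigned v)
  { p[0]= (unsigned char) (v & 0xff); p[1]= (unsigned char) ((v >> 8) & 0xff); }

  // Copy a length-prefixed VARCHAR value into a destination that can hold
  // 'to_size' data bytes, truncating if needed. prefix_bytes is 1 or 2,
  // the same distinction Field_varstring::length_bytes makes.
  static bool copy_varstring(unsigned char *to, unsigned to_size,
                             const unsigned char *from, int prefix_bytes)
  {
    unsigned length= (prefix_bytes == 1) ? from[0] : read2(from);
    bool truncated= false;
    if (length > to_size)               // would raise ER_WARN_DATA_TRUNCATED
    {
      length= to_size;
      truncated= true;
    }
    if (prefix_bytes == 1)
      to[0]= (unsigned char) length;
    else
      store2(to, length);
    std::memcpy(to + prefix_bytes, from + prefix_bytes, length);
    return truncated;
  }

  int main() {
    unsigned char from[6]= {5, 'h', 'e', 'l', 'l', 'o'};   // 1-byte prefix
    unsigned char to[4];
    bool warn= copy_varstring(to, 3, from, 1);             // only room for 3
    std::printf("copied %u bytes, truncated=%d\n", (unsigned) to[0], warn);
  }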
@@ -391,6 +392,7 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; uint key_length=new_key->size; + DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size); for (; key_part != end && (int) key_length > 0; key_part++) { int cmp; @@ -745,11 +747,11 @@ void ha_berkeley::unpack_row(char *record, DBT *row) void ha_berkeley::unpack_key(char *record, DBT *key, uint index) { - KEY *key_info=table->key_info+index; + KEY *key_info= table->key_info+index; KEY_PART_INFO *key_part= key_info->key_part, - *end=key_part+key_info->key_parts; + *end= key_part+key_info->key_parts; + char *pos= (char*) key->data; - char *pos=(char*) key->data; for (; key_part != end; key_part++) { if (key_part->null_bit) @@ -773,8 +775,10 @@ void ha_berkeley::unpack_key(char *record, DBT *key, uint index) /* - Create a packed key from from a row - This will never fail as the key buffer is pre allocated. + Create a packed key from a row. This key will be written as such + to the index tree. + + This will never fail as the key buffer is pre-allocated. */ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, @@ -820,7 +824,10 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, /* - Create a packed key from from a MySQL unpacked key + Create a packed key from from a MySQL unpacked key (like the one that is + sent from the index_read() + + This key is to be used to read a row */ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff, @@ -1457,7 +1464,7 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row, int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; DBUG_ENTER("index_read_idx"); current_row.flags=DB_DBT_REALLOC; active_index=MAX_KEY; @@ -1476,10 +1483,9 @@ int ha_berkeley::index_read(byte * buf, const byte * key, int error; KEY *key_info= &table->key_info[active_index]; int do_prev= 0; - DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; bzero((char*) &row,sizeof(row)); if (find_flag == HA_READ_BEFORE_KEY) { @@ -1679,6 +1685,7 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos) pos+=key_part->field->packed_col_length((char*) pos,key_part->length); to->size= (uint) (pos- (byte*) to->data); } + DBUG_DUMP("key", (char*) to->data, to->size); return to; } diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc new file mode 100755 index 00000000000..3118833a47e --- /dev/null +++ b/sql/ha_federated.cc @@ -0,0 +1,1722 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + + MySQL Federated Storage Engine + + ha_federated.cc - MySQL Federated Storage Engine + Patrick Galbraith and Brian Aker, 2004 + + This is a handler which uses a remote database as the data file, as + opposed to a handler like MyISAM, which uses .MYD files locally. + + How this handler works + ---------------------------------- + Normal database files are local: when you create a table called + 'users', a file such as 'users.MYD' is created. A handler reads, inserts, + deletes and updates data in this file. The data is stored in a particular + format, so to read it the data has to be parsed into fields, and to write + it the fields have to be stored in this format in the data file. + + With the MySQL Federated storage engine, there are no local files for each + table's data (such as .MYD). A remote database stores the data that would + normally be in this file. This necessitates the use of the MySQL client API + to read, delete, update and insert the data. The data has to be retrieved + via an SQL call "SELECT * FROM users". Then, to read this data, it has + to be fetched via mysql_fetch_row one row at a time, and converted from + the columns of this select into the format that the handler expects. + + The create table will simply create the .frm file, and within the + "CREATE TABLE" SQL, there SHALL be one of the following: + + comment=scheme://username:password@hostname:port/database/tablename + comment=scheme://username@hostname/database/tablename + comment=scheme://username:password@hostname/database/tablename + comment=scheme://username:password@hostname/database/tablename + + An example would be: + + comment=mysql://username:password@hostname:port/database/tablename + + ***IMPORTANT*** + + Only 'mysql://' is supported in this release. + + + This comment connection string is necessary for the handler to be + able to connect to the remote server. + + + The basic flow is this: + + SQL calls issued locally -> + mysql handler API (data in handler format) -> + mysql client API (data converted to SQL calls) -> + remote database -> mysql client API -> + convert result sets (if any) to handler format -> + handler API -> results or rows affected returned locally + + What this handler does and doesn't support + ------------------------------------------ + * Tables MUST be created on the remote server prior to any action on those + tables via the handler, first version. IMPORTANT: IF you MUST use the + federated storage engine type on the REMOTE end, MAKE SURE [ :) ] that + the table you connect to IS NOT a table pointing BACK to your ORIGINAL + table! You know and have heard the screeching of audio feedback? You + know how, with two mirrors facing each other, the reflection + continues for eternity? Well, need I say more?! + * There will not be support for transactions. + * There is no way for the handler to know if the database on the remote end + has changed. The reason for this is that this database has to work like a + data file that would never be written to by anything other than the + database. The integrity of the data in the local table could be breached + if there was any change to the remote database. + * Support for SELECT, INSERT, UPDATE, DELETE and indexes. + * No ALTER TABLE, DROP TABLE or any other Data Definition Language calls.
+ * Prepared statements will not be used in the first implementation; it + remains to be seen whether the limited subset of the client API for the + server supports this. + * This uses SELECT, INSERT, UPDATE, DELETE and not HANDLER for its + implementation. + * This will not work with the query cache.
+ + Method calls + + A two column table, with one record: + + (SELECT) + + "SELECT * FROM foo" + ha_federated::info + ha_federated::scan_time: + ha_federated::rnd_init: share->select_query SELECT * FROM foo + ha_federated::extra + + + ha_federated::rnd_next + ha_federated::convert_row_to_internal_format + ha_federated::rnd_next + + + ha_federated::rnd_end + ha_federated::extra + ha_federated::reset
+ + (INSERT) + + "INSERT INTO foo (id, ts) VALUES (2, now());" + + ha_federated::write_row + + + ha_federated::quote_data + ha_federated::quote_data + + + ha_federated::reset
+ + (UPDATE) + + "UPDATE foo SET ts = now() WHERE id = 1;" + + ha_federated::index_init + ha_federated::index_read + ha_federated::index_read_idx + ha_federated::quote_data + ha_federated::rnd_next + ha_federated::convert_row_to_internal_format + ha_federated::update_row + + + + + ha_federated::extra + ha_federated::extra + ha_federated::extra + ha_federated::external_lock + ha_federated::reset
+ + + How do I use this handler? + -------------------------- + First of all, you need to build this storage engine: + + ./configure --with-federated-storage-engine + make + + Next, to use this handler, it's very simple. You must + have two databases running, either both on the same host, or + on different hosts.
+ + On the server that will be connecting to the remote + host (client), you create your table as such: + + CREATE TABLE test_table ( + id int(20) NOT NULL auto_increment, + name varchar(32) NOT NULL default '', + other int(20) NOT NULL default '0', + PRIMARY KEY (id), + KEY name (name), + KEY other_key (other)) + ENGINE="FEDERATED" + DEFAULT CHARSET=latin1 + COMMENT='root@127.0.0.1:9306/federated/test_federated';
+ + Notice the "COMMENT" and "ENGINE" fields? This is where you + respectively set the engine type, "FEDERATED", and the remote + host information, this being the database your 'client' database + will connect to and use as the "data file". Obviously, the remote + database is running on port 9306, so you want to start up your other + database so that it is indeed on port 9306, and your federated + database on a port other than that. In my setup, I use port 5554 + for federated, and port 5555 for the remote.
+ + Then, on the remote database: + + CREATE TABLE test_table ( + id int(20) NOT NULL auto_increment, + name varchar(32) NOT NULL default '', + other int(20) NOT NULL default '0', + PRIMARY KEY (id), + KEY name (name), + KEY other_key (other)) + ENGINE="" <-- whatever you want, or leave unspecified + DEFAULT CHARSET=latin1;
+ + This table is exactly the same (and must be exactly the same), + except that it is not using the federated handler and does + not need the URL.
+ + + How to see the handler in action + -------------------------------- + + When developing this handler, I compiled the federated database with + debugging: + + ./configure --with-federated-storage-engine + --prefix=/home/mysql/mysql-build/federated/ --with-debug + + Once compiled, I did a 'make install' (not for the purpose of installing + the binary, but to install all the files the binary expects to see in the + directory I specified in the build with --prefix, + "/home/mysql/mysql-build/federated").
+ + Then, I started the remote server: + + /usr/local/mysql/bin/mysqld_safe + --user=mysql --log=/tmp/mysqld.5555.log -P 5555 + + Then, I went back to the directory containing the newly compiled mysqld, + /sql/, started up gdb: + + gdb ./mysqld + + Then, within the (gdb) prompt: + (gdb) run --gdb --port=5554 --socket=/tmp/mysqld.5554 --skip-innodb --debug
+ + Next, I opened several windows for each: + + 1. Tail the debug trace: tail -f /tmp/mysqld.trace|grep ha_fed + 2. Tail the SQL calls to the remote database: tail -f /tmp/mysqld.5555.log + 3. A window with a client open to the federated server on port 5554 + 4. A window with a client open to the remote server on port 5555
+ + I would create a table on the client to the remote server on port + 5555, and then to the federated server on port 5554. At this point, + I would run whatever queries I wanted to on the federated server, + just always remembering that whatever changes I wanted to make on + the table, or if I created new tables, that I would have to do that + on the remote server.
+ + Another thing to look for is 'show variables', to show you that you have + support for the federated handler: + + show variables like '%federat%' + + and: + + show storage engines; + + Both should display the federated storage handler.
+ + + Testing + ------- + + There is a test for the MySQL Federated Storage Handler in ./mysql-test/t, + federated.test. It starts both a slave and master database using + the same setup that the replication tests use, with the exception that + it turns off replication, and sets replication to ignore the test tables. + After ensuring that you actually do have support for the federated storage + handler, numerous queries/inserts/updates/deletes are run, many derived + from the MyISAM tests, plus some other tests which were meant to reveal + any issues that would be most likely to affect this handler. All tests + should work!
;) + + To run these tests, go into ./mysql-test (based in the directory you + built the server in) + + ./mysql-test-run federatedd + + To run the test, or if you want to run the test and have debug info: + + ./mysql-test-run --debug federated + + This will run the test in debug mode, and you can view the trace and + log files in the ./mysql-test/var/log directory + + ls -l mysql-test/var/log/ + -rw-r--r-- 1 patg patg 17 4 Dec 12:27 current_test + -rw-r--r-- 1 patg patg 692 4 Dec 12:52 manager.log + -rw-rw---- 1 patg patg 21246 4 Dec 12:51 master-bin.000001 + -rw-rw---- 1 patg patg 68 4 Dec 12:28 master-bin.index + -rw-r--r-- 1 patg patg 1620 4 Dec 12:51 master.err + -rw-rw---- 1 patg patg 23179 4 Dec 12:51 master.log + -rw-rw---- 1 patg patg 16696550 4 Dec 12:51 master.trace + -rw-r--r-- 1 patg patg 0 4 Dec 12:28 mysqltest-time + -rw-r--r-- 1 patg patg 2024051 4 Dec 12:51 mysqltest.trace + -rw-rw---- 1 patg patg 94992 4 Dec 12:51 slave-bin.000001 + -rw-rw---- 1 patg patg 67 4 Dec 12:28 slave-bin.index + -rw-rw---- 1 patg patg 249 4 Dec 12:52 slave-relay-bin.000003 + -rw-rw---- 1 patg patg 73 4 Dec 12:28 slave-relay-bin.index + -rw-r--r-- 1 patg patg 1349 4 Dec 12:51 slave.err + -rw-rw---- 1 patg patg 96206 4 Dec 12:52 slave.log + -rw-rw---- 1 patg patg 15706355 4 Dec 12:51 slave.trace + -rw-r--r-- 1 patg patg 0 4 Dec 12:51 warnings + + Of course, again, you can tail the trace log: + + tail -f mysql-test/var/log/master.trace |grep ha_fed + + As well as the slave query log: + + tail -f mysql-test/var/log/slave.log + + Files that comprise the test suit + --------------------------------- + mysql-test/t/federated.test + mysql-test/r/federated.result + mysql-test/r/have_federated_db.require + mysql-test/include/have_federated_db.inc + + + Other tidbits + ------------- + + These were the files that were modified or created for this + Federated handler to work: + + ./configure.in + ./sql/Makefile.am + ./config/ac_macros/ha_federated.m4 + ./sql/handler.cc + ./sql/mysqld.cc + ./sql/set_var.cc + ./sql/field.h + ./sql/sql_string.h + ./mysql-test/mysql-test-run(.sh) + ./mysql-test/t/federated.test + ./mysql-test/r/federated.result + ./mysql-test/r/have_federated_db.require + ./mysql-test/include/have_federated_db.inc + ./sql/ha_federated.cc + ./sql/ha_federated.h + +*/ + +#ifdef __GNUC__ +#pragma implementation // gcc: Class implementation +#endif + +#include + +#ifdef HAVE_FEDERATED_DB +#include "ha_federated.h" +#define MAX_REMOTE_SIZE IO_SIZE +/* Variables for federated share methods */ +static HASH federated_open_tables; // Hash used to track open tables +pthread_mutex_t federated_mutex; // This is the mutex we use to init the hash +static int federated_init= 0; // Variable for checking the init state of hash + +/* + Function we use in the creation of our hash to get key. +*/ +static byte* federated_get_key(FEDERATED_SHARE *share,uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= share->table_name_length; + return (byte*) share->table_name; +} + +/* + Parse connection info from table->comment + + SYNOPSIS + parse_url() + share pointer to FEDERATED share + table pointer to current TABLE class + + DESCRIPTION + populates the share with information about the connection + to the remote database that will serve as the data source. + This string must be specified (currently) in the "comment" field, + listed in the CREATE TABLE statement. 
+ + This string MUST be in the format of any of these: + +scheme://username:password@hostname:port/database/table +scheme://username@hostname/database/table +scheme://username@hostname:port/database/table +scheme://username:password@hostname/database/table + + An Example: + + mysql://joe:joespass@192.168.1.111:9308/federated/testtable + + ***IMPORTANT*** + Currently, only "mysql://" is supported. + + 'password' and 'port' are both optional. + + RETURN VALUE + 0 success + -1 failure, wrong string format + +*/ +int parse_url(FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) +{ + DBUG_ENTER("ha_federated::parse_url"); + + // This either get set or will remain the same. + share->port= 0; + uint error_num= table_create_flag ? ER_CANT_CREATE_TABLE : ER_CONNECT_TO_MASTER ; + + share->scheme= my_strdup(table->comment, MYF(0)); + + + if (share->username= strstr(share->scheme, "://")) + { + share->scheme[share->username - share->scheme] = '\0'; + if (strcmp(share->scheme, "mysql") != 0) + { + DBUG_PRINT("ha_federated::parse_url", + ("The federated handler currently only supports connecting\ + to a MySQL database!!!\n")); + my_error(error_num, MYF(0), + "ERROR: federated handler only supports remote 'mysql://' database"); + DBUG_RETURN(-1); + } + share->username+= 3; + + if (share->hostname= strchr(share->username, '@')) + { + share->username[share->hostname - share->username]= '\0'; + share->hostname++; + + if (share->password= strchr(share->username, ':')) + { + share->username[share->password - share->username]= '\0'; + share->password++; + share->username= share->username; + // make sure there isn't an extra / or @ + if (strchr(share->password, '/') || strchr(share->hostname, '@')) + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + /* + Found that if the string is: +user:@hostname:port/database/table +Then password is a null string, so set to NULL + */ + if (share->password[0] == '\0') + share->password= NULL; + } + else + share->username= share->username; + + // make sure there isn't an extra / or @ + if (strchr(share->username, '/') || strchr(share->hostname, '@')) + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + + if (share->database= strchr(share->hostname, '/')) + { + share->hostname[share->database - share->hostname]= '\0'; + share->database++; + + if (share->sport= strchr(share->hostname, ':')) + { + share->hostname[share->sport - share->hostname]= '\0'; + share->sport++; + if (share->sport[0] == '\0') + share->sport= NULL; + else + share->port= atoi(share->sport); + } + + if (share->table_base_name= strchr(share->database, '/')) + { + share->database[share->table_base_name - share->database]= '\0'; + share->table_base_name++; + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + // make sure there's not an extra / + 
if (strchr(share->table_base_name, '/')) + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + if (share->hostname[0] == '\0') + share->hostname= NULL; + + DBUG_PRINT("ha_federated::parse_url", + ("scheme %s username %s password %s \ + hostname %s port %d database %s tablename %s\n", + share->scheme, share->username, share->password, share->hostname, + share->port, share->database, share->table_base_name)); + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); +} + +/* + Convert MySQL result set row to handler internal format + + SYNOPSIS + convert_row_to_internal_format() + record Byte pointer to record + row MySQL result set row from fetchrow() + + DESCRIPTION + This method simply iterates through a row returned via fetchrow with + values from a successful SELECT , and then stores each column's value + in the field object via the field object pointer (pointing to the table's + array of field object pointers). This is how the handler needs the data + to be stored to then return results back to the user + + RETURN VALUE + 0 After fields have had field values stored from record + */ +uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row) +{ + unsigned long len; + int x= 0; + DBUG_ENTER("ha_federated::convert_row_to_internal_format"); + + // Question this + memset(record, 0, table->null_bytes); + + for (Field **field=table->field; *field ; field++, x++) + { + if (!row[x]) + (*field)->set_null(); + else + /* + changed system_charset_info to default_charset_info because + testing revealed that german text was not being retrieved properly + */ + (*field)->store(row[x], strlen(row[x]), &my_charset_bin); + } + + DBUG_RETURN(0); +} + +/* + SYNOPSIS + quote_data() + unquoted_string Pointer pointing to the value of a field + field MySQL Field pointer to field being checked for type + + DESCRIPTION + Simple method that passes the field type to the method "type_quote" + To get a true/false value as to whether the value in string1 needs + to be enclosed with quotes. 
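parse_url() above carves the comment string apart in place with strstr()/strchr(), NUL-terminating each piece as it finds it. For illustration only, the same split over the documented format scheme://username[:password]@hostname[:port]/database/table can be expressed with std::string; the names below (ConnParts, split_connection_string) are hypothetical and this is a simplified sketch, not the handler's code:

  #include <string>
  #include <cstdlib>

  struct ConnParts                 // hypothetical holder, mirrors FEDERATED_SHARE fields
  {
    std::string scheme, user, password, host, database, table;
    int port;
  };

  /* Returns false if the string is not of the form
     scheme://user[:password]@host[:port]/database/table */
  static bool split_connection_string(const std::string &s, ConnParts *out)
  {
    std::string::size_type p= s.find("://");
    if (p == std::string::npos)
      return false;
    out->scheme= s.substr(0, p);

    std::string rest= s.substr(p + 3);
    std::string::size_type at= rest.find('@');
    if (at == std::string::npos)
      return false;
    std::string userinfo= rest.substr(0, at);       // user[:password]
    std::string hostpart= rest.substr(at + 1);      // host[:port]/db/table

    std::string::size_type colon= userinfo.find(':');
    out->user= userinfo.substr(0, colon);
    if (colon != std::string::npos)
      out->password= userinfo.substr(colon + 1);

    std::string::size_type slash= hostpart.find('/');
    if (slash == std::string::npos)
      return false;
    std::string hostport= hostpart.substr(0, slash);
    std::string dbtable= hostpart.substr(slash + 1); // database/table

    colon= hostport.find(':');
    out->host= hostport.substr(0, colon);
    out->port= (colon == std::string::npos) ? 0
             : std::atoi(hostport.c_str() + colon + 1);

    slash= dbtable.find('/');
    if (slash == std::string::npos)
      return false;
    out->database= dbtable.substr(0, slash);
    out->table= dbtable.substr(slash + 1);
    return true;
  }

Fed the example string mysql://joe:joespass@192.168.1.111:9308/federated/testtable, this would yield scheme "mysql", user "joe", password "joespass", host "192.168.1.111", port 9308, database "federated" and table "testtable", matching the components parse_url() stores in the share.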
This ensures that values in the final + sql statement to be passed to the remote server will be quoted properly + + RETURN_VALUE + void Immediately - if string doesn't need quote + void Upon prepending/appending quotes on each side of variable + +*/ +void ha_federated::quote_data(String *unquoted_string, Field *field ) +{ + char escaped_string[IO_SIZE]; + char *unquoted_string_buffer; + + unquoted_string_buffer= unquoted_string->c_ptr_quick(); + + int quote_flag; + DBUG_ENTER("ha_federated::quote_data"); + // this is the same call that mysql_real_escape_string() calls + escape_string_for_mysql(&my_charset_bin, (char *)escaped_string, + unquoted_string->c_ptr_quick(), unquoted_string->length()); + + DBUG_PRINT("ha_federated::quote_data", + ("escape_string_for_mysql unescaped %s escaped %s", + unquoted_string->c_ptr_quick(), escaped_string)); + + if (field->is_null()) + { + DBUG_PRINT("ha_federated::quote_data", + ("NULL, no quoted needed for unquoted_string %s, returning.", + unquoted_string->c_ptr_quick())); + DBUG_VOID_RETURN; + } + + quote_flag= type_quote(field->type()); + + if (quote_flag == 0) + { + DBUG_PRINT("ha_federated::quote_data", + ("quote flag 0 no quoted needed for unquoted_string %s, returning.", + unquoted_string->c_ptr_quick())); + DBUG_VOID_RETURN; + } + else + { + // reset string, then re-append with quotes and escaped values + unquoted_string->length(0); + unquoted_string->append("'"); + unquoted_string->append((char *)escaped_string); + unquoted_string->append("'"); + } + DBUG_PRINT("ha_federated::quote_data", + ("FINAL quote_flag %d unquoted_string %s escaped_string %s", + quote_flag, unquoted_string->c_ptr_quick(), escaped_string)); + DBUG_VOID_RETURN; +} + +/* + Quote a field type if needed + + SYNOPSIS + ha_federated::type_quote + int field Enumerated field type number + + DESCRIPTION + Simple method to give true/false whether a field should be quoted. 
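quote_data() above defers to type_quote() (shown next) for the decision and then escapes the value and wraps it in single quotes only when the field type requires it. The same decide-escape-quote idea, reduced to a stand-alone sketch that uses the client library's mysql_real_escape_string() instead of the server-internal escape_string_for_mysql(); the helper names are illustrative only and the type list is deliberately simplified:

  #include <mysql.h>
  #include <string>
  #include <vector>

  // Does a value of this field type need to be wrapped in quotes?
  static bool needs_quotes(enum enum_field_types type)
  {
    switch (type)
    {
    case MYSQL_TYPE_TINY: case MYSQL_TYPE_SHORT: case MYSQL_TYPE_INT24:
    case MYSQL_TYPE_LONG: case MYSQL_TYPE_LONGLONG:
    case MYSQL_TYPE_FLOAT: case MYSQL_TYPE_DOUBLE:
    case MYSQL_TYPE_DECIMAL: case MYSQL_TYPE_NULL:
      return false;                    // numeric literals go in unquoted
    default:
      return true;                     // strings, dates, blobs get quotes
    }
  }

  // Escape 'val' against connection 'mysql' and quote it if the type needs it.
  static std::string quote_value(MYSQL *mysql, const std::string &val,
                                 enum enum_field_types type)
  {
    std::vector<char> buf(val.size() * 2 + 1);
    unsigned long len= mysql_real_escape_string(mysql, &buf[0], val.c_str(),
                                                (unsigned long) val.size());
    std::string escaped(&buf[0], len);
    return needs_quotes(type) ? "'" + escaped + "'" : escaped;
  }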
+ Used when constructing INSERT and UPDATE queries to the remote server + see write_row and update_row + + RETURN VALUE + 0 if value is of type NOT needing quotes + 1 if value is of type needing quotes +*/ +uint ha_federated::type_quote(int type) +{ + DBUG_ENTER("ha_federated::type_quote"); + DBUG_PRINT("ha_federated::type_quote", ("field type %d", type)); + + switch(type) { + //FIX this is a bug, fix when kernel is fixed + case MYSQL_TYPE_VARCHAR : + case FIELD_TYPE_STRING : + case FIELD_TYPE_VAR_STRING : + case FIELD_TYPE_YEAR : + case FIELD_TYPE_NEWDATE : + case FIELD_TYPE_TIME : + case FIELD_TYPE_TIMESTAMP : + case FIELD_TYPE_DATE : + case FIELD_TYPE_DATETIME : + case FIELD_TYPE_TINY_BLOB : + case FIELD_TYPE_BLOB : + case FIELD_TYPE_MEDIUM_BLOB : + case FIELD_TYPE_LONG_BLOB : + case FIELD_TYPE_GEOMETRY : + DBUG_RETURN(1); + + case FIELD_TYPE_DECIMAL : + case FIELD_TYPE_TINY : + case FIELD_TYPE_SHORT : + case FIELD_TYPE_INT24 : + case FIELD_TYPE_LONG : + case FIELD_TYPE_FLOAT : + case FIELD_TYPE_DOUBLE : + case FIELD_TYPE_LONGLONG : + case FIELD_TYPE_NULL : + case FIELD_TYPE_SET : + case FIELD_TYPE_ENUM : + DBUG_RETURN(0); + + default: DBUG_RETURN(0); + } + DBUG_RETURN(0); +} + +int load_conn_info(FEDERATED_SHARE *share, TABLE *table) +{ + DBUG_ENTER("ha_federated::load_conn_info"); + int retcode; + + retcode= parse_url(share, table, 0); + + if (retcode < 0) + { + DBUG_PRINT("ha_federated::load_conn_info", + ("retcode %d, setting defaults", retcode)); + /* sanity checks to make sure all needed pieces are present */ + if (!share->port) + { + if (strcmp(share->hostname, "localhost") == 0) + share->socket= my_strdup("/tmp/mysql.sock",MYF(0)); + else + share->port= 3306; + } + } + DBUG_PRINT("ha_federated::load_conn_info", + ("returned from retcode %d", retcode)); + + DBUG_RETURN(retcode); +} + +/* + Example of simple lock controls. The "share" it creates is structure we will + pass to each federated handler. Do you have to have one of these? Well, you + have pieces that are used for locking, and they are needed to function. +*/ +static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) +{ + FEDERATED_SHARE *share; + // FIX : need to redo + //String query; + char query_buffer[IO_SIZE]; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + query.length(0); + + uint table_name_length, table_base_name_length; + char *tmp_table_name, *tmp_table_base_name, *table_base_name, *select_query; + + // share->table_name has the file location - we want the actual table's + // name! + table_base_name= table->table_name; + DBUG_PRINT("ha_federated::get_share",("table_name %s", table_base_name)); + /* + So why does this exist? There is no way currently to init a storage engine. + Innodb and BDB both have modifications to the server to allow them to + do this. Since you will not want to do this, this is probably the next + best method. 
+ */ + if (!federated_init) + { + /* Hijack a mutex for init'ing the storage engine */ + pthread_mutex_lock(&LOCK_mysql_create_db); + if (!federated_init) + { + federated_init++; + VOID(pthread_mutex_init(&federated_mutex,MY_MUTEX_INIT_FAST)); + (void) hash_init(&federated_open_tables,system_charset_info,32,0,0, + (hash_get_key) federated_get_key,0,0); + } + pthread_mutex_unlock(&LOCK_mysql_create_db); + } + pthread_mutex_lock(&federated_mutex); + table_name_length= (uint) strlen(table_name); + table_base_name_length= (uint) strlen(table_base_name); + + if (!(share= (FEDERATED_SHARE*) hash_search(&federated_open_tables, + (byte*) table_name, + table_name_length))) + { + query.set_charset(system_charset_info); + query.append("SELECT * FROM "); + query.append(table_base_name); + + if (!(share= (FEDERATED_SHARE *) + my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), + &share, sizeof(*share), + &tmp_table_name, table_name_length+1, + &tmp_table_base_name, table_base_name_length+1, + &select_query, query.length()+1, + NullS))) + { + pthread_mutex_unlock(&federated_mutex); + return NULL; + } + + load_conn_info(share, table); + share->use_count= 0; + share->table_name_length= table_name_length; + share->table_name= tmp_table_name; + share->table_base_name_length= table_base_name_length; + share->table_base_name= tmp_table_base_name; + share->select_query= select_query; + strmov(share->table_name,table_name); + strmov(share->table_base_name,table_base_name); + strmov(share->select_query,query.c_ptr_quick()); + DBUG_PRINT("ha_federated::get_share",("share->select_query %s", share->select_query)); + if (my_hash_insert(&federated_open_tables, (byte*) share)) + goto error; + thr_lock_init(&share->lock); + pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + } + share->use_count++; + pthread_mutex_unlock(&federated_mutex); + + return share; + +error2: + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); +error: + pthread_mutex_unlock(&federated_mutex); + my_free((gptr) share, MYF(0)); + + return NULL; +} + + +/* + Free lock controls. We call this whenever we close a table. + If the table had the last reference to the share then we + free memory associated with it. +*/ +static int free_share(FEDERATED_SHARE *share) +{ + pthread_mutex_lock(&federated_mutex); + if (!--share->use_count) + { + hash_delete(&federated_open_tables, (byte*) share); + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); + my_free((gptr) share, MYF(0)); + } + pthread_mutex_unlock(&federated_mutex); + + return 0; +} + + +/* + If frm_error() is called then we will use this to to find out + what file extentions exist for the storage engine. This is + also used by the default rename_table and delete_table method + in handler.cc. +*/ +const char **ha_federated::bas_ext() const +{ static const char *ext[]= { NullS }; return ext; } + + +/* + Used for opening tables. The name will be the name of the file. + A table is opened when it needs to be opened. For instance + when a request comes in for a select on the table (tables are not + open and closed for each request, they are cached). + + Called from handler.cc by handler::ha_open(). The server opens + all tables by calling ha_open() which then calls the handler + specific open(). 
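get_share()/free_share() above amount to a reference-counted registry of per-table state, keyed by table name and guarded by a mutex so that concurrent handlers reuse a single share. The same pattern in a self-contained sketch with standard containers, purely illustrative and not the server's HASH/my_multi_malloc machinery:

  #include <map>
  #include <string>
  #include <pthread.h>

  struct DemoShare                       // stand-in for FEDERATED_SHARE
  {
    std::string table_name;
    std::string select_query;
    unsigned use_count;
  };

  static std::map<std::string, DemoShare*> open_shares;
  static pthread_mutex_t shares_mutex= PTHREAD_MUTEX_INITIALIZER;

  static DemoShare *demo_get_share(const std::string &name)
  {
    pthread_mutex_lock(&shares_mutex);
    DemoShare *&slot= open_shares[name];
    if (!slot)                           // first opener builds the share
    {
      slot= new DemoShare();
      slot->table_name= name;
      slot->select_query= "SELECT * FROM " + name;
      slot->use_count= 0;
    }
    slot->use_count++;                   // every opener takes a reference
    pthread_mutex_unlock(&shares_mutex);
    return slot;
  }

  static void demo_free_share(DemoShare *share)
  {
    pthread_mutex_lock(&shares_mutex);
    if (--share->use_count == 0)         // last closer tears it down
    {
      open_shares.erase(share->table_name);
      delete share;
    }
    pthread_mutex_unlock(&shares_mutex);
  }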
+*/ +int ha_federated::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_federated::open"); + int rc; + + if (!(share= get_share(name, table))) + DBUG_RETURN(1); + thr_lock_data_init(&share->lock,&lock,NULL); + + /* Connect to remote database mysql_real_connect() */ + mysql= mysql_init(0); + DBUG_PRINT("ha_federated::open",("hostname %s", share->hostname)); + DBUG_PRINT("ha_federated::open",("username %s", share->username)); + DBUG_PRINT("ha_federated::open",("password %s", share->password)); + DBUG_PRINT("ha_federated::open",("database %s", share->database)); + DBUG_PRINT("ha_federated::open",("port %d", share->port)); + if (!mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + NULL, + 0)) + { + my_error(ER_CONNECT_TO_MASTER, MYF(0), mysql_error(mysql)); + DBUG_RETURN(ER_CONNECT_TO_MASTER); + } + DBUG_RETURN(0); +} + + +/* + Closes a table. We call the free_share() function to free any resources + that we have allocated in the "shared" structure. + + Called from sql_base.cc, sql_select.cc, and table.cc. + In sql_select.cc it is only used to close up temporary tables or during + the process where a temporary table is converted over to being a + myisam table. + For sql_base.cc look at close_data_tables(). +*/ +int ha_federated::close(void) +{ + DBUG_ENTER("ha_federated::close"); + /* Disconnect from mysql */ + mysql_close(mysql); + DBUG_RETURN(free_share(share)); + +} + +/* + + Checks if a field in a record is SQL NULL. + + SYNOPSIS + field_in_record_is_null() + table TABLE pointer, MySQL table object + field Field pointer, MySQL field object + record char pointer, contains record + + DESCRIPTION + This method uses the record format information in table to track + the null bit in record. + + RETURN VALUE + 1 if NULL + 0 otherwise +*/ +inline uint field_in_record_is_null ( + TABLE* table, /* in: MySQL table object */ + Field* field, /* in: MySQL field object */ + char* record) /* in: a row in MySQL format */ +{ + int null_offset; + DBUG_ENTER("ha_federated::field_in_record_is_null"); + + if (!field->null_ptr) + DBUG_RETURN(0); + + null_offset= (uint) ((char*) field->null_ptr - (char*) table->record[0]); + + if (record[null_offset] & field->null_bit) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +/* + write_row() inserts a row. No extra() hint is given currently if a bulk load + is happeneding. buf() is a byte array of data. You can use the field + information to extract the data from the native byte array type. + Example of this would be: + for (Field **field=table->field ; *field ; field++) + { + ... + } + + Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, + sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. 
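write_row(), whose contract is described above and whose definition follows, grows a column list and a VALUES list side by side, skipping fields that are not part of the current statement. The string-assembly part of that, reduced to a sketch over hypothetical, already-quoted column and value vectors:

  #include <string>
  #include <vector>

  // Build "INSERT INTO tbl (a, b) VALUES ('1', 'x')", or
  // "INSERT INTO tbl VALUES ()" when no columns were given.
  static std::string build_insert(const std::string &table,
                                  const std::vector<std::string> &cols,
                                  const std::vector<std::string> &vals)
  {
    std::string query= "INSERT INTO " + table;
    if (!cols.empty())
    {
      query+= " (";
      for (size_t i= 0; i < cols.size(); i++)
        query+= (i ? ", " : "") + cols[i];
      query+= ")";
    }
    query+= " VALUES (";
    for (size_t i= 0; i < vals.size(); i++)
      query+= (i ? ", " : "") + vals[i];
    query+= ")";
    return query;
  }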
+*/ +int ha_federated::write_row(byte * buf) +{ + int x= 0, num_fields= 0; + ulong current_query_id= 1; + ulong tmp_query_id; + int all_fields_have_same_query_id= 1; + + char insert_buffer[IO_SIZE]; + char values_buffer[IO_SIZE], insert_field_value_buffer[IO_SIZE]; + + // The main insert query string + String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin); + insert_string.length(0); + // The string containing the values to be added to the insert + String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin); + values_string.length(0); + // The actual value of the field, to be added to the values_string + String insert_field_value_string(insert_field_value_buffer, + sizeof(insert_field_value_buffer), &my_charset_bin); + insert_field_value_string.length(0); + + DBUG_ENTER("ha_federated::write_row"); + /* + I want to use this and the next line, but the repository needs to be + updated to do so + */ + statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + + /* + get the current query id - the fields that we add to the insert + statement to send to the remote will not be appended unless they match + this query id + */ + current_query_id= table->in_use->query_id; + DBUG_PRINT("ha_federated::write_row", ("current query id %d", + current_query_id)); + + // start off our string + insert_string.append("INSERT INTO "); + insert_string.append(share->table_base_name); + // start both our field and field values strings + insert_string.append(" ("); + values_string.append(" VALUES ("); + + /* + Even if one field is different, all_fields_same_query_id can't remain + 0 if it remains 0, then that means no fields were specified in the query + such as in the case of INSERT INTO table VALUES (val1, val2, valN) + */ + for (Field **field= table->field; *field ; field++, x++) + { + if (x > 0 && tmp_query_id != (*field)->query_id) + all_fields_have_same_query_id= 0; + + tmp_query_id= (*field)->query_id; + } + /* + loop through the field pointer array, add any fields to both the values + list and the fields list that match the current query id + */ + for (Field **field= table->field; *field ; field++, x++) + { + DBUG_PRINT("ha_federated::write_row", ("field type %d", (*field)->type())); + // if there is a query id and if it's equal to the current query id + if ( ((*field)->query_id && (*field)->query_id == current_query_id ) + || all_fields_have_same_query_id) + { + num_fields++; + + if ((*field)->is_null()) + { + DBUG_PRINT("ha_federated::write_row", + ("current query id %d field is_null query id %d", + current_query_id, (*field)->query_id)); + insert_field_value_string.append("NULL"); + } + else + { + DBUG_PRINT("ha_federated::write_row", + ("current query id %d field is not null query ID %d", + current_query_id, (*field)->query_id)); + (*field)->val_str(&insert_field_value_string); + } + // append the field name + insert_string.append((*field)->field_name); + + // quote these fields if they require it + quote_data(&insert_field_value_string, *field); + // append the value + values_string.append(insert_field_value_string); + insert_field_value_string.length(0); + + // append commas between both fields and fieldnames + insert_string.append(','); + values_string.append(','); + DBUG_PRINT("ha_federated::write_row", + ("insert_string %s values_string %s insert_field_value_string %s", + insert_string.c_ptr_quick(), values_string.c_ptr_quick(), 
insert_field_value_string.c_ptr_quick())); + + } + } + + /* + chop of the trailing comma, or if there were no fields, a '(' + So, "INSERT INTO foo (" becomes "INSERT INTO foo " + or, with fields, "INSERT INTO foo (field1, field2," becomes + "INSERT INTO foo (field1, field2" + */ + insert_string.chop(); + + + /* + if there were no fields, we don't want to add a closing paren + AND, we don't want to chop off the last char '(' + insert will be "INSERT INTO t1 VALUES ();" + */ + DBUG_PRINT("ha_federated::write_row",("x %d num fields %d", + x, num_fields)); + if (num_fields > 0) + { + // chops off leading commas + values_string.chop(); + insert_string.append(')'); + } + // we always want to append this, even if there aren't any fields + values_string.append(')'); + + // add the values + insert_string.append(values_string); + + DBUG_PRINT("ha_federated::write_row",("insert query %s", + insert_string.c_ptr_quick())); + + if (mysql_real_query(mysql, insert_string.c_ptr_quick(), + insert_string.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + + DBUG_RETURN(0); +} + +/* + Yes, update_row() does what you expect, it updates a row. old_data will have + the previous row record in it, while new_data will have the newest data in + it. + + Keep in mind that the server can do updates based on ordering if an ORDER BY + clause was used. Consecutive ordering is not guarenteed. + Currently new_data will not have an updated auto_increament record, or + and updated timestamp field. You can do these for federated by doing these: + if (table->timestamp_on_update_now) + update_timestamp(new_row+table->timestamp_on_update_now-1); + if (table->next_number_field && record == table->record[0]) + update_auto_increment(); + + Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. +*/ +int ha_federated::update_row( + const byte * old_data, + byte * new_data + ) +{ + uint x= 0; + uint has_a_primary_key; + int primary_key_field_num; + char old_field_value_buffer[IO_SIZE], new_field_value_buffer[IO_SIZE]; + char update_buffer[IO_SIZE], where_buffer[IO_SIZE]; + + // stores the value to be replaced of the field were are updating + String old_field_value(old_field_value_buffer, sizeof(old_field_value_buffer), &my_charset_bin); + old_field_value.length(0); + // stores the new value of the field + String new_field_value(new_field_value_buffer, sizeof(new_field_value_buffer), &my_charset_bin); + new_field_value.length(0); + // stores the update query + String update_string(update_buffer, sizeof(update_buffer), &my_charset_bin); + update_string.length(0); + // stores the WHERE clause + String where_string(where_buffer, sizeof(where_buffer), &my_charset_bin); + where_string.length(0); + + DBUG_ENTER("ha_federated::update_row"); + + + has_a_primary_key= table->primary_key == 0 ? 1 : 0; + primary_key_field_num= has_a_primary_key ? + table->key_info[table->primary_key].key_part->fieldnr -1 : -1; + if (has_a_primary_key) + DBUG_PRINT("ha_federated::update_row", ("has a primary key")); + + update_string.append("UPDATE "); + update_string.append(share->table_base_name); + update_string.append(" SET "); + +/* + In this loop, we want to match column names to values being inserted + (while building INSERT statement). 
+ + Iterate through table->field (new data) and share->old_filed (old_data) + using the same index to created an SQL UPDATE statement, new data is + used to create SET field=value and old data is used to create WHERE + field=oldvalue + */ + + for (Field **field= table->field ; *field ; field++, x++) + { + /* + In all of these tests for 'has_a_primary_key', what I'm trying to + accomplish is to only use the primary key in the WHERE clause if the + table has a primary key, as opposed to a table without a primary key + in which case we have to use all the fields to create a WHERE clause + using the old/current values, as well as adding a LIMIT statement + */ + if (has_a_primary_key) + { + if (x == primary_key_field_num) + where_string.append((*field)->field_name); + } + else + where_string.append((*field)->field_name); + + update_string.append((*field)->field_name); + update_string.append('='); + + if ((*field)->is_null()) + new_field_value.append("NULL"); + else + { + // otherwise = + (*field)->val_str(&new_field_value); + quote_data(&new_field_value, *field); + + if ( has_a_primary_key ) + { + if (x == primary_key_field_num) + where_string.append("="); + } + else + if (! field_in_record_is_null(table, *field, (char*) old_data)) + where_string.append("="); + } + + if ( has_a_primary_key) + { + if (x == primary_key_field_num) + { + (*field)->val_str(&old_field_value, + (char *)(old_data + (*field)->offset())); + quote_data(&old_field_value, *field); + where_string.append(old_field_value); + } + } + else + { + if (field_in_record_is_null(table, *field, (char*) old_data)) + where_string.append(" IS NULL "); + else + { + (*field)->val_str(&old_field_value, + (char *)(old_data + (*field)->offset())); + quote_data(&old_field_value, *field); + where_string.append(old_field_value); + } + } + update_string.append(new_field_value); + new_field_value.length(0); + + if (x+1 < table->fields) + { + update_string.append(", "); + if (! has_a_primary_key) + where_string.append(" AND "); + } + old_field_value.length(0); + } + update_string.append(" WHERE "); + update_string.append(where_string.c_ptr_quick()); + if (! has_a_primary_key) + update_string.append(" LIMIT 1"); + + DBUG_PRINT("ha_federated::update_row", ("Final update query: %s", + update_string.c_ptr_quick())); + if (mysql_real_query(mysql, update_string.c_ptr_quick(), + update_string.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + + + DBUG_RETURN(0); +} + +/* + This will delete a row. 'buf' will contain a copy of the row to be deleted. + The server will call this right after the current row has been called (from + either a previous rnd_nexT() or index call). + If you keep a pointer to the last row or can access a primary key it will + make doing the deletion quite a bit easier. + Keep in mind that the server does no guarentee consecutive deletions. + ORDER BY clauses can be used. + + Called in sql_acl.cc and sql_udf.cc to manage internal table information. + Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select + it is used for removing duplicates while in insert it is used for REPLACE + calls. 
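update_row() above walks the field array once and grows two strings in parallel: a SET list built from the new values and a WHERE list built from the old ones (all columns, plus LIMIT 1, when there is no primary key). The shape of that assembly in a compact sketch, again over hypothetical, already-quoted vectors rather than Field objects:

  #include <string>
  #include <vector>

  // Build "UPDATE tbl SET a='1', b='x' WHERE a='0' AND b='y' LIMIT 1"
  // from parallel lists of column names, new values and old values.
  static std::string build_update(const std::string &table,
                                  const std::vector<std::string> &cols,
                                  const std::vector<std::string> &new_vals,
                                  const std::vector<std::string> &old_vals)
  {
    std::string set_clause, where_clause;
    for (size_t i= 0; i < cols.size(); i++)
    {
      if (i)
      {
        set_clause+= ", ";
        where_clause+= " AND ";
      }
      set_clause+= cols[i] + "=" + new_vals[i];
      where_clause+= cols[i] + "=" + old_vals[i];
    }
    // Without a primary key every column identifies the row, so cap at one row.
    return "UPDATE " + table + " SET " + set_clause +
           " WHERE " + where_clause + " LIMIT 1";
  }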
+*/ +int ha_federated::delete_row(const byte * buf) +{ + int x= 0; + char delete_buffer[IO_SIZE]; + char data_buffer[IO_SIZE]; + + String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin); + delete_string.length(0); + String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin); + data_string.length(0); + + DBUG_ENTER("ha_federated::delete_row"); + + delete_string.append("DELETE FROM "); + delete_string.append(share->table_base_name); + delete_string.append(" WHERE "); + + for (Field **field= table->field; *field; field++, x++) + { + delete_string.append((*field)->field_name); + + if ((*field)->is_null()) + { + delete_string.append(" IS "); + data_string.append("NULL"); + } + else + { + delete_string.append("="); + (*field)->val_str(&data_string); + quote_data(&data_string, *field); + } + + delete_string.append(data_string); + data_string.length(0); + + if (x+1 < table->fields) + delete_string.append(" AND "); + } + + delete_string.append(" LIMIT 1"); + DBUG_PRINT("ha_federated::delete_row", + ("Delete sql: %s", delete_string.c_ptr_quick())); + if ( mysql_real_query(mysql, delete_string.c_ptr_quick(), + delete_string.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + + DBUG_RETURN(0); +} + + +/* + Positions an index cursor to the index specified in the handle. Fetches the + row if available. If the key value is null, begin at the first key of the + index. This method, which is called in the case of an SQL statement having + a WHERE clause on a non-primary key index, simply calls index_read_idx. +*/ +int ha_federated::index_read(byte * buf, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + DBUG_ENTER("ha_federated::index_read"); + DBUG_RETURN(index_read_idx(buf, active_index, key, key_len, find_flag)); +} + + +/* + Positions an index cursor to the index specified in key. Fetches the + row if any. This is only used to read whole keys. + + This method is called via index_read in the case of a WHERE clause using + a regular non-primary key index, OR is called DIRECTLY when the WHERE clause + uses a PRIMARY KEY index. 
+*/ +int ha_federated::index_read_idx(byte * buf, uint index, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + char index_value[IO_SIZE]; + String index_string(index_value, sizeof(index_value), &my_charset_bin); + index_string.length(0); + + char sql_query_buffer[IO_SIZE]; + String sql_query(sql_query_buffer, sizeof(sql_query_buffer), &my_charset_bin); + sql_query.length(0); + + DBUG_ENTER("ha_federated::index_read_idx"); + statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); + + index_string.length(0); + sql_query.length(0); + + sql_query.append(share->select_query); + sql_query.append(" WHERE "); + sql_query.append(table->key_info[index].key_part->field->field_name); + sql_query.append(" = "); + + table->key_info[index].key_part->field->val_str(&index_string, (char *)(key)); + quote_data(&index_string, table->key_info[index].key_part->field); + sql_query.append(index_string); + + DBUG_PRINT("ha_federated::index_read_idx", + ("sql_query %s", sql_query.c_ptr_quick())); + + if (mysql_real_query(mysql, sql_query.c_ptr_quick(), sql_query.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + result= mysql_store_result(mysql); + + if (!result) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + + if (mysql_errno(mysql)) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(mysql_errno(mysql)); + } + + DBUG_RETURN(rnd_next(buf)); +} + +/* + Initialized at each key walk (called multiple times unlike ::rnd_init()) +*/ +int ha_federated::index_init(uint keynr) +{ + int error; + DBUG_ENTER("ha_federated::index_init"); + DBUG_PRINT("ha_federated::index_init", + ("table: '%s' key: %d", table->real_name, keynr)); + active_index= keynr; + DBUG_RETURN(0); +} + +/* + Used to read forward through the index. +*/ +int ha_federated::index_next(byte * buf) +{ + DBUG_ENTER("ha_federated::index_next"); + DBUG_RETURN(rnd_next(buf)); +} + + +/* + rnd_init() is called when the system wants the storage engine to do a table + scan. + + This is the method that gets data for the SELECT calls. + + See the federated in the introduction at the top of this file to see when + rnd_init() is called. + + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc. +*/ +int ha_federated::rnd_init(bool scan) +{ + DBUG_ENTER("ha_federated::rnd_init"); + int num_fields, rows; + + DBUG_PRINT("ha_federated::rnd_init", + ("share->select_query %s", share->select_query)); + if (mysql_real_query(mysql, share->select_query, strlen(share->select_query))) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + result= mysql_store_result(mysql); + + if (mysql_errno(mysql)) + DBUG_RETURN(mysql_errno(mysql)); + DBUG_RETURN(0); +} + +int ha_federated::rnd_end() +{ + DBUG_ENTER("ha_federated::rnd_end"); + mysql_free_result(result); + DBUG_RETURN(index_end()); +} + +int ha_federated::index_end(void) +{ + DBUG_ENTER("ha_federated::index_end"); + active_index= MAX_KEY; + DBUG_RETURN(0); +} + +/* + This is called for each row of the table scan. When you run out of records + you should return HA_ERR_END_OF_FILE. Fill buff up with the row information. + The Field structure for the table is the key to getting data into buf + in a manner that will allow the server to understand it. 
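rnd_init() above fires share->select_query at the remote server and rnd_next() (below) then pulls the stored result apart one row at a time. Stripped of the handler plumbing, the round trip is the ordinary MySQL C API sequence; a minimal sketch, with host, credentials and table taken from the example setup earlier in this file and therefore placeholders:

  #include <mysql.h>
  #include <cstring>
  #include <cstdio>

  int scan_remote_table()
  {
    MYSQL *mysql= mysql_init(NULL);
    if (!mysql_real_connect(mysql, "127.0.0.1", "root", "", "federated", 9306,
                            NULL, 0))
      return 1;                                   /* the ER_CONNECT_TO_MASTER case */

    const char *query= "SELECT * FROM test_table";
    if (mysql_real_query(mysql, query, (unsigned long) strlen(query)))
    {                                             /* the ER_QUERY_ON_MASTER case */
      mysql_close(mysql);
      return 1;
    }

    MYSQL_RES *result= mysql_store_result(mysql); /* rnd_init() keeps this handle */
    if (result)
    {
      MYSQL_ROW row;
      while ((row= mysql_fetch_row(result)))      /* one rnd_next() per row */
        printf("id=%s name=%s\n", row[0] ? row[0] : "NULL",
               row[1] ? row[1] : "NULL");
      mysql_free_result(result);                  /* rnd_end() */
    }
    mysql_close(mysql);
    return 0;
  }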
+ + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc. +*/ +int ha_federated::rnd_next(byte *buf) +{ + MYSQL_ROW row; + DBUG_ENTER("ha_federated::rnd_next"); + + // Fetch a row, insert it back in a row format. + current_position= result->data_cursor; + if (! (row= mysql_fetch_row(result))) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + DBUG_RETURN(convert_row_to_internal_format(buf,row)); +} + + +/* + 'position()' is called after each call to rnd_next() if the data needs to be + ordered. You can do something like the following to store the position: + ha_store_ptr(ref, ref_length, current_position); + + The server uses ref to store data. ref_length in the above case is the size + needed to store current_position. ref is just a byte array that the server + will maintain. If you are using offsets to mark rows, then current_position + should be the offset. If it is a primary key like in BDB, then it needs to + be a primary key. + + Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. +*/ +void ha_federated::position(const byte *record) +{ + DBUG_ENTER("ha_federated::position"); + //ha_store_ptr Add seek storage + ha_store_ptr(ref, ref_length, current_position); + DBUG_VOID_RETURN; +} + + +/* + This is like rnd_next, but you are given a position to use to determine the + row. The position will be of the type that you stored in ref. You can use + ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved + when position() was called. + + This method is required for an ORDER BY. + + Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. +*/ +int ha_federated::rnd_pos(byte * buf, byte *pos) +{ + DBUG_ENTER("ha_federated::rnd_pos"); + statistic_increment(table->in_use->status_var.ha_read_rnd_count,&LOCK_status); + current_position= ha_get_ptr(pos,ref_length); + result->current_row= 0; + result->data_cursor= current_position; + DBUG_RETURN(rnd_next(buf)); +} + + +/* + ::info() is used to return information to the optimizer. + Currently this table handler doesn't implement most of the fields + really needed. SHOW also makes use of this data + Another note, you will probably want to have the following in your + code: + if (records < 2) + records = 2; + The reason is that the server will optimize for cases of only a single + record. If in a table scan you don't know the number of records + it will probably be better to set records to two so you can return + as many records as you need. + Along with records a few more variables you may wish to set are: + records + deleted + data_file_length + index_file_length + delete_length + check_time + Take a look at the public variables in handler.h for more information. + + Called in: + filesort.cc + ha_heap.cc + item_sum.cc + opt_sum.cc + sql_delete.cc + sql_delete.cc + sql_derived.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_table.cc + sql_union.cc + sql_update.cc + +*/ +// FIX: later version provide better information to the optimizer +void ha_federated::info(uint flag) +{ + DBUG_ENTER("ha_federated::info"); + records= 10000; // Fake! + DBUG_VOID_RETURN; +} + + +/* + Used to delete all rows in a table. Both for cases of truncate and + for cases where the optimizer realizes that all rows will be + removed as a result of a SQL statement. 
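position() and rnd_pos() above remember and restore the scan position by assigning result->data_cursor directly. The client library exposes the same capability through mysql_row_tell() and mysql_row_seek(), which are only valid on a result set kept with mysql_store_result(); a small sketch of that save-and-restore, assuming such a result set:

  #include <mysql.h>

  /* Re-read the row at a previously remembered position of a stored result
     set, then put the cursor back where it was. */
  MYSQL_ROW refetch_row(MYSQL_RES *result, MYSQL_ROW_OFFSET saved_position)
  {
    MYSQL_ROW_OFFSET current= mysql_row_tell(result); /* like position() */
    mysql_row_seek(result, saved_position);           /* like rnd_pos() */
    MYSQL_ROW row= mysql_fetch_row(result);           /* like rnd_next() */
    mysql_row_seek(result, current);                  /* restore the scan */
    return row;
  }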
+ + Called from item_sum.cc by Item_func_group_concat::clear(), + Item_sum_count_distinct::clear(), and Item_func_group_concat::clear(). + Called from sql_delete.cc by mysql_delete(). + Called from sql_select.cc by JOIN::reinit(). + Called from sql_union.cc by st_select_lex_unit::exec(). +*/ +int ha_federated::delete_all_rows() +{ + DBUG_ENTER("ha_federated::delete_all_rows"); + + char query_buffer[IO_SIZE]; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + query.length(0); + + query.set_charset(system_charset_info); + query.append("TRUNCATE "); + query.append(share->table_base_name); + + if (mysql_real_query(mysql, query.c_ptr_quick(), query.length())) { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + + +/* + The idea with handler::store_lock() is the following: + + The statement decided which locks we should need for the table + for updates/deletes/inserts we get WRITE locks, for SELECT... we get + read locks. + + Before adding the lock into the table lock handler (see thr_lock.c) + mysqld calls store lock with the requested locks. Store lock can now + modify a write lock to a read lock (or some other lock), ignore the + lock (if we don't want to use MySQL table locks at all) or add locks + for many tables (like we do when we are using a MERGE handler). + + Berkeley DB for federated changes all WRITE locks to TL_WRITE_ALLOW_WRITE + (which signals that we are doing WRITES, but we are still allowing other + reader's and writer's. + + When releasing locks, store_lock() are also called. In this case one + usually doesn't have to do anything. + + In some exceptional cases MySQL may send a request for a TL_IGNORE; + This means that we are requesting the same lock as last time and this + should also be ignored. (This may happen when someone does a flush + table when we have opened a part of the tables, in which case mysqld + closes and reopens the tables and tries to get the same locks at last + time). In the future we will probably try to remove this. + + Called from lock.cc by get_lock_data(). +*/ +THR_LOCK_DATA **ha_federated::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + { + /* + Here is where we get into the guts of a row level lock. + If TL_UNLOCK is set + If we are not doing a LOCK TABLE or DISCARD/IMPORT + TABLESPACE, then allow multiple writers + */ + + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && + lock_type <= TL_WRITE) && !thd->in_lock_tables + && !thd->tablespace_op) + lock_type= TL_WRITE_ALLOW_WRITE; + + /* + In queries of type INSERT INTO t1 SELECT ... FROM t2 ... + MySQL would use the lock TL_READ_NO_INSERT on t2, and that + would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts + to t2. Convert the lock to a normal read lock to allow + concurrent inserts to t2. + */ + + if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) + lock_type= TL_READ; + + lock.type= lock_type; + } + + *to++= &lock; + + return to; +} + +/* + create() does nothing, since we have no local setup of our own. + FUTURE: We should potentially connect to the remote database and + create tables if they do not exist. 
+*/ +int ha_federated::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) +{ + DBUG_ENTER("ha_federated::create"); + int retcode; + FEDERATED_SHARE *tmp; + retcode= parse_url(tmp, table_arg, 1); + if (retcode < 0) + { + DBUG_PRINT("ha_federated::create", + ("ERROR: on table creation for %s called parse_url, retcode %d", + create_info->data_file_name, retcode)); + DBUG_RETURN(ER_CANT_CREATE_TABLE); + } + DBUG_RETURN(0); +} +#endif /* HAVE_FEDERATED_DB */ diff --git a/sql/ha_federated.h b/sql/ha_federated.h new file mode 100755 index 00000000000..c11960a836f --- /dev/null +++ b/sql/ha_federated.h @@ -0,0 +1,177 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Please read ha_exmple.cc before reading this file. + Please keep in mind that the federated storage engine implements all methods + that are required to be implemented. handler.h has a full list of methods + that you can implement. +*/ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +#include +//#include + +/* + FEDERATED_SHARE is a structure that will be shared amoung all open handlers + The example implements the minimum of what you will probably need. +*/ +//FIX document +typedef struct st_federated_share { + char *table_name; + char *table_base_name; + // the primary select query to be used in rnd_init + char *select_query; + // remote host info, parse_url supplies + char *scheme; + char *hostname; + char *username; + char *password; + char *database; + char *table; + char *socket; + char *sport; + int port; + uint table_name_length,table_base_name_length,use_count; + pthread_mutex_t mutex; + THR_LOCK lock; +} FEDERATED_SHARE; + +/* + Class definition for the storage engine +*/ +class ha_federated: public handler +{ + THR_LOCK_DATA lock; /* MySQL lock */ + FEDERATED_SHARE *share; /* Shared lock info */ + MYSQL *mysql; + MYSQL_RES *result; + uint ref_length; + uint fetch_num; // stores the fetch num + MYSQL_ROW_OFFSET current_position; // Current position used by ::position() + +private: + /* + return 0 on success + return errorcode otherwise + */ + //FIX + uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row); + uint type_quote(int type); + void quote_data(String *string1, Field *field); + +public: + ha_federated(TABLE *table): handler(table), + mysql(0), + ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0) + { + } + ~ha_federated() + { + } + /* The name that will be used for display purposes */ + const char *table_type() const { return "FEDERATED"; } + /* + The name of the index type that will be used for display + don't implement this method unless you really have indexes + */ + const char *index_type(uint inx) { return "REMOTE"; } + const char **bas_ext() const; + /* + This is a list of flags that says what the storage engine + implements. 
The current table flags are documented in + handler.h + Serg: Double check these (Brian) + // FIX add blob support + */ + ulong table_flags() const + { + return (HA_TABLE_SCAN_ON_INDEX | HA_NOT_EXACT_COUNT | + HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_AUTO_PART_KEY | + HA_TABLE_SCAN_ON_INDEX); + } + /* + This is a bitmap of flags that says how the storage engine + implements indexes. The current index flags are documented in + handler.h. If you do not implement indexes, just return zero + here. + + part is the key part to check. First key part is 0 + If all_parts it's set, MySQL want to know the flags for the combined + index up to and including 'part'. + */ + ulong index_flags(uint inx, uint part, bool all_parts) const + { + return (HA_READ_NEXT); + // return (HA_READ_NEXT | HA_ONLY_WHOLE_INDEX); + } + uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } + uint max_supported_keys() const { return MAX_KEY; } + uint max_supported_key_parts() const { return 1024; } + uint max_supported_key_length() const { return 1024; } + /* + Called in test_quick_select to determine if indexes should be used. + */ + virtual double scan_time() { DBUG_PRINT("ha_federated::scan_time", ("rows %d", records)); return (double)(records*2); } + /* + The next method will never be called if you do not implement indexes. + */ + virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; } + + /* + Everything below are methods that we implment in ha_federated.cc. + + Most of these methods are not obligatory, skip them and + MySQL will treat them as not implemented + */ + int open(const char *name, int mode, uint test_if_locked); // required + int close(void); // required + + int write_row(byte * buf); + int update_row(const byte * old_data, byte * new_data); + int delete_row(const byte * buf); + int index_init(uint keynr); + int index_read(byte * buf, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_read_idx(byte * buf, uint idx, const byte * key, + uint key_len, enum ha_rkey_function find_flag); + int index_next(byte * buf); + int index_end(); + /* + unlike index_init(), rnd_init() can be called two times + without rnd_end() in between (it only makes sense if scan=1). 
+ then the second call should prepare for the new table scan + (e.g if rnd_init allocates the cursor, second call should + position it to the start of the table, no need to deallocate + and allocate it again + */ + int rnd_init(bool scan); //required + int rnd_end(); + int rnd_next(byte *buf); //required + int rnd_pos(byte * buf, byte *pos); //required + void position(const byte *record); //required + void info(uint); //required + + int delete_all_rows(void); + int create(const char *name, TABLE *form, + HA_CREATE_INFO *create_info); //required + + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type); //required +}; diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 60555d51402..1556a18bfca 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -488,8 +488,10 @@ int ha_heap::create(const char *name, TABLE *table_arg, else { if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT && - seg->type != HA_KEYTYPE_VARTEXT && - seg->type != HA_KEYTYPE_VARBINARY) + seg->type != HA_KEYTYPE_VARTEXT1 && + seg->type != HA_KEYTYPE_VARTEXT2 && + seg->type != HA_KEYTYPE_VARBINARY1 && + seg->type != HA_KEYTYPE_VARBINARY2) seg->type= HA_KEYTYPE_BINARY; } seg->start= (uint) key_part->offset; diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 3418f82ee39..7b3f9fb3d7c 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -88,6 +88,7 @@ extern "C" { uint innobase_init_flags = 0; ulong innobase_cache_size = 0; +ulong innobase_large_page_size = 0; /* The default values for the following, type long, start-up parameters are declared in mysqld.cc: */ @@ -116,6 +117,9 @@ values */ uint innobase_flush_log_at_trx_commit = 1; my_bool innobase_log_archive = FALSE;/* unused */ +my_bool innobase_use_doublewrite = TRUE; +my_bool innobase_use_checksums = TRUE; +my_bool innobase_use_large_pages = FALSE; my_bool innobase_use_native_aio = FALSE; my_bool innobase_fast_shutdown = TRUE; my_bool innobase_very_fast_shutdown = FALSE; /* this can be set to @@ -1123,6 +1127,12 @@ innobase_init(void) srv_fast_shutdown = (ibool) innobase_fast_shutdown; + srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite; + srv_use_checksums = (ibool) innobase_use_checksums; + + os_use_large_pages = (ibool) innobase_use_large_pages; + os_large_page_size = (ulint) innobase_large_page_size; + srv_file_per_table = (ibool) innobase_file_per_table; srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog; diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index b3b8d1a29e8..bc4e3db7467 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -181,6 +181,7 @@ extern struct show_var_st innodb_status_variables[]; extern uint innobase_init_flags, innobase_lock_type; extern uint innobase_flush_log_at_trx_commit; extern ulong innobase_cache_size; +extern ulong innobase_large_page_size; extern char *innobase_home, *innobase_tmpdir, *innobase_logdir; extern long innobase_lock_scan_time; extern long innobase_mirrored_log_groups, innobase_log_files_in_group; @@ -195,6 +196,9 @@ extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; extern char *innobase_unix_file_flush_method; /* The following variables have to be my_bool for SHOW VARIABLES to work */ extern my_bool innobase_log_archive, + innobase_use_doublewrite, + innobase_use_checksums, + innobase_use_large_pages, innobase_use_native_aio, innobase_fast_shutdown, innobase_file_per_table, innobase_locks_unsafe_for_binlog, innobase_create_status_file; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 726647cd131..c23a728b715 100644 --- 
a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -926,8 +926,11 @@ int ha_myisam::enable_indexes(uint mode) { sql_print_warning("Warning: Enabling keys got errno %d, retrying", my_errno); + thd->clear_error(); param.testflag&= ~(T_REP_BY_SORT | T_QUICK); error= (repair(thd,param,0) != HA_ADMIN_OK); + if (!error && thd->net.report_error) + error= HA_ERR_CRASHED; } info(HA_STATUS_CONST); thd->proc_info=save_proc_info; @@ -1406,7 +1409,8 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].type= (int) type; keydef[i].seg[j].start= pos->key_part[j].offset; keydef[i].seg[j].length= pos->key_part[j].length; - keydef[i].seg[j].bit_start=keydef[i].seg[j].bit_end=0; + keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end= + keydef[i].seg[j].bit_pos= 0; keydef[i].seg[j].language = field->charset()->number; if (field->null_ptr) @@ -1428,6 +1432,13 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].bit_start= (uint) (field->pack_length() - table_arg->blob_ptr_size); } + else if (field->type() == FIELD_TYPE_BIT) + { + keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len; + keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs; + keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr - + (uchar*) table_arg->record[0]); + } } keyseg+=pos->key_parts; } @@ -1471,11 +1482,10 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, break; if (found->flags & BLOB_FLAG) - { recinfo_pos->type= (int) FIELD_BLOB; - } - else if (!(options & HA_OPTION_PACK_RECORD) || - found->type() == MYSQL_TYPE_VARCHAR) + else if (found->type() == MYSQL_TYPE_VARCHAR) + recinfo_pos->type= FIELD_VARCHAR; + else if (!(options & HA_OPTION_PACK_RECORD)) recinfo_pos->type= (int) FIELD_NORMAL; else if (found->zero_pack()) recinfo_pos->type= (int) FIELD_SKIP_ZERO; diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index 527e6a49aba..d2fe36c8357 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -47,7 +47,7 @@ class ha_myisam: public handler int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME | - HA_CAN_INSERT_DELAYED), + HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD), can_enable_indexes(1) {} ~ha_myisam() {} diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 4e9b28225ee..e35c68dce5c 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -771,7 +771,7 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase) KEY* key_info= tab->key_info; const char **key_name= tab->keynames.type_names; NdbDictionary::Dictionary *dict= m_ndb->getDictionary(); - DBUG_ENTER("build_index_list"); + DBUG_ENTER("ha_ndbcluster::build_index_list"); // Save information about all known indexes for (i= 0; i < tab->keys; i++, key_info++, key_name++) @@ -860,7 +860,7 @@ int ha_ndbcluster::check_index_fields_not_null(uint inx) KEY* key_info= table->key_info + inx; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; - DBUG_ENTER("check_index_fields_not_null"); + DBUG_ENTER("ha_ndbcluster::check_index_fields_not_null"); for (; key_part != end; key_part++) { @@ -922,6 +922,7 @@ static const ulong index_type_flags[]= */ // HA_KEYREAD_ONLY | HA_READ_NEXT | + HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER, @@ -930,11 +931,13 @@ static const ulong index_type_flags[]= /* UNIQUE_ORDERED_INDEX */ HA_READ_NEXT | + HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER, /* ORDERED_INDEX */ HA_READ_NEXT | + 
HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER }; @@ -958,7 +961,7 @@ inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part, bool all_parts) const { - DBUG_ENTER("index_flags"); + DBUG_ENTER("ha_ndbcluster::index_flags"); DBUG_PRINT("info", ("idx_no: %d", idx_no)); DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size); DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)]); @@ -1024,7 +1027,7 @@ ha_ndbcluster::set_index_key(NdbOperation *op, const KEY *key_info, const byte * key_ptr) { - DBUG_ENTER("set_index_key"); + DBUG_ENTER("ha_ndbcluster::set_index_key"); uint i; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; @@ -1196,7 +1199,7 @@ int ha_ndbcluster::unique_index_read(const byte *key, int res; NdbConnection *trans= m_active_trans; NdbIndexOperation *op; - DBUG_ENTER("unique_index_read"); + DBUG_ENTER("ha_ndbcluster::unique_index_read"); DBUG_PRINT("enter", ("key_len: %u, index: %u", key_len, active_index)); DBUG_DUMP("key", (char*)key, key_len); @@ -1402,6 +1405,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, case HA_READ_KEY_EXACT: p.bound_type= NdbIndexScanOperation::BoundEQ; break; + // ascending case HA_READ_KEY_OR_NEXT: p.bound_type= NdbIndexScanOperation::BoundLE; break; @@ -1411,6 +1415,19 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, else p.bound_type= NdbIndexScanOperation::BoundLT; break; + // descending + case HA_READ_PREFIX_LAST: // weird + p.bound_type= NdbIndexScanOperation::BoundEQ; + break; + case HA_READ_PREFIX_LAST_OR_PREV: // weird + p.bound_type= NdbIndexScanOperation::BoundGE; + break; + case HA_READ_BEFORE_KEY: + if (! p.part_last) + p.bound_type= NdbIndexScanOperation::BoundGE; + else + p.bound_type= NdbIndexScanOperation::BoundGT; + break; default: break; } @@ -1418,6 +1435,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, if (j == 1) { switch (p.key->flag) { + // ascending case HA_READ_BEFORE_KEY: if (! 
p.part_last) p.bound_type= NdbIndexScanOperation::BoundGE; @@ -1429,6 +1447,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, break; default: break; + // descending strangely sets no end key } } @@ -1537,15 +1556,16 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) int ha_ndbcluster::ordered_index_scan(const key_range *start_key, const key_range *end_key, - bool sorted, byte* buf) + bool sorted, bool descending, byte* buf) { int res; bool restart; NdbConnection *trans= m_active_trans; NdbIndexScanOperation *op; - DBUG_ENTER("ordered_index_scan"); - DBUG_PRINT("enter", ("index: %u, sorted: %d", active_index, sorted)); + DBUG_ENTER("ha_ndbcluster::ordered_index_scan"); + DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d", + active_index, sorted, descending)); DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname)); // Check that sorted seems to be initialised @@ -1559,7 +1579,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *) m_index[active_index].index, (const NDBTAB *) m_table)) || - op->readTuples(lm, 0, parallelism, sorted)) + op->readTuples(lm, 0, parallelism, sorted, descending)) ERR_RETURN(trans->getNdbError()); m_active_cursor= op; } else { @@ -2152,18 +2172,21 @@ void ha_ndbcluster::print_results() // Use DBUG_PRINT since DBUG_FILE cannot be filtered out char buf[2000]; Field *field; + void* ptr; const NDBCOL *col; NdbValue value; NdbBlob *ndb_blob; buf[0] = 0; + if (!(value= m_value[f]).ptr) { my_snprintf(buf, sizeof(buf), "not read"); goto print_value; } field= table->field[f]; - DBUG_DUMP("field->ptr", (char*)field->ptr, field->pack_length()); + ptr= field->ptr; + DBUG_DUMP("field->ptr", (char*)ptr, field->pack_length()); col= tab->getColumn(f); if (! 
(field->flags & BLOB_FLAG)) @@ -2188,96 +2211,97 @@ void ha_ndbcluster::print_results() switch (col->getType()) { case NdbDictionary::Column::Tinyint: { - char value= *field->ptr; + Int8 value= *(Int8*)ptr; my_snprintf(buf, sizeof(buf), "Tinyint %d", value); break; } case NdbDictionary::Column::Tinyunsigned: { - unsigned char value= *field->ptr; + Uint8 value= *(Uint8*)ptr; my_snprintf(buf, sizeof(buf), "Tinyunsigned %u", value); break; } case NdbDictionary::Column::Smallint: { - short value= *field->ptr; + Int16 value= *(Int16*)ptr; my_snprintf(buf, sizeof(buf), "Smallint %d", value); break; } case NdbDictionary::Column::Smallunsigned: { - unsigned short value= *field->ptr; + Uint16 value= *(Uint16*)ptr; my_snprintf(buf, sizeof(buf), "Smallunsigned %u", value); break; } case NdbDictionary::Column::Mediumint: { byte value[3]; - memcpy(value, field->ptr, 3); + memcpy(value, ptr, 3); my_snprintf(buf, sizeof(buf), "Mediumint %d,%d,%d", value[0], value[1], value[2]); break; } case NdbDictionary::Column::Mediumunsigned: { byte value[3]; - memcpy(value, field->ptr, 3); + memcpy(value, ptr, 3); my_snprintf(buf, sizeof(buf), "Mediumunsigned %u,%u,%u", value[0], value[1], value[2]); break; } case NdbDictionary::Column::Int: { + Int32 value= *(Int32*)ptr; my_snprintf(buf, sizeof(buf), "Int %d", value); break; } case NdbDictionary::Column::Unsigned: { - Uint32 value= (Uint32) *field->ptr; + Uint32 value= *(Uint32*)ptr; my_snprintf(buf, sizeof(buf), "Unsigned %u", value); break; } case NdbDictionary::Column::Bigint: { - Int64 value= (Int64) *field->ptr; - my_snprintf(buf, sizeof(buf), "Bigint %lld", value); + Int64 value= *(Int64*)ptr; + my_snprintf(buf, sizeof(buf), "Bigint %d", (int)value); break; } case NdbDictionary::Column::Bigunsigned: { - Uint64 value= (Uint64) *field->ptr; - my_snprintf(buf, sizeof(buf), "Bigunsigned %llu", value); + Uint64 value= *(Uint64*)ptr; + my_snprintf(buf, sizeof(buf), "Bigunsigned %u", (unsigned)value); break; } case NdbDictionary::Column::Float: { - float value= (float) *field->ptr; + float value= *(float*)ptr; my_snprintf(buf, sizeof(buf), "Float %f", (double)value); break; } case NdbDictionary::Column::Double: { - double value= (double) *field->ptr; + double value= *(double*)ptr; my_snprintf(buf, sizeof(buf), "Double %f", value); break; } case NdbDictionary::Column::Decimal: { - char *value= field->ptr; + const char *value= (char*)ptr; my_snprintf(buf, sizeof(buf), "Decimal '%-*s'", field->pack_length(), value); break; } case NdbDictionary::Column::Char:{ - const char *value= (char *) field->ptr; + const char *value= (char*)ptr; my_snprintf(buf, sizeof(buf), "Char '%.*s'", field->pack_length(), value); break; } case NdbDictionary::Column::Varchar: case NdbDictionary::Column::Binary: case NdbDictionary::Column::Varbinary: { - const char *value= (char *) field->ptr; + const char *value= (char*)ptr; my_snprintf(buf, sizeof(buf), "Var '%.*s'", field->pack_length(), value); break; } case NdbDictionary::Column::Bit: { - const char *value= (char *) field->ptr; + const char *value= (char*)ptr; my_snprintf(buf, sizeof(buf), "Bit '%.*s'", field->pack_length(), value); break; } case NdbDictionary::Column::Datetime: { - Uint64 value= (Uint64) *field->ptr; - my_snprintf(buf, sizeof(buf), "Datetime %llu", value); + // todo + my_snprintf(buf, sizeof(buf), "Datetime ?"); break; } case NdbDictionary::Column::Timespec: { - Uint64 value= (Uint64) *field->ptr; - my_snprintf(buf, sizeof(buf), "Timespec %llu", value); + // todo + my_snprintf(buf, sizeof(buf), "Timespec ?"); break; } 
case NdbDictionary::Column::Blob: { @@ -2307,7 +2331,7 @@ print_value: int ha_ndbcluster::index_init(uint index) { - DBUG_ENTER("index_init"); + DBUG_ENTER("ha_ndbcluster::index_init"); DBUG_PRINT("enter", ("index: %u", index)); DBUG_RETURN(handler::index_init(index)); } @@ -2315,7 +2339,7 @@ int ha_ndbcluster::index_init(uint index) int ha_ndbcluster::index_end() { - DBUG_ENTER("index_end"); + DBUG_ENTER("ha_ndbcluster::index_end"); DBUG_RETURN(close_scan()); } @@ -2346,7 +2370,7 @@ int ha_ndbcluster::index_read(byte *buf, const byte *key, uint key_len, enum ha_rkey_function find_flag) { - DBUG_ENTER("index_read"); + DBUG_ENTER("ha_ndbcluster::index_read"); DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d", active_index, key_len, find_flag)); @@ -2394,7 +2418,18 @@ int ha_ndbcluster::index_read(byte *buf, start_key.key= key; start_key.length= key_len; start_key.flag= find_flag; - error= ordered_index_scan(&start_key, 0, TRUE, buf); + bool descending= FALSE; + switch (find_flag) { + case HA_READ_KEY_OR_PREV: + case HA_READ_BEFORE_KEY: + case HA_READ_PREFIX_LAST: + case HA_READ_PREFIX_LAST_OR_PREV: + descending= TRUE; + break; + default: + break; + } + error= ordered_index_scan(&start_key, 0, TRUE, descending, buf); DBUG_RETURN(error == HA_ERR_END_OF_FILE ? HA_ERR_KEY_NOT_FOUND : error); } @@ -2404,7 +2439,7 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, enum ha_rkey_function find_flag) { statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); - DBUG_ENTER("index_read_idx"); + DBUG_ENTER("ha_ndbcluster::index_read_idx"); DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); index_init(index_no); DBUG_RETURN(index_read(buf, key, key_len, find_flag)); @@ -2413,9 +2448,7 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, int ha_ndbcluster::index_next(byte *buf) { - DBUG_ENTER("index_next"); - - int error= 1; + DBUG_ENTER("ha_ndbcluster::index_next"); statistic_increment(current_thd->status_var.ha_read_next_count, &LOCK_status); DBUG_RETURN(next_result(buf)); @@ -2424,42 +2457,37 @@ int ha_ndbcluster::index_next(byte *buf) int ha_ndbcluster::index_prev(byte *buf) { - DBUG_ENTER("index_prev"); + DBUG_ENTER("ha_ndbcluster::index_prev"); statistic_increment(current_thd->status_var.ha_read_prev_count, &LOCK_status); - DBUG_RETURN(1); + DBUG_RETURN(next_result(buf)); } int ha_ndbcluster::index_first(byte *buf) { - DBUG_ENTER("index_first"); + DBUG_ENTER("ha_ndbcluster::index_first"); statistic_increment(current_thd->status_var.ha_read_first_count, &LOCK_status); // Start the ordered index scan and fetch the first row // Only HA_READ_ORDER indexes get called by index_first - DBUG_RETURN(ordered_index_scan(0, 0, TRUE, buf)); + DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf)); } int ha_ndbcluster::index_last(byte *buf) { - DBUG_ENTER("index_last"); + DBUG_ENTER("ha_ndbcluster::index_last"); statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status); - int res; - if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){ - NdbScanOperation *cursor= m_active_cursor; - while((res= cursor->nextResult(TRUE, m_force_send)) == 0); - if(res == 1){ - unpack_record(buf); - table->status= 0; - DBUG_RETURN(0); - } - } - DBUG_RETURN(res); + DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf)); } +int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len) +{ + DBUG_ENTER("ha_ndbcluster::index_read_last"); + DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST)); +} inline int 
ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, @@ -2504,7 +2532,7 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, } // Start the ordered index scan and fetch the first row - error= ordered_index_scan(start_key, end_key, sorted, buf); + error= ordered_index_scan(start_key, end_key, sorted, FALSE, buf); DBUG_RETURN(error); } @@ -3610,7 +3638,7 @@ int ha_ndbcluster::create(const char *name, int ha_ndbcluster::create_ordered_index(const char *name, KEY *key_info) { - DBUG_ENTER("create_ordered_index"); + DBUG_ENTER("ha_ndbcluster::create_ordered_index"); DBUG_RETURN(create_index(name, key_info, FALSE)); } @@ -3618,7 +3646,7 @@ int ha_ndbcluster::create_unique_index(const char *name, KEY *key_info) { - DBUG_ENTER("create_unique_index"); + DBUG_ENTER("ha_ndbcluster::create_unique_index"); DBUG_RETURN(create_index(name, key_info, TRUE)); } @@ -3635,7 +3663,7 @@ int ha_ndbcluster::create_index(const char *name, KEY_PART_INFO *key_part= key_info->key_part; KEY_PART_INFO *end= key_part + key_info->key_parts; - DBUG_ENTER("create_index"); + DBUG_ENTER("ha_ndbcluster::create_index"); DBUG_PRINT("enter", ("name: %s ", name)); NdbDictionary::Index ndb_index(name); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 3aeeb28d8bd..6e534d14f97 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -96,6 +96,7 @@ class ha_ndbcluster: public handler int index_prev(byte *buf); int index_first(byte *buf); int index_last(byte *buf); + int index_read_last(byte * buf, const byte * key, uint key_len); int rnd_init(bool scan); int rnd_end(); int rnd_next(byte *buf); @@ -176,7 +177,7 @@ class ha_ndbcluster: public handler byte *buf); int ordered_index_scan(const key_range *start_key, const key_range *end_key, - bool sorted, byte* buf); + bool sorted, bool descending, byte* buf); int full_table_scan(byte * buf); int fetch_next(NdbScanOperation* op); int next_result(byte *buf); diff --git a/sql/handler.cc b/sql/handler.cc index edb4d5b488b..e43f2c2e888 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -47,6 +47,9 @@ #ifdef HAVE_NDBCLUSTER_DB #include "ha_ndbcluster.h" #endif +#ifdef HAVE_FEDERATED_DB +#include "ha_federated.h" +#endif #include #include @@ -92,6 +95,8 @@ struct show_table_type_st sys_table_types[]= "Archive storage engine", DB_TYPE_ARCHIVE_DB}, {"CSV",&have_csv_db, "CSV storage engine", DB_TYPE_CSV_DB}, + {"FEDERATED",&have_federated_db, + "Federated MySQL storage engine", DB_TYPE_FEDERATED_DB}, {NullS, NULL, NullS, DB_TYPE_UNKNOWN} }; @@ -200,6 +205,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) case DB_TYPE_ARCHIVE_DB: return new ha_archive(table); #endif +#ifdef HAVE_FEDERATED_DB + case DB_TYPE_FEDERATED_DB: + return new ha_federated(table); +#endif #ifdef HAVE_CSV_DB case DB_TYPE_CSV_DB: return new ha_tina(table); diff --git a/sql/handler.h b/sql/handler.h index c9adaefa888..3ee66e4b3cf 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -75,6 +75,7 @@ /* Table data are stored in separate files (for lower_case_table_names) */ #define HA_FILE_BASED (1 << 26) #define HA_NO_VARCHAR (1 << 27) +#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */ /* bits in index_flags(index_number) for what you can do with index */ @@ -152,6 +153,7 @@ enum db_type DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER, DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB, + DB_TYPE_FEDERATED_DB, DB_TYPE_DEFAULT // Must be last }; diff --git a/sql/item.cc b/sql/item.cc index f00a35fe628..9117105f26e 100644 --- 
a/sql/item.cc +++ b/sql/item.cc @@ -2573,11 +2573,11 @@ void Item_real::print(String *str) } -/**************************************************************************** -** varbinary item -** In string context this is a binary string -** In number context this is a longlong value. -****************************************************************************/ +/* + hex item + In string context this is a binary string. + In number context this is a longlong value. +*/ inline uint char_val(char X) { @@ -2587,7 +2587,7 @@ inline uint char_val(char X) } -Item_varbinary::Item_varbinary(const char *str, uint str_length) +Item_hex_string::Item_hex_string(const char *str, uint str_length) { name=(char*) str-2; // Lex makes this start with 0x max_length=(str_length+1)/2; @@ -2608,7 +2608,7 @@ Item_varbinary::Item_varbinary(const char *str, uint str_length) fixed= 1; } -longlong Item_varbinary::val_int() +longlong Item_hex_string::val_int() { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); @@ -2622,7 +2622,7 @@ longlong Item_varbinary::val_int() } -int Item_varbinary::save_in_field(Field *field, bool no_conversions) +int Item_hex_string::save_in_field(Field *field, bool no_conversions) { int error; field->set_notnull(); @@ -2639,6 +2639,44 @@ int Item_varbinary::save_in_field(Field *field, bool no_conversions) } +/* + bin item. + In string context this is a binary string. + In number context this is a longlong value. +*/ + +Item_bin_string::Item_bin_string(const char *str, uint str_length) +{ + const char *end= str + str_length - 1; + uchar bits= 0; + uint power= 1; + + name= (char*) str - 2; + max_length= (str_length + 7) >> 3; + char *ptr= (char*) sql_alloc(max_length + 1); + if (!ptr) + return; + str_value.set(ptr, max_length, &my_charset_bin); + ptr+= max_length - 1; + ptr[1]= 0; // Set end null for string + for (; end >= str; end--) + { + if (power == 256) + { + power= 1; + *ptr--= bits; + bits= 0; + } + if (*end == '1') + bits|= power; + power<<= 1; + } + *ptr= (char) bits; + collation.set(&my_charset_bin, DERIVATION_COERCIBLE); + fixed= 1; +} + + /* Pack data in buffer for sending */ @@ -2672,6 +2710,7 @@ bool Item::send(Protocol *protocol, String *buffer) case MYSQL_TYPE_STRING: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_BIT: { String *res; if ((res=val_str(buffer))) diff --git a/sql/item.h b/sql/item.h index cf3dc8896a5..d5361bdcc8a 100644 --- a/sql/item.h +++ b/sql/item.h @@ -959,13 +959,14 @@ public: }; -class Item_varbinary :public Item +class Item_hex_string: public Item { public: - Item_varbinary(const char *str,uint str_length); + Item_hex_string(): Item() {} + Item_hex_string(const char *str,uint str_length); enum Type type() const { return VARBIN_ITEM; } double val_real() - { DBUG_ASSERT(fixed == 1); return (double) Item_varbinary::val_int(); } + { DBUG_ASSERT(fixed == 1); return (double) Item_hex_string::val_int(); } longlong val_int(); bool basic_const_item() const { return 1; } String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; } @@ -977,6 +978,12 @@ public: }; +class Item_bin_string: public Item_hex_string +{ +public: + Item_bin_string(const char *str,uint str_length); +}; + class Item_result_field :public Item /* Item with result field */ { public: diff --git a/sql/item_sum.cc b/sql/item_sum.cc index b242698d36e..949545bcdb0 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1214,7 +1214,7 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2) { Field* f = *field; int 
len = *lengths++; - int res = f->key_cmp(key1, key2); + int res = f->cmp(key1, key2); if (res) return res; key1 += len; @@ -1668,7 +1668,7 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1, { int res; uint offset= (uint) (field->ptr - record); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + if ((res= field->cmp(key1 + offset, key2 + offset))) return res; } } @@ -1702,7 +1702,7 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) { int res; uint offset= (uint) (field->ptr - record); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + if ((res= field->cmp(key1 + offset, key2 + offset))) return (*order_item)->asc ? res : -res; } } diff --git a/sql/key.cc b/sql/key.cc index dfd924f1dc7..d54b8721cab 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -102,6 +102,19 @@ void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length) key_part->null_bit); key_length--; } + if (key_part->type == HA_KEYTYPE_BIT) + { + Field_bit *field= (Field_bit *) (key_part->field); + if (field->bit_len) + { + uchar bits= get_rec_bits((uchar*) from_record + + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + *to_key++= bits; + key_length--; + } + } if (key_part->key_part_flag & HA_BLOB_PART) { char *pos; @@ -170,6 +183,23 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info, to_record[key_part->null_offset]&= ~key_part->null_bit; key_length--; } + if (key_part->type == HA_KEYTYPE_BIT) + { + Field_bit *field= (Field_bit *) (key_part->field); + if (field->bit_len) + { + uchar bits= *(from_key + key_part->length - field->field_length -1); + set_rec_bits(bits, to_record + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + } + else + { + clr_rec_bits(to_record + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + } + } if (key_part->key_part_flag & HA_BLOB_PART) { uint blob_length= uint2korr(from_key); @@ -220,54 +250,54 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info, bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length) { - uint length; + uint store_length; KEY_PART_INFO *key_part; + const byte *key_end= key + key_length;; for (key_part=table->key_info[idx].key_part; - (int) key_length > 0; - key_part++, key+=length, key_length-=length) + key < key_end ; + key_part++, key+= store_length) { + uint length; + store_length= key_part->store_length; + if (key_part->null_bit) { - key_length--; if (*key != test(table->record[0][key_part->null_offset] & key_part->null_bit)) return 1; if (*key) - { - length=key_part->store_length; continue; - } key++; + store_length--; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART | + HA_BIT_PART)) { if (key_part->field->key_cmp(key, key_part->length)) return 1; - length=key_part->length+HA_KEY_BLOB_LENGTH; + continue; } - else + length= min((uint) (key_end-key), store_length); + if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ + FIELDFLAG_PACK))) { - length=min(key_length,key_part->length); - if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ - FIELDFLAG_PACK))) + CHARSET_INFO *cs= key_part->field->charset(); + uint char_length= key_part->length / cs->mbmaxlen; + const byte *pos= table->record[0] + key_part->offset; + if (length > char_length) { - CHARSET_INFO *cs= key_part->field->charset(); - uint char_length= key_part->length / 
cs->mbmaxlen; - const byte *pos= table->record[0] + key_part->offset; - if (length > char_length) - { - char_length= my_charpos(cs, pos, pos + length, char_length); - set_if_smaller(char_length, length); - } - if (cs->coll->strnncollsp(cs, - (const uchar*) key, length, - (const uchar*) pos, char_length, 0)) - return 1; + char_length= my_charpos(cs, pos, pos + length, char_length); + set_if_smaller(char_length, length); } - else if (memcmp(key,table->record[0]+key_part->offset,length)) - return 1; + if (cs->coll->strnncollsp(cs, + (const uchar*) key, length, + (const uchar*) pos, char_length, 0)) + return 1; + continue; } + if (memcmp(key,table->record[0]+key_part->offset,length)) + return 1; } return 0; } diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 4985a244824..babec735b62 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1027,6 +1027,8 @@ extern uint opt_crash_binlog_innodb; extern char *shared_memory_base_name, *mysqld_unix_port; extern bool opt_enable_shared_memory; extern char *default_tz_name; +extern my_bool opt_large_pages; +extern uint opt_large_page_size; extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log; extern FILE *bootstrap_file; @@ -1070,6 +1072,7 @@ extern struct my_option my_long_options[]; extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db; extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db; +extern SHOW_COMP_OPTION have_federated_db; extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink; extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb; extern SHOW_COMP_OPTION have_geometry, have_rtree_keys; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 42ae6982eb0..c5698469341 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -78,10 +78,6 @@ #define IF_PURIFY(A,B) (B) #endif -#ifndef INADDR_NONE -#define INADDR_NONE -1 // Error value from inet_addr -#endif - /* stack traces are only supported on linux intel */ #if defined(__linux__) && defined(__i386__) && defined(USE_PSTACK) #define HAVE_STACK_TRACE_ON_SEGV @@ -111,6 +107,7 @@ extern "C" { // Because of SCO 3.2V4.2 #ifdef HAVE_GRP_H #include #endif +#include #if defined(OS2) # include @@ -299,6 +296,8 @@ my_bool opt_short_log_format= 0; my_bool opt_log_queries_not_using_indexes= 0; my_bool lower_case_file_system= 0; my_bool opt_innodb_safe_binlog= 0; +my_bool opt_large_pages= 0; +uint opt_large_page_size= 0; volatile bool mqh_used = 0; uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options; @@ -392,6 +391,7 @@ CHARSET_INFO *national_charset_info, *table_alias_charset; SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster, have_example_db, have_archive_db, have_csv_db; +SHOW_COMP_OPTION have_federated_db; SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache; SHOW_COMP_OPTION have_geometry, have_rtree_keys; SHOW_COMP_OPTION have_crypt, have_compress; @@ -2423,6 +2423,19 @@ static int init_common_variables(const char *conf_file_name, int argc, DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname, server_version, SYSTEM_TYPE,MACHINE_TYPE)); +#ifdef HAVE_LARGE_PAGES + /* Initialize large page size */ + if (opt_large_pages && (opt_large_page_size= my_get_large_page_size())) + { + my_use_large_pages= 1; + my_large_page_size= opt_large_page_size; +#ifdef HAVE_INNOBASE_DB + innobase_use_large_pages= 1; + innobase_large_page_size= opt_large_page_size; +#endif + } +#endif /* HAVE_LARGE_PAGES */ + /* connections and databases needs lots of files */ { uint files, wanted_files; @@ -4086,6 
+4099,8 @@ enum options_mysqld OPT_INNODB_LOG_ARCHIVE, OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, OPT_INNODB_FLUSH_METHOD, + OPT_INNODB_DOUBLEWRITE, + OPT_INNODB_CHECKSUMS, OPT_INNODB_FAST_SHUTDOWN, OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB, OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, @@ -4184,7 +4199,8 @@ enum options_mysqld OPT_OPTIMIZER_SEARCH_DEPTH, OPT_OPTIMIZER_PRUNE_LEVEL, OPT_UPDATABLE_VIEWS_WITH_LIMIT, - OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET + OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET, + OPT_ENABLE_LARGE_PAGES }; @@ -4343,6 +4359,12 @@ Disable with --skip-bdb (will save memory).", "Set up signals usable for debugging", (gptr*) &opt_debugging, (gptr*) &opt_debugging, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_LARGE_PAGES + {"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. \ +Disable with --skip-large-pages.", + (gptr*) &opt_large_pages, (gptr*) &opt_large_pages, 0, GET_BOOL, NO_ARG, 0, 0, 0, + 0, 0, 0}, +#endif {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection", (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -4366,6 +4388,12 @@ Disable with --skip-innodb (will save memory).", "The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir, (gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_doublewrite", OPT_INNODB_DOUBLEWRITE, "Enable InnoDB doublewrite buffer (enabled by default). \ +Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, + (gptr*) &innobase_use_doublewrite, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). \ +Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums, + (gptr*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN, "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown, (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, @@ -5687,7 +5715,8 @@ static void mysql_init_variables(void) mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS; bzero((gptr) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list)); bzero((char *) &global_status_var, sizeof(global_status_var)); - + opt_large_pages= 0; + /* Character sets */ system_charset_info= &my_charset_utf8_general_ci; files_charset_info= &my_charset_utf8_general_ci; @@ -5793,6 +5822,11 @@ static void mysql_init_variables(void) #else have_archive_db= SHOW_OPTION_NO; #endif +#ifdef HAVE_FEDERATED_DB + have_federated_db= SHOW_OPTION_YES; +#else + have_federated_db= SHOW_OPTION_NO; +#endif #ifdef HAVE_CSV_DB have_csv_db= SHOW_OPTION_YES; #else diff --git a/sql/opt_range.cc b/sql/opt_range.cc index f9149f10a30..80237766d29 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -746,7 +746,7 @@ int QUICK_RANGE_SELECT::init() void QUICK_RANGE_SELECT::range_end() { if (file->inited != handler::NONE) - file->ha_index_end(); + file->ha_index_or_rnd_end(); } @@ -3687,7 +3687,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, } /* Get local copy of key */ copies= 1; - if (field->key_type() == HA_KEYTYPE_VARTEXT) + if (field->key_type() == HA_KEYTYPE_VARTEXT1 || + field->key_type() == HA_KEYTYPE_VARTEXT2) copies= 2; str= str2= (char*) alloc_root(param->mem_root, (key_part->store_length)*copies+1); @@ -4999,7 +5000,9 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, char 
*min_key,uint min_key_flag, char *max_key, uint max_key_flag) { - ha_rows records=0,tmp; + ha_rows records=0, tmp; + uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length; + char *tmp_min_key, *tmp_max_key; param->max_key_part=max(param->max_key_part,key_tree->part); if (key_tree->left != &null_element) @@ -5017,13 +5020,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, return records; } - uint tmp_min_flag,tmp_max_flag,keynr; - char *tmp_min_key=min_key,*tmp_max_key=max_key; - + tmp_min_key= min_key; + tmp_max_key= max_key; key_tree->store(param->key[idx][key_tree->part].store_length, &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); - uint min_key_length= (uint) (tmp_min_key- param->min_key); - uint max_key_length= (uint) (tmp_max_key- param->max_key); + min_key_length= (uint) (tmp_min_key- param->min_key); + max_key_length= (uint) (tmp_max_key- param->max_key); if (param->is_ror_scan) { @@ -5888,7 +5890,7 @@ int QUICK_RANGE_SELECT::get_next() SYNOPSIS QUICK_RANGE_SELECT::get_next_prefix() prefix_length length of cur_prefix - cur_prefix prefix of a key to be searached for + cur_prefix prefix of a key to be searched for DESCRIPTION Each subsequent call to the method retrieves the first record that has a @@ -7402,7 +7404,8 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows, quick->quick_prefix_select= NULL; /* Can't construct a quick select. */ else /* Make a QUICK_RANGE_SELECT to be used for group prefix retrieval. */ - quick->quick_prefix_select= get_quick_select(param, param_idx, index_tree, + quick->quick_prefix_select= get_quick_select(param, param_idx, + index_tree, &quick->alloc); /* @@ -8446,7 +8449,10 @@ print_key(KEY_PART *key_part,const char *key,uint used_length) store_length--; } field->set_key_image((char*) key, key_part->length); - field->val_str(&tmp); + if (field->type() == MYSQL_TYPE_BIT) + (void) field->val_int_as_str(&tmp, 1); + else + field->val_str(&tmp); fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE); if (key+store_length < key_end) fputc('/',DBUG_FILE); diff --git a/sql/protocol.cc b/sql/protocol.cc index d2e63539610..4c916d78378 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -726,6 +726,7 @@ bool Protocol_simple::store(const char *from, uint length, #ifndef DEBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || + field_types[field_pos] == MYSQL_TYPE_BIT || (field_types[field_pos] >= MYSQL_TYPE_ENUM && field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); field_pos++; @@ -741,6 +742,7 @@ bool Protocol_simple::store(const char *from, uint length, #ifndef DEBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || + field_types[field_pos] == MYSQL_TYPE_BIT || (field_types[field_pos] >= MYSQL_TYPE_ENUM && field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); field_pos++; diff --git a/sql/set_var.cc b/sql/set_var.cc index 234ec6617c3..da6341597f1 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -715,7 +715,8 @@ struct show_var_st init_vars[]= { {"have_compress", (char*) &have_compress, SHOW_HAVE}, {"have_crypt", (char*) &have_crypt, SHOW_HAVE}, {"have_csv", (char*) &have_csv_db, SHOW_HAVE}, - {"have_example_engine", (char*) &have_example_db, SHOW_HAVE}, + {"have_example_engine", (char*) &have_example_db, SHOW_HAVE}, + {"have_federated_db", (char*) &have_federated_db, SHOW_HAVE}, {"have_geometry", (char*) &have_geometry, SHOW_HAVE}, {"have_innodb", (char*) &have_innodb, SHOW_HAVE}, {"have_isam", (char*) &have_isam, SHOW_HAVE}, @@ -735,6 +736,8 @@ 
struct show_var_st init_vars[]= { {"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONG }, {"innodb_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR}, {"innodb_data_home_dir", (char*) &innobase_data_home_dir, SHOW_CHAR_PTR}, + {"innodb_doublewrite", (char*) &innobase_use_doublewrite, SHOW_MY_BOOL}, + {"innodb_checksums", (char*) &innobase_use_checksums, SHOW_MY_BOOL}, {"innodb_fast_shutdown", (char*) &innobase_fast_shutdown, SHOW_MY_BOOL}, {"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG }, {"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL}, @@ -768,6 +771,8 @@ struct show_var_st init_vars[]= { SHOW_SYS}, {"language", language, SHOW_CHAR}, {"large_files_support", (char*) &opt_large_files, SHOW_BOOL}, + {"large_pages", (char*) &opt_large_pages, SHOW_MY_BOOL}, + {"large_page_size", (char*) &opt_large_page_size, SHOW_INT}, {sys_license.name, (char*) &sys_license, SHOW_SYS}, {sys_local_infile.name, (char*) &sys_local_infile, SHOW_SYS}, #ifdef HAVE_MLOCKALL diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am index b50ba2be8da..cfbbb36c489 100644 --- a/sql/share/Makefile.am +++ b/sql/share/Makefile.am @@ -1,5 +1,7 @@ ## Process this file with automake to create Makefile.in +EXTRA_DIST= errmsg.txt + dist-hook: for dir in charsets @AVAILABLE_LANGUAGES@; do \ test -d $(distdir)/$$dir || mkdir $(distdir)/$$dir; \ diff --git a/sql/sql_base.cc b/sql/sql_base.cc index b4a2f368bc2..2500769ee30 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2467,6 +2467,8 @@ find_item_in_list(Item *find, List &items, uint *counter, bool found_unaliased_non_uniq= 0; uint unaliased_counter; + LINT_INIT(unaliased_counter); // Dependent on found_unaliased + *unaliased= FALSE; if (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM) diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 7cea1c6fcee..1d4b911bb65 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -268,13 +268,8 @@ int mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) unit->cleanup(); } else - { - free_tmp_table(thd, table); unit->cleanup(); - } lex->current_select= save_current_select; - if (res) - free_tmp_table(thd, table); } return res; } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 12e4d912f15..2205ec504e9 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -567,8 +567,12 @@ int yylex(void *arg, void *yythd) state= MY_LEX_HEX_NUMBER; break; } - /* Fall through */ - case MY_LEX_IDENT_OR_BIN: // TODO: Add binary string handling + case MY_LEX_IDENT_OR_BIN: + if (yyPeek() == '\'') + { // Found b'bin-number' + state= MY_LEX_BIN_NUMBER; + break; + } case MY_LEX_IDENT: uchar *start; #if defined(USE_MB) && defined(USE_MB_IDENT) @@ -689,6 +693,20 @@ int yylex(void *arg, void *yythd) } yyUnget(); } + else if (c == 'b' && (lex->ptr - lex->tok_start) == 2 && + lex->tok_start[0] == '0' ) + { // b'bin-number' + while (my_isxdigit(cs,(c = yyGet()))) ; + if ((lex->ptr - lex->tok_start) >= 4 && !ident_map[c]) + { + yylval->lex_str= get_token(lex, yyLength()); + yylval->lex_str.str+= 2; // Skip 0x + yylval->lex_str.length-= 2; + lex->yytoklen-= 2; + return (BIN_NUM); + } + yyUnget(); + } // fall through case MY_LEX_IDENT_START: // We come here after '.' 
result_state= IDENT; @@ -801,6 +819,19 @@ int yylex(void *arg, void *yythd) lex->yytoklen-=3; return (HEX_NUM); + case MY_LEX_BIN_NUMBER: // Found b'bin-string' + yyGet(); // Skip ' + while ((c= yyGet()) == '0' || c == '1'); + length= (lex->ptr - lex->tok_start); // Length of bin-num + 3 + if (c != '\'') + return(ABORT_SYM); // Illegal hex constant + yyGet(); // get_token makes an unget + yylval->lex_str= get_token(lex, length); + yylval->lex_str.str+= 2; // Skip b' + yylval->lex_str.length-= 3; // Don't count b' and last ' + lex->yytoklen-= 3; + return (BIN_NUM); + case MY_LEX_CMP_OP: // Incomplete comparison operator if (state_map[yyPeek()] == MY_LEX_CMP_OP || state_map[yyPeek()] == MY_LEX_LONG_CMP_OP) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 3d4252a2b17..c076872b755 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1583,6 +1583,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd, packet, (uint) (pend-packet), thd->charset()); table_list.alias= table_list.real_name= conv_name.str; packet= pend+1; + + if (!my_strcasecmp(system_charset_info, table_list.db, + information_schema_name.str)) + { + ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, table_list.alias); + if (schema_table) + table_list.schema_table= schema_table; + } + /* command not cachable => no gap for data base name */ if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; @@ -4891,11 +4900,9 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, break; case MYSQL_TYPE_VARCHAR: /* - We can't use pack_length as this includes the field length Long VARCHAR's are automaticly converted to blobs in mysql_prepare_table if they don't have a default value */ - new_field->key_length= new_field->length; max_field_charlength= MAX_FIELD_VARCHARLENGTH; break; case MYSQL_TYPE_STRING: @@ -5063,6 +5070,19 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, case MYSQL_TYPE_VAR_STRING: DBUG_ASSERT(0); // Impossible break; + case MYSQL_TYPE_BIT: + { + if (!length) + new_field->length= 1; + if (new_field->length > MAX_BIT_FIELD_LENGTH) + { + my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), field_name, + MAX_BIT_FIELD_LENGTH); + DBUG_RETURN(1); + } + new_field->pack_length= (new_field->length + 7) / 8; + break; + } } if (!(new_field->flags & BLOB_FLAG) && @@ -5083,16 +5103,12 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, my_error(ER_WRONG_FIELD_SPEC, MYF(0), field_name); DBUG_RETURN(1); } - if (!new_field->pack_length) - new_field->pack_length= calc_pack_length(new_field->sql_type, - new_field->length); - if (!new_field->key_length) - new_field->key_length= new_field->pack_length; lex->create_list.push_back(new_field); lex->last_field=new_field; DBUG_RETURN(0); } + /* Store position for column in ALTER TABLE .. 
ADD column */ void store_position_for_column(const char *name) @@ -5258,7 +5274,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, information_schema_name.str)) { ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->real_name); - if (!schema_table) + if (!schema_table || + (schema_table->hidden && + lex->orig_sql_command == SQLCOM_END)) // not a 'show' command { my_error(ER_UNKNOWN_TABLE, MYF(0), ptr->real_name, information_schema_name.str); diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 6d59d465445..8afefe3cae8 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2041,10 +2041,7 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, thd->cleanup_after_query(); if (stmt->state == Item_arena::PREPARED) - { - thd->current_arena= thd; stmt->state= Item_arena::EXECUTED; - } DBUG_VOID_RETURN; } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index e9097370664..44412cdc43a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -235,9 +235,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result) res|= thd->net.report_error; if (unlikely(res)) { - /* - If we have real error reported erly then this will be ignored - */ + /* If we had a another error reported earlier then this will be ignored */ result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR)); result->abort(); } @@ -4873,7 +4871,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, &keyinfo->key_part[i], (char*) key_buff,maybe_null); /* - Remeber if we are going to use REF_OR_NULL + Remember if we are going to use REF_OR_NULL But only if field _really_ can be null i.e. we force JT_REF instead of JT_REF_OR_NULL in case if field can't be null */ @@ -7538,7 +7536,7 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, { Field *new_field; - if (convert_blob_length && org_field->flags & BLOB_FLAG) + if (convert_blob_length && (org_field->flags & BLOB_FLAG)) new_field= new Field_varstring(convert_blob_length, org_field->maybe_null(), org_field->field_name, table, @@ -7777,6 +7775,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, KEY_PART_INFO *key_part_info; Item **copy_func; MI_COLUMNDEF *recinfo; + uint total_uneven_bit_length= 0; DBUG_ENTER("create_tmp_table"); DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d", (int) distinct, (int) save_sum_fields, @@ -7805,7 +7804,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, else for (ORDER *tmp=group ; tmp ; tmp=tmp->next) { (*tmp->item)->marker=4; // Store null in key - if ((*tmp->item)->max_length >= MAX_CHAR_WIDTH) + if ((*tmp->item)->max_length >= CONVERT_IF_BIGGER_TO_BLOB) using_unique_constraint=1; } if (param->group_length >= MAX_BLOB_WIDTH) @@ -7966,6 +7965,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, reclength+=new_field->pack_length(); if (!(new_field->flags & NOT_NULL_FLAG)) null_count++; + if (new_field->type() == FIELD_TYPE_BIT) + total_uneven_bit_length+= new_field->field_length & 7; if (new_field->flags & BLOB_FLAG) { *blob_field++= new_field; @@ -8014,7 +8015,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, null_count++; } hidden_null_pack_length=(hidden_null_count+7)/8; - null_pack_length=hidden_null_count+(null_count+7)/8; + null_pack_length= hidden_null_count + + (null_count + total_uneven_bit_length + 7) / 8; reclength+=null_pack_length; if (!reclength) reclength=1; // Dummy select @@ -8147,37 +8149,40 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, 
key_part_info->null_bit=0; key_part_info->field= field; key_part_info->offset= field->offset(); - key_part_info->length= (uint16) field->pack_length(); + key_part_info->length= (uint16) field->key_length(); key_part_info->type= (uint8) field->key_type(); key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ? 0 : FIELDFLAG_BINARY; if (!using_unique_constraint) { group->buff=(char*) group_buff; - if (!(group->field=field->new_field(thd->mem_root,table))) + if (!(group->field= field->new_key_field(thd->mem_root,table, + (char*) group_buff + + test(maybe_null), + field->null_ptr, + field->null_bit))) goto err; /* purecov: inspected */ if (maybe_null) { /* - To be able to group on NULL, we reserve place in group_buff - for the NULL flag just before the column. + To be able to group on NULL, we reserved place in group_buff + for the NULL flag just before the column. (see above). The field data is after this flag. - The NULL flag is updated by 'end_update()' and 'end_write()' + The NULL flag is updated in 'end_update()' and 'end_write()' */ keyinfo->flags|= HA_NULL_ARE_EQUAL; // def. that NULL == NULL key_part_info->null_bit=field->null_bit; key_part_info->null_offset= (uint) (field->null_ptr - (uchar*) table->record[0]); - group->field->move_field((char*) ++group->buff); - group_buff++; + group->buff++; // Pointer to field data + group_buff++; // Skipp null flag } - else - group->field->move_field((char*) group_buff); /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */ key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; - group_buff+= key_part_info->length; + group_buff+= group->field->pack_length(); } keyinfo->key_length+= key_part_info->length; } @@ -8241,7 +8246,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, key_part_info->type= (uint8) (*reg_field)->key_type(); key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ? 0 : FIELDFLAG_BINARY; } } @@ -8291,8 +8297,8 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, MI_KEYDEF keydef; MI_UNIQUEDEF uniquedef; KEY *keyinfo=param->keyinfo; - DBUG_ENTER("create_myisam_tmp_table"); + if (table->keys) { // Get keys for ni_create bool using_unique_constraint=0; @@ -8340,19 +8346,18 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, { seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? - HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT); - seg->bit_start=seg->length - table->blob_ptr_size; + HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2); + seg->bit_start= field->pack_length() - table->blob_ptr_size; seg->flag= HA_BLOB_PART; seg->length=0; // Whole blob in unique constraint } else { - seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? 
- HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT); + seg->type= keyinfo->key_part[i].type; /* Tell handler if it can do suffic space compression */ if (field->real_type() == MYSQL_TYPE_STRING && keyinfo->key_part[i].length > 4) - seg->flag|=HA_SPACE_PACK; + seg->flag|= HA_SPACE_PACK; } if (!(field->flags & NOT_NULL_FLAG)) { @@ -8361,7 +8366,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, /* We are using a GROUP BY on something that contains NULL In this case we have to tell MyISAM that two NULL should - on INSERT be compared as equal + on INSERT be regarded at the same value */ if (!using_unique_constraint) keydef.flag|= HA_NULL_ARE_EQUAL; @@ -8645,21 +8650,19 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) } if (table) { - int tmp; + int tmp, new_errno= 0; if ((tmp=table->file->extra(HA_EXTRA_NO_CACHE))) { DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed")); - my_errno= tmp; - error= -1; + new_errno= tmp; } if ((tmp=table->file->ha_index_or_rnd_end())) { DBUG_PRINT("error",("ha_index_or_rnd_end() failed")); - my_errno= tmp; - error= -1; + new_errno= tmp; } - if (error == -1) - table->file->print_error(my_errno,MYF(0)); + if (new_errno) + table->file->print_error(new_errno,MYF(0)); } #ifndef DBUG_OFF if (error) @@ -9831,13 +9834,19 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); } - /* The null bits are already set */ + /* + Copy null bits from group key to table + We can't copy all data as the key may have different format + as the row data (for example as with VARCHAR keys) + */ KEY_PART_INFO *key_part; for (group=table->group,key_part=table->key_info[0].key_part; group ; group=group->next,key_part++) - memcpy(table->record[0]+key_part->offset, group->buff, key_part->length); - + { + if (key_part->null_bit) + memcpy(table->record[0]+key_part->offset, group->buff, 1); + } init_tmptable_sum_functions(join->sum_funcs); copy_funcs(join->tmp_table_param.items_to_copy); if ((error=table->file->write_row(table->record[0]))) @@ -11647,8 +11656,10 @@ calc_group_buffer(JOIN *join,ORDER *group) { if (field->type() == FIELD_TYPE_BLOB) key_length+=MAX_BLOB_WIDTH; // Can't be used as a key + else if (field->type() == MYSQL_TYPE_VARCHAR) + key_length+= field->field_length + HA_KEY_BLOB_LENGTH; else - key_length+=field->pack_length(); + key_length+= field->pack_length(); } else if ((*group->item)->result_type() == REAL_RESULT) key_length+=sizeof(double); diff --git a/sql/sql_select.h b/sql/sql_select.h index 5e42fc0ee30..0f26207b391 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -420,15 +420,15 @@ class store_key :public Sql_alloc :null_ptr(null),err(0) { if (field_arg->type() == FIELD_TYPE_BLOB) - to_field=new Field_varstring(ptr, length, (uchar*) null, 1, + { + /* Key segments are always packed with a 2 byte length prefix */ + to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1, Field::NONE, field_arg->field_name, field_arg->table, field_arg->charset()); - else - { - to_field=field_arg->new_field(thd->mem_root,field_arg->table); - if (to_field) - to_field->move_field(ptr, (uchar*) null, 1); } + else + to_field=field_arg->new_key_field(thd->mem_root, field_arg->table, + ptr, (uchar*) null, 1); } virtual ~store_key() {} /* Not actually needed */ virtual bool copy()=0; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 5a135faf52f..826bd2038f9 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1976,6 +1976,8 @@ int schema_tables_add(THD *thd, List *files, const char *wild) ST_SCHEMA_TABLE 
*tmp_schema_table= schema_tables; for ( ; tmp_schema_table->table_name; tmp_schema_table++) { + if (tmp_schema_table->hidden) + continue; if (wild) { if (lower_case_table_names) @@ -3389,7 +3391,7 @@ int make_schema_select(THD *thd, SELECT_LEX *sel, /* - Fill temporaty schema tables before SELECT + Fill temporary schema tables before SELECT SYNOPSIS get_schema_tables_result() @@ -3688,38 +3690,38 @@ ST_FIELD_INFO table_names_fields_info[]= ST_SCHEMA_TABLE schema_tables[]= { {"SCHEMATA", schema_fields_info, create_schema_table, - fill_schema_shemata, make_schemata_old_format, 0, 1, -1}, + fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0}, {"TABLES", tables_fields_info, create_schema_table, - get_all_tables, make_old_format, get_schema_tables_record, 1, 2}, + get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0}, {"COLUMNS", columns_fields_info, create_schema_table, - get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2}, + get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0}, {"CHARACTER_SETS", charsets_fields_info, create_schema_table, - fill_schema_charsets, make_character_sets_old_format, 0, -1, -1}, + fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0}, {"COLLATIONS", collation_fields_info, create_schema_table, - fill_schema_collation, make_old_format, 0, -1, -1}, + fill_schema_collation, make_old_format, 0, -1, -1, 0}, {"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info, - create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1}, + create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1, 0}, {"ROUTINES", proc_fields_info, create_schema_table, - fill_schema_proc, make_proc_old_format, 0, -1, -1}, + fill_schema_proc, make_proc_old_format, 0, -1, -1, 0}, {"STATISTICS", stat_fields_info, create_schema_table, - get_all_tables, make_old_format, get_schema_stat_record, 1, 2}, + get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0}, {"VIEWS", view_fields_info, create_schema_table, - get_all_tables, 0, get_schema_views_record, 1, 2}, + get_all_tables, 0, get_schema_views_record, 1, 2, 0}, {"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table, - fill_schema_user_privileges, 0, 0, -1, -1}, + fill_schema_user_privileges, 0, 0, -1, -1, 0}, {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table, - fill_schema_schema_privileges, 0, 0, -1, -1}, + fill_schema_schema_privileges, 0, 0, -1, -1, 0}, {"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table, - fill_schema_table_privileges, 0, 0, -1, -1}, + fill_schema_table_privileges, 0, 0, -1, -1, 0}, {"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table, - fill_schema_column_privileges, 0, 0, -1, -1}, + fill_schema_column_privileges, 0, 0, -1, -1, 0}, {"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table, - get_all_tables, 0, get_schema_constraints_record, 3, 4}, + get_all_tables, 0, get_schema_constraints_record, 3, 4, 0}, {"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table, - get_all_tables, 0, get_schema_key_column_usage_record, 4, 5}, + get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0}, {"TABLE_NAMES", table_names_fields_info, create_schema_table, - get_all_tables, make_table_names_old_format, 0, 1, 2}, - {0, 0, 0, 0, 0, 0, 0, 0} + get_all_tables, make_table_names_old_format, 0, 1, 2, 1}, + {0, 0, 0, 0, 0, 0, 0, 0, 0} }; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 56605d1c6e0..6629122a1fa 100644 
--- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -458,6 +458,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, int field_no,dup_no; int select_field_pos,auto_increment=0; List_iterator it(fields),it2(fields); + uint total_uneven_bit_length= 0; DBUG_ENTER("mysql_prepare_table"); select_field_pos=fields.elements - select_field_count; @@ -508,7 +509,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, String conv, *tmp; for (uint i= 0; (tmp= it++); i++) { - if (String::needs_conversion(tmp->length(), tmp->charset(), cs, &dummy)) + if (String::needs_conversion(tmp->length(), tmp->charset(), cs, + &dummy)) { uint cnv_errs; conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs); @@ -614,6 +616,9 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (!(sql_field->flags & NOT_NULL_FLAG)) null_fields++; + if (sql_field->sql_type == FIELD_TYPE_BIT) + total_uneven_bit_length+= sql_field->length & 7; + if (check_column_name(sql_field->field_name)) { my_error(ER_WRONG_COLUMN_NAME, MYF(0), sql_field->field_name); @@ -666,7 +671,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, /* If fixed row records, we need one bit to check for deleted rows */ if (!(db_options & HA_OPTION_PACK_RECORD)) null_fields++; - pos=(null_fields+7)/8; + pos= (null_fields + total_uneven_bit_length + 7) / 8; it.rewind(); while ((sql_field=it++)) @@ -762,6 +767,14 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, case FIELD_TYPE_NULL: sql_field->pack_flag=f_settype((uint) sql_field->sql_type); break; + case FIELD_TYPE_BIT: + if (!(file->table_flags() & HA_CAN_BIT_FIELD)) + { + my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "BIT FIELD"); + DBUG_RETURN(-1); + } + sql_field->pack_flag= FIELDFLAG_NUMBER; + break; case FIELD_TYPE_TIMESTAMP: /* We should replace old TIMESTAMP fields with their newer analogs */ if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD) @@ -3686,7 +3699,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, free_io_cache(from); delete [] copy; // This is never 0 - if (to->file->end_bulk_insert() && !error) + if (to->file->end_bulk_insert() && error <= 0) { to->file->print_error(my_errno,MYF(0)); error=1; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 3511777dd27..98966374f2a 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -208,6 +208,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token BACKUP_SYM %token BERKELEY_DB_SYM %token BINARY +%token BIN_NUM %token BIT_SYM %token BOOL_SYM %token BOOLEAN_SYM @@ -664,7 +665,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal NCHAR_STRING opt_component key_cache_name - sp_opt_label + sp_opt_label BIN_NUM %type opt_table_alias @@ -2750,8 +2751,10 @@ type: int_type opt_len field_options { $$=$1; } | real_type opt_precision field_options { $$=$1; } | FLOAT_SYM float_options field_options { $$=FIELD_TYPE_FLOAT; } - | BIT_SYM opt_len { Lex->length=(char*) "1"; - $$=FIELD_TYPE_TINY; } + | BIT_SYM { Lex->length= (char*) "1"; + $$=FIELD_TYPE_BIT; } + | BIT_SYM '(' NUM ')' { Lex->length= $3.str; + $$=FIELD_TYPE_BIT; } | BOOL_SYM { Lex->length=(char*) "1"; $$=FIELD_TYPE_TINY; } | BOOLEAN_SYM { Lex->length=(char*) "1"; @@ -6458,15 +6461,25 @@ text_string: { $$= new (YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); } | HEX_NUM { - Item *tmp = new 
Item_varbinary($1.str,$1.length); + Item *tmp= new Item_hex_string($1.str, $1.length); /* - it is OK only emulate fix_fieds, because we need only + it is OK only emulate fix_fields, because we need only value of constant */ $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : (String*) 0; } + | BIN_NUM + { + Item *tmp= new Item_bin_string($1.str, $1.length); + /* + it is OK only emulate fix_fields, because we need only + value of constant + */ + $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + } ; param_marker: @@ -6508,10 +6521,11 @@ literal: | NUM_literal { $$ = $1; } | NULL_SYM { $$ = new Item_null(); Lex->next_state=MY_LEX_OPERATOR_OR_IDENT;} - | HEX_NUM { $$ = new Item_varbinary($1.str,$1.length);} + | HEX_NUM { $$ = new Item_hex_string($1.str, $1.length);} + | BIN_NUM { $$= new Item_bin_string($1.str, $1.length); } | UNDERSCORE_CHARSET HEX_NUM { - Item *tmp= new Item_varbinary($2.str,$2.length); + Item *tmp= new Item_hex_string($2.str, $2.length); /* it is OK only emulate fix_fieds, because we need only value of constant @@ -6523,6 +6537,20 @@ literal: str ? str->length() : 0, Lex->charset); } + | UNDERSCORE_CHARSET BIN_NUM + { + Item *tmp= new Item_bin_string($2.str, $2.length); + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + String *str= tmp ? + tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + $$= new Item_string(str ? str->ptr() : "", + str ? str->length() : 0, + Lex->charset); + } | DATE_SYM text_literal { $$ = $2; } | TIME_SYM text_literal { $$ = $2; } | TIMESTAMP text_literal { $$ = $2; }; @@ -6857,6 +6885,7 @@ keyword: | CLIENT_SYM {} | CLOSE_SYM {} | COLLATION_SYM {} + | COLUMNS {} | COMMENT_SYM {} | COMMITTED_SYM {} | COMMIT_SYM {} @@ -6978,6 +7007,7 @@ keyword: | POLYGON {} | PREPARE_SYM {} | PREV_SYM {} + | PRIVILEGES {} | PROCESS {} | PROCESSLIST_SYM {} | QUARTER_SYM {} @@ -7029,6 +7059,7 @@ keyword: | SUBDATE_SYM {} | SUBJECT_SYM {} | SUPER_SYM {} + | TABLES {} | TABLESPACE {} | TEMPORARY {} | TEMPTABLE_SYM {} diff --git a/sql/structs.h b/sql/structs.h index 5d0c7bc4f1f..0b59c3abeb3 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -74,7 +74,7 @@ typedef struct st_key_part_info { /* Info about a key part */ uint16 store_length; uint16 key_type; uint16 fieldnr; /* Fieldnum in UNIREG */ - uint8 key_part_flag; /* 0 or HA_REVERSE_SORT */ + uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */ uint8 type; uint8 null_bit; /* Position to null_bit */ } KEY_PART_INFO ; diff --git a/sql/table.cc b/sql/table.cc index b54cc351dff..c18a2557337 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -81,7 +81,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, KEY *keyinfo; KEY_PART_INFO *key_part; uchar *null_pos; - uint null_bit, new_frm_ver, field_pack_length; + uint null_bit_pos, new_frm_ver, field_pack_length; SQL_CRYPT *crypted=0; MEM_ROOT **root_ptr, *old_root; DBUG_ENTER("openfrm"); @@ -409,15 +409,15 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (null_field_first) { outparam->null_flags=null_pos=(uchar*) record+1; - null_bit= (db_create_options & HA_OPTION_PACK_RECORD) ? 1 : 2; - outparam->null_bytes=(outparam->null_fields+null_bit+6)/8; + null_bit_pos= (db_create_options & HA_OPTION_PACK_RECORD) ? 
0 : 1; + outparam->null_bytes= (outparam->null_fields + null_bit_pos + 7) / 8; } else { outparam->null_bytes=(outparam->null_fields+7)/8; outparam->null_flags=null_pos= (uchar*) (record+1+outparam->reclength-outparam->null_bytes); - null_bit=1; + null_bit_pos= 0; } use_hash= outparam->fields >= MAX_FIELDS_BEFORE_HASH; @@ -512,7 +512,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, *field_ptr=reg_field= make_field(record+recpos, (uint32) field_length, - null_pos,null_bit, + null_pos, null_bit_pos, pack_flag, field_type, charset, @@ -529,13 +529,18 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, goto err_not_open; /* purecov: inspected */ } reg_field->comment=comment; + if (field_type == FIELD_TYPE_BIT) + { + if ((null_bit_pos+= field_length & 7) > 7) + { + null_pos++; + null_bit_pos-= 8; + } + } if (!(reg_field->flags & NOT_NULL_FLAG)) { - if ((null_bit<<=1) == 256) - { - null_pos++; - null_bit=1; - } + if (!(null_bit_pos= (null_bit_pos + 1) & 7)) + null_pos++; } if (f_no_default(pack_flag)) reg_field->flags|= NO_DEFAULT_VALUE_FLAG; @@ -626,6 +631,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (!(field->flags & BINARY_FLAG)) keyinfo->flags|= HA_END_SPACE_KEY; } + if (field->type() == MYSQL_TYPE_BIT) + key_part->key_part_flag|= HA_BIT_PART; + if (i == 0 && key != primary_key) field->flags |= ((keyinfo->flags & HA_NOSAME) && diff --git a/sql/table.h b/sql/table.h index f5f2a76c6f1..a804376ee3c 100644 --- a/sql/table.h +++ b/sql/table.h @@ -254,6 +254,7 @@ typedef struct st_schema_table TABLE *table, bool res, const char *base_name, const char *file_name); int idx_field1, idx_field2; + bool hidden; } ST_SCHEMA_TABLE; diff --git a/sql/unireg.cc b/sql/unireg.cc index 636156940a4..dbd3da58a33 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -652,7 +652,7 @@ static bool make_empty_rec(File file,enum db_type table_type, Field *regfield=make_field((char*) buff+field->offset,field->length, field->flags & NOT_NULL_FLAG ? 0: null_pos+null_count/8, - 1 << (null_count & 7), + null_count & 7, field->pack_flag, field->sql_type, field->charset, diff --git a/sql/unireg.h b/sql/unireg.h index 31b28da2423..932bdf4dfc5 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -66,6 +66,8 @@ /* Max column width +1 */ #define MAX_FIELD_WIDTH (MAX_FIELD_CHARLENGTH*MAX_MBWIDTH+1) +#define MAX_BIT_FIELD_LENGTH 64 /* Max length in bits for bit fields */ + #define MAX_DATE_WIDTH 10 /* YYYY-MM-DD */ #define MAX_TIME_WIDTH 23 /* -DDDDDD HH:MM:SS.###### */ #define MAX_DATETIME_FULL_WIDTH 29 /* YYYY-MM-DD HH:MM:SS.###### AM */ diff --git a/strings/my_strtoll10.c b/strings/my_strtoll10.c index 5217564087c..cca7c8ab396 100644 --- a/strings/my_strtoll10.c +++ b/strings/my_strtoll10.c @@ -46,7 +46,7 @@ static unsigned long lfactor[9]= from string nptr and converts it to an signed or unsigned long long integer value. Space characters and tab are ignored. - A sign character might precede the the digit characters. The number + A sign character might precede the digit characters. The number may have any number of pre-zero digits. 
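Taken together, the BIT-related hunks above change how the record prefix is sized: the grammar now yields FIELD_TYPE_BIT, mysql_prepare_table() refuses it for engines without HA_CAN_BIT_FIELD and accumulates each column's leftover bits (length & 7) into total_uneven_bit_length, and openfrm() tracks a plain 0..7 bit position (null_bit_pos) instead of a shifted mask, advancing it by field_length & 7 for BIT columns so those spare bits share the null-flag bytes. A rough sketch of the resulting byte arithmetic, with illustrative names:

/* Rough sketch of the record-prefix byte accounting implied by the hunks
   above; names are illustrative, not the server's. */
#include <stdio.h>

static unsigned prefix_bytes(unsigned null_fields,
                             const unsigned *bit_lengths, unsigned n_bit_cols,
                             int pack_record)
{
  unsigned total_uneven_bit_length= 0;
  unsigned i;
  for (i= 0; i < n_bit_cols; i++)
    total_uneven_bit_length+= bit_lengths[i] & 7; /* bits not filling a byte */
  if (!pack_record)
    null_fields++;                  /* fixed-length rows: one deleted-row bit */
  return (null_fields + total_uneven_bit_length + 7) / 8;
}

int main(void)
{
  /* Two nullable columns plus BIT(3) and BIT(9):
     2 null bits + 3 + 1 uneven bits + 1 deleted-row bit = 7 bits -> 1 byte. */
  unsigned bit_cols[]= { 3, 9 };
  printf("%u\n", prefix_bytes(2, bit_cols, 2, 0));
  return 0;
}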
The function stops reading the string nptr at the first character diff --git a/tests/client_test.c b/tests/client_test.c index b00be2d35b8..25dcb951d3a 100644 --- a/tests/client_test.c +++ b/tests/client_test.c @@ -11936,7 +11936,7 @@ static void test_bug4172() MYSQL_ROW row; int rc; char f[100], d[100], e[100]; - long f_len, d_len, e_len; + ulong f_len, d_len, e_len; myheader("test_bug4172"); @@ -12027,8 +12027,8 @@ static void test_conversion() mysql_stmt_bind_param(stmt, bind); - buff[0]= 0xC3; - buff[1]= 0xA0; + buff[0]= (uchar) 0xC3; + buff[1]= (uchar) 0xA0; length= 2; rc= mysql_stmt_execute(stmt); @@ -12240,7 +12240,7 @@ static void test_truncation() /* int -> float: truncation, not enough bits in float */ DIE_UNLESS(++bind < bind_array + bind_count); - /* do nothing: due to a gcc bug result here is not predictable */ + DIE_UNLESS(*bind->error); /* int -> double: no truncation */ DIE_UNLESS(++bind < bind_array + bind_count); @@ -12302,6 +12302,56 @@ static void test_truncation() myquery(rc); } +static void test_truncation_option() +{ + MYSQL_STMT *stmt; + const char *stmt_text; + int rc; + uint8 buf; + my_bool option= 0; + my_bool error; + MYSQL_BIND bind; + + myheader("test_truncation_option"); + + /* Prepare the test table */ + stmt_text= "select -1"; + + stmt= mysql_stmt_init(mysql); + rc= mysql_stmt_prepare(stmt, stmt_text, strlen(stmt_text)); + check_execute(stmt, rc); + rc= mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + bzero(&bind, sizeof(MYSQL_BIND)); + + bind.buffer= (void*) &buf; + bind.buffer_type= MYSQL_TYPE_TINY; + bind.is_unsigned= TRUE; + bind.error= &error; + + rc= mysql_stmt_bind_result(stmt, &bind); + check_execute(stmt, rc); + rc= mysql_stmt_fetch(stmt); + DIE_UNLESS(rc == MYSQL_DATA_TRUNCATED); + DIE_UNLESS(error); + rc= mysql_options(mysql, MYSQL_REPORT_DATA_TRUNCATION, (char*) &option); + myquery(rc); + /* need to rebind for the new setting to take effect */ + rc= mysql_stmt_bind_result(stmt, &bind); + check_execute(stmt, rc); + rc= mysql_stmt_execute(stmt); + check_execute(stmt, rc); + rc= mysql_stmt_fetch(stmt); + check_execute(stmt, rc); + /* The only change is rc - error pointers are still filled in */ + DIE_UNLESS(error == 1); + /* restore back the defaults */ + option= 1; + mysql_options(mysql, MYSQL_REPORT_DATA_TRUNCATION, (char*) &option); + + mysql_stmt_close(stmt); +} /* Read and parse arguments and MySQL options from my.cnf @@ -12517,6 +12567,7 @@ static struct my_tests_st my_tests[]= { { "test_basic_cursors", test_basic_cursors }, { "test_cursors_with_union", test_cursors_with_union }, { "test_truncation", test_truncation }, + { "test_truncation_option", test_truncation_option }, { 0, 0 } }; diff --git a/vio/vio.c b/vio/vio.c index a356d8edeff..39b5f843e5e 100644 --- a/vio/vio.c +++ b/vio/vio.c @@ -32,7 +32,8 @@ void vio_reset(Vio* vio, enum enum_vio_type type, my_bool localhost) { DBUG_ENTER("vio_reset"); - DBUG_PRINT("enter", ("type=%d sd=%d localhost=%d", type, sd, localhost)); + DBUG_PRINT("enter", ("type: %d sd: %d localhost: %d", type, sd, + localhost)); bzero((char*) vio, sizeof(*vio)); vio->type = type; @@ -123,7 +124,7 @@ Vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost) { Vio *vio; DBUG_ENTER("vio_new"); - DBUG_PRINT("enter", ("sd=%d", sd)); + DBUG_PRINT("enter", ("sd: %d", sd)); if ((vio = (Vio*) my_malloc(sizeof(*vio),MYF(MY_WME)))) { vio_reset(vio, type, sd, 0, localhost); diff --git a/vio/viosocket.c b/vio/viosocket.c index 48a9058480a..2921eb7495e 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ 
-33,7 +33,7 @@ int vio_read(Vio * vio, gptr buf, int size) { int r; DBUG_ENTER("vio_read"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); #ifdef __WIN__ r = recv(vio->sd, buf, size,0); @@ -56,7 +56,7 @@ int vio_write(Vio * vio, const gptr buf, int size) { int r; DBUG_ENTER("vio_write"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); #ifdef __WIN__ r = send(vio->sd, buf, size,0); #else @@ -168,7 +168,7 @@ int vio_keepalive(Vio* vio, my_bool set_keep_alive) int r=0; uint opt = 0; DBUG_ENTER("vio_keepalive"); - DBUG_PRINT("enter", ("sd=%d, set_keep_alive=%d", vio->sd, (int) + DBUG_PRINT("enter", ("sd: %d, set_keep_alive: %d", vio->sd, (int) set_keep_alive)); if (vio->type != VIO_TYPE_NAMEDPIPE) { @@ -315,7 +315,7 @@ int vio_read_pipe(Vio * vio, gptr buf, int size) { DWORD length; DBUG_ENTER("vio_read_pipe"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); if (!ReadFile(vio->hPipe, buf, size, &length, NULL)) DBUG_RETURN(-1); @@ -329,7 +329,7 @@ int vio_write_pipe(Vio * vio, const gptr buf, int size) { DWORD length; DBUG_ENTER("vio_write_pipe"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); if (!WriteFile(vio->hPipe, (char*) buf, size, &length, NULL)) DBUG_RETURN(-1); @@ -373,7 +373,7 @@ int vio_read_shared_memory(Vio * vio, gptr buf, int size) char *current_postion; DBUG_ENTER("vio_read_shared_memory"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); remain_local = size; current_postion=buf; @@ -423,7 +423,7 @@ int vio_write_shared_memory(Vio * vio, const gptr buf, int size) char *current_postion; DBUG_ENTER("vio_write_shared_memory"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); remain = size; current_postion = buf; diff --git a/vio/viossl.c b/vio/viossl.c index a489cb98f98..912365adca0 100644 --- a/vio/viossl.c +++ b/vio/viossl.c @@ -99,7 +99,7 @@ int vio_ssl_read(Vio * vio, gptr buf, int size) { int r; DBUG_ENTER("vio_ssl_read"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d, ssl_=%p", + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d, ssl_: 0x%p", vio->sd, buf, size, vio->ssl_arg)); if ((r= SSL_read((SSL*) vio->ssl_arg, buf, size)) < 0) @@ -117,7 +117,7 @@ int vio_ssl_write(Vio * vio, const gptr buf, int size) { int r; DBUG_ENTER("vio_ssl_write"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); if ((r= SSL_write((SSL*) vio->ssl_arg, buf, size)) < 0) report_errors(); @@ -157,7 +157,7 @@ int vio_ssl_keepalive(Vio* vio, my_bool set_keep_alive) { int r=0; DBUG_ENTER("vio_ssl_keepalive"); - DBUG_PRINT("enter", ("sd=%d, set_keep_alive=%d", vio->sd, (int) + DBUG_PRINT("enter", ("sd: %d, set_keep_alive: %d", vio->sd, (int) set_keep_alive)); if (vio->type != VIO_TYPE_NAMEDPIPE) { @@ -227,7 +227,7 @@ my_socket vio_ssl_fd(Vio* vio) my_bool vio_ssl_peer_addr(Vio * vio, char *buf, uint16 *port) { DBUG_ENTER("vio_ssl_peer_addr"); - DBUG_PRINT("enter", ("sd=%d", vio->sd)); + DBUG_PRINT("enter", ("sd: %d", 
vio->sd)); if (vio->localhost) { strmov(buf,"127.0.0.1"); @@ -250,7 +250,7 @@ my_bool vio_ssl_peer_addr(Vio * vio, char *buf, uint16 *port) *port= 0; #endif } - DBUG_PRINT("exit", ("addr=%s", buf)); + DBUG_PRINT("exit", ("addr: %s", buf)); DBUG_RETURN(0); } @@ -279,7 +279,7 @@ int sslaccept(struct st_VioSSLAcceptorFd* ptr, Vio* vio, long timeout) my_bool net_blocking; enum enum_vio_type old_type; DBUG_ENTER("sslaccept"); - DBUG_PRINT("enter", ("sd=%d ptr=%p", vio->sd,ptr)); + DBUG_PRINT("enter", ("sd: %d ptr: Ox%p", vio->sd,ptr)); old_type= vio->type; net_blocking = vio_is_blocking(vio); @@ -294,7 +294,8 @@ int sslaccept(struct st_VioSSLAcceptorFd* ptr, Vio* vio, long timeout) vio_blocking(vio, net_blocking, &unused); DBUG_RETURN(1); } - DBUG_PRINT("info", ("ssl_=%p timeout=%ld",(SSL*) vio->ssl_arg, timeout)); + DBUG_PRINT("info", ("ssl_: Ox%p timeout: %ld", + (SSL*) vio->ssl_arg, timeout)); SSL_clear((SSL*) vio->ssl_arg); SSL_SESSION_set_timeout(SSL_get_session((SSL*) vio->ssl_arg), timeout); SSL_set_fd((SSL*) vio->ssl_arg,vio->sd); @@ -352,7 +353,8 @@ int sslconnect(struct st_VioSSLConnectorFd* ptr, Vio* vio, long timeout) my_bool net_blocking; enum enum_vio_type old_type; DBUG_ENTER("sslconnect"); - DBUG_PRINT("enter", ("sd=%d ptr=%p ctx: %p", vio->sd,ptr,ptr->ssl_context)); + DBUG_PRINT("enter", ("sd: %d ptr: 0x%p ctx: 0x%p", + vio->sd,ptr,ptr->ssl_context)); old_type= vio->type; net_blocking = vio_is_blocking(vio); @@ -367,7 +369,8 @@ int sslconnect(struct st_VioSSLConnectorFd* ptr, Vio* vio, long timeout) vio_blocking(vio, net_blocking, &unused); DBUG_RETURN(1); } - DBUG_PRINT("info", ("ssl_=%p timeout=%ld",(SSL*) vio->ssl_arg, timeout)); + DBUG_PRINT("info", ("ssl_: 0x%p timeout: %ld", + (SSL*) vio->ssl_arg, timeout)); SSL_clear((SSL*) vio->ssl_arg); SSL_SESSION_set_timeout(SSL_get_session((SSL*) vio->ssl_arg), timeout); SSL_set_fd ((SSL*) vio->ssl_arg, vio->sd); diff --git a/vio/viosslfactories.c b/vio/viosslfactories.c index 498d10da0ee..44a077c33fc 100644 --- a/vio/viosslfactories.c +++ b/vio/viosslfactories.c @@ -80,7 +80,7 @@ static int vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file) { DBUG_ENTER("vio_set_cert_stuff"); - DBUG_PRINT("enter", ("ctx=%p, cert_file=%s, key_file=%s", + DBUG_PRINT("enter", ("ctx: %p, cert_file: %s, key_file: %s", ctx, cert_file, key_file)); if (cert_file != NULL) { @@ -131,7 +131,7 @@ vio_verify_callback(int ok, X509_STORE_CTX *ctx) int err,depth; DBUG_ENTER("vio_verify_callback"); - DBUG_PRINT("enter", ("ok=%d, ctx=%p", ok, ctx)); + DBUG_PRINT("enter", ("ok: %d, ctx: 0x%p", ok, ctx)); err_cert=X509_STORE_CTX_get_current_cert(ctx); err= X509_STORE_CTX_get_error(ctx); depth= X509_STORE_CTX_get_error_depth(ctx); @@ -220,7 +220,7 @@ new_VioSSLConnectorFd(const char* key_file, DH *dh; DBUG_ENTER("new_VioSSLConnectorFd"); DBUG_PRINT("enter", - ("key_file=%s, cert_file=%s, ca_path=%s, ca_file=%s, cipher=%s", + ("key_file: %s, cert_file: %s, ca_path: %s, ca_file: %s, cipher: %s", key_file, cert_file, ca_path, ca_file, cipher)); if (!(ptr=((struct st_VioSSLConnectorFd*) @@ -315,7 +315,7 @@ new_VioSSLAcceptorFd(const char *key_file, DH *dh; DBUG_ENTER("new_VioSSLAcceptorFd"); DBUG_PRINT("enter", - ("key_file=%s, cert_file=%s, ca_path=%s, ca_file=%s, cipher=%s", + ("key_file: %s, cert_file: %s, ca_path: %s, ca_file: %s, cipher: %s", key_file, cert_file, ca_path, ca_file, cipher)); ptr= ((struct st_VioSSLAcceptorFd*)
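Looping back to the tests/client_test.c addition earlier in this patch: test_truncation_option drives the MYSQL_REPORT_DATA_TRUNCATION client option together with the per-bind error pointer. A condensed sketch of the same call sequence, assuming an already-connected handle named conn (the handle name and function name are illustrative) and omitting error handling:

/* Condensed sketch of the truncation-report behaviour the new test checks.
   "conn" and "truncation_demo" are illustrative; error handling is omitted. */
#include <string.h>
#include <mysql.h>

static void truncation_demo(MYSQL *conn)
{
  MYSQL_STMT *stmt= mysql_stmt_init(conn);
  MYSQL_BIND bind;
  my_bool truncated;
  my_bool report= 0;                /* 0 = do not report truncation */
  unsigned char tiny_buf;
  int rc;

  mysql_stmt_prepare(stmt, "select -1", (unsigned long) strlen("select -1"));
  mysql_stmt_execute(stmt);

  memset(&bind, 0, sizeof(bind));
  bind.buffer_type= MYSQL_TYPE_TINY;
  bind.buffer= (void*) &tiny_buf;
  bind.is_unsigned= 1;
  bind.error= &truncated;           /* filled in regardless of the option */
  mysql_stmt_bind_result(stmt, &bind);

  /* Default behaviour: -1 does not fit an unsigned TINY, so the fetch
     returns MYSQL_DATA_TRUNCATED and sets *bind.error. */
  rc= mysql_stmt_fetch(stmt);       /* rc == MYSQL_DATA_TRUNCATED */

  /* Switch reporting off; the result set has to be re-bound and the
     statement re-executed before the new setting takes effect. */
  mysql_options(conn, MYSQL_REPORT_DATA_TRUNCATION, (char*) &report);
  mysql_stmt_bind_result(stmt, &bind);
  mysql_stmt_execute(stmt);
  rc= mysql_stmt_fetch(stmt);       /* rc == 0, but *bind.error is still set */

  mysql_stmt_close(stmt);
}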