mirror of
https://github.com/MariaDB/server.git
synced 2026-05-15 19:37:16 +02:00
Merge mysql.com:/home/gluh/MySQL/Merge/5.0
into mysql.com:/home/gluh/MySQL/Merge/5.0-opt mysql-test/t/subselect.test: Auto merged mysql-test/t/variables.test: Auto merged sql/item_func.cc: Auto merged sql/item_timefunc.cc: Auto merged mysql-test/r/select.result: manual merge mysql-test/t/select.test: manual merge
This commit is contained in:
commit
bed60e0175
62 changed files with 2200 additions and 183 deletions
26
BitKeeper/triggers/pre-commit.check-case.pl
Executable file
26
BitKeeper/triggers/pre-commit.check-case.pl
Executable file
|
|
@ -0,0 +1,26 @@
|
|||
#!/usr/bin/perl
# BitKeeper pre-commit trigger: reject commits that would leave two files in
# one directory whose names differ only in letter case (they would collide on
# case-insensitive file systems such as Windows or macOS defaults).
#
# Input:  $ENV{BK_PENDING} names a file listing the pending deltas, one
#         "<file>|<rest>" entry per line.
# Output: one diagnostic line per offending directory/file.
# Exit:   0 = OK (or nothing to check), 1 = case-clashing names found.

use strict;
use warnings;

my $status = 0;

# Nothing to check unless BitKeeper handed us a readable pending-delta list.
# (Guard with 'defined' so an unset BK_PENDING does not warn under -w.)
my $pending = $ENV{'BK_PENDING'};
exit 0 unless defined $pending && -f $pending;

# NOTE: the original wrote "open FI, '<', $pending || exit 0": high-precedence
# '||' bound to $pending, so a failed open was never detected.  Use the
# low-precedence 'or' (and a lexical handle) so the failure path really runs.
open my $fi, '<', $pending or exit 0;

while (my $line = <$fi>) {
    # Each pending entry is "<pathname>|<delta info>"; keep only the path.
    my ($file, $stuff) = split /\|/, $line, 2;
    next unless defined $file && -f $file;

    # Split into directory and basename.  The original reused $1 after a
    # substitution that might not match, leaving $path stale for entries
    # without a slash; default those to the current directory instead.
    my $path = '.';
    if ($file =~ s{^(.*)/([^/]*)$}{$2}) {
        $path = $1;
    }

    # Fold every name in the directory to lower case and count collisions;
    # any lowercased name seen more than once is a case-only clash.
    opendir my $dir, $path or next;
    my @names = map { lc } readdir $dir;
    closedir $dir;

    my %count;
    $count{$_}++ for @names;
    my @dups = sort grep { $count{$_} > 1 } keys %count;

    if (@dups) {
        print "$path/$file: duplicate file names: " . (join " ", @dups) . "\n";
        $status = 1;
    }
}
close $fi;

exit $status;
|
||||
|
|
@ -109,6 +109,15 @@ IF(CMAKE_GENERATOR MATCHES "Visual Studio 7" OR
|
|||
STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS_INIT ${CMAKE_CXX_FLAGS_INIT})
|
||||
STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS_DEBUG_INIT ${CMAKE_CXX_FLAGS_DEBUG_INIT})
|
||||
|
||||
# Disable automatic manifest generation.
|
||||
STRING(REPLACE "/MANIFEST" "/MANIFEST:NO" CMAKE_EXE_LINKER_FLAGS
|
||||
${CMAKE_EXE_LINKER_FLAGS})
|
||||
# Explicitly disable it since it is the default for newer versions of VS
|
||||
STRING(REGEX MATCH "MANIFEST:NO" tmp_manifest ${CMAKE_EXE_LINKER_FLAGS})
|
||||
IF(NOT tmp_manifest)
|
||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
||||
ENDIF(NOT tmp_manifest)
|
||||
|
||||
ENDIF(CMAKE_GENERATOR MATCHES "Visual Studio 7" OR
|
||||
CMAKE_GENERATOR MATCHES "Visual Studio 8")
|
||||
|
||||
|
|
@ -156,14 +165,6 @@ IF(EMBED_MANIFESTS)
|
|||
MESSAGE(FATAL_ERROR "Sign tool, signtool.exe, can't be found.")
|
||||
ENDIF(HAVE_SIGN_TOOL)
|
||||
|
||||
# Disable automatic manifest generation.
|
||||
STRING(REPLACE "/MANIFEST" "/MANIFEST:NO" CMAKE_EXE_LINKER_FLAGS
|
||||
${CMAKE_EXE_LINKER_FLAGS})
|
||||
# Explicitly disable it since it is the default for newer versions of VS
|
||||
STRING(REGEX MATCH "MANIFEST:NO" tmp_manifest ${CMAKE_EXE_LINKER_FLAGS})
|
||||
IF(NOT tmp_manifest)
|
||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
||||
ENDIF(NOT tmp_manifest)
|
||||
# Set the processor architecture.
|
||||
IF(CMAKE_GENERATOR MATCHES "Visual Studio 8 2005 Win64")
|
||||
SET(PROCESSOR_ARCH "amd64")
|
||||
|
|
|
|||
|
|
@ -5,8 +5,7 @@
|
|||
ASRC = $(srcdir)/vi.c $(srcdir)/emacs.c $(srcdir)/common.c
|
||||
AHDR = vi.h emacs.h common.h
|
||||
|
||||
# Make sure to include stuff from this directory first, to get right "config.h"
|
||||
INCLUDES = -I. -I$(top_builddir)/include -I$(top_srcdir)/include
|
||||
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include
|
||||
|
||||
noinst_LIBRARIES = libedit.a
|
||||
|
||||
|
|
@ -31,7 +30,14 @@ EXTRA_DIST = makelist.sh np/unvis.c np/strlcpy.c np/vis.c np/vis.h np/strlcat.c
|
|||
|
||||
CLEANFILES = makelist common.h emacs.h vi.h fcns.h help.h fcns.c help.c
|
||||
|
||||
DEFS = -DUNDEF_THREADS_HACK -DHAVE_CONFIG_H -DNO_KILL_INTR
|
||||
# Make sure to include stuff from this directory first, to get right "config.h"
|
||||
# Automake puts into DEFAULT_INCLUDES this source and corresponding
|
||||
# build directory together with ../../include to let all make files
|
||||
# find the central "config.h". This variable is used before INCLUDES
|
||||
# above. But in automake 1.10 the order of these are changed. Put the
|
||||
# includes of this directory into DEFS to always be sure it is first
|
||||
# before DEFAULT_INCLUDES on the compile line.
|
||||
DEFS = -DUNDEF_THREADS_HACK -DHAVE_CONFIG_H -DNO_KILL_INTR -I. -I$(srcdir)
|
||||
|
||||
SUFFIXES = .sh
|
||||
|
||||
|
|
|
|||
|
|
@ -124,6 +124,8 @@ case $MACHINE_TYPE in
|
|||
esac
|
||||
|
||||
# Save some variables and the command line options for mysqlbug
|
||||
SAVE_CC="$CC"
|
||||
SAVE_CXX="$CXX"
|
||||
SAVE_ASFLAGS="$ASFLAGS"
|
||||
SAVE_CFLAGS="$CFLAGS"
|
||||
SAVE_CXXFLAGS="$CXXFLAGS"
|
||||
|
|
@ -131,6 +133,8 @@ SAVE_LDFLAGS="$LDFLAGS"
|
|||
SAVE_CXXLDFLAGS="$CXXLDFLAGS"
|
||||
CONF_COMMAND="$0 $ac_configure_args"
|
||||
AC_SUBST(CONF_COMMAND)
|
||||
AC_SUBST(SAVE_CC)
|
||||
AC_SUBST(SAVE_CXX)
|
||||
AC_SUBST(SAVE_ASFLAGS)
|
||||
AC_SUBST(SAVE_CFLAGS)
|
||||
AC_SUBST(SAVE_CXXFLAGS)
|
||||
|
|
@ -373,6 +377,7 @@ AC_SUBST(CC)
|
|||
AC_SUBST(CFLAGS)
|
||||
AC_SUBST(CXX)
|
||||
AC_SUBST(CXXFLAGS)
|
||||
AC_SUBST(ASFLAGS)
|
||||
AC_SUBST(LD)
|
||||
AC_SUBST(INSTALL_SCRIPT)
|
||||
|
||||
|
|
@ -631,7 +636,7 @@ AC_SUBST(NOINST_LDFLAGS)
|
|||
|
||||
if test "$TARGET_LINUX" = "true" -a "$static_nss" = ""
|
||||
then
|
||||
tmp=`nm /usr/lib/libc.a | grep _nss_files_getaliasent_r`
|
||||
tmp=`nm /usr/lib*/libc.a | grep _nss_files_getaliasent_r`
|
||||
if test -n "$tmp"
|
||||
then
|
||||
STATIC_NSS_FLAGS="-lc -lnss_files -lnss_dns -lresolv"
|
||||
|
|
|
|||
|
|
@ -879,6 +879,8 @@ extern CHARSET_INFO *get_charset(uint cs_number, myf flags);
|
|||
extern CHARSET_INFO *get_charset_by_name(const char *cs_name, myf flags);
|
||||
extern CHARSET_INFO *get_charset_by_csname(const char *cs_name,
|
||||
uint cs_flags, myf my_flags);
|
||||
extern CHARSET_INFO *get_compatible_charset_with_ctype(CHARSET_INFO
|
||||
*original_cs);
|
||||
extern void free_charsets(void);
|
||||
extern char *get_charsets_dir(char *buf);
|
||||
extern my_bool my_charset_same(CHARSET_INFO *cs1, CHARSET_INFO *cs2);
|
||||
|
|
|
|||
|
|
@ -706,7 +706,8 @@ int cli_read_change_user_result(MYSQL *mysql, char *buff, const char *passwd)
|
|||
my_bool STDCALL mysql_change_user(MYSQL *mysql, const char *user,
|
||||
const char *passwd, const char *db)
|
||||
{
|
||||
char buff[512],*end=buff;
|
||||
char buff[USERNAME_LENGTH+SCRAMBLED_PASSWORD_CHAR_LENGTH+NAME_LEN+2];
|
||||
char *end= buff;
|
||||
int rc;
|
||||
DBUG_ENTER("mysql_change_user");
|
||||
|
||||
|
|
@ -716,7 +717,7 @@ my_bool STDCALL mysql_change_user(MYSQL *mysql, const char *user,
|
|||
passwd="";
|
||||
|
||||
/* Store user into the buffer */
|
||||
end=strmov(end,user)+1;
|
||||
end= strmake(end, user, USERNAME_LENGTH) + 1;
|
||||
|
||||
/* write scrambled password according to server capabilities */
|
||||
if (passwd[0])
|
||||
|
|
@ -736,7 +737,7 @@ my_bool STDCALL mysql_change_user(MYSQL *mysql, const char *user,
|
|||
else
|
||||
*end++= '\0'; /* empty password */
|
||||
/* Add database if needed */
|
||||
end= strmov(end, db ? db : "") + 1;
|
||||
end= strmake(end, db ? db : "", NAME_LEN) + 1;
|
||||
|
||||
/* Write authentication package */
|
||||
simple_command(mysql,COM_CHANGE_USER, buff,(ulong) (end-buff),1);
|
||||
|
|
|
|||
|
|
@ -188,7 +188,7 @@ byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end,
|
|||
|
||||
do
|
||||
{
|
||||
for (;; doc+= mbl)
|
||||
for (;; doc+= (mbl ? mbl : 1))
|
||||
{
|
||||
if (doc >= end) DBUG_RETURN(0);
|
||||
if (true_word_char(cs, *doc)) break;
|
||||
|
|
|
|||
|
|
@ -1375,6 +1375,139 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
|
|||
} /* chk_data_link */
|
||||
|
||||
|
||||
/**
  @brief Drop all indexes

  @param[in]    param           check parameters
  @param[in]    info            MI_INFO handle
  @param[in]    force           if to force drop all indexes

  @return       status
    @retval     0               OK
    @retval     != 0            Error

  @note
    Once allocated, index blocks remain part of the key file forever.
    When indexes are disabled, no block is freed. When enabling indexes,
    no block is freed either. The new indexes are created from new
    blocks. (Bug #4692)

    Before recreating formerly disabled indexes, the unused blocks
    must be freed. There are two options to do this:
    - Follow the tree of disabled indexes, add all blocks to the
      deleted blocks chain. Would require a lot of random I/O.
    - Drop all blocks by clearing all index root pointers and all
      delete chain pointers and resetting key_file_length to the end
      of the index file header. This requires to recreate all indexes,
      even those that may still be intact.
    The second method is probably faster in most cases.

    When disabling indexes, MySQL disables either all indexes or all
    non-unique indexes. When MySQL [re-]enables disabled indexes
    (T_CREATE_MISSING_KEYS), then we either have "lost" blocks in the
    index file, or there are no non-unique indexes. In the latter case,
    mi_repair*() would not be called as there would be no disabled
    indexes.

    If there would be more unique indexes than disabled (non-unique)
    indexes, we could do the first method. But this is not implemented
    yet. By now we drop and recreate all indexes when repair is called.

    However, there is an exception. Sometimes MySQL disables non-unique
    indexes when the table is empty (e.g. when copying a table in
    mysql_alter_table()). When enabling the non-unique indexes, they
    are still empty. So there is no index block that can be lost. This
    optimization is implemented in this function.

    Note that in normal repair (T_CREATE_MISSING_KEYS not set) we
    recreate all enabled indexes unconditionally. We do not change the
    key_map. Otherwise we invert the key map temporarily (outside of
    this function) and recreate the then "seemingly" enabled indexes.
    When we cannot use the optimization, and drop all indexes, we
    pretend that all indexes were disabled. By the inversion, we will
    then recreate all indexes.
*/

static int mi_drop_all_indexes(MI_CHECK *param, MI_INFO *info, my_bool force)
{
  MYISAM_SHARE *share= info->s;
  MI_STATE_INFO *state= &share->state;
  uint i;
  int error;
  DBUG_ENTER("mi_drop_all_indexes");

  /*
    If any of the disabled indexes has a key block assigned, we must
    drop and recreate all indexes to avoid losing index blocks.

    If we want to recreate disabled indexes only _and_ all of these
    indexes are empty, we don't need to recreate the existing indexes.
  */
  if (!force && (param->testflag & T_CREATE_MISSING_KEYS))
  {
    DBUG_PRINT("repair", ("creating missing indexes"));
    /* Scan for a disabled index that still owns at least one key block. */
    for (i= 0; i < share->base.keys; i++)
    {
      DBUG_PRINT("repair", ("index #: %u key_root: 0x%lx active: %d",
                            i, (long) state->key_root[i],
                            mi_is_key_active(state->key_map, i)));
      if ((state->key_root[i] != HA_OFFSET_ERROR) &&
          !mi_is_key_active(state->key_map, i))
      {
        /*
          This index has at least one key block and it is disabled.
          We would lose its block(s) if would just recreate it.
          So we need to drop and recreate all indexes.
        */
        DBUG_PRINT("repair", ("nonempty and disabled: recreate all"));
        break;
      }
    }
    if (i >= share->base.keys)
    {
      /*
        All of the disabled indexes are empty. We can just recreate them.
        Flush dirty blocks of this index file from key cache and remove
        all blocks of this index file from key cache.
      */
      DBUG_PRINT("repair", ("all disabled are empty: create missing"));
      error= flush_key_blocks(share->key_cache, share->kfile,
                              FLUSH_FORCE_WRITE);
      goto end;
    }
    /*
      We do now drop all indexes and declare them disabled. With the
      T_CREATE_MISSING_KEYS flag, mi_repair*() will recreate all
      disabled indexes and enable them.
    */
    mi_clear_all_keys_active(state->key_map);
    DBUG_PRINT("repair", ("declared all indexes disabled"));
  }

  /* Remove all key blocks of this index file from key cache. */
  if ((error= flush_key_blocks(share->key_cache, share->kfile,
                               FLUSH_IGNORE_CHANGED)))
    goto end;

  /* Clear index root block pointers. */
  for (i= 0; i < share->base.keys; i++)
    state->key_root[i]= HA_OFFSET_ERROR;

  /* Clear the delete chains. */
  for (i= 0; i < state->header.max_block_size; i++)
    state->key_del[i]= HA_OFFSET_ERROR;

  /* Reset index file length to end of index file header. */
  info->state->key_file_length= share->base.keystart;

  DBUG_PRINT("repair", ("dropped all indexes"));
  /* error= 0; set by last (error= flush_key_blocks()). */

end:
  DBUG_RETURN(error);
}
|
||||
|
||||
|
||||
/* Recover old table by reading each record and writing all keys */
|
||||
/* Save new datafile-name in temp_filename */
|
||||
|
||||
|
|
@ -1382,7 +1515,6 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
|
|||
my_string name, int rep_quick)
|
||||
{
|
||||
int error,got_error;
|
||||
uint i;
|
||||
ha_rows start_records,new_header_length;
|
||||
my_off_t del;
|
||||
File new_file;
|
||||
|
|
@ -1486,25 +1618,10 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
|
|||
|
||||
info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
|
||||
|
||||
/*
|
||||
Clear all keys. Note that all key blocks allocated until now remain
|
||||
"dead" parts of the key file. (Bug #4692)
|
||||
*/
|
||||
for (i=0 ; i < info->s->base.keys ; i++)
|
||||
share->state.key_root[i]= HA_OFFSET_ERROR;
|
||||
|
||||
/* Drop the delete chain. */
|
||||
for (i=0 ; i < share->state.header.max_block_size ; i++)
|
||||
share->state.key_del[i]= HA_OFFSET_ERROR;
|
||||
|
||||
/*
|
||||
If requested, activate (enable) all keys in key_map. In this case,
|
||||
all indexes will be (re-)built.
|
||||
*/
|
||||
/* This function always recreates all enabled indexes. */
|
||||
if (param->testflag & T_CREATE_MISSING_KEYS)
|
||||
mi_set_all_keys_active(share->state.key_map, share->base.keys);
|
||||
|
||||
info->state->key_file_length=share->base.keystart;
|
||||
mi_drop_all_indexes(param, info, TRUE);
|
||||
|
||||
lock_memory(param); /* Everything is alloced */
|
||||
|
||||
|
|
@ -2105,8 +2222,9 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
|
|||
ulong *rec_per_key_part;
|
||||
char llbuff[22];
|
||||
SORT_INFO sort_info;
|
||||
ulonglong key_map=share->state.key_map;
|
||||
ulonglong key_map;
|
||||
DBUG_ENTER("mi_repair_by_sort");
|
||||
LINT_INIT(key_map);
|
||||
|
||||
start_records=info->state->records;
|
||||
got_error=1;
|
||||
|
|
@ -2179,25 +2297,14 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
|
|||
}
|
||||
|
||||
info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
|
||||
if (!(param->testflag & T_CREATE_MISSING_KEYS))
|
||||
|
||||
/* Optionally drop indexes and optionally modify the key_map. */
|
||||
mi_drop_all_indexes(param, info, FALSE);
|
||||
key_map= share->state.key_map;
|
||||
if (param->testflag & T_CREATE_MISSING_KEYS)
|
||||
{
|
||||
/*
|
||||
Flush key cache for this file if we are calling this outside
|
||||
myisamchk
|
||||
*/
|
||||
flush_key_blocks(share->key_cache,share->kfile, FLUSH_IGNORE_CHANGED);
|
||||
/* Clear the pointers to the given rows */
|
||||
for (i=0 ; i < share->base.keys ; i++)
|
||||
share->state.key_root[i]= HA_OFFSET_ERROR;
|
||||
for (i=0 ; i < share->state.header.max_block_size ; i++)
|
||||
share->state.key_del[i]= HA_OFFSET_ERROR;
|
||||
info->state->key_file_length=share->base.keystart;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_FORCE_WRITE))
|
||||
goto err;
|
||||
key_map= ~key_map; /* Create the missing keys */
|
||||
/* Invert the copied key_map to recreate all disabled indexes. */
|
||||
key_map= ~key_map;
|
||||
}
|
||||
|
||||
sort_info.info=info;
|
||||
|
|
@ -2240,6 +2347,10 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
|
|||
sort_param.read_cache=param->read_cache;
|
||||
sort_param.keyinfo=share->keyinfo+sort_param.key;
|
||||
sort_param.seg=sort_param.keyinfo->seg;
|
||||
/*
|
||||
Skip this index if it is marked disabled in the copied
|
||||
(and possibly inverted) key_map.
|
||||
*/
|
||||
if (! mi_is_key_active(key_map, sort_param.key))
|
||||
{
|
||||
/* Remember old statistics for key */
|
||||
|
|
@ -2247,6 +2358,8 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
|
|||
(char*) (share->state.rec_per_key_part +
|
||||
(uint) (rec_per_key_part - param->rec_per_key_part)),
|
||||
sort_param.keyinfo->keysegs*sizeof(*rec_per_key_part));
|
||||
DBUG_PRINT("repair", ("skipping seemingly disabled index #: %u",
|
||||
sort_param.key));
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -2302,8 +2415,11 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
|
|||
if (param->testflag & T_STATISTICS)
|
||||
update_key_parts(sort_param.keyinfo, rec_per_key_part, sort_param.unique,
|
||||
param->stats_method == MI_STATS_METHOD_IGNORE_NULLS?
|
||||
sort_param.notnull: NULL,(ulonglong) info->state->records);
|
||||
sort_param.notnull: NULL,
|
||||
(ulonglong) info->state->records);
|
||||
/* Enable this index in the permanent (not the copied) key_map. */
|
||||
mi_set_key_active(share->state.key_map, sort_param.key);
|
||||
DBUG_PRINT("repair", ("set enabled index #: %u", sort_param.key));
|
||||
|
||||
if (sort_param.fix_datafile)
|
||||
{
|
||||
|
|
@ -2504,9 +2620,10 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
|
|||
IO_CACHE new_data_cache; /* For non-quick repair. */
|
||||
IO_CACHE_SHARE io_share;
|
||||
SORT_INFO sort_info;
|
||||
ulonglong key_map=share->state.key_map;
|
||||
ulonglong key_map;
|
||||
pthread_attr_t thr_attr;
|
||||
DBUG_ENTER("mi_repair_parallel");
|
||||
LINT_INIT(key_map);
|
||||
|
||||
start_records=info->state->records;
|
||||
got_error=1;
|
||||
|
|
@ -2608,25 +2725,14 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
|
|||
}
|
||||
|
||||
info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
|
||||
if (!(param->testflag & T_CREATE_MISSING_KEYS))
|
||||
|
||||
/* Optionally drop indexes and optionally modify the key_map. */
|
||||
mi_drop_all_indexes(param, info, FALSE);
|
||||
key_map= share->state.key_map;
|
||||
if (param->testflag & T_CREATE_MISSING_KEYS)
|
||||
{
|
||||
/*
|
||||
Flush key cache for this file if we are calling this outside
|
||||
myisamchk
|
||||
*/
|
||||
flush_key_blocks(share->key_cache,share->kfile, FLUSH_IGNORE_CHANGED);
|
||||
/* Clear the pointers to the given rows */
|
||||
for (i=0 ; i < share->base.keys ; i++)
|
||||
share->state.key_root[i]= HA_OFFSET_ERROR;
|
||||
for (i=0 ; i < share->state.header.max_block_size ; i++)
|
||||
share->state.key_del[i]= HA_OFFSET_ERROR;
|
||||
info->state->key_file_length=share->base.keystart;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_FORCE_WRITE))
|
||||
goto err;
|
||||
key_map= ~key_map; /* Create the missing keys */
|
||||
/* Invert the copied key_map to recreate all disabled indexes. */
|
||||
key_map= ~key_map;
|
||||
}
|
||||
|
||||
sort_info.info=info;
|
||||
|
|
@ -2682,6 +2788,10 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
|
|||
sort_param[i].key=key;
|
||||
sort_param[i].keyinfo=share->keyinfo+key;
|
||||
sort_param[i].seg=sort_param[i].keyinfo->seg;
|
||||
/*
|
||||
Skip this index if it is marked disabled in the copied
|
||||
(and possibly inverted) key_map.
|
||||
*/
|
||||
if (! mi_is_key_active(key_map, key))
|
||||
{
|
||||
/* Remember old statistics for key */
|
||||
|
|
|
|||
|
|
@ -13,20 +13,20 @@ CREATE TABLE t2 (p POINT, INDEX(p));
|
|||
INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
|
||||
INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
|
||||
|
||||
-- no index, returns 1 as expected
|
||||
# no index, returns 1 as expected
|
||||
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
|
||||
|
||||
-- with index, returns 1 as expected
|
||||
-- EXPLAIN shows that the index is not used though
|
||||
-- due to the "most rows covered anyway, so a scan is more effective" rule
|
||||
# with index, returns 1 as expected
|
||||
# EXPLAIN shows that the index is not used though
|
||||
# due to the "most rows covered anyway, so a scan is more effective" rule
|
||||
EXPLAIN
|
||||
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
|
||||
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
|
||||
|
||||
-- adding another row to the table so that
|
||||
-- the "most rows covered" rule doesn't kick in anymore
|
||||
-- now EXPLAIN shows the index used on the table
|
||||
-- and we're getting the wrong result again
|
||||
# adding another row to the table so that
|
||||
# the "most rows covered" rule doesn't kick in anymore
|
||||
# now EXPLAIN shows the index used on the table
|
||||
# and we're getting the wrong result again
|
||||
INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
|
||||
INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
|
||||
EXPLAIN
|
||||
|
|
|
|||
|
|
@ -3715,6 +3715,13 @@ sub mysqld_arguments ($$$$) {
|
|||
# see BUG#28359
|
||||
mtr_add_arg($args, "%s--connect-timeout=60", $prefix);
|
||||
|
||||
# When mysqld is run by a root user(euid is 0), it will fail
|
||||
# to start unless we specify what user to run as. If not running
|
||||
# as root it will be ignored, see BUG#30630
|
||||
if (!(grep(/^--user/, @$extra_opt, @opt_extra_mysqld_opt))) {
|
||||
mtr_add_arg($args, "%s--user=root");
|
||||
}
|
||||
|
||||
if ( $opt_valgrind_mysqld )
|
||||
{
|
||||
mtr_add_arg($args, "%s--skip-safemalloc", $prefix);
|
||||
|
|
|
|||
|
|
@ -5029,4 +5029,46 @@ F7
|
|||
FE þ LATIN SMALL LETTER THORN
|
||||
FF ÿ LATIN SMALL LETTER Y WITH DIAERESIS
|
||||
drop table t1;
|
||||
create table t1(a datetime) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
a
|
||||
0000-00-00 00:00:00
|
||||
drop table t1;
|
||||
create table t1(a set('foo','bar')) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
a
|
||||
|
||||
drop table t1;
|
||||
create table t1(a varchar(32)) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
a
|
||||
|
||||
drop table t1;
|
||||
create table t1(a int) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
a
|
||||
0
|
||||
drop table t1;
|
||||
create table t1(a blob) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
a
|
||||
|
||||
drop table t1;
|
||||
create table t1(a bit(1)) engine=csv;
|
||||
insert into t1 values();
|
||||
select BIN(a) from t1;
|
||||
BIN(a)
|
||||
0
|
||||
drop table t1;
|
||||
create table t1(a enum('foo','bar') default 'foo') engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
a
|
||||
foo
|
||||
drop table t1;
|
||||
End of 5.0 tests
|
||||
|
|
|
|||
|
|
@ -811,6 +811,12 @@ quote(name)
|
|||
????????
|
||||
????????????????
|
||||
drop table bug20536;
|
||||
CREATE TABLE t1(a TEXT CHARSET ucs2 COLLATE ucs2_unicode_ci);
|
||||
INSERT INTO t1 VALUES('abcd');
|
||||
SELECT * FROM t1 WHERE MATCH(a) AGAINST ('+abcd' IN BOOLEAN MODE);
|
||||
a
|
||||
abcd
|
||||
DROP TABLE t1;
|
||||
End of 4.1 tests
|
||||
CREATE TABLE t1 (a varchar(64) character set ucs2, b decimal(10,3));
|
||||
INSERT INTO t1 VALUES ("1.1", 0), ("2.1", 0);
|
||||
|
|
|
|||
|
|
@ -463,3 +463,9 @@ ALTER TABLE t1 DISABLE KEYS;
|
|||
SELECT * FROM t1 WHERE MATCH(a) AGAINST('test');
|
||||
ERROR HY000: Can't find FULLTEXT index matching the column list
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1(a TEXT);
|
||||
INSERT INTO t1 VALUES(' aaaaa aaaa');
|
||||
SELECT * FROM t1 WHERE MATCH(a) AGAINST ('"aaaa"' IN BOOLEAN MODE);
|
||||
a
|
||||
aaaaa aaaa
|
||||
DROP TABLE t1;
|
||||
|
|
|
|||
|
|
@ -1806,4 +1806,29 @@ SELECT a FROM t1 FORCE INDEX (inx) WHERE a=1;
|
|||
a
|
||||
1
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (c1 INT, c2 INT, UNIQUE INDEX (c1), INDEX (c2)) ENGINE=MYISAM;
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 MyISAM 10 Fixed 0 # # # 1024 # # # # # # #
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 MyISAM 10 Fixed 1 # # # 3072 # # # # # # #
|
||||
ALTER TABLE t1 DISABLE KEYS;
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 MyISAM 10 Fixed 1 # # # 3072 # # # # # # #
|
||||
ALTER TABLE t1 ENABLE KEYS;
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 MyISAM 10 Fixed 1 # # # 3072 # # # # # # #
|
||||
ALTER TABLE t1 DISABLE KEYS;
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 MyISAM 10 Fixed 1 # # # 3072 # # # # # # #
|
||||
ALTER TABLE t1 ENABLE KEYS;
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 MyISAM 10 Fixed 1 # # # 3072 # # # # # # #
|
||||
DROP TABLE t1;
|
||||
End of 5.0 tests
|
||||
|
|
|
|||
|
|
@ -1888,5 +1888,27 @@ set engine_condition_pushdown = 1;
|
|||
SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%');
|
||||
fname lname
|
||||
Young Foo
|
||||
drop table t1;
|
||||
create table t1 (a int, b int, c int, d int, primary key using hash(a))
|
||||
engine=ndbcluster;
|
||||
insert into t1 values (10,1,100,0+0x1111);
|
||||
insert into t1 values (20,2,200,0+0x2222);
|
||||
insert into t1 values (30,3,300,0+0x3333);
|
||||
insert into t1 values (40,4,400,0+0x4444);
|
||||
insert into t1 values (50,5,500,0+0x5555);
|
||||
set engine_condition_pushdown = on;
|
||||
select a,b,d from t1
|
||||
where b in (0,1,2,5)
|
||||
order by b;
|
||||
a b d
|
||||
10 1 4369
|
||||
20 2 8738
|
||||
50 5 21845
|
||||
a b d
|
||||
10 1 4369
|
||||
20 2 8738
|
||||
50 5 21845
|
||||
Warnings:
|
||||
Warning 4294 Scan filter is too large, discarded
|
||||
set engine_condition_pushdown = @old_ecpd;
|
||||
DROP TABLE t1,t2,t3,t4,t5;
|
||||
|
|
|
|||
|
|
@ -39,4 +39,12 @@ pk1 b c
|
|||
10 0 0
|
||||
12 2 2
|
||||
14 1 1
|
||||
create unique index ib on t1(b);
|
||||
update t1 set c = 4 where pk1 = 12;
|
||||
update ignore t1 set b = 55 where pk1 = 14;
|
||||
select * from t1 order by pk1;
|
||||
pk1 b c
|
||||
10 0 0
|
||||
12 2 4
|
||||
14 55 1
|
||||
DROP TABLE IF EXISTS t1;
|
||||
|
|
|
|||
|
|
@ -4287,4 +4287,39 @@ c32
|
|||
1
|
||||
1
|
||||
DROP TABLE t1, t2, t3;
|
||||
|
||||
#
|
||||
# Bug#30736: Row Size Too Large Error Creating a Table and
|
||||
# Inserting Data.
|
||||
#
|
||||
DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS t2;
|
||||
|
||||
CREATE TABLE t1(
|
||||
c1 DECIMAL(10, 2),
|
||||
c2 FLOAT);
|
||||
|
||||
INSERT INTO t1 VALUES (0, 1), (2, 3), (4, 5);
|
||||
|
||||
CREATE TABLE t2(
|
||||
c3 DECIMAL(10, 2))
|
||||
SELECT
|
||||
c1 * c2 AS c3
|
||||
FROM t1;
|
||||
|
||||
SELECT * FROM t1;
|
||||
c1 c2
|
||||
0.00 1
|
||||
2.00 3
|
||||
4.00 5
|
||||
|
||||
SELECT * FROM t2;
|
||||
c3
|
||||
0.00
|
||||
6.00
|
||||
20.00
|
||||
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t2;
|
||||
|
||||
End of 5.0 tests
|
||||
|
|
|
|||
|
|
@ -99,6 +99,12 @@ t1 CREATE TABLE `t1` (
|
|||
`b` int(11) default NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
drop table t1;
|
||||
CREATE TABLE t1(a INT)
|
||||
DATA DIRECTORY='TEST_DIR/master-data/mysql'
|
||||
INDEX DIRECTORY='TEST_DIR/master-data/mysql';
|
||||
RENAME TABLE t1 TO user;
|
||||
ERROR HY000: Can't create/write to file 'TEST_DIR/master-data/mysql/user.MYI' (Errcode: 17)
|
||||
DROP TABLE t1;
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
|
|
|
|||
|
|
@ -737,7 +737,6 @@ a_ascii a_len
|
|||
44 1
|
||||
64 1
|
||||
45 1
|
||||
60 1
|
||||
65 1
|
||||
46 1
|
||||
66 1
|
||||
|
|
@ -779,17 +778,18 @@ a_ascii a_len
|
|||
78 1
|
||||
59 1
|
||||
79 1
|
||||
7E 1
|
||||
5A 1
|
||||
7A 1
|
||||
5D 1
|
||||
5B 1
|
||||
5C 1
|
||||
5D 1
|
||||
5E 1
|
||||
5F 1
|
||||
60 1
|
||||
7B 1
|
||||
7C 1
|
||||
7D 1
|
||||
7E 1
|
||||
7F 1
|
||||
80 1
|
||||
81 1
|
||||
|
|
|
|||
|
|
@ -737,7 +737,6 @@ a_ascii a_len
|
|||
44 1
|
||||
64 1
|
||||
45 1
|
||||
60 1
|
||||
65 1
|
||||
46 1
|
||||
66 1
|
||||
|
|
@ -779,17 +778,18 @@ a_ascii a_len
|
|||
78 1
|
||||
59 1
|
||||
79 1
|
||||
7E 1
|
||||
5A 1
|
||||
7A 1
|
||||
5D 1
|
||||
5B 1
|
||||
5C 1
|
||||
5D 1
|
||||
5E 1
|
||||
5F 1
|
||||
60 1
|
||||
7B 1
|
||||
7C 1
|
||||
7D 1
|
||||
7E 1
|
||||
7F 1
|
||||
80 1
|
||||
81 1
|
||||
|
|
|
|||
|
|
@ -737,7 +737,6 @@ a_ascii a_len
|
|||
44 1
|
||||
64 1
|
||||
45 1
|
||||
60 1
|
||||
65 1
|
||||
46 1
|
||||
66 1
|
||||
|
|
@ -779,17 +778,18 @@ a_ascii a_len
|
|||
78 1
|
||||
59 1
|
||||
79 1
|
||||
7E 1
|
||||
5A 1
|
||||
7A 1
|
||||
5D 1
|
||||
5B 1
|
||||
5C 1
|
||||
5D 1
|
||||
5E 1
|
||||
5F 1
|
||||
60 1
|
||||
7B 1
|
||||
7C 1
|
||||
7D 1
|
||||
7E 1
|
||||
7F 1
|
||||
80 1
|
||||
81 1
|
||||
|
|
|
|||
|
|
@ -737,7 +737,6 @@ a_ascii a_len
|
|||
44 1
|
||||
64 1
|
||||
45 1
|
||||
60 1
|
||||
65 1
|
||||
46 1
|
||||
66 1
|
||||
|
|
@ -779,17 +778,18 @@ a_ascii a_len
|
|||
78 1
|
||||
59 1
|
||||
79 1
|
||||
7E 1
|
||||
5A 1
|
||||
7A 1
|
||||
5D 1
|
||||
5B 1
|
||||
5C 1
|
||||
5D 1
|
||||
5E 1
|
||||
5F 1
|
||||
60 1
|
||||
7B 1
|
||||
7C 1
|
||||
7D 1
|
||||
7E 1
|
||||
7F 1
|
||||
80 1
|
||||
81 1
|
||||
|
|
|
|||
|
|
@ -1427,4 +1427,37 @@ insert into t1 values (0xFF,'LATIN SMALL LETTER Y WITH DIAERESIS');
|
|||
select hex(c), c, name from t1 order by 1;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# Bug #31473: does not work with NULL value in datetime field
|
||||
# This bug is a 5.1 but is here to prevent 5.0 regression.
|
||||
#
|
||||
create table t1(a datetime) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
create table t1(a set('foo','bar')) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
create table t1(a varchar(32)) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
create table t1(a int) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
create table t1(a blob) engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
create table t1(a bit(1)) engine=csv;
|
||||
insert into t1 values();
|
||||
select BIN(a) from t1;
|
||||
drop table t1;
|
||||
create table t1(a enum('foo','bar') default 'foo') engine=csv;
|
||||
insert into t1 values();
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
|
||||
--echo End of 5.0 tests
|
||||
|
|
|
|||
|
|
@ -530,7 +530,7 @@ create table t1 (
|
|||
a varchar(255),
|
||||
key a(a)
|
||||
) character set utf8 collate utf8_czech_ci;
|
||||
-- In Czech 'ch' is a single letter between 'h' and 'i'
|
||||
# In Czech 'ch' is a single letter between 'h' and 'i'
|
||||
insert into t1 values
|
||||
('b'),('c'),('d'),('e'),('f'),('g'),('h'),('ch'),('i'),('j');
|
||||
select * from t1 where a like 'c%';
|
||||
|
|
|
|||
|
|
@ -547,6 +547,14 @@ select quote(name) from bug20536;
|
|||
|
||||
drop table bug20536;
|
||||
|
||||
#
|
||||
# BUG#31159 - fulltext search on ucs2 column crashes server
|
||||
#
|
||||
CREATE TABLE t1(a TEXT CHARSET ucs2 COLLATE ucs2_unicode_ci);
|
||||
INSERT INTO t1 VALUES('abcd');
|
||||
SELECT * FROM t1 WHERE MATCH(a) AGAINST ('+abcd' IN BOOLEAN MODE);
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo End of 4.1 tests
|
||||
|
||||
#
|
||||
|
|
|
|||
|
|
@ -387,4 +387,12 @@ ALTER TABLE t1 DISABLE KEYS;
|
|||
SELECT * FROM t1 WHERE MATCH(a) AGAINST('test');
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# BUG#11392 - fulltext search bug
|
||||
#
|
||||
CREATE TABLE t1(a TEXT);
|
||||
INSERT INTO t1 VALUES(' aaaaa aaaa');
|
||||
SELECT * FROM t1 WHERE MATCH(a) AGAINST ('"aaaa"' IN BOOLEAN MODE);
|
||||
DROP TABLE t1;
|
||||
|
||||
# End of 4.1 tests
|
||||
|
|
|
|||
|
|
@ -1161,4 +1161,30 @@ ALTER TABLE t1 ENABLE KEYS;
|
|||
SELECT a FROM t1 FORCE INDEX (inx) WHERE a=1;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Bug#4692 - DISABLE/ENABLE KEYS waste a space
|
||||
#
|
||||
CREATE TABLE t1 (c1 INT, c2 INT, UNIQUE INDEX (c1), INDEX (c2)) ENGINE=MYISAM;
|
||||
--replace_column 6 # 7 # 8 # 10 # 11 # 12 # 13 # 14 # 15 # 16 #
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
--replace_column 6 # 7 # 8 # 10 # 11 # 12 # 13 # 14 # 15 # 16 #
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
ALTER TABLE t1 DISABLE KEYS;
|
||||
--replace_column 6 # 7 # 8 # 10 # 11 # 12 # 13 # 14 # 15 # 16 #
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
ALTER TABLE t1 ENABLE KEYS;
|
||||
--replace_column 6 # 7 # 8 # 10 # 11 # 12 # 13 # 14 # 15 # 16 #
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
ALTER TABLE t1 DISABLE KEYS;
|
||||
--replace_column 6 # 7 # 8 # 10 # 11 # 12 # 13 # 14 # 15 # 16 #
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
ALTER TABLE t1 ENABLE KEYS;
|
||||
--replace_column 6 # 7 # 8 # 10 # 11 # 12 # 13 # 14 # 15 # 16 #
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
#--exec ls -log var/master-data/test/t1.MYI
|
||||
#--exec myisamchk -dvv var/master-data/test/t1.MYI
|
||||
#--exec myisamchk -iev var/master-data/test/t1.MYI
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo End of 5.0 tests
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -33,6 +33,11 @@ UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4;
|
|||
select * from t1 order by pk1;
|
||||
UPDATE t1 set pk1 = pk1 + 10;
|
||||
select * from t1 order by pk1;
|
||||
# bug#25817
|
||||
create unique index ib on t1(b);
|
||||
update t1 set c = 4 where pk1 = 12;
|
||||
update ignore t1 set b = 55 where pk1 = 14;
|
||||
select * from t1 order by pk1;
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1;
|
||||
|
|
|
|||
|
|
@ -3504,9 +3504,6 @@ CREATE VIEW v1 AS SELECT 1 AS ` `;
|
|||
--error 1166
|
||||
CREATE VIEW v1 AS SELECT (SELECT 1 AS ` `);
|
||||
|
||||
CREATE VIEW v1 AS SELECT 1 AS ` x`;
|
||||
SELECT `x` FROM v1;
|
||||
|
||||
--error 1166
|
||||
ALTER VIEW v1 AS SELECT 1 AS ` `;
|
||||
|
||||
|
|
@ -3613,5 +3610,52 @@ SELECT c32 FROM t1, t2, t3 WHERE t1.c11 IN (1, 3, 5) AND
|
|||
ORDER BY c32 DESC;
|
||||
|
||||
DROP TABLE t1, t2, t3;
|
||||
###########################################################################
|
||||
|
||||
--echo
|
||||
--echo #
|
||||
--echo # Bug#30736: Row Size Too Large Error Creating a Table and
|
||||
--echo # Inserting Data.
|
||||
--echo #
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1;
|
||||
DROP TABLE IF EXISTS t2;
|
||||
--enable_warnings
|
||||
|
||||
--echo
|
||||
|
||||
CREATE TABLE t1(
|
||||
c1 DECIMAL(10, 2),
|
||||
c2 FLOAT);
|
||||
|
||||
--echo
|
||||
|
||||
INSERT INTO t1 VALUES (0, 1), (2, 3), (4, 5);
|
||||
|
||||
--echo
|
||||
|
||||
CREATE TABLE t2(
|
||||
c3 DECIMAL(10, 2))
|
||||
SELECT
|
||||
c1 * c2 AS c3
|
||||
FROM t1;
|
||||
|
||||
--echo
|
||||
|
||||
SELECT * FROM t1;
|
||||
|
||||
--echo
|
||||
|
||||
SELECT * FROM t2;
|
||||
|
||||
--echo
|
||||
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t2;
|
||||
|
||||
--echo
|
||||
|
||||
###########################################################################
|
||||
|
||||
--echo End of 5.0 tests
|
||||
|
|
|
|||
|
|
@ -2970,7 +2970,7 @@ DROP TABLE t1,t2;
|
|||
CREATE TABLE t1 (a INT, b INT);
|
||||
INSERT INTO t1 VALUES (1, 2), (1,3), (1,4), (2,1), (2,2);
|
||||
|
||||
-- returns no rows, when it should
|
||||
# returns no rows, when it should
|
||||
SELECT a1.a, COUNT(*) FROM t1 a1 WHERE a1.a = 1
|
||||
AND EXISTS( SELECT a2.a FROM t1 a2 WHERE a2.a = a1.a)
|
||||
GROUP BY a1.a;
|
||||
|
|
|
|||
|
|
@ -124,6 +124,18 @@ enable_query_log;
|
|||
show create table t1;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# BUG#32111 - Security Breach via DATA/INDEX DIRECORY and RENAME TABLE
|
||||
#
|
||||
--replace_result $MYSQLTEST_VARDIR TEST_DIR
|
||||
eval CREATE TABLE t1(a INT)
|
||||
DATA DIRECTORY='$MYSQLTEST_VARDIR/master-data/mysql'
|
||||
INDEX DIRECTORY='$MYSQLTEST_VARDIR/master-data/mysql';
|
||||
--replace_result $MYSQLTEST_VARDIR TEST_DIR
|
||||
--error 1
|
||||
RENAME TABLE t1 TO user;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Test specifying DATA DIRECTORY that is the same as what would normally
|
||||
# have been chosen. (Bug #8707)
|
||||
|
|
|
|||
|
|
@ -139,7 +139,7 @@ show global variables like 'net_%';
|
|||
show session variables like 'net_%';
|
||||
set net_buffer_length=1;
|
||||
show variables like 'net_buffer_length';
|
||||
--warning 1292
|
||||
#warning 1292
|
||||
set net_buffer_length=2000000000;
|
||||
show variables like 'net_buffer_length';
|
||||
|
||||
|
|
|
|||
|
|
@ -810,3 +810,43 @@ ulong escape_quotes_for_mysql(CHARSET_INFO *charset_info,
|
|||
*to= 0;
|
||||
return overflow ? (ulong)~0 : (ulong) (to - to_start);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
@brief Find compatible character set with ctype.
|
||||
|
||||
@param[in] original_cs Original character set
|
||||
|
||||
@note
|
||||
128 my_charset_ucs2_general_uca ->192 my_charset_utf8_general_uca_ci
|
||||
129 my_charset_ucs2_icelandic_uca_ci ->193 my_charset_utf8_icelandic_uca_ci
|
||||
130 my_charset_ucs2_latvian_uca_ci ->194 my_charset_utf8_latvian_uca_ci
|
||||
131 my_charset_ucs2_romanian_uca_ci ->195 my_charset_utf8_romanian_uca_ci
|
||||
132 my_charset_ucs2_slovenian_uca_ci ->196 my_charset_utf8_slovenian_uca_ci
|
||||
133 my_charset_ucs2_polish_uca_ci ->197 my_charset_utf8_polish_uca_ci
|
||||
134 my_charset_ucs2_estonian_uca_ci ->198 my_charset_utf8_estonian_uca_ci
|
||||
135 my_charset_ucs2_spanish_uca_ci ->199 my_charset_utf8_spanish_uca_ci
|
||||
136 my_charset_ucs2_swedish_uca_ci ->200 my_charset_utf8_swedish_uca_ci
|
||||
137 my_charset_ucs2_turkish_uca_ci ->201 my_charset_utf8_turkish_uca_ci
|
||||
138 my_charset_ucs2_czech_uca_ci ->202 my_charset_utf8_czech_uca_ci
|
||||
139 my_charset_ucs2_danish_uca_ci ->203 my_charset_utf8_danish_uca_ci
|
||||
140 my_charset_ucs2_lithuanian_uca_ci->204 my_charset_utf8_lithuanian_uca_ci
|
||||
141 my_charset_ucs2_slovak_uca_ci ->205 my_charset_utf8_slovak_uca_ci
|
||||
142 my_charset_ucs2_spanish2_uca_ci ->206 my_charset_utf8_spanish2_uca_ci
|
||||
143 my_charset_ucs2_roman_uca_ci ->207 my_charset_utf8_roman_uca_ci
|
||||
144 my_charset_ucs2_persian_uca_ci ->208 my_charset_utf8_persian_uca_ci
|
||||
|
||||
@return Compatible character set or NULL.
|
||||
*/
|
||||
|
||||
CHARSET_INFO *get_compatible_charset_with_ctype(CHARSET_INFO *original_cs)
|
||||
{
|
||||
CHARSET_INFO *compatible_cs= 0;
|
||||
DBUG_ENTER("get_compatible_charset_with_ctype");
|
||||
if (!strcmp(original_cs->csname, "ucs2") &&
|
||||
(compatible_cs= get_charset(original_cs->number + 64, MYF(0))) &&
|
||||
(!compatible_cs->ctype ||
|
||||
strcmp(original_cs->name + 4, compatible_cs->name + 4)))
|
||||
compatible_cs= 0;
|
||||
DBUG_RETURN(compatible_cs);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -126,6 +126,7 @@ int my_rename_with_symlink(const char *from, const char *to, myf MyFlags)
|
|||
int was_symlink= (!my_disable_symlinks &&
|
||||
!my_readlink(link_name, from, MYF(0)));
|
||||
int result=0;
|
||||
int name_is_different;
|
||||
DBUG_ENTER("my_rename_with_symlink");
|
||||
|
||||
if (!was_symlink)
|
||||
|
|
@ -134,6 +135,14 @@ int my_rename_with_symlink(const char *from, const char *to, myf MyFlags)
|
|||
/* Change filename that symlink pointed to */
|
||||
strmov(tmp_name, to);
|
||||
fn_same(tmp_name,link_name,1); /* Copy dir */
|
||||
name_is_different= strcmp(link_name, tmp_name);
|
||||
if (name_is_different && !access(tmp_name, F_OK))
|
||||
{
|
||||
my_errno= EEXIST;
|
||||
if (MyFlags & MY_WME)
|
||||
my_error(EE_CANTCREATEFILE, MYF(0), tmp_name, EEXIST);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
/* Create new symlink */
|
||||
if (my_symlink(tmp_name, to, MyFlags))
|
||||
|
|
@ -145,7 +154,7 @@ int my_rename_with_symlink(const char *from, const char *to, myf MyFlags)
|
|||
the same basename and different directories.
|
||||
*/
|
||||
|
||||
if (strcmp(link_name, tmp_name) && my_rename(link_name, tmp_name, MyFlags))
|
||||
if (name_is_different && my_rename(link_name, tmp_name, MyFlags))
|
||||
{
|
||||
int save_errno=my_errno;
|
||||
my_delete(to, MyFlags); /* Remove created symlink */
|
||||
|
|
|
|||
|
|
@ -41,8 +41,7 @@ public:
|
|||
STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
|
||||
|
||||
/** Initialize AttributeHeader at location aHeaderPtr */
|
||||
static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
|
||||
Uint32 aDataSize);
|
||||
static void init(Uint32* aHeaderPtr, Uint32 anAttributeId, Uint32 aDataSize);
|
||||
|
||||
/** Returns size of AttributeHeader (usually one or two words) */
|
||||
Uint32 getHeaderSize() const; // In 32-bit words
|
||||
|
|
@ -100,10 +99,11 @@ public:
|
|||
*/
|
||||
|
||||
inline
|
||||
AttributeHeader& AttributeHeader::init(void* aHeaderPtr, Uint32 anAttributeId,
|
||||
Uint32 aDataSize)
|
||||
void AttributeHeader::init(Uint32* aHeaderPtr, Uint32 anAttributeId,
|
||||
Uint32 aDataSize)
|
||||
{
|
||||
return * new (aHeaderPtr) AttributeHeader(anAttributeId, aDataSize);
|
||||
AttributeHeader ah(anAttributeId, aDataSize);
|
||||
*aHeaderPtr = ah.m_value;
|
||||
}
|
||||
|
||||
inline
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ public:
|
|||
* Length of signal
|
||||
*/
|
||||
STATIC_CONST( StaticLength = 11 );
|
||||
STATIC_CONST( MaxTotalAttrInfo = 0xFFFF );
|
||||
|
||||
private:
|
||||
|
||||
|
|
|
|||
|
|
@ -1052,6 +1052,7 @@ class Ndb
|
|||
friend class NdbDictInterface;
|
||||
friend class NdbBlob;
|
||||
friend class NdbImpl;
|
||||
friend class NdbScanFilterImpl;
|
||||
#endif
|
||||
|
||||
public:
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
#define NDB_SCAN_FILTER_HPP
|
||||
|
||||
#include <ndb_types.h>
|
||||
#include <ndbapi_limits.h>
|
||||
|
||||
/**
|
||||
* @class NdbScanFilter
|
||||
|
|
@ -31,8 +32,13 @@ public:
|
|||
/**
|
||||
* Constructor
|
||||
* @param op The NdbOperation that the filter belongs to (is applied to).
|
||||
* @param abort_on_too_large abort transaction on filter too large
|
||||
* default: true
|
||||
* @param max_size Maximum size of generated filter in words
|
||||
*/
|
||||
NdbScanFilter(class NdbOperation * op);
|
||||
NdbScanFilter(class NdbOperation * op,
|
||||
bool abort_on_too_large = true,
|
||||
Uint32 max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS);
|
||||
~NdbScanFilter();
|
||||
|
||||
/**
|
||||
|
|
@ -166,6 +172,25 @@ public:
|
|||
/** @} *********************************************************************/
|
||||
#endif
|
||||
|
||||
enum Error {
|
||||
FilterTooLarge = 4294
|
||||
};
|
||||
|
||||
/**
|
||||
* Get filter level error.
|
||||
*
|
||||
* Most errors are set only on operation level, and they abort the
|
||||
* transaction. The error FilterTooLarge is set on filter level and
|
||||
* by default it propagates to operation level and also aborts the
|
||||
* transaction.
|
||||
*
|
||||
* If option abort_on_too_large is set to false, then FilterTooLarge
|
||||
* does not propagate. One can then either ignore this error (in
|
||||
* which case no filtering is done) or try to define a new filter
|
||||
* immediately.
|
||||
*/
|
||||
const class NdbError & getNdbError() const;
|
||||
|
||||
private:
|
||||
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
|
||||
friend class NdbScanFilterImpl;
|
||||
|
|
|
|||
|
|
@ -26,4 +26,6 @@
|
|||
#define NDB_MAX_TUPLE_SIZE (NDB_MAX_TUPLE_SIZE_IN_WORDS*4)
|
||||
#define NDB_MAX_ACTIVE_EVENTS 100
|
||||
|
||||
#define NDB_MAX_SCANFILTER_SIZE_IN_WORDS 50000
|
||||
|
||||
#endif
|
||||
|
|
|
|||
33
ndb/include/util/ndb_rand.h
Normal file
33
ndb/include/util/ndb_rand.h
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
/* Copyright (C) 2003 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#ifndef NDB_RAND_H
|
||||
#define NDB_RAND_H
|
||||
|
||||
#define NDB_RAND_MAX 32767
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
int ndb_rand(void);
|
||||
|
||||
void ndb_srand(unsigned seed);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
@ -24,7 +24,8 @@ libgeneral_la_SOURCES = \
|
|||
uucode.c random.c version.c \
|
||||
strdup.c \
|
||||
ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \
|
||||
Bitmask.cpp
|
||||
Bitmask.cpp \
|
||||
ndb_rand.c
|
||||
|
||||
EXTRA_PROGRAMS = testBitmask
|
||||
testBitmask_SOURCES = testBitmask.cpp
|
||||
|
|
|
|||
40
ndb/src/common/util/ndb_rand.c
Normal file
40
ndb/src/common/util/ndb_rand.c
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
/* Copyright (C) 2003 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#include <ndb_rand.h>
|
||||
|
||||
static unsigned long next= 1;
|
||||
|
||||
/**
|
||||
* ndb_rand
|
||||
*
|
||||
* constant time, cheap, pseudo-random number generator.
|
||||
*
|
||||
* NDB_RAND_MAX assumed to be 32767
|
||||
*
|
||||
* This is the POSIX example for "generating the same sequence on
|
||||
* different machines". Although that is not one of our requirements.
|
||||
*/
|
||||
int ndb_rand(void)
|
||||
{
|
||||
next= next * 1103515245 + 12345;
|
||||
return((unsigned)(next/65536) % 32768);
|
||||
}
|
||||
|
||||
void ndb_srand(unsigned seed)
|
||||
{
|
||||
next= seed;
|
||||
}
|
||||
|
||||
|
|
@ -20,6 +20,7 @@
|
|||
#include <RefConvert.hpp>
|
||||
#include <ndb_limits.h>
|
||||
#include <my_sys.h>
|
||||
#include <ndb_rand.h>
|
||||
|
||||
#include <signaldata/EventReport.hpp>
|
||||
#include <signaldata/TcKeyReq.hpp>
|
||||
|
|
@ -6278,7 +6279,7 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
|
|||
jam();
|
||||
if (api_timer != 0) {
|
||||
Uint32 error= ZTIME_OUT_ERROR;
|
||||
time_out_value= time_out_param + (api_con_ptr & mask_value);
|
||||
time_out_value= time_out_param + (ndb_rand() & mask_value);
|
||||
if (unlikely(old_mask_value)) // abort during single user mode
|
||||
{
|
||||
apiConnectptr.i = api_con_ptr;
|
||||
|
|
|
|||
|
|
@ -1138,7 +1138,8 @@ Dbtup::updateStartLab(Signal* signal,
|
|||
regOperPtr->attrinbufLen);
|
||||
} else {
|
||||
jam();
|
||||
if (interpreterStartLab(signal, pagePtr, regOperPtr->pageOffset) == -1)
|
||||
retValue = interpreterStartLab(signal, pagePtr, regOperPtr->pageOffset);
|
||||
if (retValue == -1)
|
||||
{
|
||||
jam();
|
||||
return -1;
|
||||
|
|
@ -1577,8 +1578,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
|
|||
Uint32 TdataForUpdate[3];
|
||||
Uint32 Tlen;
|
||||
|
||||
AttributeHeader& ah = AttributeHeader::init(&TdataForUpdate[0],
|
||||
TattrId, TattrNoOfWords);
|
||||
AttributeHeader ah(TattrId, TattrNoOfWords);
|
||||
TdataForUpdate[0] = ah.m_value;
|
||||
TdataForUpdate[1] = TregMemBuffer[theRegister + 2];
|
||||
TdataForUpdate[2] = TregMemBuffer[theRegister + 3];
|
||||
Tlen = TattrNoOfWords + 1;
|
||||
|
|
@ -1594,6 +1595,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
|
|||
// Write a NULL value into the attribute
|
||||
/* --------------------------------------------------------- */
|
||||
ah.setNULL();
|
||||
TdataForUpdate[0] = ah.m_value;
|
||||
Tlen = 1;
|
||||
}//if
|
||||
int TnoDataRW= updateAttributes(pagePtr,
|
||||
|
|
|
|||
|
|
@ -676,8 +676,6 @@ bool
|
|||
Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
|
||||
{
|
||||
Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS];
|
||||
Uint32 attributeHeader;
|
||||
AttributeHeader* ahOut = (AttributeHeader*)&attributeHeader;
|
||||
AttributeHeader ahIn(*updateBuffer);
|
||||
Uint32 attributeId = ahIn.getAttributeId();
|
||||
Uint32 attrDescriptorIndex = regTabPtr->tabDescriptor + (attributeId << ZAD_LOG_SIZE);
|
||||
|
|
@ -700,16 +698,17 @@ Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
|
|||
|
||||
ReadFunction f = regTabPtr->readFunctionArray[attributeId];
|
||||
|
||||
AttributeHeader::init(&attributeHeader, attributeId, 0);
|
||||
AttributeHeader attributeHeader(attributeId, 0);
|
||||
tOutBufIndex = 0;
|
||||
tMaxRead = MAX_KEY_SIZE_IN_WORDS;
|
||||
|
||||
bool tmp = tXfrmFlag;
|
||||
tXfrmFlag = true;
|
||||
ndbrequire((this->*f)(&keyReadBuffer[0], ahOut, attrDescriptor, attributeOffset));
|
||||
ndbrequire((this->*f)(&keyReadBuffer[0], &attributeHeader, attrDescriptor,
|
||||
attributeOffset));
|
||||
tXfrmFlag = tmp;
|
||||
ndbrequire(tOutBufIndex == ahOut->getDataSize());
|
||||
if (ahIn.getDataSize() != ahOut->getDataSize()) {
|
||||
ndbrequire(tOutBufIndex == attributeHeader.getDataSize());
|
||||
if (ahIn.getDataSize() != attributeHeader.getDataSize()) {
|
||||
ljam();
|
||||
return true;
|
||||
}//if
|
||||
|
|
|
|||
|
|
@ -1168,9 +1168,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
|
|||
/**************************************************************
|
||||
* Attribute found - store in mapping (AttributeId, Position)
|
||||
**************************************************************/
|
||||
AttributeHeader & attrMap =
|
||||
AttributeHeader::init(attrMappingIt.data,
|
||||
attrDesc.AttributeId, // 1. Store AttrId
|
||||
AttributeHeader attrMap(attrDesc.AttributeId, // 1. Store AttrId
|
||||
0);
|
||||
|
||||
if (attrDesc.AttributeKeyFlag) {
|
||||
|
|
@ -1199,6 +1197,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
|
|||
return;
|
||||
}
|
||||
}
|
||||
*(attrMappingIt.data) = attrMap.m_value;
|
||||
#if 0
|
||||
ndbout << "BEFORE: attrLength: " << attrLength << endl;
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -230,7 +230,7 @@ MgmtSrvr::startEventLog()
|
|||
void
|
||||
MgmtSrvr::stopEventLog()
|
||||
{
|
||||
// Nothing yet
|
||||
g_eventLogger.close();
|
||||
}
|
||||
|
||||
class ErrorItem
|
||||
|
|
|
|||
|
|
@ -392,9 +392,8 @@ NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
|
|||
return NULL;
|
||||
}//if
|
||||
}//if
|
||||
Uint32 ah;
|
||||
AttributeHeader::init(&ah, tAttrInfo->m_attrId, 0);
|
||||
if (insertATTRINFO(ah) != -1) {
|
||||
AttributeHeader ah(tAttrInfo->m_attrId, 0);
|
||||
if (insertATTRINFO(ah.m_value) != -1) {
|
||||
// Insert Attribute Id into ATTRINFO part.
|
||||
|
||||
/************************************************************************
|
||||
|
|
@ -525,12 +524,11 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
|
|||
|
||||
tAttrId = tAttrInfo->m_attrId;
|
||||
const char *aValue = aValuePassed;
|
||||
Uint32 ahValue;
|
||||
if (aValue == NULL) {
|
||||
if (tAttrInfo->m_nullable) {
|
||||
AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId, 0);
|
||||
AttributeHeader ah(tAttrId, 0);
|
||||
ah.setNULL();
|
||||
insertATTRINFO(ahValue);
|
||||
insertATTRINFO(ah.m_value);
|
||||
// Insert Attribute Id with the value
|
||||
// NULL into ATTRINFO part.
|
||||
DBUG_RETURN(0);
|
||||
|
|
@ -563,8 +561,8 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
|
|||
}//if
|
||||
const Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Including bits in last word
|
||||
const Uint32 sizeInWords = sizeInBytes / 4; // Excluding bits in last word
|
||||
(void) AttributeHeader::init(&ahValue, tAttrId, totalSizeInWords);
|
||||
insertATTRINFO( ahValue );
|
||||
AttributeHeader ah(tAttrId, totalSizeInWords);
|
||||
insertATTRINFO( ah.m_value );
|
||||
|
||||
/***********************************************************************
|
||||
* Check if the pointer of the value passed is aligned on a 4 byte boundary.
|
||||
|
|
|
|||
|
|
@ -14,11 +14,15 @@
|
|||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#include <NdbScanFilter.hpp>
|
||||
#include <Ndb.hpp>
|
||||
#include <NdbOperation.hpp>
|
||||
#include "NdbDictionaryImpl.hpp"
|
||||
#include <Vector.hpp>
|
||||
#include <NdbOut.hpp>
|
||||
#include <Interpreter.hpp>
|
||||
#include <signaldata/AttrInfo.hpp>
|
||||
#include "NdbApiSignal.hpp"
|
||||
#include "NdbUtil.hpp"
|
||||
|
||||
#ifdef VM_TRACE
|
||||
#include <NdbEnv.h>
|
||||
|
|
@ -52,14 +56,37 @@ public:
|
|||
|
||||
int cond_col_const(Interpreter::BinaryCondition, Uint32 attrId,
|
||||
const void * value, Uint32 len);
|
||||
|
||||
bool m_abort_on_too_large;
|
||||
|
||||
NdbOperation::OperationStatus m_initial_op_status;
|
||||
Uint32 m_initial_AI_size;
|
||||
Uint32 m_max_size;
|
||||
|
||||
Uint32 get_size() {
|
||||
assert(m_operation->theTotalCurrAI_Len >= m_initial_AI_size);
|
||||
return m_operation->theTotalCurrAI_Len - m_initial_AI_size;
|
||||
}
|
||||
bool check_size() {
|
||||
if (get_size() <= m_max_size)
|
||||
return true;
|
||||
handle_filter_too_large();
|
||||
return false;
|
||||
}
|
||||
void handle_filter_too_large();
|
||||
|
||||
NdbError m_error;
|
||||
};
|
||||
|
||||
const Uint32 LabelExit = ~0;
|
||||
|
||||
|
||||
NdbScanFilter::NdbScanFilter(class NdbOperation * op)
|
||||
NdbScanFilter::NdbScanFilter(class NdbOperation * op,
|
||||
bool abort_on_too_large,
|
||||
Uint32 max_size)
|
||||
: m_impl(* new NdbScanFilterImpl())
|
||||
{
|
||||
DBUG_ENTER("NdbScanFilter::NdbScanFilter");
|
||||
m_impl.m_current.m_group = (NdbScanFilter::Group)0;
|
||||
m_impl.m_current.m_popCount = 0;
|
||||
m_impl.m_current.m_ownLabel = 0;
|
||||
|
|
@ -69,6 +96,21 @@ NdbScanFilter::NdbScanFilter(class NdbOperation * op)
|
|||
m_impl.m_latestAttrib = ~0;
|
||||
m_impl.m_operation = op;
|
||||
m_impl.m_negative = 0;
|
||||
|
||||
DBUG_PRINT("info", ("op status: %d tot AI: %u in curr: %u",
|
||||
op->theStatus,
|
||||
op->theTotalCurrAI_Len, op->theAI_LenInCurrAI));
|
||||
|
||||
m_impl.m_abort_on_too_large = abort_on_too_large;
|
||||
|
||||
m_impl.m_initial_op_status = op->theStatus;
|
||||
m_impl.m_initial_AI_size = op->theTotalCurrAI_Len;
|
||||
if (max_size > NDB_MAX_SCANFILTER_SIZE_IN_WORDS)
|
||||
max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS;
|
||||
m_impl.m_max_size = max_size;
|
||||
|
||||
m_impl.m_error.code = 0;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
NdbScanFilter::~NdbScanFilter(){
|
||||
|
|
@ -200,30 +242,38 @@ NdbScanFilter::end(){
|
|||
switch(tmp.m_group){
|
||||
case NdbScanFilter::AND:
|
||||
if(tmp.m_trueLabel == (Uint32)~0){
|
||||
m_impl.m_operation->interpret_exit_ok();
|
||||
if (m_impl.m_operation->interpret_exit_ok() == -1)
|
||||
return -1;
|
||||
} else {
|
||||
m_impl.m_operation->branch_label(tmp.m_trueLabel);
|
||||
if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case NdbScanFilter::NAND:
|
||||
if(tmp.m_trueLabel == (Uint32)~0){
|
||||
m_impl.m_operation->interpret_exit_nok();
|
||||
if (m_impl.m_operation->interpret_exit_nok() == -1)
|
||||
return -1;
|
||||
} else {
|
||||
m_impl.m_operation->branch_label(tmp.m_falseLabel);
|
||||
if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case NdbScanFilter::OR:
|
||||
if(tmp.m_falseLabel == (Uint32)~0){
|
||||
m_impl.m_operation->interpret_exit_nok();
|
||||
if (m_impl.m_operation->interpret_exit_nok() == -1)
|
||||
return -1;
|
||||
} else {
|
||||
m_impl.m_operation->branch_label(tmp.m_falseLabel);
|
||||
if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case NdbScanFilter::NOR:
|
||||
if(tmp.m_falseLabel == (Uint32)~0){
|
||||
m_impl.m_operation->interpret_exit_ok();
|
||||
if (m_impl.m_operation->interpret_exit_ok() == -1)
|
||||
return -1;
|
||||
} else {
|
||||
m_impl.m_operation->branch_label(tmp.m_trueLabel);
|
||||
if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
|
@ -231,17 +281,20 @@ NdbScanFilter::end(){
|
|||
return -1;
|
||||
}
|
||||
|
||||
m_impl.m_operation->def_label(tmp.m_ownLabel);
|
||||
if (m_impl.m_operation->def_label(tmp.m_ownLabel) == -1)
|
||||
return -1;
|
||||
|
||||
if(m_impl.m_stack.size() == 0){
|
||||
switch(tmp.m_group){
|
||||
case NdbScanFilter::AND:
|
||||
case NdbScanFilter::NOR:
|
||||
m_impl.m_operation->interpret_exit_nok();
|
||||
if (m_impl.m_operation->interpret_exit_nok() == -1)
|
||||
return -1;
|
||||
break;
|
||||
case NdbScanFilter::OR:
|
||||
case NdbScanFilter::NAND:
|
||||
m_impl.m_operation->interpret_exit_ok();
|
||||
if (m_impl.m_operation->interpret_exit_ok() == -1)
|
||||
return -1;
|
||||
break;
|
||||
default:
|
||||
m_impl.m_operation->setErrorCodeAbort(4260);
|
||||
|
|
@ -249,6 +302,8 @@ NdbScanFilter::end(){
|
|||
}
|
||||
}
|
||||
|
||||
if (!m_impl.check_size())
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -261,10 +316,16 @@ NdbScanFilter::istrue(){
|
|||
}
|
||||
|
||||
if(m_impl.m_current.m_trueLabel == (Uint32)~0){
|
||||
return m_impl.m_operation->interpret_exit_ok();
|
||||
if (m_impl.m_operation->interpret_exit_ok() == -1)
|
||||
return -1;
|
||||
} else {
|
||||
return m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel);
|
||||
if (m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel) == -1)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!m_impl.check_size())
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
|
|
@ -276,10 +337,16 @@ NdbScanFilter::isfalse(){
|
|||
}
|
||||
|
||||
if(m_impl.m_current.m_falseLabel == (Uint32)~0){
|
||||
return m_impl.m_operation->interpret_exit_nok();
|
||||
if (m_impl.m_operation->interpret_exit_nok() == -1)
|
||||
return -1;
|
||||
} else {
|
||||
return m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel);
|
||||
if (m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel) == -1)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!m_impl.check_size())
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -330,7 +397,11 @@ NdbScanFilterImpl::cond_col(Interpreter::UnaryCondition op, Uint32 AttrId){
|
|||
}
|
||||
|
||||
Branch1 branch = table2[op].m_branches[m_current.m_group];
|
||||
(m_operation->* branch)(AttrId, m_current.m_ownLabel);
|
||||
if ((m_operation->* branch)(AttrId, m_current.m_ownLabel) == -1)
|
||||
return -1;
|
||||
|
||||
if (!check_size())
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -463,8 +534,12 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
|
|||
return -1;
|
||||
}
|
||||
|
||||
int ret = (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel);
|
||||
return ret;
|
||||
if ((m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel) == -1)
|
||||
return -1;
|
||||
|
||||
if (!check_size())
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
|
|
@ -492,6 +567,129 @@ NdbScanFilter::cmp(BinaryCondition cond, int ColId,
|
|||
return -1;
|
||||
}
|
||||
|
||||
void
|
||||
NdbScanFilterImpl::handle_filter_too_large()
|
||||
{
|
||||
DBUG_ENTER("NdbScanFilterImpl::handle_filter_too_large");
|
||||
|
||||
NdbOperation* const op = m_operation;
|
||||
m_error.code = NdbScanFilter::FilterTooLarge;
|
||||
if (m_abort_on_too_large)
|
||||
op->setErrorCodeAbort(m_error.code);
|
||||
|
||||
/*
|
||||
* Possible interpreted parts at this point are:
|
||||
*
|
||||
* 1. initial read
|
||||
* 2. interpreted program
|
||||
*
|
||||
* It is assumed that NdbScanFilter has created all of 2
|
||||
* so that we don't have to save interpreter state.
|
||||
*/
|
||||
|
||||
const Uint32 size = get_size();
|
||||
assert(size != 0);
|
||||
|
||||
// new ATTRINFO size
|
||||
const Uint32 new_size = m_initial_AI_size;
|
||||
|
||||
// find last signal for new size
|
||||
assert(op->theFirstATTRINFO != NULL);
|
||||
NdbApiSignal* lastSignal = op->theFirstATTRINFO;
|
||||
Uint32 n = 0;
|
||||
while (n + AttrInfo::DataLength < new_size) {
|
||||
lastSignal = lastSignal->next();
|
||||
assert(lastSignal != NULL);
|
||||
n += AttrInfo::DataLength;
|
||||
}
|
||||
assert(n < size);
|
||||
|
||||
// release remaining signals
|
||||
NdbApiSignal* tSignal = lastSignal->next();
|
||||
op->theNdb->releaseSignalsInList(&tSignal);
|
||||
lastSignal->next(NULL);
|
||||
|
||||
// length of lastSignal
|
||||
const Uint32 new_curr = AttrInfo::HeaderLength + new_size - n;
|
||||
assert(new_curr <= 25);
|
||||
|
||||
DBUG_PRINT("info", ("op status: %d->%d tot AI: %u->%u in curr: %u->%u",
|
||||
op->theStatus, m_initial_op_status,
|
||||
op->theTotalCurrAI_Len, new_size,
|
||||
op->theAI_LenInCurrAI, new_curr));
|
||||
|
||||
// reset op state
|
||||
op->theStatus = m_initial_op_status;
|
||||
|
||||
// reset interpreter state to initial
|
||||
|
||||
NdbBranch* tBranch = op->theFirstBranch;
|
||||
while (tBranch != NULL) {
|
||||
NdbBranch* tmp = tBranch;
|
||||
tBranch = tBranch->theNext;
|
||||
op->theNdb->releaseNdbBranch(tmp);
|
||||
}
|
||||
op->theFirstBranch = NULL;
|
||||
op->theLastBranch = NULL;
|
||||
|
||||
NdbLabel* tLabel = op->theFirstLabel;
|
||||
while (tLabel != NULL) {
|
||||
NdbLabel* tmp = tLabel;
|
||||
tLabel = tLabel->theNext;
|
||||
op->theNdb->releaseNdbLabel(tmp);
|
||||
}
|
||||
op->theFirstLabel = NULL;
|
||||
op->theLastLabel = NULL;
|
||||
|
||||
NdbCall* tCall = op->theFirstCall;
|
||||
while (tCall != NULL) {
|
||||
NdbCall* tmp = tCall;
|
||||
tCall = tCall->theNext;
|
||||
op->theNdb->releaseNdbCall(tmp);
|
||||
}
|
||||
op->theFirstCall = NULL;
|
||||
op->theLastCall = NULL;
|
||||
|
||||
NdbSubroutine* tSubroutine = op->theFirstSubroutine;
|
||||
while (tSubroutine != NULL) {
|
||||
NdbSubroutine* tmp = tSubroutine;
|
||||
tSubroutine = tSubroutine->theNext;
|
||||
op->theNdb->releaseNdbSubroutine(tmp);
|
||||
}
|
||||
op->theFirstSubroutine = NULL;
|
||||
op->theLastSubroutine = NULL;
|
||||
|
||||
op->theNoOfLabels = 0;
|
||||
op->theNoOfSubroutines = 0;
|
||||
|
||||
// reset AI size
|
||||
op->theTotalCurrAI_Len = new_size;
|
||||
op->theAI_LenInCurrAI = new_curr;
|
||||
|
||||
// reset signal pointers
|
||||
op->theCurrentATTRINFO = lastSignal;
|
||||
op->theATTRINFOptr = &lastSignal->getDataPtrSend()[new_curr];
|
||||
|
||||
// interpreter sizes are set later somewhere
|
||||
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
static void
|
||||
update(const NdbError & _err){
|
||||
NdbError & error = (NdbError &) _err;
|
||||
ndberror_struct ndberror = (ndberror_struct)error;
|
||||
ndberror_update(&ndberror);
|
||||
error = NdbError(ndberror);
|
||||
}
|
||||
|
||||
const NdbError &
|
||||
NdbScanFilter::getNdbError() const
|
||||
{
|
||||
update(m_impl.m_error);
|
||||
return m_impl.m_error;
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
int
|
||||
|
|
|
|||
|
|
@ -849,6 +849,10 @@ NdbScanOperation::doSendScan(int aProcessorId)
|
|||
// sending it. This could not be done in openScan because
|
||||
// we created the ATTRINFO signals after the SCAN_TABREQ signal.
|
||||
ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
|
||||
if (unlikely(theTotalCurrAI_Len > ScanTabReq::MaxTotalAttrInfo)) {
|
||||
setErrorCode(4257);
|
||||
return -1;
|
||||
}
|
||||
req->attrLenKeyLen = (tupKeyLen << 16) | theTotalCurrAI_Len;
|
||||
Uint32 tmp = req->requestInfo;
|
||||
ScanTabReq::setDistributionKeyFlag(tmp, theDistrKeyIndicator_);
|
||||
|
|
|
|||
|
|
@ -527,7 +527,8 @@ ErrorBundle ErrorCodes[] = {
|
|||
{ 4270, IE, "Unknown blob error" },
|
||||
{ 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
|
||||
{ 4271, AE, "Invalid index object, not retrieved via getIndex()" },
|
||||
{ 4275, AE, "The blob method is incompatible with operation type or lock mode" }
|
||||
{ 4275, AE, "The blob method is incompatible with operation type or lock mode" },
|
||||
{ 4294, AE, "Scan filter is too large, discarded" }
|
||||
};
|
||||
|
||||
static
|
||||
|
|
|
|||
|
|
@ -149,13 +149,19 @@ SUFFIXES = .sh
|
|||
-e 's!@''CC''@!@CC@!'\
|
||||
-e 's!@''CXX''@!@CXX@!'\
|
||||
-e 's!@''GXX''@!@GXX@!'\
|
||||
-e 's!@''SAVE_CC''@!@SAVE_CC@!'\
|
||||
-e 's!@''SAVE_CXX''@!@SAVE_CXX@!'\
|
||||
-e 's!@''CC_VERSION''@!@CC_VERSION@!'\
|
||||
-e 's!@''CXX_VERSION''@!@CXX_VERSION@!'\
|
||||
-e 's!@''PERL''@!@PERL@!' \
|
||||
-e 's!@''ASFLAGS''@!@SAVE_ASFLAGS@!'\
|
||||
-e 's!@''CFLAGS''@!@SAVE_CFLAGS@!'\
|
||||
-e 's!@''CXXFLAGS''@!@SAVE_CXXFLAGS@!'\
|
||||
-e 's!@''LDFLAGS''@!@SAVE_LDFLAGS@!'\
|
||||
-e 's!@''SAVE_ASFLAGS''@!@SAVE_ASFLAGS@!'\
|
||||
-e 's!@''SAVE_CFLAGS''@!@SAVE_CFLAGS@!'\
|
||||
-e 's!@''SAVE_CXXFLAGS''@!@SAVE_CXXFLAGS@!'\
|
||||
-e 's!@''SAVE_LDFLAGS''@!@SAVE_LDFLAGS@!'\
|
||||
-e 's!@''ASFLAGS''@!@ASFLAGS@!'\
|
||||
-e 's!@''CFLAGS''@!@CFLAGS@!'\
|
||||
-e 's!@''CXXFLAGS''@!@CXXFLAGS@!'\
|
||||
-e 's!@''LDFLAGS''@!@LDFLAGS@!'\
|
||||
-e 's!@''CLIENT_LIBS''@!@CLIENT_LIBS@!' \
|
||||
-e 's!@''ZLIB_LIBS''@!@ZLIB_LIBS@!' \
|
||||
-e 's!@''LIBS''@!@LIBS@!' \
|
||||
|
|
|
|||
|
|
@ -23,7 +23,8 @@ VERSION="@VERSION@@MYSQL_SERVER_SUFFIX@"
|
|||
COMPILATION_COMMENT="@COMPILATION_COMMENT@"
|
||||
BUGmysql="mysql@lists.mysql.com"
|
||||
# This is set by configure
|
||||
COMP_ENV_INFO="CC='@CC@' CFLAGS='@CFLAGS@' CXX='@CXX@' CXXFLAGS='@CXXFLAGS@' LDFLAGS='@LDFLAGS@' ASFLAGS='@ASFLAGS@'"
|
||||
COMP_CALL_INFO="CC='@SAVE_CC@' CFLAGS='@SAVE_CFLAGS@' CXX='@SAVE_CXX@' CXXFLAGS='@SAVE_CXXFLAGS@' LDFLAGS='@SAVE_LDFLAGS@' ASFLAGS='@SAVE_ASFLAGS@'"
|
||||
COMP_RUN_INFO="CC='@CC@' CFLAGS='@CFLAGS@' CXX='@CXX@' CXXFLAGS='@CXXFLAGS@' LDFLAGS='@LDFLAGS@' ASFLAGS='@ASFLAGS@'"
|
||||
CONFIGURE_LINE="@CONF_COMMAND@"
|
||||
|
||||
LIBC_INFO=""
|
||||
|
|
@ -261,7 +262,8 @@ ${ORGANIZATION- $ORGANIZATION_C}
|
|||
`test -n "$MACHINE" && echo "Machine: $MACHINE"`
|
||||
`test -n "$FILE_PATHS" && echo "Some paths: $FILE_PATHS"`
|
||||
`test -n "$GCC_INFO" && echo "GCC: $GCC_INFO"`
|
||||
`test -n "$COMP_ENV_INFO" && echo "Compilation info: $COMP_ENV_INFO"`
|
||||
`test -n "$COMP_CALL_INFO" && echo "Compilation info (call): $COMP_CALL_INFO"`
|
||||
`test -n "$COMP_RUN_INFO" && echo "Compilation info (used): $COMP_RUN_INFO"`
|
||||
`test -n "$LIBC_INFO" && echo "LIBC: $LIBC_INFO"`
|
||||
`test -n "$CONFIGURE_LINE" && echo "Configure command: $CONFIGURE_LINE"`
|
||||
`test -n "$PERL_INFO" && echo "Perl: $PERL_INFO"`
|
||||
|
|
|
|||
|
|
@ -1356,6 +1356,30 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *rec
|
|||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
|
||||
{
|
||||
KEY* key_info= table->key_info + keyno;
|
||||
KEY_PART_INFO* key_part= key_info->key_part;
|
||||
KEY_PART_INFO* end= key_part+key_info->key_parts;
|
||||
uint i;
|
||||
DBUG_ENTER("check_index_fields_in_write_set");
|
||||
|
||||
if (m_retrieve_all_fields)
|
||||
{
|
||||
DBUG_RETURN(true);
|
||||
}
|
||||
for (i= 0; key_part != end; key_part++, i++)
|
||||
{
|
||||
Field* field= key_part->field;
|
||||
if (field->query_id != current_thd->query_id)
|
||||
{
|
||||
DBUG_RETURN(false);
|
||||
}
|
||||
}
|
||||
|
||||
DBUG_RETURN(true);
|
||||
}
|
||||
|
||||
int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, const byte *record, uint keyno)
|
||||
{
|
||||
KEY* key_info= table->key_info + keyno;
|
||||
|
|
@ -1643,7 +1667,8 @@ check_null_in_record(const KEY* key_info, const byte *record)
|
|||
* primary key or unique index values
|
||||
*/
|
||||
|
||||
int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
|
||||
int ha_ndbcluster::peek_indexed_rows(const byte *record,
|
||||
NDB_WRITE_OP write_op)
|
||||
{
|
||||
NdbTransaction *trans= m_active_trans;
|
||||
NdbOperation *op;
|
||||
|
|
@ -1656,7 +1681,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
|
|||
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
|
||||
|
||||
first= NULL;
|
||||
if (check_pk && table->s->primary_key != MAX_KEY)
|
||||
if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY)
|
||||
{
|
||||
/*
|
||||
* Fetch any row with colliding primary key
|
||||
|
|
@ -1687,9 +1712,15 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
|
|||
*/
|
||||
if (check_null_in_record(key_info, record))
|
||||
{
|
||||
DBUG_PRINT("info", ("skipping check for key with NULL"));
|
||||
DBUG_PRINT("info", ("skipping check for key with NULL"));
|
||||
continue;
|
||||
}
|
||||
if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i))
|
||||
{
|
||||
DBUG_PRINT("info", ("skipping check for key %u not in write_set", i));
|
||||
continue;
|
||||
}
|
||||
|
||||
NdbIndexOperation *iop;
|
||||
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
|
||||
key_part= key_info->key_part;
|
||||
|
|
@ -2268,7 +2299,7 @@ int ha_ndbcluster::write_row(byte *record)
|
|||
start_bulk_insert will set parameters to ensure that each
|
||||
write_row is committed individually
|
||||
*/
|
||||
int peek_res= peek_indexed_rows(record, true);
|
||||
int peek_res= peek_indexed_rows(record, NDB_INSERT);
|
||||
|
||||
if (!peek_res)
|
||||
{
|
||||
|
|
@ -2302,7 +2333,7 @@ int ha_ndbcluster::write_row(byte *record)
|
|||
auto_value, 1) == -1)
|
||||
{
|
||||
if (--retries &&
|
||||
ndb->getNdbError().status == NdbError::TemporaryError);
|
||||
ndb->getNdbError().status == NdbError::TemporaryError)
|
||||
{
|
||||
my_sleep(retry_sleep);
|
||||
continue;
|
||||
|
|
@ -2456,7 +2487,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
|
|||
if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
|
||||
thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
|
||||
{
|
||||
int peek_res= peek_indexed_rows(new_data, pk_update);
|
||||
NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE;
|
||||
int peek_res= peek_indexed_rows(new_data, write_op);
|
||||
|
||||
if (!peek_res)
|
||||
{
|
||||
|
|
@ -4862,7 +4894,7 @@ ulonglong ha_ndbcluster::get_auto_increment()
|
|||
auto_value, cache_size, step, start))
|
||||
{
|
||||
if (--retries &&
|
||||
ndb->getNdbError().status == NdbError::TemporaryError);
|
||||
ndb->getNdbError().status == NdbError::TemporaryError)
|
||||
{
|
||||
my_sleep(retry_sleep);
|
||||
continue;
|
||||
|
|
|
|||
|
|
@ -59,6 +59,12 @@ typedef struct ndb_index_data {
|
|||
bool null_in_unique_index;
|
||||
} NDB_INDEX_DATA;
|
||||
|
||||
typedef enum ndb_write_op {
|
||||
NDB_INSERT = 0,
|
||||
NDB_UPDATE = 1,
|
||||
NDB_PK_UPDATE = 2
|
||||
} NDB_WRITE_OP;
|
||||
|
||||
typedef struct st_ndbcluster_share {
|
||||
THR_LOCK lock;
|
||||
pthread_mutex_t mutex;
|
||||
|
|
@ -251,7 +257,7 @@ private:
|
|||
const NdbOperation *first,
|
||||
const NdbOperation *last,
|
||||
uint errcode);
|
||||
int peek_indexed_rows(const byte *record, bool check_pk);
|
||||
int peek_indexed_rows(const byte *record, NDB_WRITE_OP write_op);
|
||||
int unique_index_read(const byte *key, uint key_len,
|
||||
byte *buf);
|
||||
int ordered_index_scan(const key_range *start_key,
|
||||
|
|
@ -286,6 +292,7 @@ private:
|
|||
int get_ndb_blobs_value(NdbBlob *last_ndb_blob, my_ptrdiff_t ptrdiff);
|
||||
int set_primary_key(NdbOperation *op, const byte *key);
|
||||
int set_primary_key_from_record(NdbOperation *op, const byte *record);
|
||||
bool check_index_fields_in_write_set(uint keyno);
|
||||
int set_index_key_from_record(NdbOperation *op, const byte *record,
|
||||
uint keyno);
|
||||
int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0);
|
||||
|
|
|
|||
|
|
@ -1338,9 +1338,23 @@ ha_ndbcluster_cond::generate_scan_filter(NdbScanOperation *op)
|
|||
|
||||
if (m_cond_stack)
|
||||
{
|
||||
NdbScanFilter filter(op);
|
||||
NdbScanFilter filter(op, false); // don't abort on too large
|
||||
|
||||
DBUG_RETURN(generate_scan_filter_from_cond(filter));
|
||||
int ret=generate_scan_filter_from_cond(filter);
|
||||
if (ret != 0)
|
||||
{
|
||||
const NdbError& err=filter.getNdbError();
|
||||
if (err.code == NdbScanFilter::FilterTooLarge)
|
||||
{
|
||||
// err.message has static storage
|
||||
DBUG_PRINT("info", ("%s", err.message));
|
||||
push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
||||
err.code, err.message);
|
||||
ret=0;
|
||||
}
|
||||
}
|
||||
if (ret != 0)
|
||||
DBUG_RETURN(ret);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
@ -1391,7 +1405,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op,
|
|||
{
|
||||
KEY_PART_INFO* key_part= key_info->key_part;
|
||||
KEY_PART_INFO* end= key_part+key_info->key_parts;
|
||||
NdbScanFilter filter(op);
|
||||
NdbScanFilter filter(op, true); // abort on too large
|
||||
int res;
|
||||
DBUG_ENTER("generate_scan_filter_from_key");
|
||||
|
||||
|
|
|
|||
|
|
@ -4945,13 +4945,44 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref)
|
|||
my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH");
|
||||
return TRUE;
|
||||
}
|
||||
table=((Item_field *)item)->field->table;
|
||||
/*
|
||||
With prepared statements Item_func_match::fix_fields is called twice.
|
||||
When it is called first time we have original item tree here and add
|
||||
conversion layer for character sets that do not have ctype array a few
|
||||
lines below. When it is called second time, we already have conversion
|
||||
layer in item tree.
|
||||
*/
|
||||
table= (item->type() == Item::FIELD_ITEM) ?
|
||||
((Item_field *)item)->field->table :
|
||||
((Item_field *)((Item_func_conv *)item)->key_item())->field->table;
|
||||
if (!(table->file->table_flags() & HA_CAN_FULLTEXT))
|
||||
{
|
||||
my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0));
|
||||
return 1;
|
||||
}
|
||||
table->fulltext_searched=1;
|
||||
/* A workaround for ucs2 character set */
|
||||
if (!args[1]->collation.collation->ctype)
|
||||
{
|
||||
CHARSET_INFO *compatible_cs=
|
||||
get_compatible_charset_with_ctype(args[1]->collation.collation);
|
||||
bool rc= 1;
|
||||
if (compatible_cs)
|
||||
{
|
||||
Item_string *conv_item= new Item_string("", 0, compatible_cs,
|
||||
DERIVATION_EXPLICIT);
|
||||
item= args[0];
|
||||
args[0]= conv_item;
|
||||
rc= agg_item_charsets(cmp_collation, func_name(), args, arg_count,
|
||||
MY_COLL_ALLOW_SUPERSET_CONV |
|
||||
MY_COLL_ALLOW_COERCIBLE_CONV |
|
||||
MY_COLL_DISALLOW_NONE, 1);
|
||||
args[0]= item;
|
||||
}
|
||||
else
|
||||
my_error(ER_WRONG_ARGUMENTS, MYF(0), "MATCH");
|
||||
return rc;
|
||||
}
|
||||
return agg_arg_collations_for_comparison(cmp_collation,
|
||||
args+1, arg_count-1, 0);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -311,7 +311,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
|
|||
continue;
|
||||
}
|
||||
|
||||
const char *password= get_field(&mem, table->field[2]);
|
||||
const char *password= get_field(thd->mem_root, table->field[2]);
|
||||
uint password_len= password ? strlen(password) : 0;
|
||||
set_user_salt(&user, password, password_len);
|
||||
if (user.salt_len == 0 && password_len != 0)
|
||||
|
|
@ -364,7 +364,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
|
|||
/* Starting from 4.0.2 we have more fields */
|
||||
if (table->s->fields >= 31)
|
||||
{
|
||||
char *ssl_type=get_field(&mem, table->field[next_field++]);
|
||||
char *ssl_type=get_field(thd->mem_root, table->field[next_field++]);
|
||||
if (!ssl_type)
|
||||
user.ssl_type=SSL_TYPE_NONE;
|
||||
else if (!strcmp(ssl_type, "ANY"))
|
||||
|
|
@ -378,11 +378,11 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
|
|||
user.x509_issuer= get_field(&mem, table->field[next_field++]);
|
||||
user.x509_subject= get_field(&mem, table->field[next_field++]);
|
||||
|
||||
char *ptr = get_field(&mem, table->field[next_field++]);
|
||||
char *ptr = get_field(thd->mem_root, table->field[next_field++]);
|
||||
user.user_resource.questions=ptr ? atoi(ptr) : 0;
|
||||
ptr = get_field(&mem, table->field[next_field++]);
|
||||
ptr = get_field(thd->mem_root, table->field[next_field++]);
|
||||
user.user_resource.updates=ptr ? atoi(ptr) : 0;
|
||||
ptr = get_field(&mem, table->field[next_field++]);
|
||||
ptr = get_field(thd->mem_root, table->field[next_field++]);
|
||||
user.user_resource.conn_per_hour= ptr ? atoi(ptr) : 0;
|
||||
if (user.user_resource.questions || user.user_resource.updates ||
|
||||
user.user_resource.conn_per_hour)
|
||||
|
|
@ -391,7 +391,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
|
|||
if (table->s->fields >= 36)
|
||||
{
|
||||
/* Starting from 5.0.3 we have max_user_connections field */
|
||||
ptr= get_field(&mem, table->field[next_field++]);
|
||||
ptr= get_field(thd->mem_root, table->field[next_field++]);
|
||||
user.user_resource.user_conn= ptr ? atoi(ptr) : 0;
|
||||
}
|
||||
else
|
||||
|
|
@ -4898,6 +4898,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
|
|||
byte user_key[MAX_KEY_LENGTH];
|
||||
uint key_prefix_length;
|
||||
DBUG_ENTER("handle_grant_table");
|
||||
THD *thd= current_thd;
|
||||
|
||||
if (! table_no) // mysql.user table
|
||||
{
|
||||
|
|
@ -4965,17 +4966,18 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
|
|||
DBUG_PRINT("info",("scan error: %d", error));
|
||||
continue;
|
||||
}
|
||||
if (! (host= get_field(&mem, host_field)))
|
||||
if (! (host= get_field(thd->mem_root, host_field)))
|
||||
host= "";
|
||||
if (! (user= get_field(&mem, user_field)))
|
||||
if (! (user= get_field(thd->mem_root, user_field)))
|
||||
user= "";
|
||||
|
||||
#ifdef EXTRA_DEBUG
|
||||
DBUG_PRINT("loop",("scan fields: '%s'@'%s' '%s' '%s' '%s'",
|
||||
user, host,
|
||||
get_field(&mem, table->field[1]) /*db*/,
|
||||
get_field(&mem, table->field[3]) /*table*/,
|
||||
get_field(&mem, table->field[4]) /*column*/));
|
||||
get_field(thd->mem_root, table->field[1]) /*db*/,
|
||||
get_field(thd->mem_root, table->field[3]) /*table*/,
|
||||
get_field(thd->mem_root,
|
||||
table->field[4]) /*column*/));
|
||||
#endif
|
||||
if (strcmp(user_str, user) ||
|
||||
my_strcasecmp(system_charset_info, host_str, host))
|
||||
|
|
|
|||
|
|
@ -1032,12 +1032,14 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
|
|||
Query_cache_block_table *block_table, *block_table_end;
|
||||
ulong tot_length;
|
||||
Query_cache_query_flags flags;
|
||||
#ifndef __WIN__
|
||||
const uint spin_treshold= 50000;
|
||||
const double lock_time_treshold= 0.1; /* Time in seconds */
|
||||
uint spin_count= 0;
|
||||
int lock_status= 0;
|
||||
ulong new_time= 0;
|
||||
ulong stop_time= 0;
|
||||
#endif
|
||||
|
||||
DBUG_ENTER("Query_cache::send_result_to_client");
|
||||
|
||||
|
|
@ -1085,6 +1087,9 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef __WIN__
|
||||
STRUCT_LOCK(&structure_guard_mutex);
|
||||
#else
|
||||
stop_time= my_clock()+(ulong)lock_time_treshold*CLOCKS_PER_SEC;
|
||||
while ((lock_status= pthread_mutex_trylock(&structure_guard_mutex)) == EBUSY
|
||||
&& spin_count < spin_treshold
|
||||
|
|
@ -1107,6 +1112,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
|
|||
thd->lex->safe_to_cache_query= FALSE;
|
||||
goto err;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (query_cache_size == 0 || flush_in_progress)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -955,8 +955,8 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
|
|||
sql_field->length= dup_field->char_length;
|
||||
sql_field->pack_length= dup_field->pack_length;
|
||||
sql_field->key_length= dup_field->key_length;
|
||||
sql_field->create_length_to_internal_length();
|
||||
sql_field->decimals= dup_field->decimals;
|
||||
sql_field->create_length_to_internal_length();
|
||||
sql_field->unireg_check= dup_field->unireg_check;
|
||||
/*
|
||||
We're making one field from two, the result field will have
|
||||
|
|
|
|||
|
|
@ -15864,6 +15864,99 @@ static void test_bug29306()
|
|||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
Bug#31669 Buffer overflow in mysql_change_user()
|
||||
*/
|
||||
|
||||
#define LARGE_BUFFER_SIZE 2048
|
||||
|
||||
static void test_bug31669()
|
||||
{
|
||||
int rc;
|
||||
static char buff[LARGE_BUFFER_SIZE+1];
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
static char user[USERNAME_LENGTH+1];
|
||||
static char db[NAME_LEN+1];
|
||||
static char query[LARGE_BUFFER_SIZE*2];
|
||||
#endif
|
||||
|
||||
DBUG_ENTER("test_bug31669");
|
||||
myheader("test_bug31669");
|
||||
|
||||
rc= mysql_change_user(mysql, NULL, NULL, NULL);
|
||||
DIE_UNLESS(rc);
|
||||
|
||||
rc= mysql_change_user(mysql, "", "", "");
|
||||
DIE_UNLESS(rc);
|
||||
|
||||
memset(buff, 'a', sizeof(buff));
|
||||
|
||||
rc= mysql_change_user(mysql, buff, buff, buff);
|
||||
DIE_UNLESS(rc);
|
||||
|
||||
rc = mysql_change_user(mysql, opt_user, opt_password, current_db);
|
||||
DIE_UNLESS(!rc);
|
||||
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
memset(db, 'a', sizeof(db));
|
||||
db[NAME_LEN]= 0;
|
||||
strxmov(query, "CREATE DATABASE IF NOT EXISTS ", db, NullS);
|
||||
rc= mysql_query(mysql, query);
|
||||
myquery(rc);
|
||||
|
||||
memset(user, 'b', sizeof(user));
|
||||
user[USERNAME_LENGTH]= 0;
|
||||
memset(buff, 'c', sizeof(buff));
|
||||
buff[LARGE_BUFFER_SIZE]= 0;
|
||||
strxmov(query, "GRANT ALL PRIVILEGES ON *.* TO '", user, "'@'%' IDENTIFIED BY "
|
||||
"'", buff, "' WITH GRANT OPTION", NullS);
|
||||
rc= mysql_query(mysql, query);
|
||||
myquery(rc);
|
||||
|
||||
rc= mysql_query(mysql, "FLUSH PRIVILEGES");
|
||||
myquery(rc);
|
||||
|
||||
rc= mysql_change_user(mysql, user, buff, db);
|
||||
DIE_UNLESS(!rc);
|
||||
|
||||
user[USERNAME_LENGTH-1]= 'a';
|
||||
rc= mysql_change_user(mysql, user, buff, db);
|
||||
DIE_UNLESS(rc);
|
||||
|
||||
user[USERNAME_LENGTH-1]= 'b';
|
||||
buff[LARGE_BUFFER_SIZE-1]= 'd';
|
||||
rc= mysql_change_user(mysql, user, buff, db);
|
||||
DIE_UNLESS(rc);
|
||||
|
||||
buff[LARGE_BUFFER_SIZE-1]= 'c';
|
||||
db[NAME_LEN-1]= 'e';
|
||||
rc= mysql_change_user(mysql, user, buff, db);
|
||||
DIE_UNLESS(rc);
|
||||
|
||||
db[NAME_LEN-1]= 'a';
|
||||
rc= mysql_change_user(mysql, user, buff, db);
|
||||
DIE_UNLESS(!rc);
|
||||
|
||||
rc= mysql_change_user(mysql, user + 1, buff + 1, db + 1);
|
||||
DIE_UNLESS(rc);
|
||||
|
||||
rc = mysql_change_user(mysql, opt_user, opt_password, current_db);
|
||||
DIE_UNLESS(!rc);
|
||||
|
||||
strxmov(query, "DROP DATABASE ", db, NullS);
|
||||
rc= mysql_query(mysql, query);
|
||||
myquery(rc);
|
||||
|
||||
strxmov(query, "DELETE FROM mysql.user WHERE User='", user, "'", NullS);
|
||||
rc= mysql_query(mysql, query);
|
||||
myquery(rc);
|
||||
DIE_UNLESS(mysql_affected_rows(mysql) == 1);
|
||||
#endif
|
||||
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
/*
|
||||
Read and parse arguments and MySQL options from my.cnf
|
||||
*/
|
||||
|
|
@ -16156,6 +16249,7 @@ static struct my_tests_st my_tests[]= {
|
|||
{ "test_bug27592", test_bug27592 },
|
||||
{ "test_bug29948", test_bug29948 },
|
||||
{ "test_bug29306", test_bug29306 },
|
||||
{ "test_bug31669", test_bug31669 },
|
||||
{ 0, 0 }
|
||||
};
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue