mirror of https://github.com/MariaDB/server.git, synced 2025-01-16 12:02:42 +01:00
Merge mysql-5.5-bugteam -> mysql-5.5-innodb
commit 7a8ca24d31
74 changed files with 939 additions and 292 deletions
@@ -42,9 +42,7 @@ cat <<EOF
Options used with this script always override any default behaviour.
The default package is MySQL Cluster Carrier Grade (standard) Edition.
For developers, the default package is MySQL Cluster Carrier Grade
Extended Edition, and the default build behaviour is to build with
autotools. If you want to skip autotools and start from a source code
release you can use the --no-autotools flag.
Extended Edition.

More information for developers can be found in --help,
--sysadmin-help, and --extended-help.
@@ -102,7 +100,8 @@ cat <<EOF
If your building on a Solaris SPARC machine and you want to compile
using SunStudio you must set
--compiler=forte; if you want to build using the Intel compiler on
Linux, you need to set --compiler=icc.
Linux, you need to set --compiler=icc. If you want to use the AMD
compiler Open64 set --compiler=open64.

A synonym for forte is SunStudio, so one can also use
--compiler=SunStudio.
@@ -150,14 +149,32 @@ Usage: $0 [options]
--without-debug Build non-debug version
--use-comment Set the comment in the build
--with-fast-mutexes Use try/retry method of acquiring mutex
--without-fast-mutexes Don't use try/retry method of acquiring mutex
--without-perfschema Don't build with performance schema
--generate-feedback path Compile with feedback using the specified directory
to store the feedback files
--use-feedback path Compile using feedback information from the specified
directory
--with-debug Build debug version
--extra-debug-flag flag Add -Dflag to compiler flags
InnoDB supports the following debug flags,
UNIV_DEBUG, UNIV_SYNC_DEBUG, UNIV_MEM_DEBUG,
UNIV_DEBUG_THREAD_CREATION, UNIV_DEBUG_LOCK_VALIDATE,
UNIV_DEBUG_PRINT, UNIV_DEBUG_FILE_ACCESS,
UNIV_LIGHT_MEM_DEBUG, UNIV_LOG_DEBUG,
UNIV_IBUF_COUNT_DEBUG, UNIV_SEARCH_DEBUG,
UNIV_LOG_LSN_DEBUG, UNIV_ZIP_DEBUG, UNIV_AHI_DEBUG,
UNIV_DEBUG_VALGRIND, UNIV_SQL_DEBUG, UNIV_AIO_DEBUG,
UNIV_BTR_DEBUG, UNIV_LRU_DEBUG, UNIV_BUF_DEBUG,
UNIV_HASH_DEBUG, UNIV_LIST_DEBUG, UNIV_IBUF_DEBUG
--with-link-time-optimizer
Link time optimizations enabled (Requires GCC 4.5
if GCC used), available for icc as well. This flag
is only considered if also fast is set.
--with-mso Special flag used by Open64 compiler (requres at
least version 4.2.3) that enables optimisations
for multi-core scalability.
--configure-only Stop after running configure.
--use-autotools Start by running autoconf, automake,.. tools
--no-autotools Start from configure
--print-only Print commands that the script will execute,
but do not actually execute
--prefix=path Build with prefix 'path'
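For context, the --generate-feedback and --use-feedback options documented above are meant to be used as a pair: one instrumented build pass, followed by an optimised rebuild that consumes the collected profile. A minimal sketch of such a two-pass invocation follows; the profile directory /tmp/mccge-fb and the idea of running a benchmark between the two passes are illustrative assumptions, not part of this patch.

# Pass 1: instrumented build; profile data is written under /tmp/mccge-fb.
BUILD/build_mccge.sh --no-autotools --generate-feedback /tmp/mccge-fb
# ... run a representative workload against the instrumented binaries ...
# Pass 2: rebuild using the recorded feedback.
BUILD/build_mccge.sh --no-autotools --use-feedback /tmp/mccge-fb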
@@ -170,7 +187,7 @@ Usage: $0 [options]
MySQL use
--commercial Use commercial libraries
--gpl Use gpl libraries
--compiler=[gcc|icc|forte|SunStudio] Select compiler
--compiler=[gcc|icc|forte|SunStudio|open64] Select compiler
--cpu=[x86|x86_64|sparc|itanium] Select CPU type
x86 => x86 and 32-bit binary
x86_64 => x86 and 64 bit binary
@@ -389,7 +406,8 @@ extended_usage()
platforms supported by this script.

The --fast option adds -mtune=cpu_arg to the C/C++ flags (provides
support for Nocona, K8, and other processors).
support for Nocona, K8, and other processors), this option is valid
when gcc is the compiler.

Use of the --debug option adds -g to the C/C++ flags.

@@ -397,10 +415,35 @@ extended_usage()
by calling the script as follows:
CC="/usr/local/bin/gcc" CXX="/usr/local/bin/gcc" BUILD/build_mccge.sh

FreeBSD/x86/gcc
---------------
No flags are used. Instead, configure determines the proper flags to
use.
Feedback profiler on gcc
------------------------
Using gcc --generate-feedback=path causes the following flags to be added
to the compiler flags.

--fprofile-generate
--fprofile-dir=path

Using gcc with --use-feedback=path causes the following flags to be added
to the compiler flags. --fprofile-correction indicates MySQL is a multi-
threaded application and thus counters can be inconsistent with each other
and the compiler should take this into account.

--fprofile-use
--fprofile-dir=path
--fprofile-correction

Feedback compilation using Open64
---------------------------------

Using Open64 with --generate-feedback=path causes the following flags to
be added to the compiler flags.

-fb-create path/feedback

Using Open64 with --use-feedback=path causes the following flags to be
added to the compiler flags.

--fb-opt path/feedback

Linux/x86+Itanium/gcc
-------------
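The gcc flag sets listed in this help text (spelled with a leading double dash there, while gcc itself takes them with a single dash) can also be exercised outside the script. A hedged sketch on a single translation unit, with demo.c and the prof/ directory as placeholder names that are not part of the patch:

# Instrumented compile: profile counters are written under prof/ at run time.
mkdir -p prof
gcc -O2 -fprofile-generate -fprofile-dir=prof -o demo demo.c
./demo    # exercise the program so the counters get populated
# Optimised recompile driven by the profile; -fprofile-correction tolerates
# the slightly inconsistent counters a multi-threaded run can produce.
gcc -O2 -fprofile-use -fprofile-correction -fprofile-dir=prof -o demo demo.c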
@@ -410,6 +453,9 @@ extended_usage()
added to the C/C++ flags. (To build a 32-bit binary on a 64-bit CPU,
use the --32 option as described previously.)

When gcc 4.5 is used and the user set --with-link-time-optimizer then
also --flto is added to compiler flags and linker flags.

Linux/x86+Itanium/icc
-------------
Flags used:
@@ -433,6 +479,19 @@ extended_usage()
added to the C/C++ flags; this provides optimisations specific to Core
2 Duo. This is added only when the --fast flag is set.

Linux/x86/Open64
----------------
For normal builds use -O3, when fast flag is set one also adds
--march=auto to generate optimized builds for the CPU used. If
--with-link-time-optimizer is set also -ipa is set. There is also
a special flag --with-mso which can be set to get --mso set which
activates optimisation for multi-core scalability.

FreeBSD/x86/gcc
---------------
No flags are used. Instead, configure determines the proper flags to
use.

Solaris/x86/gcc
---------------
All builds on Solaris are by default 64-bit, so -m64 is always used in
@@ -653,6 +712,9 @@ parse_compiler()
forte | SunStudio | sunstudio )
compiler="forte"
;;
open64 | Open64 )
compiler="open64"
;;
*)
echo "Unknown compiler '$compiler'"
exit 1
@@ -686,6 +748,15 @@ parse_options()
--with-fast-mutexes)
with_fast_mutexes="yes"
;;
--without-fast-mutexes)
with_fast_mutexes="no"
;;
--without-perfschema)
with_perfschema="no"
;;
--with-mso)
with_mso="yes"
;;
--use-tcmalloc)
use_tcmalloc="yes"
;;
@@ -693,6 +764,10 @@ parse_options()
with_debug_flag="yes"
fast_flag="no"
;;
--extra-debug-flag)
shift
extra_debug_flags="$extra_debug_flags -D$1"
;;
--debug)
compile_debug_flag="yes"
;;
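As the new --extra-debug-flag branch above shows (a shift followed by appending -D$1), the option is repeatable and takes the flag name as a separate argument. A hypothetical debug invocation combining it with two of the InnoDB debug flags listed earlier in the help text (the particular flag choice is illustrative):

# Each occurrence appends one -D<flag> to the compiler flags.
BUILD/build_mccge.sh --with-debug \
  --extra-debug-flag UNIV_DEBUG \
  --extra-debug-flag UNIV_SYNC_DEBUG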
@@ -712,6 +787,14 @@ parse_options()
compiler=`get_key_value "$1"`
parse_compiler
;;
--generate-feedback)
shift
GENERATE_FEEDBACK_PATH="$1"
;;
--use-feedback)
shift
USE_FEEDBACK_PATH="$1"
;;
--cpu=*)
cpu_type=`get_key_value "$1"`
parse_cpu_type
@@ -746,12 +829,6 @@ parse_options()
--parallelism=*)
parallelism=`get_key_value "$1"`
;;
--use-autotools)
use_autotools="yes"
;;
--no-autotools)
use_autotools="no"
;;
--configure-only)
just_configure="yes"
;;
@@ -896,6 +973,9 @@ set_cpu_base()
#
init_configure_commands()
{
path=`dirname $0`
cp $path/cmake_configure.sh $path/../configure
chmod +x $path/../configure
cflags="$c_warnings $base_cflags $compiler_flags"
cxxflags="$cxx_warnings $base_cxxflags $compiler_flags"
configure="./configure $base_configs $with_flags"
@@ -1084,6 +1164,7 @@ set_with_debug_flags()
loc_debug_flags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS "
compiler_flags="$compiler_flags $loc_debug_flags"
fi
compiler_flags="$compiler_flags $extra_debug_flags"
fi
}

@@ -1105,7 +1186,7 @@ set_no_omit_frame_pointer_for_developers()
#
set_debug_flag()
{
if test "x$compile_debug_flags" = "xyes" ; then
if test "x$compile_debug_flag" = "xyes" ; then
compiler_flags="$compiler_flags -g"
fi
}
@@ -1152,7 +1233,9 @@ set_base_configs()
fi
base_configs="$base_configs --with-pic"
base_configs="$base_configs --with-csv-storage-engine"
if test "x$with_perfschema" != "xno" ; then
base_configs="$base_configs --with-perfschema"
fi
}

#
@@ -1251,6 +1334,19 @@ set_gcc_special_options()
fi
}

#
# If we discover a Core 2 Duo architecture and we have enabled the fast
# flag, we enable a compile especially optimised for Core 2 Duo. This
# feature is currently available on Intel's icc compiler only.
#
set_icc_special_options()
{
if test "x$fast_flag" = "xyes" && test "x$cpu_arg" = "xcore2" && \
test "x$compiler" = "xicc" ; then
compiler_flags="$compiler_flags -xT"
fi
}

set_cc_and_cxx_for_gcc()
{
if test "x$CC" = "x" ; then
@@ -1271,6 +1367,16 @@ set_cc_and_cxx_for_icc()
fi
}

set_cc_and_cxx_for_open64()
{
if test "x$CC" = "x" ; then
CC="opencc -static-libgcc -fno-exceptions"
fi
if test "x$CXX" = "x" ; then
CXX="openCC -static-libgcc -fno-exceptions"
fi
}

set_cc_and_cxx_for_forte()
{
if test "x$CC" = "x" ; then
@@ -1281,19 +1387,6 @@ set_cc_and_cxx_for_forte()
fi
}

#
# If we discover a Core 2 Duo architecture and we have enabled the fast
# flag, we enable a compile especially optimised for Core 2 Duo. This
# feature is currently available on Intel's icc compiler only.
#
set_icc_special_options()
{
if test "x$fast_flag" = "xyes" && test "x$cpu_arg" = "xcore2" && \
test "x$compiler" = "xicc" ; then
compiler_flags="$compiler_flags -xT"
fi
}

#
# FreeBSD Section
#
@@ -1357,12 +1450,45 @@ get_gcc_version()
fi
}

#
# Link time optimizer (interprocedural optimizations) for Open64
#
check_for_open64_link_time_optimizer()
{
if test "x$with_link_time_optimizer" = "xyes" ; then
compiler_flags="$compiler_flags -ipa"
LDFLAGS="$LDFLAGS -ipa"
fi
}

#
# Link time optimizer (interprocedural optimizations) for icc
#
check_for_icc_link_time_optimizer()
{
if test "x$with_link_time_optimizer" = "xyes" ; then
compiler_flags="$compiler_flags -ipo"
LDFLAGS="$LDFLAGS -ipo"
fi
}

#
# Link time optimizer (interprocedural optimizations) for forte
#
check_for_forte_link_time_optimizer()
{
if test "x$with_link_time_optimizer" = "xyes" ; then
compiler_flags="$compiler_flags -ipo"
LDFLAGS="$LDFLAGS -ipo"
fi
}

#
# Link Time Optimizer in GCC (LTO) uses a parameter -flto
# which was added to GCC 4.5, if --with-link-time-optimizer
# is set then use this feature
#
check_for_link_time_optimizer()
check_for_gcc_link_time_optimizer()
{
get_gcc_version
if test "$gcc_version" -ge 405 && \
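The per-compiler helpers above are all gated on the same with_link_time_optimizer variable, which --with-link-time-optimizer sets and which the help text says is only honoured together with --fast. A hedged example of an Open64 invocation that would reach check_for_open64_link_time_optimizer, using only options documented in this patch:

BUILD/build_mccge.sh --no-autotools --compiler=open64 --fast \
  --with-link-time-optimizer --with-mso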
@@ -1371,11 +1497,37 @@ check_for_link_time_optimizer()
LDFLAGS="$LDFLAGS -flto"
fi
}

set_feedback_for_gcc()
{
if test "x$GENERATE_FEEDBACK_PATH" != "x" ; then
compiler_flags="$compiler_flags -fprofile-generate"
compiler_flags="$compiler_flags -fprofile-dir=$GENERATE_FEEDBACK_PATH"
elif test "x$USE_FEEDBACK_PATH" != "x" ; then
compiler_flags="$compiler_flags -fprofile-use"
compiler_flags="$compiler_flags -fprofile-correction"
compiler_flags="$compiler_flags -fprofile-dir=$USE_FEEDBACK_PATH"
fi
}

set_feedback_for_open64()
{
if test "x$GENERATE_FEEDBACK_PATH" != "x" ; then
compiler_flags="$compiler_flags --fb-create=$GENERATE_FEEDBACK_PATH/feedback"
elif test "x$USE_FEEDBACK_PATH" != "x" ; then
compiler_flags="$compiler_flags --fb-opt=$USE_FEEDBACK_PATH/feedback"
fi
}

#
# Linux Section
#
set_linux_configs()
{
# Default to use --with-fast-mutexes on Linux
if test "x$with_fast_mutexes" = "x" ; then
base_configs="$base_configs --with-fast-mutexes"
fi
if test "x$cpu_base_type" != "xx86" && \
test "x$cpu_base_type" != "xitanium" ; then
usage "Only x86 and Itanium CPUs supported for Linux"
@@ -1392,19 +1544,14 @@ set_linux_configs()
if test "x$fast_flag" != "xno" ; then
if test "x$fast_flag" = "xyes" ; then
compiler_flags="$compiler_flags -O3"
check_for_link_time_optimizer
check_for_gcc_link_time_optimizer
else
compiler_flags="$compiler_flags -O2"
compiler_flags="$compiler_flags -O3"
fi
else
compiler_flags="$compiler_flags -O0"
fi
check_64_bits
if test "x$m64" = "xyes" ; then
compiler_flags="$compiler_flags -m64"
else
compiler_flags="$compiler_flags -m32"
fi
set_feedback_for_gcc
# configure will set proper compiler flags for gcc on Linux
elif test "x$compiler" = "xicc" ; then
compiler_flags="$compiler_flags -mp -restrict"
@@ -1414,16 +1561,36 @@ set_linux_configs()
fi
if test "x$fast_flag" != "xno" ; then
compiler_flags="$compiler_flags -O3 -unroll2 -ip"
if test "x$fast_flag" = "xyes" && \
test "x$with_link_time_optimizer" = "xyes" ; then
compiler_flags="$compiler_flags -ipo"
LDFLAGS="$LDFLAGS -ipo"
if test "x$fast_flag" = "xyes" ; then
check_for_icc_link_time_optimizer
fi
fi
elif test "x$compiler" = "xopen64" ; then
set_cc_and_cxx_for_open64
if test "x$fast_flag" != "xno" ; then
if test "x$fast_flag" = "xyes" ; then
compiler_flags="$compiler_flags -O3"
# Generate code specific for the machine you run on
compiler_flags="$compiler_flags -march=auto"
check_for_open64_link_time_optimizer
if test "x$with_mso" = "xyes" ; then
compiler_flags="$compiler_flags -mso"
fi
else
usage "Only gcc and icc compilers supported for Linux"
compiler_flags="$compiler_flags -O3"
fi
fi
set_feedback_for_open64
else
usage "Only gcc,icc and Open64 compilers supported for Linux"
exit 1
fi
check_64_bits
if test "x$m64" = "xyes" ; then
compiler_flags="$compiler_flags -m64"
else
compiler_flags="$compiler_flags -m32"
fi
}

#
@@ -1475,7 +1642,7 @@ set_solaris_configs()
if test "x$fast_flag" = "xyes" ; then
LDFLAGS="$LDFLAGS -O3"
compiler_flags="$compiler_flags -O3"
check_for_link_time_optimizer
check_for_gcc_link_time_optimizer
else
if test "x$fast_flag" = "xgeneric" ; then
LDFLAGS="$LDFLAGS -O2"
@@ -1498,10 +1665,7 @@ set_solaris_configs()
if test "x$fast_flag" = "xyes" ; then
compiler_flags="$compiler_flags -xtarget=native"
compiler_flags="$compiler_flags -xunroll=3"
if test "x$with_link_time_optimizer" = "xyes" ; then
compiler_flags="$compiler_flags -xipo"
LDFLAGS="$LDFLAGS -xipo"
fi
check_for_forte_link_time_optimizer
else
compiler_flags="$compiler_flags -xtarget=generic"
fi
@@ -1612,17 +1776,6 @@ set_default_package()
fi
}

set_autotool_flags()
{
if test "x$use_autotools" = "x" ; then
if test "x$developer_flag" = "xno" ; then
use_autotools="no"
else
use_autotools="yes"
fi
fi
}

set_defaults_based_on_environment()
{
if test ! -z "$MYSQL_DEVELOPER" ; then
@@ -1674,25 +1827,28 @@ base_cxxflags=
base_configs=
debug_flags=
cxxflags=
extra_debug_flags=
m64=
explicit_size_set=
datadir=
commands=
use_autotools=
engine_configs=
ASFLAGS=
LDFLAGS=
use_tcmalloc=
without_comment="yes"
with_fast_mutexes=
with_perfschema="yes"
with_link_time_optimizer=
with_mso=
gcc_version="0"
generate_feedback_path=
use_feedback_path=

set_defaults_based_on_environment

parse_options "$@"

set_autotool_flags
set_default_package

set -e
@@ -1793,9 +1949,6 @@ set_ccache_usage
# Set up commands variable from variables prepared for base
# configurations, compiler flags, and warnings flags.
#
if test "x$use_autotools" = "xyes" ; then
init_auto_commands
fi
init_configure_commands

if test "x$just_configure" != "xyes" ; then
@@ -1806,8 +1959,8 @@ fi
# The commands variable now contains the entire command to be run for
# the build; we either execute it, or merely print it out.
#
if test "x$just_print" = "xyes" ; then
echo "$commands"
else
echo "Running command:"
echo "$commands"
if test "x$just_print" != "xyes" ; then
eval "set -x; $commands"
fi
@@ -8074,13 +8074,16 @@ static void dump_backtrace(void)
{
struct st_connection *conn= cur_con;

my_safe_print_str("read_command_buf", read_command_buf,
sizeof(read_command_buf));
fprintf(stderr, "read_command_buf (%p): ", read_command_buf);
my_safe_print_str(read_command_buf, sizeof(read_command_buf));

if (conn)
{
my_safe_print_str("conn->name", conn->name, conn->name_len);
fprintf(stderr, "conn->name (%p): ", conn->name);
my_safe_print_str(conn->name, conn->name_len);
#ifdef EMBEDDED_LIBRARY
my_safe_print_str("conn->cur_query", conn->cur_query, conn->cur_query_len);
fprintf(stderr, "conn->cur_query (%p): ", conn->cur_query);
my_safe_print_str(conn->cur_query, conn->cur_query_len);
#endif
}
fputs("Attempting backtrace...\n", stderr);
@@ -78,15 +78,15 @@
: "memory")

/*
Actually 32-bit reads/writes are always atomic on x86
But we add LOCK_prefix here anyway to force memory barriers
Actually 32/64-bit reads/writes are always atomic on x86_64,
nonetheless issue memory barriers as appropriate.
*/
#define make_atomic_load_body(S) \
ret=0; \
asm volatile (LOCK_prefix "; cmpxchg %2, %0" \
: "=m" (*a), "=a" (ret) \
: "r" (ret), "m" (*a) \
: "memory")
/* Serialize prior load and store operations. */ \
asm volatile ("mfence" ::: "memory"); \
ret= *a; \
/* Prevent compiler from reordering instructions. */ \
asm volatile ("" ::: "memory")
#define make_atomic_store_body(S) \
asm volatile ("; xchg %0, %1;" \
: "=m" (*a), "+r" (v) \
@@ -569,6 +569,8 @@ extern my_bool my_parse_charset_xml(const char *bug, size_t len,
int (*add)(CHARSET_INFO *cs));
extern char *my_strchr(CHARSET_INFO *cs, const char *str, const char *end,
pchar c);
extern size_t my_strcspn(CHARSET_INFO *cs, const char *str, const char *end,
const char *accept);

my_bool my_propagate_simple(CHARSET_INFO *cs, const uchar *str, size_t len);
my_bool my_propagate_complex(CHARSET_INFO *cs, const uchar *str, size_t len);
@@ -611,6 +611,7 @@ typedef SOCKET_SIZE_TYPE size_socket;
#ifdef _WIN32
#define FN_LIBCHAR '\\'
#define FN_LIBCHAR2 '/'
#define FN_DIRSEP "/\\" /* Valid directory separators */
#define FN_ROOTDIR "\\"
#define FN_DEVCHAR ':'
#define FN_NETWORK_DRIVES /* Uses \\ to indicate network drives */
@@ -618,6 +619,7 @@ typedef SOCKET_SIZE_TYPE size_socket;
#else
#define FN_LIBCHAR '/'
#define FN_LIBCHAR2 '/'
#define FN_DIRSEP "/" /* Valid directory separators */
#define FN_ROOTDIR "/"
#endif

@@ -45,7 +45,7 @@ C_MODE_START
#if defined(HAVE_STACKTRACE) || defined(HAVE_BACKTRACE)
void my_init_stacktrace();
void my_print_stacktrace(uchar* stack_bottom, ulong thread_stack);
void my_safe_print_str(const char* name, const char* val, int max_len);
void my_safe_print_str(const char* val, int max_len);
void my_write_core(int sig);
#if BACKTRACE_DEMANGLE
char *my_demangle(const char *mangled_name, int *status);
@@ -1,2 +1,2 @@
perl mysql-test-run.pl --timer --force --comment=1st --experimental=collections/default.experimental 1st
perl mysql-test-run.pl --timer --force --comment=big-tests --experimental=collections/default.experimental --vardir=var-big-tests --big-test --testcase-timeout=60 --suite-timeout=600 large_tests.alter_table main.alter_table-big main.archive-big main.count_distinct3 main.create-big main.events_stress main.events_time_zone main.information_schema-big main.log_tables-big main.merge-big main.mysqlbinlog_row_big main.read_many_rows_innodb main.ssl-big main.sum_distinct-big main.type_newdecimal-big main.variables-big parts.part_supported_sql_func_innodb parts.partition_alter1_1_2_innodb parts.partition_alter1_1_2_ndb parts.partition_alter1_1_ndb parts.partition_alter1_2_innodb parts.partition_alter1_2_ndb parts.partition_alter2_1_1_innodb parts.partition_alter2_1_2_innodb parts.partition_alter2_2_2_innodb parts.partition_alter4_innodb rpl_ndb.rpl_truncate_7ndb_2
perl mysql-test-run.pl --timer --force --parallel=auto --comment=1st --experimental=collections/default.experimental 1st
perl mysql-test-run.pl --timer --force --parallel=auto --comment=big-tests --experimental=collections/default.experimental --vardir=var-big-tests --big-test --testcase-timeout=60 --suite-timeout=600 large_tests.alter_table main.alter_table-big main.archive-big main.count_distinct3 main.create-big main.events_stress main.events_time_zone main.information_schema-big main.log_tables-big main.merge-big main.mysqlbinlog_row_big main.read_many_rows_innodb main.ssl-big main.sum_distinct-big main.type_newdecimal-big main.variables-big parts.part_supported_sql_func_innodb parts.partition_alter1_1_2_innodb parts.partition_alter1_1_2_ndb parts.partition_alter1_1_ndb parts.partition_alter1_2_innodb parts.partition_alter1_2_ndb parts.partition_alter2_1_1_innodb parts.partition_alter2_1_2_innodb parts.partition_alter2_2_2_innodb parts.partition_alter4_innodb rpl_ndb.rpl_truncate_7ndb_2
@@ -1,10 +1,10 @@
perl mysql-test-run.pl --force --timer --comment=normal --skip-ndbcluster --report-features --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=ps --skip-ndbcluster --ps-protocol --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=funcs1+ps --suite=funcs_1 --ps-protocol --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=funcs2 --suite=funcs_2 --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=partitions --suite=parts --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=stress --suite=stress --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=jp --suite=jp --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=embedded --embedded-server --skip-rpl --skip-ndbcluster --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=nist --suite=nist --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=nist+ps --suite=nist --ps-protocol --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=normal --skip-ndbcluster --report-features --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=ps --skip-ndbcluster --ps-protocol --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=funcs1+ps --suite=funcs_1 --ps-protocol --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=funcs2 --suite=funcs_2 --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=partitions --suite=parts --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=stress --suite=stress --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=jp --suite=jp --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=embedded --embedded-server --skip-rpl --skip-ndbcluster --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=nist --suite=nist --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=nist+ps --suite=nist --ps-protocol --experimental=collections/default.experimental
@@ -1 +1 @@
perl mysql-test-run.pl --force --timer --comment=debug --skip-ndbcluster --skip-rpl --report-features --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=debug --skip-ndbcluster --skip-rpl --report-features --experimental=collections/default.experimental
@@ -1,2 +1,2 @@
perl mysql-test-run.pl --force --timer --comment=ps --skip-ndbcluster --ps-protocol --report-features --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --comment=stress --suite=stress --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=ps --skip-ndbcluster --ps-protocol --report-features --experimental=collections/default.experimental
perl mysql-test-run.pl --force --timer --parallel=auto --comment=stress --suite=stress --experimental=collections/default.experimental
@@ -430,6 +430,8 @@ SELECT table_name, table_comment FROM information_schema.tables
WHERE table_schema= 'test' AND table_name= 't1';
table_name table_comment
t1 Lock wait timeout exceeded; try restarting transaction
Warnings:
Warning 1205 Lock wait timeout exceeded; try restarting transaction
# Connection default
UNLOCK TABLES;
# Connection con3
@@ -2322,6 +2322,8 @@ select table_name, table_type, auto_increment, table_comment
from information_schema.tables where table_schema='test' and table_name='t2';
table_name table_type auto_increment table_comment
t2 BASE TABLE NULL Table 'test'.'t2' was skipped since its definition is being modified by concurrent DDL statement
Warnings:
Warning 1684 Table 'test'.'t2' was skipped since its definition is being modified by concurrent DDL statement
# Switching to connection 'default'.
unlock tables;
# Switching to connection 'con46044'.
@@ -2084,6 +2084,8 @@ SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE
TABLE_SCHEMA = 'test' and TABLE_NAME='tm1';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE VERSION ROW_FORMAT TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE AUTO_INCREMENT CREATE_TIME UPDATE_TIME CHECK_TIME TABLE_COLLATION CHECKSUM CREATE_OPTIONS TABLE_COMMENT
def test tm1 BASE TABLE NULL NULL NULL # # # # # # # # # # NULL # # Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist
Warnings:
Warning 1168 Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist
DROP TABLE tm1;
CREATE TABLE t1(C1 INT, C2 INT, KEY C1(C1), KEY C2(C2)) ENGINE=MYISAM;
CREATE TABLE t2(C1 INT, C2 INT, KEY C1(C1), KEY C2(C2)) ENGINE=MYISAM;
@@ -492,16 +492,14 @@ Tables_in_test
# Checking --one-database option with non_existent_db
# specified with USE command
#
SHOW TABLES IN test;
Tables_in_test
table_in_test
DROP DATABASE test;
CREATE DATABASE connected_db;
SHOW TABLES IN connected_db;
Tables_in_connected_db
table_in_connected_db

CREATE DATABASE test;
SHOW TABLES IN test;
Tables_in_test
table_in_test
DROP DATABASE test;
CREATE DATABASE test;
SHOW TABLES IN connected_db;
Tables_in_connected_db
table_in_connected_db
DROP DATABASE connected_db;

End of tests
mysql-test/r/partition_binlog.result (new file, 49 lines)
@@ -0,0 +1,49 @@
DROP TABLE IF EXISTS t1;
#
# Bug#58147: ALTER TABLE w/ TRUNCATE PARTITION fails
# but the statement is written to binlog
#
CREATE TABLE t1(id INT)
PARTITION BY RANGE (id)
(PARTITION p0 VALUES LESS THAN (100),
PARTITION pmax VALUES LESS THAN (MAXVALUE));
INSERT INTO t1 VALUES (1), (10), (100), (1000);
ALTER TABLE t1 TRUNCATE PARTITION p1;
ERROR HY000: Incorrect partition name
ALTER TABLE t1 DROP PARTITION p1;
ERROR HY000: Error in list of partitions to DROP
# No error returned, output in table format instead:
ALTER TABLE t1 ANALYZE PARTITION p1;
Table Op Msg_type Msg_text
test.t1 analyze error Error in list of partitions to test.t1
ALTER TABLE t1 CHECK PARTITION p1;
Table Op Msg_type Msg_text
test.t1 check error Error in list of partitions to test.t1
ALTER TABLE t1 OPTIMIZE PARTITION p1;
Table Op Msg_type Msg_text
test.t1 optimize error Error in list of partitions to test.t1
ALTER TABLE t1 REPAIR PARTITION p1;
Table Op Msg_type Msg_text
test.t1 repair error Error in list of partitions to test.t1
ALTER TABLE t1 ANALYZE PARTITION p0;
Table Op Msg_type Msg_text
test.t1 analyze status OK
ALTER TABLE t1 CHECK PARTITION p0;
Table Op Msg_type Msg_text
test.t1 check status OK
ALTER TABLE t1 OPTIMIZE PARTITION p0;
Table Op Msg_type Msg_text
test.t1 optimize status OK
ALTER TABLE t1 REPAIR PARTITION p0;
Table Op Msg_type Msg_text
test.t1 repair status OK
ALTER TABLE t1 TRUNCATE PARTITION p0;
ALTER TABLE t1 DROP PARTITION p0;
show binlog events in 'master-bin.000001' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; ALTER TABLE t1 ANALYZE PARTITION p0
master-bin.000001 # Query # # use `test`; ALTER TABLE t1 OPTIMIZE PARTITION p0
master-bin.000001 # Query # # use `test`; ALTER TABLE t1 REPAIR PARTITION p0
master-bin.000001 # Query # # use `test`; ALTER TABLE t1 TRUNCATE PARTITION p0
master-bin.000001 # Query # # use `test`; ALTER TABLE t1 DROP PARTITION p0
DROP TABLE t1;
@@ -8,3 +8,5 @@ ERROR 42000: DELETE command denied to user 'bug51770'@'localhost' for table 'plu
GRANT DELETE ON mysql.plugin TO bug51770@localhost;
UNINSTALL PLUGIN example;
DROP USER bug51770@localhost;
INSTALL PLUGIN example SONAME '../ha_example.so';
ERROR HY000: No paths allowed for shared library
@@ -663,6 +663,8 @@ flush tables;
SHOW TABLE STATUS like 't1';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 NULL NULL NULL NULL # # # # NULL NULL NULL NULL NULL NULL NULL NULL Incorrect information in file: './test/t1.frm'
Warnings:
Warning 1033 Incorrect information in file: './test/t1.frm'
show create table t1;
ERROR HY000: Incorrect information in file: './test/t1.frm'
drop table if exists t1;
@@ -840,6 +840,8 @@ show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MyISAM 10 Fixed 0 0 0 # 1024 0 NULL # # NULL latin1_swedish_ci NULL
v1 NULL NULL NULL NULL NULL NULL # NULL NULL NULL # # NULL NULL NULL NULL View 'test.v1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
Warnings:
Warning 1356 View 'test.v1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
drop view v1;
drop table t1;
create view v1 as select 99999999999999999999999999999999999999999999999999999 as col1;
mysql-test/suite/federated/federated_bug_35333.result (new file, 67 lines)
@@ -0,0 +1,67 @@
#
# Bug 35333 "If a Federated table can't connect to the remote hose, can't retrieve metadata"
#
# Queries such as SHOW TABLE STATUS and SELECT * FROM INFORMATION_SCHEMA.TABLES fail
# when encountering a federated table that cannot connect to its remote table.
#
# The fix is to store the error text in the TABLE COMMENTS column of I_S.TABLES, clear
# the remote connection error and push a warning instead. This allows the SELECT operation
# to complete while still indicating a problem. This fix applies to any non-fatal system
# error that occurs during a query against I_S.TABLES.de
CREATE DATABASE federated;
CREATE DATABASE federated;
CREATE DATABASE IF NOT EXISTS realdb;
DROP TABLE IF EXISTS realdb.t0;
DROP TABLE IF EXISTS federated.t0;
#
# Create the base table to be referenced
#
CREATE TABLE realdb.t0 (a text, b text) ENGINE=MYISAM;
#
# Create a federated table with a bogus port number
#
CREATE TABLE federated.t0 (a text, b text) ENGINE=FEDERATED
CONNECTION='mysql://root@127.0.0.1:63333/realdb/t0';
#
# Trigger a federated system error during a INFORMATION_SCHEMA.TABLES query
#
SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ENGINE, ROW_FORMAT, TABLE_ROWS, DATA_LENGTH, TABLE_COMMENT
FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'realdb' or TABLE_SCHEMA = 'federated';
TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE ROW_FORMAT TABLE_ROWS DATA_LENGTH TABLE_COMMENT
federated t0 BASE TABLE FEDERATED NULL 0 Unable to connect to foreign data source: Can't connect to MySQL server on '127.0.0.1' (socket errno)
realdb t0 BASE TABLE MyISAM Dynamic 0 0
Warnings:
Warning 1429 Unable to connect to foreign data source: Can't connect to MySQL server on '127.0.0.1' (socket errno)
SHOW WARNINGS;
Level Code Message
Warning 1429 Unable to connect to foreign data source: Can't connect to MySQL server on '127.0.0.1' (socket errno)
#
# Create a MyISAM table then corrupt the file
#
USE realdb;
CREATE TABLE t1 (c1 int) ENGINE=MYISAM;
#
# Corrupt the MyISAM table by deleting the base file
#
#
# Trigger a MyISAM system error during an INFORMATION_SCHEMA.TABLES query
#
SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ENGINE, ROW_FORMAT, TABLE_ROWS, DATA_LENGTH, TABLE_COMMENT
FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE ROW_FORMAT TABLE_ROWS DATA_LENGTH TABLE_COMMENT
realdb t1 BASE TABLE NULL NULL NULL NULL Can't find file: 't1' (errno: 2)
Warnings:
Warning 1017 Can't find file: 't1' (errno: 2)
SHOW WARNINGS;
Level Code Message
Warning 1017 Can't find file: 't1' (errno: 2)
#
# Cleanup
#
DROP TABLE IF EXISTS realdb.t0;
DROP TABLE IF EXISTS federated.t0;
DROP DATABASE realdb;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE federated;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE federated;
mysql-test/suite/federated/federated_bug_35333.test (new file, 74 lines)
@@ -0,0 +1,74 @@
--echo #
--echo # Bug 35333 "If a Federated table can't connect to the remote hose, can't retrieve metadata"
--echo #
--echo # Queries such as SHOW TABLE STATUS and SELECT * FROM INFORMATION_SCHEMA.TABLES fail
--echo # when encountering a federated table that cannot connect to its remote table.
--echo #
--echo # The fix is to store the error text in the TABLE COMMENTS column of I_S.TABLES, clear
--echo # the remote connection error and push a warning instead. This allows the SELECT operation
--echo # to complete while still indicating a problem. This fix applies to any non-fatal system
--echo # error that occurs during a query against I_S.TABLES.de

--source federated.inc

--disable_warnings
CREATE DATABASE IF NOT EXISTS realdb;
# Federated database exists
DROP TABLE IF EXISTS realdb.t0;
DROP TABLE IF EXISTS federated.t0;
--enable_warnings

--echo #
--echo # Create the base table to be referenced
--echo #
CREATE TABLE realdb.t0 (a text, b text) ENGINE=MYISAM;

--echo #
--echo # Create a federated table with a bogus port number
--echo #
CREATE TABLE federated.t0 (a text, b text) ENGINE=FEDERATED
CONNECTION='mysql://root@127.0.0.1:63333/realdb/t0';

#--warning ER_CONNECT_TO_FOREIGN_DATA_SOURCE

--echo #
--echo # Trigger a federated system error during a INFORMATION_SCHEMA.TABLES query
--echo #
# Remove O/S-specific socket error
--replace_regex /\(.*\)/(socket errno)/
SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ENGINE, ROW_FORMAT, TABLE_ROWS, DATA_LENGTH, TABLE_COMMENT
FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'realdb' or TABLE_SCHEMA = 'federated';

# Remove O/S-specific socket error
--replace_regex /\(.*\)/(socket errno)/
SHOW WARNINGS;

--echo #
--echo # Create a MyISAM table then corrupt the file
--echo #
USE realdb;
CREATE TABLE t1 (c1 int) ENGINE=MYISAM;
--echo #
--echo # Corrupt the MyISAM table by deleting the base file
--echo #
let $MYSQLD_DATADIR= `SELECT @@datadir`;
--remove_file $MYSQLD_DATADIR/realdb/t1.MYD
--remove_file $MYSQLD_DATADIR/realdb/t1.MYI

--echo #
--echo # Trigger a MyISAM system error during an INFORMATION_SCHEMA.TABLES query
--echo #
SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ENGINE, ROW_FORMAT, TABLE_ROWS, DATA_LENGTH, TABLE_COMMENT
FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';

SHOW WARNINGS;
--echo #
--echo # Cleanup
--echo #
--disable_warnings
DROP TABLE IF EXISTS realdb.t0;
DROP TABLE IF EXISTS federated.t0;
DROP DATABASE realdb;
--enable_warnings

--source federated_cleanup.inc
@@ -100,16 +100,16 @@ create trigger performance_schema.bi_file_instances
before insert on performance_schema.file_instances
for each row begin end;

--error ER_WRONG_PERFSCHEMA_USAGE
--error ER_CANT_CREATE_TABLE
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;

--error ER_WRONG_PERFSCHEMA_USAGE
--error ER_CANT_CREATE_TABLE
create table test.t1 like performance_schema.setup_instruments;

--error ER_WRONG_PERFSCHEMA_USAGE
--error ER_CANT_CREATE_TABLE
create table test.t1 like performance_schema.events_waits_current;

--error ER_WRONG_PERFSCHEMA_USAGE
--error ER_CANT_CREATE_TABLE
create table test.t1 like performance_schema.file_instances;

--error ER_TABLEACCESS_DENIED_ERROR
@@ -1,9 +1,6 @@
select * from performance_schema.cond_instances limit 1;
NAME OBJECT_INSTANCE_BEGIN
# #
select * from performance_schema.cond_instances
where name='FOO';
NAME OBJECT_INSTANCE_BEGIN
insert into performance_schema.cond_instances
set name='FOO', object_instance_begin=12;
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'cond_instances'
@@ -1,10 +1,7 @@
select * from performance_schema.events_waits_current
where event_name like 'Wait/Synch/%' limit 1;
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
# # # # # # # # NULL NULL NULL # NULL # NULL 0
select * from performance_schema.events_waits_current
where event_name='FOO';
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
insert into performance_schema.events_waits_current
set thread_id='1', event_id=1,
event_name='FOO', timer_start=1, timer_end=2, timer_wait=3;
@@ -1,18 +1,11 @@
select * from performance_schema.events_waits_history
where event_name like 'Wait/Synch/%' limit 1;
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
# # # # # # # # NULL NULL NULL # NULL # NULL 0
select * from performance_schema.events_waits_history
where event_name='FOO';
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
select * from performance_schema.events_waits_history
where event_name like 'Wait/Synch/%' order by timer_wait limit 1;
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
# # # # # # # # NULL NULL NULL # NULL # NULL 0
select * from performance_schema.events_waits_history
where event_name like 'Wait/Synch/%' order by timer_wait desc limit 1;
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
# # # # # # # # NULL NULL NULL # NULL # NULL 0
insert into performance_schema.events_waits_history
set thread_id='1', event_id=1,
event_name='FOO', timer_start=1, timer_end=2, timer_wait=3;
@@ -1,18 +1,11 @@
select * from performance_schema.events_waits_history_long
where event_name like 'Wait/Synch/%' limit 1;
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
# # # # # # # # NULL NULL NULL # NULL # NULL 0
select * from performance_schema.events_waits_history_long
where event_name='FOO';
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
select * from performance_schema.events_waits_history_long
where event_name like 'Wait/Synch/%' order by timer_wait limit 1;
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
# # # # # # # # NULL NULL NULL # NULL # NULL 0
select * from performance_schema.events_waits_history_long
where event_name like 'Wait/Synch/%' order by timer_wait desc limit 1;
THREAD_ID EVENT_ID EVENT_NAME SOURCE TIMER_START TIMER_END TIMER_WAIT SPINS OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE OBJECT_INSTANCE_BEGIN NESTING_EVENT_ID OPERATION NUMBER_OF_BYTES FLAGS
# # # # # # # # NULL NULL NULL # NULL # NULL 0
insert into performance_schema.events_waits_history_long
set thread_id='1', event_id=1,
event_name='FOO', timer_start=1, timer_end=2, timer_wait=3;
@@ -1,26 +1,15 @@
select * from performance_schema.events_waits_summary_by_instance
where event_name like 'Wait/Synch/%' limit 1;
EVENT_NAME OBJECT_INSTANCE_BEGIN COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
# # # # # # #
select * from performance_schema.events_waits_summary_by_instance
where event_name='FOO';
EVENT_NAME OBJECT_INSTANCE_BEGIN COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
select * from performance_schema.events_waits_summary_by_instance
order by count_star limit 1;
EVENT_NAME OBJECT_INSTANCE_BEGIN COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
# # # # # # #
select * from performance_schema.events_waits_summary_by_instance
order by count_star desc limit 1;
EVENT_NAME OBJECT_INSTANCE_BEGIN COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
# # # # # # #
select * from performance_schema.events_waits_summary_by_instance
where min_timer_wait > 0 order by count_star limit 1;
EVENT_NAME OBJECT_INSTANCE_BEGIN COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
# # # # # # #
select * from performance_schema.events_waits_summary_by_instance
where min_timer_wait > 0 order by count_star desc limit 1;
EVENT_NAME OBJECT_INSTANCE_BEGIN COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
# # # # # # #
insert into performance_schema.events_waits_summary_by_instance
set event_name='FOO', object_instance_begin=0,
count_star=1, sum_timer_wait=2, min_timer_wait=3,
@@ -1,10 +1,7 @@
select * from performance_schema.events_waits_summary_by_thread_by_event_name
where event_name like 'Wait/Synch/%' limit 1;
THREAD_ID EVENT_NAME COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
# # # # # # #
select * from performance_schema.events_waits_summary_by_thread_by_event_name
where event_name='FOO';
THREAD_ID EVENT_NAME COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
insert into performance_schema.events_waits_summary_by_thread_by_event_name
set event_name='FOO', thread_id=1,
count_star=1, sum_timer_wait=2, min_timer_wait=3,
@@ -1,10 +1,7 @@
select * from performance_schema.events_waits_summary_global_by_event_name
where event_name like 'Wait/Synch/%' limit 1;
EVENT_NAME COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
# # # # # #
select * from performance_schema.events_waits_summary_global_by_event_name
where event_name='FOO';
EVENT_NAME COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT
insert into performance_schema.events_waits_summary_global_by_event_name
set event_name='FOO', count_star=1, sum_timer_wait=2, min_timer_wait=3,
avg_timer_wait=4, max_timer_wait=5;
@@ -1,9 +1,6 @@
select * from performance_schema.file_instances limit 1;
FILE_NAME EVENT_NAME OPEN_COUNT
# # #
select * from performance_schema.file_instances
where file_name='FOO';
FILE_NAME EVENT_NAME OPEN_COUNT
insert into performance_schema.file_instances
set file_name='FOO', event_name='BAR', open_count=12;
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'file_instances'
@@ -1,10 +1,7 @@
select * from performance_schema.file_summary_by_event_name
where event_name like 'Wait/io/%' limit 1;
EVENT_NAME COUNT_READ COUNT_WRITE SUM_NUMBER_OF_BYTES_READ SUM_NUMBER_OF_BYTES_WRITE
# # # # #
select * from performance_schema.file_summary_by_event_name
where event_name='FOO';
EVENT_NAME COUNT_READ COUNT_WRITE SUM_NUMBER_OF_BYTES_READ SUM_NUMBER_OF_BYTES_WRITE
insert into performance_schema.file_summary_by_event_name
set event_name='FOO', count_read=1, count_write=2,
sum_number_of_bytes_read=4, sum_number_of_bytes_write=5;
@@ -1,10 +1,7 @@
select * from performance_schema.file_summary_by_instance
where event_name like 'Wait/io/%' limit 1;
FILE_NAME EVENT_NAME COUNT_READ COUNT_WRITE SUM_NUMBER_OF_BYTES_READ SUM_NUMBER_OF_BYTES_WRITE
# # # # # #
select * from performance_schema.file_summary_by_instance
where event_name='FOO';
FILE_NAME EVENT_NAME COUNT_READ COUNT_WRITE SUM_NUMBER_OF_BYTES_READ SUM_NUMBER_OF_BYTES_WRITE
insert into performance_schema.file_summary_by_instance
set event_name='FOO', count_read=1, count_write=2,
sum_number_of_bytes_read=4, sum_number_of_bytes_write=5;
@@ -1,9 +1,6 @@
select * from performance_schema.mutex_instances limit 1;
NAME OBJECT_INSTANCE_BEGIN LOCKED_BY_THREAD_ID
# # #
select * from performance_schema.mutex_instances
where name='FOO';
NAME OBJECT_INSTANCE_BEGIN LOCKED_BY_THREAD_ID
insert into performance_schema.mutex_instances
set name='FOO', object_instance_begin=12;
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'mutex_instances'
@@ -1,9 +1,6 @@
select * from performance_schema.rwlock_instances limit 1;
NAME OBJECT_INSTANCE_BEGIN WRITE_LOCKED_BY_THREAD_ID READ_LOCKED_BY_COUNT
# # # #
select * from performance_schema.rwlock_instances
where name='FOO';
NAME OBJECT_INSTANCE_BEGIN WRITE_LOCKED_BY_THREAD_ID READ_LOCKED_BY_COUNT
insert into performance_schema.rwlock_instances
set name='FOO', object_instance_begin=12;
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'rwlock_instances'
@@ -1,10 +1,7 @@
select * from performance_schema.threads
where name like 'Thread/%' limit 1;
THREAD_ID PROCESSLIST_ID NAME
# # #
select * from performance_schema.threads
where name='FOO';
THREAD_ID PROCESSLIST_ID NAME
insert into performance_schema.threads
set name='FOO', thread_id=1, processlist_id=2;
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'threads'
@@ -6,9 +6,9 @@ AND EVENT_NAME IN
WHERE NAME LIKE "wait/synch/%")
LIMIT 1;
create table test.t1(a int) engine=performance_schema;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.events_waits_current;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table performance_schema.t1(a int);
ERROR 42000: CREATE command denied to user 'root'@'localhost' for table 't1'
drop table if exists test.ghost;
@@ -16,6 +16,7 @@ operation, number_of_bytes,
substring(object_name, locate("no_index_tab", object_name)) as short_name
from performance_schema.events_waits_history_long
where operation not like "tell"
and event_name like "wait/io/file/myisam/%"
order by thread_id, event_id;
event_name short_source operation number_of_bytes short_name
wait/io/file/myisam/kfile mi_create.c: create NULL no_index_tab.MYI
@@ -152,13 +152,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.setup_instruments;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.events_waits_current;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.file_instances;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'setup_instruments'
@@ -250,13 +250,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_1'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.setup_instruments;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.events_waits_current;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.file_instances;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_1'@'localhost' for table 'setup_instruments'
@@ -348,13 +348,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_2'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.setup_instruments;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.events_waits_current;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.file_instances;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_2'@'localhost' for table 'setup_instruments'
@@ -446,13 +446,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_3'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.setup_instruments;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.events_waits_current;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
create table test.t1 like performance_schema.file_instances;
ERROR HY000: Invalid performance_schema usage.
ERROR HY000: Can't create table 'test.t1' (errno: 131)
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_3'@'localhost' for table 'setup_instruments'
@ -1,3 +1,4 @@
|
|||
flush status;
|
||||
SET @saved_thread_cache_size = @@global.thread_cache_size;
|
||||
set global thread_cache_size = 0;
|
||||
show variables like "thread_cache_size";
|
||||
|
@ -32,3 +33,7 @@ select @thread_id_increment;
|
|||
@thread_id_increment
|
||||
1
|
||||
set global thread_cache_size = @saved_thread_cache_size;
|
||||
show status like "performance_schema_thread%";
|
||||
Variable_name Value
|
||||
Performance_schema_thread_classes_lost 0
|
||||
Performance_schema_thread_instances_lost 0
|
||||
|
|
|
@ -18,11 +18,12 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.cond_instances limit 1;
|
||||
|
||||
select * from performance_schema.cond_instances
|
||||
where name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.cond_instances
|
||||
|
|
|
@ -18,12 +18,13 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 12 # 14 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.events_waits_current
|
||||
where event_name like 'Wait/Synch/%' limit 1;
|
||||
|
||||
select * from performance_schema.events_waits_current
|
||||
where event_name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.events_waits_current
|
||||
|
|
|
@ -18,20 +18,19 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 12 # 14 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.events_waits_history
|
||||
where event_name like 'Wait/Synch/%' limit 1;
|
||||
|
||||
select * from performance_schema.events_waits_history
|
||||
where event_name='FOO';
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 12 # 14 #
|
||||
select * from performance_schema.events_waits_history
|
||||
where event_name like 'Wait/Synch/%' order by timer_wait limit 1;
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 12 # 14 #
|
||||
select * from performance_schema.events_waits_history
|
||||
where event_name like 'Wait/Synch/%' order by timer_wait desc limit 1;
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.events_waits_history
|
||||
|
|
|
@ -18,20 +18,19 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 12 # 14 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.events_waits_history_long
|
||||
where event_name like 'Wait/Synch/%' limit 1;
|
||||
|
||||
select * from performance_schema.events_waits_history_long
|
||||
where event_name='FOO';
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 12 # 14 #
|
||||
select * from performance_schema.events_waits_history_long
|
||||
where event_name like 'Wait/Synch/%' order by timer_wait limit 1;
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 12 # 14 #
|
||||
select * from performance_schema.events_waits_history_long
|
||||
where event_name like 'Wait/Synch/%' order by timer_wait desc limit 1;
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.events_waits_history_long
|
||||
|
|
|
@ -18,28 +18,25 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.events_waits_summary_by_instance
|
||||
where event_name like 'Wait/Synch/%' limit 1;
|
||||
|
||||
select * from performance_schema.events_waits_summary_by_instance
|
||||
where event_name='FOO';
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 #
|
||||
select * from performance_schema.events_waits_summary_by_instance
|
||||
order by count_star limit 1;
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 #
|
||||
select * from performance_schema.events_waits_summary_by_instance
|
||||
order by count_star desc limit 1;
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 #
|
||||
select * from performance_schema.events_waits_summary_by_instance
|
||||
where min_timer_wait > 0 order by count_star limit 1;
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 #
|
||||
select * from performance_schema.events_waits_summary_by_instance
|
||||
where min_timer_wait > 0 order by count_star desc limit 1;
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.events_waits_summary_by_instance
|
||||
|
|
|
@ -18,12 +18,13 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.events_waits_summary_by_thread_by_event_name
|
||||
where event_name like 'Wait/Synch/%' limit 1;
|
||||
|
||||
select * from performance_schema.events_waits_summary_by_thread_by_event_name
|
||||
where event_name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.events_waits_summary_by_thread_by_event_name
|
||||
|
|
|
@ -18,12 +18,13 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.events_waits_summary_global_by_event_name
|
||||
where event_name like 'Wait/Synch/%' limit 1;
|
||||
|
||||
select * from performance_schema.events_waits_summary_global_by_event_name
|
||||
where event_name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.events_waits_summary_global_by_event_name
|
||||
|
|
|
@ -18,11 +18,12 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.file_instances limit 1;
|
||||
|
||||
select * from performance_schema.file_instances
|
||||
where file_name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.file_instances
|
||||
|
|
|
@ -18,12 +18,13 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.file_summary_by_event_name
|
||||
where event_name like 'Wait/io/%' limit 1;
|
||||
|
||||
select * from performance_schema.file_summary_by_event_name
|
||||
where event_name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.file_summary_by_event_name
|
||||
|
|
|
@ -18,12 +18,13 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 # 5 # 6 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.file_summary_by_instance
|
||||
where event_name like 'Wait/io/%' limit 1;
|
||||
|
||||
select * from performance_schema.file_summary_by_instance
|
||||
where event_name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.file_summary_by_instance
|
||||
|
|
|
@ -18,11 +18,12 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.mutex_instances limit 1;
|
||||
|
||||
select * from performance_schema.mutex_instances
|
||||
where name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.mutex_instances
|
||||
|
|
|
@ -18,11 +18,12 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 # 4 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.rwlock_instances limit 1;
|
||||
|
||||
select * from performance_schema.rwlock_instances
|
||||
where name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.rwlock_instances
|
||||
|
|
|
@ -18,12 +18,13 @@
|
|||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
|
||||
--replace_column 1 # 2 # 3 #
|
||||
--disable_result_log
|
||||
select * from performance_schema.threads
|
||||
where name like 'Thread/%' limit 1;
|
||||
|
||||
select * from performance_schema.threads
|
||||
where name='FOO';
|
||||
--enable_result_log
|
||||
|
||||
--error ER_TABLEACCESS_DENIED_ERROR
|
||||
insert into performance_schema.threads
|
||||
|
|
|
@ -38,14 +38,14 @@ LIMIT 1;
|
|||
# Bug#45088 Should not be able to create tables of engine PERFORMANCE_SCHEMA
|
||||
#
|
||||
|
||||
--error ER_WRONG_PERFSCHEMA_USAGE
|
||||
--error ER_CANT_CREATE_TABLE
|
||||
create table test.t1(a int) engine=performance_schema;
|
||||
|
||||
#
|
||||
# Bug#44897 Performance Schema: can create a ghost table in another database
|
||||
#
|
||||
|
||||
--error ER_WRONG_PERFSCHEMA_USAGE
|
||||
--error ER_CANT_CREATE_TABLE
|
||||
create table test.t1 like performance_schema.events_waits_current;
|
||||
|
||||
#
|
||||
|
|
|
@ -46,6 +46,13 @@ insert into no_index_tab set a = 'foo', b = 1;
|
|||
# Verification
|
||||
# Note that mi_create.c contains mysql_file_tell() calls in debug only,
|
||||
# so the result are filtered to remove 'tell'.
|
||||
# Note that even after setting other instruments to enabled='NO'
|
||||
# and truncating the events_waits_history_long table,
|
||||
# some events -- that were already started but not completed --
|
||||
# for other instruments could still be added in the history.
|
||||
# To protect against that, an extra where clause
|
||||
# "and event_name like "wait/io/file/myisam/%"
|
||||
# is added to the select to filter out the result.
|
||||
|
||||
select event_name,
|
||||
left(source, locate(":", source)) as short_source,
|
||||
|
@ -53,6 +60,7 @@ select event_name,
|
|||
substring(object_name, locate("no_index_tab", object_name)) as short_name
|
||||
from performance_schema.events_waits_history_long
|
||||
where operation not like "tell"
|
||||
and event_name like "wait/io/file/myisam/%"
|
||||
order by thread_id, event_id;
|
||||
|
||||
# In case of failures, this will tell if file io are lost.
|
||||
|
|
|
@ -20,6 +20,8 @@
|
|||
|
||||
# Setup
|
||||
|
||||
flush status;
|
||||
|
||||
SET @saved_thread_cache_size = @@global.thread_cache_size;
|
||||
|
||||
set global thread_cache_size = 0;
|
||||
|
@ -40,7 +42,7 @@ let $con2_ID=`select connection_id()`;
|
|||
let $con2_THREAD_ID=`select thread_id from performance_schema.threads
|
||||
where PROCESSLIST_ID = connection_id()`;
|
||||
|
||||
connection default;
|
||||
--connection default
|
||||
|
||||
--disable_query_log
|
||||
eval select ($con2_ID - $con1_ID) into @id_increment;
|
||||
|
@ -52,7 +54,15 @@ select @id_increment;
|
|||
# Expect 1, THREAD_ID is incremented for each new connection
|
||||
select @thread_id_increment;
|
||||
|
||||
disconnect con2;
|
||||
--disconnect con2
|
||||
|
||||
--connection default
|
||||
|
||||
# Wait for the disconnect con2 to complete
|
||||
let $wait_condition=
|
||||
select count(*) = 2 from performance_schema.threads
|
||||
where name like "thread/sql/one_connection";
|
||||
--source include/wait_condition.inc
|
||||
|
||||
connect (con3, localhost, root, , );
|
||||
|
||||
|
@ -61,10 +71,16 @@ let $con3_ID=`select connection_id()`;
|
|||
let $con3_THREAD_ID=`select thread_id from performance_schema.threads
|
||||
where PROCESSLIST_ID = connection_id()`;
|
||||
|
||||
disconnect con3;
|
||||
disconnect con1;
|
||||
--disconnect con3
|
||||
--disconnect con1
|
||||
|
||||
connection default;
|
||||
--connection default
|
||||
|
||||
# Wait for the disconnect con1 and con3 to complete
|
||||
let $wait_condition=
|
||||
select count(*) = 1 from performance_schema.threads
|
||||
where name like "thread/sql/one_connection";
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--disable_query_log
|
||||
eval select ($con3_ID - $con2_ID) into @id_increment;
|
||||
|
@ -92,7 +108,7 @@ let $con2_ID=`select connection_id()`;
|
|||
let $con2_THREAD_ID=`select thread_id from performance_schema.threads
|
||||
where PROCESSLIST_ID = connection_id()`;
|
||||
|
||||
connection default;
|
||||
--connection default
|
||||
|
||||
--disable_query_log
|
||||
eval select ($con2_ID - $con1_ID) into @id_increment;
|
||||
|
@ -102,7 +118,15 @@ eval select ($con2_THREAD_ID - $con1_THREAD_ID) into @thread_id_increment;
|
|||
select @id_increment;
|
||||
select @thread_id_increment;
|
||||
|
||||
disconnect con2;
|
||||
--disconnect con2
|
||||
|
||||
--connection default
|
||||
|
||||
# Wait for the disconnect con2 to complete
|
||||
let $wait_condition=
|
||||
select count(*) = 2 from performance_schema.threads
|
||||
where name like "thread/sql/one_connection";
|
||||
--source include/wait_condition.inc
|
||||
|
||||
connect (con3, localhost, root, , );
|
||||
|
||||
|
@ -111,10 +135,16 @@ let $con3_ID=`select connection_id()`;
|
|||
let $con3_THREAD_ID=`select thread_id from performance_schema.threads
|
||||
where PROCESSLIST_ID = connection_id()`;
|
||||
|
||||
disconnect con3;
|
||||
disconnect con1;
|
||||
--disconnect con3
|
||||
--disconnect con1
|
||||
|
||||
connection default;
|
||||
--connection default
|
||||
|
||||
# Wait for the disconnect con1 and con3 to complete
|
||||
let $wait_condition=
|
||||
select count(*) = 1 from performance_schema.threads
|
||||
where name like "thread/sql/one_connection";
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--disable_query_log
|
||||
eval select ($con3_ID - $con2_ID) into @id_increment;
|
||||
|
@ -132,3 +162,5 @@ select @thread_id_increment;
|
|||
|
||||
set global thread_cache_size = @saved_thread_cache_size;
|
||||
|
||||
show status like "performance_schema_thread%";
|
||||
|
||||
|
|
|
@ -536,35 +536,34 @@ SHOW TABLES IN test;
|
|||
--echo # specified with USE command
|
||||
--echo #
|
||||
|
||||
# CASE 1 : When 'test' database exists and passed at commandline.
|
||||
# CASE 1 : When 'connected_db' database exists and passed at commandline.
|
||||
--write_file $MYSQLTEST_VARDIR/tmp/one_db_1.sql
|
||||
CREATE TABLE `table_in_test`(i INT);
|
||||
CREATE TABLE `table_in_connected_db`(i INT);
|
||||
USE non_existent_db;
|
||||
# Following statement should be filtered out.
|
||||
CREATE TABLE `table_in_non_existent_db`(i INT);
|
||||
EOF
|
||||
|
||||
# CASE 2 : When 'test' database exists but dropped and recreated in load file.
|
||||
# CASE 2 : When 'connected_db' database exists but dropped and recreated in
|
||||
# load file.
|
||||
--write_file $MYSQLTEST_VARDIR/tmp/one_db_2.sql
|
||||
DROP DATABASE test;
|
||||
CREATE DATABASE test;
|
||||
DROP DATABASE connected_db;
|
||||
CREATE DATABASE connected_db;
|
||||
USE non_existent_db;
|
||||
# Following statements should be filtered out.
|
||||
CREATE TABLE `table_in_non_existent_db`(i INT);
|
||||
USE test;
|
||||
USE connected_db;
|
||||
# Following statements should not be filtered out.
|
||||
CREATE TABLE `table_in_test`(i INT);
|
||||
CREATE TABLE `table_in_connected_db`(i INT);
|
||||
EOF
|
||||
|
||||
--exec $MYSQL --one-database test < $MYSQLTEST_VARDIR/tmp/one_db_1.sql
|
||||
SHOW TABLES IN test;
|
||||
DROP DATABASE test;
|
||||
CREATE DATABASE connected_db;
|
||||
--exec $MYSQL --one-database connected_db < $MYSQLTEST_VARDIR/tmp/one_db_1.sql
|
||||
SHOW TABLES IN connected_db;
|
||||
--echo
|
||||
CREATE DATABASE test;
|
||||
--exec $MYSQL --one-database test < $MYSQLTEST_VARDIR/tmp/one_db_2.sql
|
||||
SHOW TABLES IN test;
|
||||
DROP DATABASE test;
|
||||
CREATE DATABASE test;
|
||||
--exec $MYSQL --one-database connected_db < $MYSQLTEST_VARDIR/tmp/one_db_2.sql
|
||||
SHOW TABLES IN connected_db;
|
||||
DROP DATABASE connected_db;
|
||||
|
||||
--remove_file $MYSQLTEST_VARDIR/tmp/one_db_1.sql
|
||||
--remove_file $MYSQLTEST_VARDIR/tmp/one_db_2.sql
|
||||
|
|
mysql-test/t/partition_binlog.test (new file, 42 lines)
@@ -0,0 +1,42 @@
--source include/have_log_bin.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

--echo #
--echo # Bug#58147: ALTER TABLE w/ TRUNCATE PARTITION fails
--echo # but the statement is written to binlog
--echo #

CREATE TABLE t1(id INT)
PARTITION BY RANGE (id)
(PARTITION p0 VALUES LESS THAN (100),
 PARTITION pmax VALUES LESS THAN (MAXVALUE));

INSERT INTO t1 VALUES (1), (10), (100), (1000);

--let $binlog_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $binlog_start=query_get_value(SHOW MASTER STATUS, Position, 1)

--error ER_WRONG_PARTITION_NAME
ALTER TABLE t1 TRUNCATE PARTITION p1;
--error ER_DROP_PARTITION_NON_EXISTENT
ALTER TABLE t1 DROP PARTITION p1;

--echo # No error returned, output in table format instead:
ALTER TABLE t1 ANALYZE PARTITION p1;
ALTER TABLE t1 CHECK PARTITION p1;
ALTER TABLE t1 OPTIMIZE PARTITION p1;
ALTER TABLE t1 REPAIR PARTITION p1;

ALTER TABLE t1 ANALYZE PARTITION p0;
ALTER TABLE t1 CHECK PARTITION p0;
ALTER TABLE t1 OPTIMIZE PARTITION p0;
ALTER TABLE t1 REPAIR PARTITION p0;
ALTER TABLE t1 TRUNCATE PARTITION p0;
ALTER TABLE t1 DROP PARTITION p0;

--source include/show_binlog_events.inc

DROP TABLE t1;

@@ -18,3 +18,15 @@ UNINSTALL PLUGIN example;
disconnect con1;
connection default;
DROP USER bug51770@localhost;

#
# BUG#58246: INSTALL PLUGIN not secure & crashable
#
# The bug consisted of not recognizing / on Windows, so checking / on
# all platforms should cover this case.

let $path = `select CONCAT_WS('/', '..', $HA_EXAMPLE_SO)`;
--replace_regex /\.dll/.so/
--error ER_UDF_NO_PATHS
eval INSTALL PLUGIN example SONAME '$path';

@@ -24,6 +24,11 @@
#include <unistd.h>
#include <strings.h>

#ifdef __linux__
#include <ctype.h>          /* isprint */
#include <sys/syscall.h>    /* SYS_gettid */
#endif

#if HAVE_EXECINFO_H
#include <execinfo.h>
#endif
@@ -43,10 +48,99 @@ void my_init_stacktrace()
#endif
}

void my_safe_print_str(const char* name, const char* val, int max_len)
#ifdef __linux__

static void print_buffer(char *buffer, size_t count)
{
  char *heap_end= (char*) sbrk(0);
  fprintf(stderr, "%s at %p ", name, val);
  for (; count && *buffer; --count)
  {
    int c= (int) *buffer++;
    fputc(isprint(c) ? c : ' ', stderr);
  }
}

/**
  Access the pages of this process through /proc/self/task/<tid>/mem
  in order to safely print the contents of a memory address range.

  @param addr The address at the start of the memory region.
  @param max_len The length of the memory region.

  @return Zero on success.
*/
static int safe_print_str(const char *addr, int max_len)
{
  int fd;
  pid_t tid;
  off_t offset;
  ssize_t nbytes= 0;
  size_t total, count;
  char buf[256];

  tid= (pid_t) syscall(SYS_gettid);

  sprintf(buf, "/proc/self/task/%d/mem", tid);

  if ((fd= open(buf, O_RDONLY)) < 0)
    return -1;

  /* Ensure that off_t can hold a pointer. */
  compile_time_assert(sizeof(off_t) >= sizeof(intptr));

  total= max_len;
  offset= (intptr) addr;

  /* Read up to the maximum number of bytes. */
  while (total)
  {
    count= min(sizeof(buf), total);

    if ((nbytes= pread(fd, buf, count, offset)) < 0)
    {
      /* Just in case... */
      if (errno == EINTR)
        continue;
      else
        break;
    }

    /* Advance offset into memory. */
    total-= nbytes;
    offset+= nbytes;
    addr+= nbytes;

    /* Output the printable characters. */
    print_buffer(buf, nbytes);

    /* Break if less than requested... */
    if ((count - nbytes))
      break;
  }

  /* Output a new line if something was printed. */
  if (total != (size_t) max_len)
    fputc('\n', stderr);

  if (nbytes == -1)
    fprintf(stderr, "Can't read from address %p: %m.\n", addr);

  close(fd);

  return 0;
}

#endif

void my_safe_print_str(const char* val, int max_len)
{
  char *heap_end;

#ifdef __linux__
  if (!safe_print_str(val, max_len))
    return;
#endif

  heap_end= (char*) sbrk(0);

  if (!PTR_SANE(val))
  {
@@ -54,7 +148,6 @@ void my_safe_print_str(const char* name, const char* val, int max_len)
    return;
  }

  fprintf(stderr, "= ");
  for (; max_len && PTR_SANE(val) && *val; --max_len)
    fputc(*val++, stderr);
  fputc('\n', stderr);
@@ -607,9 +700,8 @@ void my_write_core(int unused)
}


void my_safe_print_str(const char *name, const char *val, int len)
void my_safe_print_str(const char *val, int len)
{
  fprintf(stderr,"%s at %p", name, val);
  __try
  {
    fprintf(stderr,"=%.*s\n", len, val);

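The Linux path above never dereferences the possibly corrupt pointer directly: it asks the kernel to copy the bytes through /proc/self/task/<tid>/mem, so a bad address turns into a failed pread() instead of a second fault inside the signal handler. The following is a rough standalone sketch of that idea, not the server code; the function names, buffer size and use of snprintf() are illustrative assumptions, and unlike the real handler this sketch makes no attempt to restrict itself to async-signal-safe calls.

/* Minimal sketch (Linux only): dump up to max_len bytes at addr by reading
   /proc/self/task/<tid>/mem with pread(), so an unmapped pointer yields a
   failed read instead of a SIGSEGV in the caller. Illustrative only. */
#include <ctype.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int dump_mem_via_proc(const void *addr, size_t max_len)
{
  char path[64], buf[256];
  pid_t tid= (pid_t) syscall(SYS_gettid);
  off_t offset= (off_t) (uintptr_t) addr;
  int fd;

  snprintf(path, sizeof(path), "/proc/self/task/%d/mem", (int) tid);
  if ((fd= open(path, O_RDONLY)) < 0)
    return -1;                          /* caller can fall back to direct access */

  while (max_len > 0)
  {
    size_t chunk= max_len < sizeof(buf) ? max_len : sizeof(buf);
    ssize_t n= pread(fd, buf, chunk, offset);
    if (n <= 0)
      break;                            /* unreadable page: stop, do not crash */
    for (ssize_t i= 0; i < n; i++)      /* keep only printable bytes */
      fputc(isprint((unsigned char) buf[i]) ? buf[i] : ' ', stderr);
    offset+= n;
    max_len-= (size_t) n;
  }
  close(fd);
  fputc('\n', stderr);
  return 0;
}

int main(void)
{
  const char *msg= "hello from my own address space";
  return dump_mem_via_proc(msg, strlen(msg));
}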
@@ -3428,7 +3428,7 @@ int ha_partition::truncate()
    ALTER TABLE t TRUNCATE PARTITION ...
*/

int ha_partition::truncate_partition(Alter_info *alter_info)
int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
{
  int error= 0;
  List_iterator<partition_element> part_it(m_part_info->partitions);
@@ -3440,6 +3440,9 @@ int ha_partition::truncate_partition(Alter_info *alter_info)
                                             PART_ADMIN);
  DBUG_ENTER("ha_partition::truncate_partition");

  /* Only binlog when it starts any call to the partitions handlers */
  *binlog_stmt= false;

  /*
    TRUNCATE also means resetting auto_increment. Hence, reset
    it so that it will be initialized again at the next use.
@@ -3453,6 +3456,8 @@ int ha_partition::truncate_partition(Alter_info *alter_info)
      (!(alter_info->flags & ALTER_ALL_PARTITION)))
    DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);

  *binlog_stmt= true;

  do
  {
    partition_element *part_elem= part_it++;

@@ -362,7 +362,7 @@ public:
    @remark This method is a partitioning-specific hook
            and thus not a member of the general SE API.
  */
  int truncate_partition(Alter_info *);
  int truncate_partition(Alter_info *, bool *binlog_stmt);

  virtual bool is_fatal_error(int error, uint flags)
  {

@@ -3308,7 +3308,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
  }

  /* Start logging with a new file */
  close(LOG_CLOSE_INDEX);
  close(LOG_CLOSE_INDEX | LOG_CLOSE_TO_BE_OPENED);
  if ((error= my_delete_allow_opened(index_file_name, MYF(0)))) // Reset (open will update)
  {
    if (my_errno == ENOENT)

@@ -2427,7 +2427,7 @@ the thread stack. Please read http://dev.mysql.com/doc/mysql/en/linux.html\n\n",

  if (!(test_flags & TEST_NO_STACKTRACE))
  {
    fprintf(stderr, "thd: 0x%lx\n",(long) thd);
    fprintf(stderr, "Thread pointer: 0x%lx\n", (long) thd);
    fprintf(stderr, "Attempting backtrace. You can use the following "
                    "information to find out\nwhere mysqld died. If "
                    "you see no messages after this, something went\n"
@@ -2455,11 +2455,13 @@ the thread stack. Please read http://dev.mysql.com/doc/mysql/en/linux.html\n\n",
      kreason= "KILLED_NO_VALUE";
      break;
    }
    fprintf(stderr, "Trying to get some variables.\n\
Some pointers may be invalid and cause the dump to abort...\n");
    my_safe_print_str("thd->query", thd->query(), 1024);
    fprintf(stderr, "thd->thread_id=%lu\n", (ulong) thd->thread_id);
    fprintf(stderr, "thd->killed=%s\n", kreason);
    fprintf(stderr, "\nTrying to get some variables.\n"
                    "Some pointers may be invalid and cause the dump to abort.\n");
    fprintf(stderr, "Query (%p): ", thd->query());
    my_safe_print_str(thd->query(), min(1024, thd->query_length()));
    fprintf(stderr, "Connection ID (thread ID): %lu\n", (ulong) thd->thread_id);
    fprintf(stderr, "Status: %s\n", kreason);
    fputc('\n', stderr);
  }
  fprintf(stderr, "\
The manual page at http://dev.mysql.com/doc/mysql/en/crashing.html contains\n\

@@ -110,6 +110,7 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd)
  ha_partition *partition;
  ulong timeout= thd->variables.lock_wait_timeout;
  TABLE_LIST *first_table= thd->lex->select_lex.table_list.first;
  bool binlog_stmt;
  DBUG_ENTER("Alter_table_truncate_partition_statement::execute");

  /*
@@ -161,16 +162,18 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd)
  partition= (ha_partition *) first_table->table->file;

  /* Invoke the handler method responsible for truncating the partition. */
  if ((error= partition->truncate_partition(&thd->lex->alter_info)))
  if ((error= partition->truncate_partition(&thd->lex->alter_info,
                                            &binlog_stmt)))
    first_table->table->file->print_error(error, MYF(0));

  /*
    All effects of a truncate operation are committed even if the
    operation fails. Thus, the query must be written to the binary
    log. The only exception is a unimplemented truncate method. Also,
    it is logged in statement format, regardless of the binlog format.
    log. The exception is a unimplemented truncate method or failure
    before any call to handler::truncate() is done.
    Also, it is logged in statement format, regardless of the binlog format.
  */
  if (error != HA_ERR_WRONG_COMMAND)
  if (error != HA_ERR_WRONG_COMMAND && binlog_stmt)
    error|= write_bin_log(thd, !error, thd->query(), thd->query_length());

  /*

@@ -280,6 +280,26 @@ static void report_error(int where_to, uint error, ...)
  }
}

/**
  Check if the provided path is valid in the sense that it does cause
  a relative reference outside the directory.

  @note Currently, this function only check if there are any
  characters in FN_DIRSEP in the string, but it might change in the
  future.

  @code
  check_valid_path("../foo.so") -> true
  check_valid_path("foo.so") -> false
  @endcode
*/
bool check_valid_path(const char *path, size_t len)
{
  size_t prefix= my_strcspn(files_charset_info, path, path + len, FN_DIRSEP);
  return prefix < len;
}


/****************************************************************************
  Value type thunks, allows the C world to play in the C++ world
****************************************************************************/
@@ -408,13 +428,15 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
  struct st_plugin_dl *tmp, plugin_dl;
  void *sym;
  DBUG_ENTER("plugin_dl_add");
  DBUG_PRINT("enter", ("dl->str: '%s', dl->length: %d",
                       dl->str, (int) dl->length));
  plugin_dir_len= strlen(opt_plugin_dir);
  /*
    Ensure that the dll doesn't have a path.
    This is done to ensure that only approved libraries from the
    plugin directory are used (to make this even remotely secure).
  */
  if (my_strchr(files_charset_info, dl->str, dl->str + dl->length, FN_LIBCHAR) ||
  if (check_valid_path(dl->str, dl->length) ||
      check_string_char_length((LEX_STRING *) dl, "", NAME_CHAR_LEN,
                               system_charset_info, 1) ||
      plugin_dir_len + dl->length + 1 >= FN_REFLEN)

@@ -153,6 +153,7 @@ extern bool plugin_register_builtin(struct st_mysql_plugin *plugin);
extern void plugin_thdvar_init(THD *thd);
extern void plugin_thdvar_cleanup(THD *thd);
extern SHOW_COMP_OPTION plugin_status(const char *name, int len, size_t type);
extern bool check_valid_path(const char *path, size_t length);

typedef my_bool (plugin_foreach_func)(THD *thd,
                                      plugin_ref plugin,

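With check_valid_path() in place, the old platform-specific my_strchr()/IF_WIN() tests in plugin and UDF loading collapse into one rule: reject any SONAME that contains a directory separator, so nothing outside the plugin directory can be named. A rough standalone illustration of that rule follows; plain strcspn() stands in for the charset-aware my_strcspn() and the hard-coded "/\\" stands in for FN_DIRSEP, both simplifications of this sketch rather than the server's actual definitions.

/* Sketch of the path-validity rule: a library name is rejected as soon as
   it contains any directory separator character. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool has_path_component(const char *name)
{
  /* Length of the leading run that contains no separator. */
  size_t prefix= strcspn(name, "/\\");
  return prefix < strlen(name);       /* a separator occurs before the end */
}

int main(void)
{
  const char *samples[]= { "foo.so", "../foo.so", "subdir/foo.so", "..\\foo.dll" };
  for (size_t i= 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    printf("%-14s -> %s\n", samples[i],
           has_path_component(samples[i]) ? "rejected" : "accepted");
  return 0;
}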
@@ -3781,6 +3781,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
{
  const char *tmp_buff;
  MYSQL_TIME time;
  int info_error= 0;
  CHARSET_INFO *cs= system_charset_info;
  DBUG_ENTER("get_schema_tables_record");

@@ -3788,22 +3789,21 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
  table->field[0]->store(STRING_WITH_LEN("def"), cs);
  table->field[1]->store(db_name->str, db_name->length, cs);
  table->field[2]->store(table_name->str, table_name->length, cs);

  if (res)
  {
    /*
      there was errors during opening tables
    */
    const char *error= thd->is_error() ? thd->stmt_da->message() : "";
    /* There was a table open error, so set the table type and return */
    if (tables->view)
      table->field[3]->store(STRING_WITH_LEN("VIEW"), cs);
    else if (tables->schema_table)
      table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs);
    else
      table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs);
    table->field[20]->store(error, strlen(error), cs);
    thd->clear_error();

    goto err;
  }
  else if (tables->view)

  if (tables->view)
  {
    table->field[3]->store(STRING_WITH_LEN("VIEW"), cs);
    table->field[20]->store(STRING_WITH_LEN("VIEW"), cs);
@@ -3818,6 +3818,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
#ifdef WITH_PARTITION_STORAGE_ENGINE
    bool is_partitioned= FALSE;
#endif

    if (share->tmp_table == SYSTEM_TMP_TABLE)
      table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs);
    else if (share->tmp_table)
@@ -3831,6 +3832,9 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
        continue;
      table->field[i]->set_notnull();
    }

    /* Collect table info from the table share */

#ifdef WITH_PARTITION_STORAGE_ENGINE
    if (share->db_type() == partition_hton &&
        share->partition_info_str_len)
@@ -3839,62 +3843,82 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
      is_partitioned= TRUE;
    }
#endif

    tmp_buff= (char *) ha_resolve_storage_engine_name(tmp_db_type);
    table->field[4]->store(tmp_buff, strlen(tmp_buff), cs);
    table->field[5]->store((longlong) share->frm_version, TRUE);

    ptr=option_buff;

    if (share->min_rows)
    {
      ptr=strmov(ptr," min_rows=");
      ptr=longlong10_to_str(share->min_rows,ptr,10);
    }

    if (share->max_rows)
    {
      ptr=strmov(ptr," max_rows=");
      ptr=longlong10_to_str(share->max_rows,ptr,10);
    }

    if (share->avg_row_length)
    {
      ptr=strmov(ptr," avg_row_length=");
      ptr=longlong10_to_str(share->avg_row_length,ptr,10);
    }

    if (share->db_create_options & HA_OPTION_PACK_KEYS)
      ptr=strmov(ptr," pack_keys=1");

    if (share->db_create_options & HA_OPTION_NO_PACK_KEYS)
      ptr=strmov(ptr," pack_keys=0");

    /* We use CHECKSUM, instead of TABLE_CHECKSUM, for backward compability */
    if (share->db_create_options & HA_OPTION_CHECKSUM)
      ptr=strmov(ptr," checksum=1");

    if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE)
      ptr=strmov(ptr," delay_key_write=1");

    if (share->row_type != ROW_TYPE_DEFAULT)
      ptr=strxmov(ptr, " row_format=",
                  ha_row_type[(uint) share->row_type],
                  NullS);

    if (share->key_block_size)
    {
      ptr= strmov(ptr, " KEY_BLOCK_SIZE=");
      ptr= longlong10_to_str(share->key_block_size, ptr, 10);
    }

#ifdef WITH_PARTITION_STORAGE_ENGINE
    if (is_partitioned)
      ptr= strmov(ptr, " partitioned");
#endif

    table->field[19]->store(option_buff+1,
                            (ptr == option_buff ? 0 :
                             (uint) (ptr-option_buff)-1), cs);

    tmp_buff= (share->table_charset ?
               share->table_charset->name : "default");

    table->field[17]->store(tmp_buff, strlen(tmp_buff), cs);

    if (share->comment.str)
      table->field[20]->store(share->comment.str, share->comment.length, cs);

    /* Collect table info from the storage engine */

    if(file)
    {
      file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | HA_STATUS_AUTO);
      /* If info() fails, then there's nothing else to do */
      if ((info_error= file->info(HA_STATUS_VARIABLE |
                                  HA_STATUS_TIME |
                                  HA_STATUS_AUTO)) != 0)
        goto err;

      enum row_type row_type = file->get_row_type();
      switch (row_type) {
      case ROW_TYPE_NOT_USED:
@@ -3923,7 +3947,9 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
        tmp_buff= "Paged";
        break;
      }

      table->field[6]->store(tmp_buff, strlen(tmp_buff), cs);

      if (!tables->schema_table)
      {
        table->field[7]->store((longlong) file->stats.records, TRUE);
@@ -3972,6 +3998,26 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
      }
    }
  }

err:
  if (res || info_error)
  {
    /*
      If an error was encountered, push a warning, set the TABLE COMMENT
      column with the error text, and clear the error so that the operation
      can continue.
    */
    const char *error= thd->is_error() ? thd->stmt_da->message() : "";
    table->field[20]->store(error, strlen(error), cs);

    if (thd->is_error())
    {
      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                   thd->stmt_da->sql_errno(), thd->stmt_da->message());
      thd->clear_error();
    }
  }

  DBUG_RETURN(schema_table_store_record(thd, table));
}

@@ -199,10 +199,7 @@ void udf_init()

      On windows we must check both FN_LIBCHAR and '/'.
    */
    if (my_strchr(files_charset_info, dl_name,
                  dl_name + strlen(dl_name), FN_LIBCHAR) ||
        IF_WIN(my_strchr(files_charset_info, dl_name,
                         dl_name + strlen(dl_name), '/'), 0) ||
    if (check_valid_path(dl_name, strlen(dl_name)) ||
        check_string_char_length(&name, "", NAME_CHAR_LEN,
                                 system_charset_info, 1))
    {
@@ -442,13 +439,8 @@ int mysql_create_function(THD *thd,udf_func *udf)
    Ensure that the .dll doesn't have a path
    This is done to ensure that only approved dll from the system
    directories are used (to make this even remotely secure).

    On windows we must check both FN_LIBCHAR and '/'.
  */
  if (my_strchr(files_charset_info, udf->dl,
                udf->dl + strlen(udf->dl), FN_LIBCHAR) ||
      IF_WIN(my_strchr(files_charset_info, udf->dl,
                       udf->dl + strlen(udf->dl), '/'), 0))
  if (check_valid_path(udf->dl, strlen(udf->dl)))
  {
    my_message(ER_UDF_NO_PATHS, ER(ER_UDF_NO_PATHS), MYF(0));
    DBUG_RETURN(1);

@@ -228,7 +228,6 @@ int ha_perfschema::write_row(uchar *buf)
    result= m_table_share->m_write_row(table, buf, table->field);
  else
  {
    my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
    result= HA_ERR_WRONG_COMMAND;
  }

@@ -339,7 +338,6 @@ int ha_perfschema::delete_all_rows(void)
    result= m_table_share->m_delete_all_rows();
  else
  {
    my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
    result= HA_ERR_WRONG_COMMAND;
  }
  DBUG_RETURN(result);
@@ -370,7 +368,6 @@ int ha_perfschema::delete_table(const char *name)
int ha_perfschema::rename_table(const char * from, const char * to)
{
  DBUG_ENTER("ha_perfschema::rename_table ");
  my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}

@@ -395,7 +392,37 @@ int ha_perfschema::create(const char *name, TABLE *table_arg,
    This is not a general purpose engine.
    Failure to CREATE TABLE is the expected result.
  */
  my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}

void ha_perfschema::print_error(int error, myf errflag)
{
  switch (error)
  {
  case HA_ERR_TABLE_NEEDS_UPGRADE:
    /*
      The error message for ER_TABLE_NEEDS_UPGRADE refers to REPAIR table,
      which does not apply to performance schema tables.
    */
    my_error(ER_WRONG_NATIVE_TABLE_STRUCTURE, MYF(0),
             table_share->db.str, table_share->table_name.str);
    break;
  case HA_ERR_WRONG_COMMAND:
    /*
      The performance schema is not a general purpose storage engine,
      some operations are not supported, by design.
      We do not want to print "Command not supported",
      which gives the impression that a command implementation is missing,
      and that the failure should be considered a bug.
      We print "Invalid performance_schema usage." instead,
      to emphasise that the operation attempted is not meant to be legal,
      and that the failure returned is indeed the expected result.
    */
    my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
    break;
  default:
    handler::print_error(error, errflag);
    break;
  }
}

@@ -100,9 +100,6 @@ public:
  double scan_time(void)
  { return 1.0; }

  double read_time(ha_rows)
  { return 1.0; }

  int open(const char *name, int mode, uint test_if_locked);

  int close(void);
@@ -149,6 +146,8 @@ public:
    return FALSE;
  }

  virtual void print_error(int error, myf errflags);

private:
  /** MySQL lock */
  THR_LOCK_DATA m_thr_lock;

@@ -232,8 +232,6 @@ int PFS_engine_table::read_row(TABLE *table,
  */
  if (! m_share_ptr->m_checked)
  {
    my_error(ER_WRONG_NATIVE_TABLE_STRUCTURE, MYF(0),
             PERFORMANCE_SCHEMA_str.str, m_share_ptr->m_name.str);
    return HA_ERR_TABLE_NEEDS_UPGRADE;
  }

@@ -279,8 +277,6 @@ int PFS_engine_table::update_row(TABLE *table,
  */
  if (! m_share_ptr->m_checked)
  {
    my_error(ER_WRONG_NATIVE_TABLE_STRUCTURE, MYF(0),
             PERFORMANCE_SCHEMA_str.str, m_share_ptr->m_name.str);
    return HA_ERR_TABLE_NEEDS_UPGRADE;
  }

@@ -351,7 +347,6 @@ int PFS_engine_table::update_row_values(TABLE *,
                                        unsigned char *,
                                        Field **)
{
  my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
  return HA_ERR_WRONG_COMMAND;
}

@@ -192,7 +192,6 @@ int table_setup_consumers::update_row_values(TABLE *table,
    switch(f->field_index)
    {
    case 0: /* NAME */
      my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
      return HA_ERR_WRONG_COMMAND;
    case 1: /* ENABLED */
      {

@@ -253,7 +253,6 @@ int table_setup_instruments::update_row_values(TABLE *table,
    switch(f->field_index)
    {
    case 0: /* NAME */
      my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
      return HA_ERR_WRONG_COMMAND;
    case 1: /* ENABLED */
      value= (enum_yes_no) get_field_enum(f);

@@ -164,7 +164,6 @@ int table_setup_timers::update_row_values(TABLE *table,
    switch(f->field_index)
    {
    case 0: /* NAME */
      my_error(ER_WRONG_PERFSCHEMA_USAGE, MYF(0));
      return HA_ERR_WRONG_COMMAND;
    case 1: /* TIMER_NAME */
      value= get_field_enum(f);

@@ -13,6 +13,45 @@
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#include <my_global.h>
#include "m_string.h"
#include "m_ctype.h"

#define NEQ(A, B) ((A) != (B))
#define EQU(A, B) ((A) == (B))

/**
  Macro for the body of the string scanning.

  @param CS  The character set of the string
  @param STR Pointer to beginning of string
  @param END Pointer to one-after-end of string
  @param ACC Pointer to beginning of accept (or reject) string
  @param LEN Length of accept (or reject) string
  @param CMP is a function-like for doing the comparison of two characters.
*/

#define SCAN_STRING(CS, STR, END, ACC, LEN, CMP)              \
  do {                                                        \
    uint mbl;                                                 \
    const char *ptr_str, *ptr_acc;                            \
    const char *acc_end= (ACC) + (LEN);                       \
    for (ptr_str= (STR) ; ptr_str < (END) ; ptr_str+= mbl)    \
    {                                                         \
      mbl= my_mbcharlen((CS), *(uchar*)ptr_str);              \
      if (mbl < 2)                                            \
      {                                                       \
        DBUG_ASSERT(mbl == 1);                                \
        for (ptr_acc= (ACC) ; ptr_acc < acc_end ; ++ptr_acc)  \
          if (CMP(*ptr_acc, *ptr_str))                        \
            goto end;                                         \
      }                                                       \
    }                                                         \
end:                                                          \
    return (size_t) (ptr_str - (STR));                        \
  } while (0)


/*
  my_strchr(cs, str, end, c) returns a pointer to the first place in
  str where c (1-byte character) occurs, or NULL if c does not occur
@@ -21,11 +60,6 @@
  frequently.
*/

#include <my_global.h>
#include "m_string.h"
#include "m_ctype.h"


char *my_strchr(CHARSET_INFO *cs, const char *str, const char *end,
                pchar c)
{
@@ -45,3 +79,26 @@ char *my_strchr(CHARSET_INFO *cs, const char *str, const char *end,
  return(0);
}

/**
  Calculate the length of the initial segment of 'str' which consists
  entirely of characters not in 'reject'.

  @note The reject string points to single-byte characters so it is
  only possible to find the first occurrence of a single-byte
  character. Multi-byte characters in 'str' are treated as not
  matching any character in the reject string.

  @todo should be moved to CHARSET_INFO if it's going to be called
  frequently.

  @internal The implementation builds on the assumption that 'str' is long,
  while 'reject' is short. So it compares each character in string
  with the characters in 'reject' in a tight loop over the characters
  in 'reject'.
*/

size_t my_strcspn(CHARSET_INFO *cs, const char *str, const char *str_end,
                  const char *reject)
{
  SCAN_STRING(cs, str, str_end, reject, strlen(reject), EQU);
}
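The @note in the comment above is the subtle part of SCAN_STRING: the scan advances by whole characters, and only single-byte characters are ever compared against the accept/reject set, so a byte that happens to occur inside a multi-byte character can never be mistaken for a separator. A toy single-file illustration of that skipping logic follows; its char_len() rule (bytes >= 0x80 start a two-byte character) is a stand-in assumption, not my_mbcharlen() or any real charset.

/* Toy version of the SCAN_STRING idea: length of the initial segment of
   [str, str_end) containing no byte from 'reject', where multi-byte
   characters are skipped whole and never compared. */
#include <stdio.h>
#include <string.h>

static unsigned char_len(unsigned char first_byte)
{
  return first_byte >= 0x80 ? 2 : 1;    /* fake multi-byte rule for the demo */
}

static size_t toy_strcspn(const char *str, const char *str_end,
                          const char *reject)
{
  const char *p= str;
  while (p < str_end)
  {
    unsigned len= char_len((unsigned char) *p);
    if (len == 1)
    {
      /* Only single-byte characters are compared, as in SCAN_STRING. */
      for (const char *r= reject; *r != '\0'; r++)
        if (*r == *p)
          return (size_t) (p - str);
    }
    p+= len;                            /* skip multi-byte characters whole */
  }
  return (size_t) (p - str);
}

int main(void)
{
  const char *s= "abc/def";
  printf("%zu\n", toy_strcspn(s, s + strlen(s), "/"));   /* prints 3 */
  return 0;
}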