Mirror of https://github.com/MariaDB/server.git (synced 2025-03-29 18:35:35 +01:00)
Make storage engines "pluggable", handlerton work
Makefile.am: Changes to autoconf subst
config/ac-macros/ha_berkeley.m4: simplify
config/ac-macros/ha_ndbcluster.m4: simplify
config/ac-macros/ha_partition.m4: simplify
configure.in: strip configure of storage engine specific cruft and simplify
extra/Makefile.am: changes to autoconf/automake subst
libmysqld/Makefile.am: only compile storage engines if required; make finding the object files a little smarter
libmysqld/examples/Makefile.am: changes to autoconf subst
mysql-test/Makefile.am: remove storage engine specific cruft
mysql-test/r/ps_1general.result: cannot guarantee order of results from 'show storage engines'
mysql-test/r/show_check.result: fix test - frm file fails to be deleted if it is invalid
mysql-test/r/sql_mode.result: isam does not exist, test may need to be redone/fixed in 5.0
mysql-test/r/warnings.result: isam no longer exists
mysql-test/t/ps_1general.test: cannot guarantee order of results from 'show storage engines'
mysql-test/t/show_check.test: fix test - frm file fails to be deleted if it is invalid
mysql-test/t/sql_mode.test: isam does not exist, test may need to be redone/fixed in 5.0
mysql-test/t/system_mysql_db_fix.test: change isam to myisam
mysql-test/t/view.test: change isam to myisam
mysql-test/t/warnings.test: isam no longer exists
sql/Makefile.am: Make storage engines "pluggable", stage 1: only compile storage engines if included
sql/examples/ha_example.cc: handlerton work
sql/examples/ha_example.h: handlerton work
sql/examples/ha_tina.cc: handlerton work
sql/examples/ha_tina.h: handlerton work
sql/ha_archive.cc: handlerton work
sql/ha_archive.h: handlerton work
sql/ha_berkeley.cc: handlerton work
sql/ha_berkeley.h: handlerton work
sql/ha_blackhole.cc: handlerton work
sql/ha_federated.cc: handlerton work
sql/ha_federated.h: handlerton work
sql/ha_heap.cc: handlerton work
sql/ha_innodb.cc: handlerton work
sql/ha_innodb.h: handlerton work
sql/ha_myisam.cc: handlerton work
sql/ha_myisammrg.cc: handlerton work
sql/ha_ndbcluster.cc: handlerton work
sql/ha_ndbcluster.h: handlerton work
sql/ha_partition.cc: handlerton work
sql/handler.cc: start removing storage engine specific cruft
sql/handler.h: start removing storage engine specific cruft; db_type for binlog handlerton; handlerton flag for not-user-selectable storage engines
sql/lex.h: start removing storage engine specific cruft
sql/log.cc: handlerton work; give binlog handlerton a 'real' db_type
sql/mysql_priv.h: start removing storage engine specific cruft
sql/mysqld.cc: start removing storage engine specific cruft
sql/set_var.cc: start removing storage engine specific cruft
sql/sp_head.cc: start removing storage engine specific cruft
sql/sql_class.cc: start removing storage engine specific cruft
sql/sql_class.h: start removing storage engine specific cruft
sql/sql_lex.h: start removing storage engine specific cruft
sql/sql_manager.cc: start removing storage engine specific cruft
sql/sql_manager.h: start removing storage engine specific cruft
sql/sql_parse.cc: start removing storage engine specific cruft
sql/sql_partition.cc: start removing storage engine specific cruft
sql/sql_prepare.cc: start removing storage engine specific cruft
sql/sql_show.cc: start removing storage engine specific cruft
sql/sql_table.cc: changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE; start removing storage engine specific cruft
sql/sql_update.cc: changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/sql_yacc.yy: start removing storage engine specific cruft; test if we should throw error
sql/table.cc: changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/table.h: changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/unireg.cc: changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
storage/ndb/include/kernel/kernel_types.h: added my_config.h
storage/ndb/include/ndb_global.h.in: added my_config.h
storage/ndb/include/ndb_types.h.in: added my_config.h
config/ac-macros/storage.m4: New BitKeeper file ``config/ac-macros/storage.m4''
sql/handlerton.cc.in: New BitKeeper file ``sql/handlerton.cc.in''
This commit is contained in:
parent df33aacd87
commit 6b3a9caef9
67 changed files with 1470 additions and 1266 deletions
Makefile.am
config/ac-macros
configure.in
extra
libmysqld
mysql-test
sql
  Makefile.am
  examples
  ha_archive.cc ha_archive.h ha_berkeley.cc ha_berkeley.h ha_blackhole.cc
  ha_federated.cc ha_federated.h ha_heap.cc ha_innodb.cc ha_innodb.h
  ha_myisam.cc ha_myisammrg.cc ha_ndbcluster.cc ha_ndbcluster.h ha_partition.cc
  handler.cc handler.h handlerton.cc.in lex.h log.cc mysql_priv.h mysqld.cc
  set_var.cc sp_head.cc sql_class.cc sql_class.h sql_lex.h sql_manager.cc
  sql_manager.h sql_parse.cc sql_partition.cc sql_prepare.cc sql_show.cc
  sql_table.cc sql_update.cc sql_yacc.yy table.cc table.h unireg.cc
storage/ndb/include
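Before the hunks themselves, a short illustration of the new registration scheme. The calls below are copied from the configure.in changes further down in this diff; the dnl annotations are my own reading of the defaults in the new config/ac-macros/storage.m4 (argument order: name, verbose name, --with option, default, config.h define, handlerton symbol, source directory, handler object, extra libraries, extra setup code).

  dnl Simplest case: everything defaulted. Creates --with-example-storage-engine
  dnl and, when enabled, defines WITH_EXAMPLE_STORAGE_ENGINE and appends
  dnl &example_hton / ha_example.o to the handlerton and object lists.
  MYSQL_STORAGE_ENGINE(example)

  dnl CSV only overrides the handlerton symbol and the handler object file.
  MYSQL_STORAGE_ENGINE(csv,,,,,tina_hton,,ha_tina.o)

  dnl Engines with their own source tree also pass a directory and a setup
  dnl macro that runs only when the engine is enabled.
  MYSQL_STORAGE_ENGINE(ndbcluster,,ndbcluster,,,,storage/ndb,,,[
    MYSQL_SETUP_NDBCLUSTER
  ])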
@@ -23,7 +23,8 @@ EXTRA_DIST = INSTALL-SOURCE README COPYING EXCEPTIONS-CLIENT
SUBDIRS = . include @docs_dirs@ @zlib_dir@ @yassl_dir@ \
		@readline_topdir@ sql-common \
		@thread_dirs@ pstack \
		@sql_union_dirs@ scripts @man_dirs@ tests \
		@sql_union_dirs@ @mysql_se_dirs@ \
		@sql_server@ scripts @man_dirs@ tests \
		netware @libmysqld_dirs@ \
		@bench_dirs@ support-files @tools_dirs@

@@ -5,13 +5,14 @@ dnl Makes sure db version is correct.
dnl Looks in $srcdir for Berkeley distribution if not told otherwise
dnl ---------------------------------------------------------------------------

AC_DEFUN([MYSQL_CHECK_BDB], [

AC_DEFUN([MYSQL_SETUP_BERKELEY_DB], [
AC_ARG_WITH([berkeley-db],
  [
  --with-berkeley-db[=DIR]
                          Use BerkeleyDB located in DIR],
  [bdb="$withval"],
  [bdb=no])
  [bdb=yes])

AC_ARG_WITH([berkeley-db-includes],
  [

@ -27,45 +28,27 @@ AC_DEFUN([MYSQL_CHECK_BDB], [
|
|||
[bdb_libs="$withval"],
|
||||
[bdb_libs=default])
|
||||
|
||||
AC_MSG_CHECKING([for BerkeleyDB])
|
||||
|
||||
dnl SORT OUT THE SUPPLIED ARGUMENTS TO DETERMINE WHAT TO DO
|
||||
dnl echo "DBG1: bdb='$bdb'; incl='$bdb_includes'; lib='$bdb_libs'"
|
||||
have_berkeley_db=no
|
||||
# echo " bdb $bdb $bdb_includes---$bdb_libs "
|
||||
case "$bdb" in
|
||||
no )
|
||||
mode=no
|
||||
AC_MSG_RESULT([no])
|
||||
;;
|
||||
yes | default )
|
||||
yes )
|
||||
case "$bdb_includes---$bdb_libs" in
|
||||
default---default )
|
||||
mode=search-$bdb
|
||||
AC_MSG_RESULT([searching...])
|
||||
;;
|
||||
default---* | *---default | yes---* | *---yes )
|
||||
AC_MSG_ERROR([if either 'includes' or 'libs' is specified, both must be specified])
|
||||
;;
|
||||
* )
|
||||
mode=supplied-two
|
||||
AC_MSG_RESULT([supplied])
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
* )
|
||||
mode=supplied-one
|
||||
AC_MSG_RESULT([supplied])
|
||||
;;
|
||||
esac
|
||||
|
||||
dnl echo "DBG2: [$mode] bdb='$bdb'; incl='$bdb_includes'; lib='$bdb_libs'"
|
||||
|
||||
case $mode in
|
||||
no )
|
||||
bdb_includes=
|
||||
bdb_libs=
|
||||
bdb_libs_with_path=
|
||||
;;
|
||||
supplied-two )
|
||||
MYSQL_CHECK_INSTALLED_BDB([$bdb_includes], [$bdb_libs])
|
||||
case $bdb_dir_ok in
|
||||
|
@ -86,16 +69,7 @@ dnl echo "DBG2: [$mode] bdb='$bdb'; incl='$bdb_includes'; lib='$bdb_libs'"
|
|||
case $bdb_dir_ok in
|
||||
source ) mode=compile ;;
|
||||
installed ) mode=yes ;;
|
||||
* )
|
||||
# not found
|
||||
case $mode in
|
||||
*-yes ) AC_MSG_ERROR([no suitable BerkeleyDB found]) ;;
|
||||
* ) mode=no ;;
|
||||
esac
|
||||
bdb_includes=
|
||||
bdb_libs=
|
||||
bdb_libs_with_path=
|
||||
;;
|
||||
* ) AC_MSG_ERROR([no suitable BerkeleyDB found]) ;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
|
@ -103,11 +77,7 @@ dnl echo "DBG2: [$mode] bdb='$bdb'; incl='$bdb_includes'; lib='$bdb_libs'"
|
|||
;;
|
||||
esac
|
||||
|
||||
dnl echo "DBG3: [$mode] bdb='$bdb'; incl='$bdb_includes'; lib='$bdb_libs'"
|
||||
case $mode in
|
||||
no )
|
||||
AC_MSG_RESULT([Not using Berkeley DB])
|
||||
;;
|
||||
yes )
|
||||
have_berkeley_db="yes"
|
||||
AC_MSG_RESULT([Using Berkeley DB in '$bdb_includes'])
|
||||
|
@ -121,9 +91,41 @@ dnl echo "DBG3: [$mode] bdb='$bdb'; incl='$bdb_includes'; lib='$bdb_libs'"
|
|||
;;
|
||||
esac
|
||||
|
||||
bdb_conf_flags="--disable-shared --build=$build_alias"
|
||||
if test $with_debug = "yes"
|
||||
then
|
||||
bdb_conf_flags="$bdb_conf_flags --enable-debug --enable-diagnostic"
|
||||
fi
|
||||
# NOTICE: if you're compiling BDB, it needs to be a SUBDIR
|
||||
# of $srcdir (i.e., you can 'cd $srcdir/$bdb'). It won't
|
||||
# work otherwise.
|
||||
if test -d "$bdb"; then :
|
||||
else
|
||||
# This should only happen when doing a VPATH build
|
||||
echo "NOTICE: I have to make the BDB directory: `pwd`:$bdb"
|
||||
mkdir "$bdb" || exit 1
|
||||
fi
|
||||
if test -d "$bdb"/build_unix; then :
|
||||
else
|
||||
# This should only happen when doing a VPATH build
|
||||
echo "NOTICE: I have to make the build_unix directory: `pwd`:$bdb/build_unix"
|
||||
mkdir "$bdb/build_unix" || exit 1
|
||||
fi
|
||||
rel_srcdir=
|
||||
case "$srcdir" in
|
||||
/* ) rel_srcdir="$srcdir" ;;
|
||||
* ) rel_srcdir="../../../$srcdir" ;;
|
||||
esac
|
||||
(cd $bdb/build_unix && \
|
||||
sh $rel_srcdir/$bdb/dist/configure $bdb_conf_flags) || \
|
||||
AC_MSG_ERROR([could not configure Berkeley DB])
|
||||
|
||||
mysql_se_libs="$mysql_se_libs $bdb_libs_with_path"
|
||||
|
||||
AC_SUBST(bdb_includes)
|
||||
AC_SUBST(bdb_libs)
|
||||
AC_SUBST(bdb_libs_with_path)
|
||||
AC_CONFIG_FILES(storage/bdb/Makefile)
|
||||
])
|
||||
|
||||
AC_DEFUN([MYSQL_CHECK_INSTALLED_BDB], [
|
||||
|
@ -217,29 +219,29 @@ AC_DEFUN([MYSQL_CHECK_BDB_VERSION], [
|
|||
test -z "$db_minor" && db_minor=0
|
||||
test -z "$db_patch" && db_patch=0
|
||||
|
||||
# This is ugly, but about as good as it can get
|
||||
# mysql_bdb=
|
||||
# if test $db_major -eq 3 && test $db_minor -eq 2 && test $db_patch -eq 3
|
||||
# then
|
||||
# mysql_bdb=h
|
||||
# elif test $db_major -eq 3 && test $db_minor -eq 2 && test $db_patch -eq 9
|
||||
# then
|
||||
# want_bdb_version="3.2.9a" # hopefully this will stay up-to-date
|
||||
# mysql_bdb=a
|
||||
# fi
|
||||
dnl # This is ugly, but about as good as it can get
|
||||
dnl # mysql_bdb=
|
||||
dnl # if test $db_major -eq 3 && test $db_minor -eq 2 && test $db_patch -eq 3
|
||||
dnl # then
|
||||
dnl # mysql_bdb=h
|
||||
dnl # elif test $db_major -eq 3 && test $db_minor -eq 2 && test $db_patch -eq 9
|
||||
dnl # then
|
||||
dnl # want_bdb_version="3.2.9a" # hopefully this will stay up-to-date
|
||||
dnl # mysql_bdb=a
|
||||
dnl # fi
|
||||
|
||||
dnl RAM:
|
||||
want_bdb_version="4.1.24"
|
||||
bdb_version_ok=yes
|
||||
|
||||
# if test -n "$mysql_bdb" && \
|
||||
# grep "DB_VERSION_STRING.*:.*$mysql_bdb: " [$1] > /dev/null
|
||||
# then
|
||||
# bdb_version_ok=yes
|
||||
# else
|
||||
# bdb_version_ok="invalid version $db_major.$db_minor.$db_patch"
|
||||
# bdb_version_ok="$bdb_version_ok (must be version 3.2.3h or $want_bdb_version)"
|
||||
# fi
|
||||
dnl # if test -n "$mysql_bdb" && \
|
||||
dnl # grep "DB_VERSION_STRING.*:.*$mysql_bdb: " [$1] > /dev/null
|
||||
dnl # then
|
||||
dnl # bdb_version_ok=yes
|
||||
dnl # else
|
||||
dnl # bdb_version_ok="invalid version $db_major.$db_minor.$db_patch"
|
||||
dnl # bdb_version_ok="$bdb_version_ok (must be version 3.2.3h or $want_bdb_version)"
|
||||
dnl # fi
|
||||
])
|
||||
|
||||
AC_DEFUN([MYSQL_TOP_BUILDDIR], [
|
||||
|
|
|
@ -1,8 +1,29 @@
|
|||
dnl ---------------------------------------------------------------------------
|
||||
dnl Macro: MYSQL_CHECK_NDBCLUSTER
|
||||
dnl Sets HAVE_NDBCLUSTER_DB if --with-ndbcluster is used
|
||||
dnl ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
NDB_VERSION_MAJOR=`echo $VERSION | cut -d. -f1`
|
||||
NDB_VERSION_MINOR=`echo $VERSION | cut -d. -f2`
|
||||
NDB_VERSION_BUILD=`echo $VERSION | cut -d. -f3 | cut -d- -f1`
|
||||
NDB_VERSION_STATUS=`echo $VERSION | cut -d- -f2`
|
||||
# if there was now -suffix, $NDB_VERSION_STATUS will be the same as $VERSION
|
||||
if test "$NDB_VERSION_STATUS" = "$VERSION"
|
||||
then
|
||||
NDB_VERSION_STATUS=""
|
||||
fi
|
||||
TEST_NDBCLUSTER=""
|
||||
|
||||
dnl for build ndb docs
|
||||
|
||||
AC_PATH_PROG(DOXYGEN, doxygen, no)
|
||||
AC_PATH_PROG(PDFLATEX, pdflatex, no)
|
||||
AC_PATH_PROG(MAKEINDEX, makeindex, no)
|
||||
|
||||
AC_SUBST(DOXYGEN)
|
||||
AC_SUBST(PDFLATEX)
|
||||
AC_SUBST(MAKEINDEX)
|
||||
|
||||
|
||||
AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
|
||||
AC_ARG_WITH([ndb-sci],
|
||||
AC_HELP_STRING([--with-ndb-sci=DIR],
|
||||
|
@ -67,7 +88,7 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
|
|||
[ndb_ccflags=${withval}],
|
||||
[ndb_ccflags=""])
|
||||
|
||||
case "$ndb_ccflags" in
|
||||
case "$ndb_ccflags" in
|
||||
"yes")
|
||||
AC_MSG_RESULT([The --ndb-ccflags option requires a parameter (passed to CC for ndb compilation)])
|
||||
;;
|
||||
|
@ -122,43 +143,196 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
|
|||
AC_MSG_RESULT([done.])
|
||||
])
|
||||
|
||||
AC_DEFUN([MYSQL_CHECK_NDBCLUSTER], [
|
||||
AC_ARG_WITH([ndbcluster],
|
||||
[
|
||||
--with-ndbcluster Include the NDB Cluster table handler],
|
||||
[ndbcluster="$withval"],
|
||||
[ndbcluster=no])
|
||||
|
||||
AC_MSG_CHECKING([for NDB Cluster])
|
||||
|
||||
have_ndbcluster=no
|
||||
ndbcluster_includes=
|
||||
ndbcluster_libs=
|
||||
ndb_mgmclient_libs=
|
||||
case "$ndbcluster" in
|
||||
yes )
|
||||
AC_MSG_RESULT([Using NDB Cluster and Partitioning])
|
||||
AC_DEFINE([HAVE_NDBCLUSTER_DB], [1], [Using Ndb Cluster DB])
|
||||
AC_DEFINE([HAVE_PARTITION_DB], [1], [Builds Partition DB])
|
||||
have_ndbcluster="yes"
|
||||
ndbcluster_includes="-I\$(top_builddir)/storage/ndb/include -I\$(top_builddir)/storage/ndb/include/ndbapi -I\$(top_builddir)/storage/ndb/include/mgmapi"
|
||||
ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a"
|
||||
ndbcluster_system_libs=""
|
||||
ndb_mgmclient_libs="\$(top_builddir)/storage/ndb/src/mgmclient/libndbmgmclient.la"
|
||||
MYSQL_CHECK_NDB_OPTIONS
|
||||
AC_DEFUN([NDBCLUSTER_WORKAROUNDS], [
|
||||
|
||||
#workaround for Sun Forte/x86 see BUG#4681
|
||||
case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc in
|
||||
*solaris*-i?86-no)
|
||||
CFLAGS="$CFLAGS -DBIG_TABLES"
|
||||
CXXFLAGS="$CXXFLAGS -DBIG_TABLES"
|
||||
;;
|
||||
* )
|
||||
AC_MSG_RESULT([Not using NDB Cluster])
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
AM_CONDITIONAL([HAVE_NDBCLUSTER_DB], [ test "$have_ndbcluster" = "yes" ])
|
||||
# workaround for Sun Forte compile problem for ndb
|
||||
case $SYSTEM_TYPE-$ac_cv_prog_gcc in
|
||||
*solaris*-no)
|
||||
ndb_cxxflags_fix="$ndb_cxxflags_fix -instances=static"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
# ndb fail for whatever strange reason to link Sun Forte/x86
|
||||
# unless using incremental linker
|
||||
case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc-$have_ndbcluster in
|
||||
*solaris*-i?86-no-yes)
|
||||
CXXFLAGS="$CXXFLAGS -xildon"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
])
|
||||
|
||||
AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
|
||||
|
||||
with_partition="yes"
|
||||
ndb_cxxflags_fix=""
|
||||
TEST_NDBCLUSTER="--ndbcluster"
|
||||
|
||||
ndbcluster_includes="-I\$(top_builddir)/storage/ndb/include -I\$(top_builddir)/storage/ndb/include/ndbapi -I\$(top_builddir)/storage/ndb/include/mgmapi"
|
||||
ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a"
|
||||
ndbcluster_system_libs=""
|
||||
ndb_mgmclient_libs="\$(top_builddir)/storage/ndb/src/mgmclient/libndbmgmclient.la"
|
||||
|
||||
MYSQL_CHECK_NDB_OPTIONS
|
||||
NDBCLUSTER_WORKAROUNDS
|
||||
|
||||
MAKE_BINARY_DISTRIBUTION_OPTIONS="$MAKE_BINARY_DISTRIBUTION_OPTIONS --with-ndbcluster"
|
||||
|
||||
# CXXFLAGS="$CXXFLAGS \$(NDB_CXXFLAGS)"
|
||||
if test "$have_ndb_debug" = "default"
|
||||
then
|
||||
have_ndb_debug=$with_debug
|
||||
fi
|
||||
|
||||
if test "$have_ndb_debug" = "yes"
|
||||
then
|
||||
# Medium debug.
|
||||
NDB_DEFS="-DNDB_DEBUG -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
|
||||
elif test "$have_ndb_debug" = "full"
|
||||
then
|
||||
NDB_DEFS="-DNDB_DEBUG_FULL -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
|
||||
else
|
||||
# no extra ndb debug but still do asserts if debug version
|
||||
if test "$with_debug" = "yes" -o "$with_debug" = "full"
|
||||
then
|
||||
NDB_DEFS=""
|
||||
else
|
||||
NDB_DEFS="-DNDEBUG"
|
||||
fi
|
||||
fi
|
||||
|
||||
if test X"$ndb_port" = Xdefault
|
||||
then
|
||||
ndb_port="1186"
|
||||
fi
|
||||
|
||||
ndb_transporter_opt_objs=""
|
||||
if test "$ac_cv_func_shmget" = "yes" &&
|
||||
test "$ac_cv_func_shmat" = "yes" &&
|
||||
test "$ac_cv_func_shmdt" = "yes" &&
|
||||
test "$ac_cv_func_shmctl" = "yes" &&
|
||||
test "$ac_cv_func_sigaction" = "yes" &&
|
||||
test "$ac_cv_func_sigemptyset" = "yes" &&
|
||||
test "$ac_cv_func_sigaddset" = "yes" &&
|
||||
test "$ac_cv_func_pthread_sigmask" = "yes"
|
||||
then
|
||||
AC_DEFINE([NDB_SHM_TRANSPORTER], [1],
|
||||
[Including Ndb Cluster DB shared memory transporter])
|
||||
AC_MSG_RESULT([Including ndb shared memory transporter])
|
||||
ndb_transporter_opt_objs="$ndb_transporter_opt_objs SHM_Transporter.lo SHM_Transporter.unix.lo"
|
||||
else
|
||||
AC_MSG_RESULT([Not including ndb shared memory transporter])
|
||||
fi
|
||||
|
||||
if test X"$have_ndb_sci" = Xyes
|
||||
then
|
||||
ndb_transporter_opt_objs="$ndb_transporter_opt_objs SCI_Transporter.lo"
|
||||
fi
|
||||
|
||||
ndb_opt_subdirs=
|
||||
ndb_bin_am_ldflags="-static"
|
||||
if test X"$have_ndb_test" = Xyes
|
||||
then
|
||||
ndb_opt_subdirs="test"
|
||||
ndb_bin_am_ldflags=""
|
||||
fi
|
||||
|
||||
if test X"$have_ndb_docs" = Xyes
|
||||
then
|
||||
ndb_opt_subdirs="$ndb_opt_subdirs docs"
|
||||
ndb_bin_am_ldflags=""
|
||||
fi
|
||||
|
||||
mysql_se_libs="$mysql_se_libs $ndbcluster_libs $ndbcluster_system_libs"
|
||||
mysql_se_libs="$mysql_se_libs $NDB_SCI_LIBS"
|
||||
|
||||
AC_SUBST(NDB_VERSION_MAJOR)
|
||||
AC_SUBST(NDB_VERSION_MINOR)
|
||||
AC_SUBST(NDB_VERSION_BUILD)
|
||||
AC_SUBST(NDB_VERSION_STATUS)
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_MAJOR], [$NDB_VERSION_MAJOR],
|
||||
[NDB major version])
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_MINOR], [$NDB_VERSION_MINOR],
|
||||
[NDB minor version])
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_BUILD], [$NDB_VERSION_BUILD],
|
||||
[NDB build version])
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_STATUS], ["$NDB_VERSION_STATUS"],
|
||||
[NDB status version])
|
||||
|
||||
AC_SUBST(ndbcluster_includes)
|
||||
AC_SUBST(ndbcluster_libs)
|
||||
AC_SUBST(ndbcluster_system_libs)
|
||||
AC_SUBST(ndb_mgmclient_libs)
|
||||
|
||||
AC_SUBST(ndb_transporter_opt_objs)
|
||||
AC_SUBST(ndb_port)
|
||||
AC_SUBST(ndb_bin_am_ldflags)
|
||||
AC_SUBST(ndb_opt_subdirs)
|
||||
|
||||
AC_SUBST(NDB_DEFS)
|
||||
AC_SUBST(ndb_cxxflags_fix)
|
||||
|
||||
AC_CONFIG_FILES(storage/ndb/Makefile storage/ndb/include/Makefile dnl
|
||||
storage/ndb/src/Makefile storage/ndb/src/common/Makefile dnl
|
||||
storage/ndb/docs/Makefile dnl
|
||||
storage/ndb/tools/Makefile dnl
|
||||
storage/ndb/src/common/debugger/Makefile dnl
|
||||
storage/ndb/src/common/debugger/signaldata/Makefile dnl
|
||||
storage/ndb/src/common/portlib/Makefile dnl
|
||||
storage/ndb/src/common/util/Makefile dnl
|
||||
storage/ndb/src/common/logger/Makefile dnl
|
||||
storage/ndb/src/common/transporter/Makefile dnl
|
||||
storage/ndb/src/common/mgmcommon/Makefile dnl
|
||||
storage/ndb/src/kernel/Makefile dnl
|
||||
storage/ndb/src/kernel/error/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/cmvmi/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbacc/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbdict/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbdih/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dblqh/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbtc/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbtup/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/ndbfs/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/ndbcntr/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/qmgr/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/trix/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/backup/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbutil/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/suma/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbtux/Makefile dnl
|
||||
storage/ndb/src/kernel/vm/Makefile dnl
|
||||
storage/ndb/src/mgmapi/Makefile dnl
|
||||
storage/ndb/src/ndbapi/Makefile dnl
|
||||
storage/ndb/src/mgmsrv/Makefile dnl
|
||||
storage/ndb/src/mgmclient/Makefile dnl
|
||||
storage/ndb/src/cw/Makefile dnl
|
||||
storage/ndb/src/cw/cpcd/Makefile dnl
|
||||
storage/ndb/test/Makefile dnl
|
||||
storage/ndb/test/src/Makefile dnl
|
||||
storage/ndb/test/ndbapi/Makefile dnl
|
||||
storage/ndb/test/ndbapi/bank/Makefile dnl
|
||||
storage/ndb/test/tools/Makefile dnl
|
||||
storage/ndb/test/run-test/Makefile dnl
|
||||
storage/ndb/include/ndb_version.h storage/ndb/include/ndb_global.h dnl
|
||||
storage/ndb/include/ndb_types.h dnl
|
||||
)
|
||||
])
|
||||
|
||||
|
||||
AC_SUBST(TEST_NDBCLUSTER)
|
||||
dnl ---------------------------------------------------------------------------
|
||||
dnl END OF MYSQL_CHECK_NDBCLUSTER SECTION
|
||||
dnl ---------------------------------------------------------------------------
|
||||
|
|
|
@ -11,17 +11,20 @@ AC_DEFUN([MYSQL_CHECK_PARTITIONDB], [
|
|||
[partitiondb=no])
|
||||
AC_MSG_CHECKING([for partition])
|
||||
|
||||
case "$partitiondb" in
|
||||
yes )
|
||||
dnl case "$partitiondb" in
|
||||
dnl yes )
|
||||
dnl AC_DEFINE([HAVE_PARTITION_DB], [1], [Builds Partition DB])
|
||||
dnl AC_MSG_RESULT([yes])
|
||||
dnl [partitiondb=yes]
|
||||
dnl ;;
|
||||
dnl * )
|
||||
dnl AC_MSG_RESULT([no])
|
||||
dnl [partitiondb=no]
|
||||
dnl ;;
|
||||
dnl esac
|
||||
AC_DEFINE([HAVE_PARTITION_DB], [1], [Builds Partition DB])
|
||||
AC_MSG_RESULT([yes])
|
||||
[partitiondb=yes]
|
||||
;;
|
||||
* )
|
||||
AC_MSG_RESULT([no])
|
||||
[partitiondb=no]
|
||||
;;
|
||||
esac
|
||||
|
||||
])
|
||||
dnl ---------------------------------------------------------------------------
|
||||
|
|
47 config/ac-macros/storage.m4 (new file)
@@ -0,0 +1,47 @@
dnl ---------------------------------------------------------------------------
dnl Macro: MYSQL_STORAGE_ENGINE
dnl
dnl What it does:
dnl creates --with-xxx configure option
dnl adds HAVE_XXX to config.h
dnl appends &xxx_hton, to the list of hanldertons
dnl appends a dir to the list of source directories
dnl appends ha_xxx.cc to the list of handler files
dnl
dnl all names above are configurable with reasonable defaults.
dnl
dnl ---------------------------------------------------------------------------

AC_DEFUN([MYSQL_STORAGE_ENGINE],
[_MYSQL_STORAGE_ENGINE(
  [$1],                                                    dnl name
  m4_default([$2], [$1 storage engine]),                   dnl verbose name
  m4_default([$3], [$1-storage-engine]),                   dnl with-name
  m4_default([$4], no),                                    dnl default
  m4_default([$5], [WITH_]AS_TR_CPP([$1])[_STORAGE_ENGINE]),
  m4_default([$6], $1[_hton]),                             dnl hton
  m4_default([$7], []),                                    dnl path to the code
  m4_default([$8], [ha_$1.o]),                             dnl path to the handler in
  m4_default([$9], []),                                    dnl path to extra libraries
  [$10],                                                   dnl code-if-set
)])

AC_DEFUN([_MYSQL_STORAGE_ENGINE],
[
AC_ARG_WITH([$3], AS_HELP_STRING([--with-$3], [enable $2 (default is $4)]),
  [], [ [with_]m4_bpatsubst([$3], -, _)=['$4']])
AC_CACHE_CHECK([whether to use $2], [mysql_cv_use_]m4_bpatsubst([$3], -, _),
  [mysql_cv_use_]m4_bpatsubst([$3], -, _)=[$with_]m4_bpatsubst([$3], -, _))
AH_TEMPLATE([$5], [Build $2])
if test "[$mysql_cv_use_]m4_bpatsubst([$3], -, _)" != no; then
  AC_DEFINE([$5])
  mysql_se_decls="${mysql_se_decls},$6"
  mysql_se_htons="${mysql_se_htons},&$6"
  mysql_se_dirs="$mysql_se_dirs $7"
  mysql_se_objs="$mysql_se_objs $8"
  mysql_se_libs="$mysql_se_libs $9"
  $10
fi
])

dnl ---------------------------------------------------------------------------
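As a hand-expanded sketch (my own illustration, not part of the committed file): when configure is run with --with-csv-storage-engine, the MYSQL_STORAGE_ENGINE(csv,,,,,tina_hton,,ha_tina.o) call used later in configure.in boils down to roughly the following shell effects, plus WITH_CSV_STORAGE_ENGINE being AC_DEFINEd into config.h:

  # sketch only - approximate configure-time effect of the csv call above
  with_csv_storage_engine=yes                      # from --with-csv-storage-engine
  mysql_se_decls="${mysql_se_decls},tina_hton"     # handlerton declaration list
  mysql_se_htons="${mysql_se_htons},&tina_hton"    # handlerton pointer list
  mysql_se_objs="$mysql_se_objs ha_tina.o"         # handler object to build
  # the accumulated mysql_se_* variables are AC_SUBSTed further down in
  # configure.in and picked up by sql/handlerton.cc.in and the server and
  # libmysqld Makefiles
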
393 configure.in
@ -15,12 +15,6 @@ DOT_FRM_VERSION=6
|
|||
# See the libtool docs for information on how to do shared lib versions.
|
||||
SHARED_LIB_VERSION=15:0:0
|
||||
|
||||
# ndb version
|
||||
NDB_VERSION_MAJOR=5
|
||||
NDB_VERSION_MINOR=1
|
||||
NDB_VERSION_BUILD=2
|
||||
NDB_VERSION_STATUS="alpha"
|
||||
|
||||
# Set all version vars based on $VERSION. How do we do this more elegant ?
|
||||
# Remember that regexps needs to quote [ and ] since this is run through m4
|
||||
MYSQL_NO_DASH_VERSION=`echo $VERSION | sed -e "s|[[a-z]]*-.*$||"`
|
||||
|
@ -36,15 +30,9 @@ sinclude(config/ac-macros/alloca.m4)
|
|||
sinclude(config/ac-macros/check_cpu.m4)
|
||||
sinclude(config/ac-macros/character_sets.m4)
|
||||
sinclude(config/ac-macros/compiler_flag.m4)
|
||||
sinclude(config/ac-macros/ha_archive.m4)
|
||||
sinclude(config/ac-macros/storage.m4)
|
||||
sinclude(config/ac-macros/ha_berkeley.m4)
|
||||
sinclude(config/ac-macros/ha_blackhole.m4)
|
||||
sinclude(config/ac-macros/ha_example.m4)
|
||||
sinclude(config/ac-macros/ha_federated.m4)
|
||||
sinclude(config/ac-macros/ha_innodb.m4)
|
||||
sinclude(config/ac-macros/ha_ndbcluster.m4)
|
||||
sinclude(config/ac-macros/ha_partition.m4)
|
||||
sinclude(config/ac-macros/ha_tina.m4)
|
||||
sinclude(config/ac-macros/large_file.m4)
|
||||
sinclude(config/ac-macros/misc.m4)
|
||||
sinclude(config/ac-macros/openssl.m4)
|
||||
|
@ -61,6 +49,7 @@ romanian russian serbian slovak spanish swedish ukrainian"
|
|||
#####
|
||||
#####
|
||||
|
||||
|
||||
AC_SUBST(MYSQL_NO_DASH_VERSION)
|
||||
AC_SUBST(MYSQL_BASE_VERSION)
|
||||
AC_SUBST(MYSQL_VERSION_ID)
|
||||
|
@ -74,19 +63,6 @@ AC_SUBST(SHARED_LIB_VERSION)
|
|||
AC_SUBST(AVAILABLE_LANGUAGES)
|
||||
AC_SUBST(AVAILABLE_LANGUAGES_ERRORS)
|
||||
|
||||
AC_SUBST([NDB_VERSION_MAJOR])
|
||||
AC_SUBST([NDB_VERSION_MINOR])
|
||||
AC_SUBST([NDB_VERSION_BUILD])
|
||||
AC_SUBST([NDB_VERSION_STATUS])
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_MAJOR], [$NDB_VERSION_MAJOR],
|
||||
[NDB major version])
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_MINOR], [$NDB_VERSION_MINOR],
|
||||
[NDB minor version])
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_BUILD], [$NDB_VERSION_BUILD],
|
||||
[NDB build version])
|
||||
AC_DEFINE_UNQUOTED([NDB_VERSION_STATUS], ["$NDB_VERSION_STATUS"],
|
||||
[NDB status version])
|
||||
|
||||
|
||||
# Canonicalize the configuration name.
|
||||
SYSTEM_TYPE="$host_vendor-$host_os"
|
||||
|
@ -418,15 +394,6 @@ AC_SUBST(HOSTNAME)
|
|||
AC_SUBST(PERL)
|
||||
AC_SUBST(PERL5)
|
||||
|
||||
# for build ndb docs
|
||||
|
||||
AC_PATH_PROG(DOXYGEN, doxygen, no)
|
||||
AC_PATH_PROG(PDFLATEX, pdflatex, no)
|
||||
AC_PATH_PROG(MAKEINDEX, makeindex, no)
|
||||
AC_SUBST(DOXYGEN)
|
||||
AC_SUBST(PDFLATEX)
|
||||
AC_SUBST(MAKEINDEX)
|
||||
|
||||
# Lock for PS
|
||||
AC_PATH_PROG(PS, ps, ps)
|
||||
AC_MSG_CHECKING("how to check if pid exists")
|
||||
|
@ -917,24 +884,6 @@ esac
|
|||
MAX_C_OPTIMIZE="-O3"
|
||||
MAX_CXX_OPTIMIZE="-O3"
|
||||
|
||||
ndb_cxxflags_fix=
|
||||
case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc in
|
||||
# workaround for Sun Forte/x86 see BUG#4681
|
||||
*solaris*-i?86-no)
|
||||
CFLAGS="$CFLAGS -DBIG_TABLES"
|
||||
CXXFLAGS="$CXXFLAGS -DBIG_TABLES"
|
||||
;;
|
||||
*) ;;
|
||||
esac
|
||||
case $SYSTEM_TYPE-$ac_cv_prog_gcc in
|
||||
# workaround for Sun Forte compile problem for ndb
|
||||
*solaris*-no)
|
||||
ndb_cxxflags_fix="$ndb_cxxflags_fix -instances=static"
|
||||
;;
|
||||
*) ;;
|
||||
esac
|
||||
|
||||
|
||||
case $SYSTEM_TYPE in
|
||||
*solaris2.7*)
|
||||
# Solaris 2.7 has a broken /usr/include/widec.h
|
||||
|
@ -1130,7 +1079,7 @@ dnl Is this the right match for DEC OSF on alpha?
|
|||
sql/Makefile.in)
|
||||
# Use gen_lex_hash.linux instead of gen_lex_hash
|
||||
# Add library dependencies to mysqld_DEPENDENCIES
|
||||
lib_DEPENDENCIES="\$(bdb_libs_with_path) \$(innodb_libs) \$(ndbcluster_libs) \$(pstack_libs) \$(innodb_system_libs) \$(openssl_libs) \$(yassl_libs)"
|
||||
lib_DEPENDENCIES="\$(pstack_libs) \$(openssl_libs) \$(yassl_libs)"
|
||||
cat > $filesed << EOF
|
||||
s,\(^.*\$(MAKE) gen_lex_hash\)\$(EXEEXT),#\1,
|
||||
s,\(\./gen_lex_hash\)\$(EXEEXT),\1.linux,
|
||||
|
@ -2339,7 +2288,8 @@ readline_dir=""
|
|||
readline_h_ln_cmd=""
|
||||
readline_link=""
|
||||
|
||||
if expr "$SYSTEM_TYPE" : ".*netware.*" > /dev/null; then
|
||||
if expr "$SYSTEM_TYPE" : ".*netware.*" > /dev/null
|
||||
then
|
||||
# For NetWare, do not need readline
|
||||
echo "Skipping readline"
|
||||
else
|
||||
|
@ -2375,7 +2325,8 @@ else
|
|||
then
|
||||
# Use the new readline interface
|
||||
readline_link="-lreadline"
|
||||
elif [test "$mysql_cv_libedit_interface" = "yes"]; then
|
||||
elif [test "$mysql_cv_libedit_interface" = "yes"]
|
||||
then
|
||||
# Use libedit
|
||||
readline_link="-ledit"
|
||||
else
|
||||
|
@ -2393,18 +2344,65 @@ AC_SUBST(readline_link)
|
|||
AC_SUBST(readline_h_ln_cmd)
|
||||
|
||||
MYSQL_CHECK_BIG_TABLES
|
||||
MYSQL_CHECK_BDB
|
||||
MYSQL_CHECK_INNODB
|
||||
MYSQL_CHECK_EXAMPLEDB
|
||||
MYSQL_CHECK_ARCHIVEDB
|
||||
MYSQL_CHECK_CSVDB
|
||||
MYSQL_CHECK_BLACKHOLEDB
|
||||
MYSQL_CHECK_NDBCLUSTER
|
||||
MYSQL_CHECK_FEDERATED
|
||||
MYSQL_CHECK_PARTITIONDB
|
||||
|
||||
MYSQL_STORAGE_ENGINE(innobase,,innodb,,,,storage/innobase,ha_innodb.o,[ dnl
|
||||
\$(top_builddir)/storage/innobase/usr/libusr.a dnl
|
||||
\$(top_builddir)/storage/innobase/srv/libsrv.a dnl
|
||||
\$(top_builddir)/storage/innobase/dict/libdict.a dnl
|
||||
\$(top_builddir)/storage/innobase/que/libque.a dnl
|
||||
\$(top_builddir)/storage/innobase/srv/libsrv.a dnl
|
||||
\$(top_builddir)/storage/innobase/ibuf/libibuf.a dnl
|
||||
\$(top_builddir)/storage/innobase/row/librow.a dnl
|
||||
\$(top_builddir)/storage/innobase/pars/libpars.a dnl
|
||||
\$(top_builddir)/storage/innobase/btr/libbtr.a dnl
|
||||
\$(top_builddir)/storage/innobase/trx/libtrx.a dnl
|
||||
\$(top_builddir)/storage/innobase/read/libread.a dnl
|
||||
\$(top_builddir)/storage/innobase/usr/libusr.a dnl
|
||||
\$(top_builddir)/storage/innobase/buf/libbuf.a dnl
|
||||
\$(top_builddir)/storage/innobase/ibuf/libibuf.a dnl
|
||||
\$(top_builddir)/storage/innobase/eval/libeval.a dnl
|
||||
\$(top_builddir)/storage/innobase/log/liblog.a dnl
|
||||
\$(top_builddir)/storage/innobase/fsp/libfsp.a dnl
|
||||
\$(top_builddir)/storage/innobase/fut/libfut.a dnl
|
||||
\$(top_builddir)/storage/innobase/fil/libfil.a dnl
|
||||
\$(top_builddir)/storage/innobase/lock/liblock.a dnl
|
||||
\$(top_builddir)/storage/innobase/mtr/libmtr.a dnl
|
||||
\$(top_builddir)/storage/innobase/page/libpage.a dnl
|
||||
\$(top_builddir)/storage/innobase/rem/librem.a dnl
|
||||
\$(top_builddir)/storage/innobase/thr/libthr.a dnl
|
||||
\$(top_builddir)/storage/innobase/sync/libsync.a dnl
|
||||
\$(top_builddir)/storage/innobase/data/libdata.a dnl
|
||||
\$(top_builddir)/storage/innobase/mach/libmach.a dnl
|
||||
\$(top_builddir)/storage/innobase/ha/libha.a dnl
|
||||
\$(top_builddir)/storage/innobase/dyn/libdyn.a dnl
|
||||
\$(top_builddir)/storage/innobase/mem/libmem.a dnl
|
||||
\$(top_builddir)/storage/innobase/sync/libsync.a dnl
|
||||
\$(top_builddir)/storage/innobase/ut/libut.a dnl
|
||||
\$(top_builddir)/storage/innobase/os/libos.a dnl
|
||||
\$(top_builddir)/storage/innobase/ut/libut.a],[
|
||||
AC_CHECK_LIB(rt, aio_read, [innodb_system_libs="-lrt"])
|
||||
AC_SUBST(innodb_includes)
|
||||
AC_SUBST(innodb_libs)
|
||||
AC_SUBST(innodb_system_libs)
|
||||
other_configures="$other_configures storage/innobase/configure"
|
||||
])
|
||||
|
||||
MYSQL_STORAGE_ENGINE(berkeley,,berkeley-db,,,,storage/bdb,,,[
|
||||
MYSQL_SETUP_BERKELEY_DB
|
||||
])
|
||||
MYSQL_STORAGE_ENGINE(example)
|
||||
MYSQL_STORAGE_ENGINE(archive)
|
||||
MYSQL_STORAGE_ENGINE(csv,,,,,tina_hton,,ha_tina.o)
|
||||
MYSQL_STORAGE_ENGINE(blackhole)
|
||||
MYSQL_STORAGE_ENGINE(federated)
|
||||
MYSQL_STORAGE_ENGINE(ndbcluster,,ndbcluster,,,,storage/ndb,,,[
|
||||
MYSQL_SETUP_NDBCLUSTER
|
||||
])
|
||||
MYSQL_STORAGE_ENGINE(partition,,partition)
|
||||
|
||||
# If we have threads generate some library functions and test programs
|
||||
sql_server_dirs=
|
||||
sql_server=
|
||||
server_scripts=
|
||||
thread_dirs=
|
||||
|
||||
|
@ -2431,7 +2429,8 @@ AC_SUBST(linked_client_targets)
|
|||
# If configuring for NetWare, set up to link sources from and build the netware directory
|
||||
netware_dir=
|
||||
linked_netware_sources=
|
||||
if expr "$SYSTEM_TYPE" : ".*netware.*" > /dev/null; then
|
||||
if expr "$SYSTEM_TYPE" : ".*netware.*" > /dev/null
|
||||
then
|
||||
netware_dir="netware"
|
||||
linked_netware_sources="linked_netware_sources"
|
||||
fi
|
||||
|
@ -2452,115 +2451,8 @@ then
|
|||
AC_SUBST(THREAD_LOBJECTS)
|
||||
server_scripts="mysqld_safe mysql_install_db"
|
||||
sql_server_dirs="strings mysys dbug extra regex"
|
||||
|
||||
|
||||
#
|
||||
# Configuration for optional table handlers
|
||||
#
|
||||
|
||||
if test X"$have_berkeley_db" != Xno; then
|
||||
if test X"$have_berkeley_db" != Xyes; then
|
||||
# we must build berkeley db from source
|
||||
sql_server_dirs="$sql_server_dirs $have_berkeley_db"
|
||||
AC_CONFIG_FILES(storage/bdb/Makefile)
|
||||
|
||||
echo "CONFIGURING FOR BERKELEY DB"
|
||||
bdb_conf_flags="--disable-shared --build=$build_alias"
|
||||
if test $with_debug = "yes"
|
||||
then
|
||||
bdb_conf_flags="$bdb_conf_flags --enable-debug --enable-diagnostic"
|
||||
fi
|
||||
# NOTICE: if you're compiling BDB, it needs to be a SUBDIR
|
||||
# of $srcdir (i.e., you can 'cd $srcdir/$bdb'). It won't
|
||||
# work otherwise.
|
||||
if test -d "$bdb"; then :
|
||||
else
|
||||
# This should only happen when doing a VPATH build
|
||||
echo "NOTICE: I have to make the BDB directory: `pwd`:$bdb"
|
||||
mkdir "$bdb" || exit 1
|
||||
fi
|
||||
if test -d "$bdb"/build_unix; then :
|
||||
else
|
||||
# This should only happen when doing a VPATH build
|
||||
echo "NOTICE: I have to make the build_unix directory: `pwd`:$bdb/build_unix"
|
||||
mkdir "$bdb/build_unix" || exit 1
|
||||
fi
|
||||
rel_srcdir=
|
||||
case "$srcdir" in
|
||||
/* ) rel_srcdir="$srcdir" ;;
|
||||
* ) rel_srcdir="../../../$srcdir" ;;
|
||||
esac
|
||||
echo $bdb/build_unix
|
||||
echo $rel_srcdir/$bdb/dist/configure
|
||||
(cd $bdb/build_unix && \
|
||||
sh $rel_srcdir/$bdb/dist/configure $bdb_conf_flags) || \
|
||||
AC_MSG_ERROR([could not configure Berkeley DB])
|
||||
|
||||
dnl echo "bdb = '$bdb'; inc = '$bdb_includes', lib = '$bdb_libs'"
|
||||
echo "END OF BERKELEY DB CONFIGURATION"
|
||||
fi
|
||||
|
||||
AC_DEFINE([HAVE_BERKELEY_DB], [1], [Have berkeley db installed])
|
||||
else
|
||||
if test -d bdb; then :
|
||||
else
|
||||
mkdir storage/bdb && mkdir storage/bdb/build_unix
|
||||
fi
|
||||
|
||||
if test -r storage/bdb/build_unix/db.h; then :
|
||||
else
|
||||
cat <<EOF > storage/bdb/build_unix/db.h
|
||||
|
||||
This file is a placeholder to fool make. The way that automake
|
||||
and GNU make work together causes some files to depend on this
|
||||
header, even if we're not building with Berkeley DB.
|
||||
|
||||
Obviously, if this file *is* used, it'll break and hopefully we can find
|
||||
out why this file was generated by ${top_srcdir}/configure instead of
|
||||
the real db.h.
|
||||
|
||||
If you run into some problems because of this file, please use mysql_bug
|
||||
to generate a bug report, and give the exact output of make and any
|
||||
details you can think of. Send the message to bugs@lists.mysql.com.
|
||||
|
||||
Thank you!
|
||||
|
||||
EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
if test X"$have_innodb" = Xyes
|
||||
then
|
||||
innodb_conf_flags=""
|
||||
sql_server_dirs="$sql_server_dirs storage/innobase"
|
||||
AC_CONFIG_SUBDIRS(storage/innobase)
|
||||
fi
|
||||
|
||||
case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc-$have_ndbcluster in
|
||||
*solaris*-i?86-no-yes)
|
||||
# ndb fail for whatever strange reason to link Sun Forte/x86
|
||||
# unless using incremental linker
|
||||
CXXFLAGS="$CXXFLAGS -xildon"
|
||||
;;
|
||||
*) ;;
|
||||
esac
|
||||
|
||||
if test X"$have_ndbcluster" = Xyes
|
||||
then
|
||||
if test X"$mysql_cv_compress" != Xyes
|
||||
then
|
||||
echo
|
||||
echo "MySQL Cluster table handler ndbcluster requires compress/uncompress."
|
||||
echo "Commonly available in libzlib.a. Please install and rerun configure."
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
sql_server_dirs="$sql_server_dirs storage/ndb"
|
||||
fi
|
||||
#
|
||||
# END of configuration for optional table handlers
|
||||
#
|
||||
sql_server_dirs="$sql_server_dirs storage/myisam storage/myisammrg storage/heap vio sql"
|
||||
mysql_se_dirs="storage/myisam storage/myisammrg storage/heap $mysql_se_dirs"
|
||||
sql_server="$sql_server vio sql"
|
||||
fi
|
||||
|
||||
# IMPORTANT - do not modify LIBS past this line - this hack is the only way
|
||||
|
@ -2571,9 +2463,17 @@ LDFLAGS="$LDFLAGS $OTHER_LIBC_LIB"
|
|||
LIBS="$LIBS $STATIC_NSS_FLAGS"
|
||||
|
||||
AC_SUBST(sql_server_dirs)
|
||||
AC_SUBST(sql_server)
|
||||
AC_SUBST(thread_dirs)
|
||||
AC_SUBST(server_scripts)
|
||||
|
||||
AC_SUBST(mysql_se_dirs)
|
||||
AC_SUBST(mysql_se_libs)
|
||||
AC_SUBST(mysql_se_objs)
|
||||
AC_SUBST(mysql_se_htons)
|
||||
AC_SUBST(mysql_se_decls)
|
||||
|
||||
|
||||
# Now that sql_client_dirs and sql_server_dirs are stable, determine the union.
|
||||
# Start with the (longer) server list, add each client item not yet present.
|
||||
sql_union_dirs=" $sql_server_dirs "
|
||||
|
@ -2602,142 +2502,9 @@ case $SYSTEM_TYPE in
|
|||
;;
|
||||
esac
|
||||
|
||||
|
||||
if test X"$have_ndbcluster" = Xyes
|
||||
then
|
||||
MAKE_BINARY_DISTRIBUTION_OPTIONS="$MAKE_BINARY_DISTRIBUTION_OPTIONS --with-ndbcluster"
|
||||
|
||||
CXXFLAGS="$CXXFLAGS \$(NDB_CXXFLAGS)"
|
||||
if test "$have_ndb_debug" = "default"
|
||||
then
|
||||
have_ndb_debug=$with_debug
|
||||
fi
|
||||
|
||||
if test "$have_ndb_debug" = "yes"
|
||||
then
|
||||
# Medium debug.
|
||||
NDB_DEFS="-DNDB_DEBUG -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
|
||||
elif test "$have_ndb_debug" = "full"
|
||||
then
|
||||
NDB_DEFS="-DNDB_DEBUG_FULL -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
|
||||
else
|
||||
# no extra ndb debug but still do asserts if debug version
|
||||
if test "$with_debug" = "yes" -o "$with_debug" = "full"
|
||||
then
|
||||
NDB_DEFS=""
|
||||
else
|
||||
NDB_DEFS="-DNDEBUG"
|
||||
fi
|
||||
fi
|
||||
|
||||
AC_SUBST([NDB_DEFS])
|
||||
AC_SUBST([ndb_cxxflags_fix])
|
||||
|
||||
|
||||
if test X"$ndb_port" = Xdefault
|
||||
then
|
||||
ndb_port="1186"
|
||||
fi
|
||||
AC_SUBST([ndb_port])
|
||||
|
||||
ndb_transporter_opt_objs=""
|
||||
if test "$ac_cv_func_shmget" = "yes" &&
|
||||
test "$ac_cv_func_shmat" = "yes" &&
|
||||
test "$ac_cv_func_shmdt" = "yes" &&
|
||||
test "$ac_cv_func_shmctl" = "yes" &&
|
||||
test "$ac_cv_func_sigaction" = "yes" &&
|
||||
test "$ac_cv_func_sigemptyset" = "yes" &&
|
||||
test "$ac_cv_func_sigaddset" = "yes" &&
|
||||
test "$ac_cv_func_pthread_sigmask" = "yes"
|
||||
then
|
||||
AC_DEFINE([NDB_SHM_TRANSPORTER], [1],
|
||||
[Including Ndb Cluster DB shared memory transporter])
|
||||
AC_MSG_RESULT([Including ndb shared memory transporter])
|
||||
ndb_transporter_opt_objs="$ndb_transporter_opt_objs SHM_Transporter.lo SHM_Transporter.unix.lo"
|
||||
else
|
||||
AC_MSG_RESULT([Not including ndb shared memory transporter])
|
||||
fi
|
||||
|
||||
if test X"$have_ndb_sci" = Xyes
|
||||
then
|
||||
ndb_transporter_opt_objs="$ndb_transporter_opt_objs SCI_Transporter.lo"
|
||||
fi
|
||||
AC_SUBST([ndb_transporter_opt_objs])
|
||||
|
||||
ndb_opt_subdirs=
|
||||
ndb_bin_am_ldflags="-static"
|
||||
if test X"$have_ndb_test" = Xyes
|
||||
then
|
||||
ndb_opt_subdirs="test"
|
||||
ndb_bin_am_ldflags=""
|
||||
fi
|
||||
if test X"$have_ndb_docs" = Xyes
|
||||
then
|
||||
ndb_opt_subdirs="$ndb_opt_subdirs docs"
|
||||
ndb_bin_am_ldflags=""
|
||||
fi
|
||||
AC_SUBST([ndb_bin_am_ldflags])
|
||||
AC_SUBST([ndb_opt_subdirs])
|
||||
|
||||
NDB_SIZEOF_CHARP="$ac_cv_sizeof_charp"
|
||||
NDB_SIZEOF_CHAR="$ac_cv_sizeof_char"
|
||||
NDB_SIZEOF_SHORT="$ac_cv_sizeof_short"
|
||||
NDB_SIZEOF_INT="$ac_cv_sizeof_int"
|
||||
NDB_SIZEOF_LONG="$ac_cv_sizeof_long"
|
||||
NDB_SIZEOF_LONG_LONG="$ac_cv_sizeof_long_long"
|
||||
AC_SUBST([NDB_SIZEOF_CHARP])
|
||||
AC_SUBST([NDB_SIZEOF_CHAR])
|
||||
AC_SUBST([NDB_SIZEOF_SHORT])
|
||||
AC_SUBST([NDB_SIZEOF_INT])
|
||||
AC_SUBST([NDB_SIZEOF_LONG])
|
||||
AC_SUBST([NDB_SIZEOF_LONG_LONG])
|
||||
|
||||
AC_CONFIG_FILES(storage/ndb/Makefile storage/ndb/include/Makefile dnl
|
||||
storage/ndb/src/Makefile storage/ndb/src/common/Makefile dnl
|
||||
storage/ndb/docs/Makefile dnl
|
||||
storage/ndb/tools/Makefile dnl
|
||||
storage/ndb/src/common/debugger/Makefile dnl
|
||||
storage/ndb/src/common/debugger/signaldata/Makefile dnl
|
||||
storage/ndb/src/common/portlib/Makefile dnl
|
||||
storage/ndb/src/common/util/Makefile dnl
|
||||
storage/ndb/src/common/logger/Makefile dnl
|
||||
storage/ndb/src/common/transporter/Makefile dnl
|
||||
storage/ndb/src/common/mgmcommon/Makefile dnl
|
||||
storage/ndb/src/kernel/Makefile dnl
|
||||
storage/ndb/src/kernel/error/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/cmvmi/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbacc/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbdict/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbdih/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dblqh/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbtc/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbtup/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/ndbfs/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/ndbcntr/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/qmgr/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/trix/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/backup/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbutil/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/suma/Makefile dnl
|
||||
storage/ndb/src/kernel/blocks/dbtux/Makefile dnl
|
||||
storage/ndb/src/kernel/vm/Makefile dnl
|
||||
storage/ndb/src/mgmapi/Makefile dnl
|
||||
storage/ndb/src/ndbapi/Makefile dnl
|
||||
storage/ndb/src/mgmsrv/Makefile dnl
|
||||
storage/ndb/src/mgmclient/Makefile dnl
|
||||
storage/ndb/src/cw/Makefile dnl
|
||||
storage/ndb/src/cw/cpcd/Makefile dnl
|
||||
storage/ndb/test/Makefile dnl
|
||||
storage/ndb/test/src/Makefile dnl
|
||||
storage/ndb/test/ndbapi/Makefile dnl
|
||||
storage/ndb/test/ndbapi/bank/Makefile dnl
|
||||
storage/ndb/test/tools/Makefile dnl
|
||||
storage/ndb/test/run-test/Makefile mysql-test/ndb/Makefile dnl
|
||||
storage/ndb/include/ndb_version.h storage/ndb/include/ndb_global.h dnl
|
||||
storage/ndb/include/ndb_types.h dnl
|
||||
)
|
||||
fi
|
||||
for CONF in $other_configures; do
|
||||
(cd `dirname $CONF`; ./`basename $CONF`)
|
||||
done
|
||||
|
||||
AC_SUBST(MAKE_BINARY_DISTRIBUTION_OPTIONS)
|
||||
|
||||
|
@ -2750,13 +2517,13 @@ AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl
|
|||
libmysql_r/Makefile libmysqld/Makefile libmysqld/examples/Makefile dnl
|
||||
libmysql/Makefile client/Makefile dnl
|
||||
pstack/Makefile pstack/aout/Makefile sql/Makefile sql/share/Makefile dnl
|
||||
sql-common/Makefile SSL/Makefile dnl
|
||||
sql/handlerton.cc sql-common/Makefile SSL/Makefile dnl
|
||||
dbug/Makefile scripts/Makefile dnl
|
||||
include/Makefile sql-bench/Makefile tools/Makefile dnl
|
||||
server-tools/Makefile server-tools/instance-manager/Makefile dnl
|
||||
tests/Makefile Docs/Makefile support-files/Makefile dnl
|
||||
support-files/MacOSX/Makefile mysql-test/Makefile dnl
|
||||
netware/Makefile dnl
|
||||
mysql-test/ndb/Makefile netware/Makefile dnl
|
||||
include/mysql_version.h dnl
|
||||
cmd-line-utils/Makefile dnl
|
||||
cmd-line-utils/libedit/Makefile dnl
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \
|
||||
@ndbcluster_includes@ -I$(top_srcdir)/sql
|
||||
-I$(top_srcdir)/sql
|
||||
LDADD = @CLIENT_EXTRA_LDFLAGS@ ../mysys/libmysys.a \
|
||||
../dbug/libdbug.a ../strings/libmystrings.a
|
||||
BUILT_SOURCES= $(top_builddir)/include/mysqld_error.h \
|
||||
|
|
|
@ -27,7 +27,7 @@ DEFS = -DEMBEDDED_LIBRARY -DMYSQL_SERVER \
|
|||
-DDATADIR="\"$(MYSQLDATAdir)\"" \
|
||||
-DSHAREDIR="\"$(MYSQLSHAREdir)\"" \
|
||||
-DLIBDIR="\"$(MYSQLLIBdir)\""
|
||||
INCLUDES= @bdb_includes@ \
|
||||
INCLUDES= -I$(top_builddir)/include -I$(top_srcdir)/include \
|
||||
-I$(top_builddir)/include -I$(top_srcdir)/include \
|
||||
-I$(top_srcdir)/sql -I$(top_srcdir)/sql/examples \
|
||||
-I$(top_srcdir)/regex \
|
||||
|
@ -39,13 +39,11 @@ SUBDIRS = . examples
|
|||
libmysqld_sources= libmysqld.c lib_sql.cc emb_qcache.cc
|
||||
libmysqlsources = errmsg.c get_password.c libmysql.c client.c pack.c \
|
||||
my_time.c
|
||||
sqlexamplessources = ha_example.cc ha_tina.cc
|
||||
|
||||
noinst_HEADERS = embedded_priv.h emb_qcache.h
|
||||
|
||||
sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
|
||||
ha_innodb.cc ha_berkeley.cc ha_heap.cc ha_federated.cc \
|
||||
ha_myisam.cc ha_myisammrg.cc handler.cc sql_handler.cc \
|
||||
ha_heap.cc ha_myisam.cc ha_myisammrg.cc handler.cc sql_handler.cc \
|
||||
hostname.cc init.cc password.c \
|
||||
item.cc item_buff.cc item_cmpfunc.cc item_create.cc \
|
||||
item_func.cc item_strfunc.cc item_sum.cc item_timefunc.cc \
|
||||
|
@ -65,13 +63,17 @@ sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
|
|||
spatial.cc gstream.cc sql_help.cc tztime.cc sql_cursor.cc \
|
||||
sp_head.cc sp_pcontext.cc sp.cc sp_cache.cc sp_rcontext.cc \
|
||||
parse_file.cc sql_view.cc sql_trigger.cc my_decimal.cc \
|
||||
rpl_filter.cc \
|
||||
ha_blackhole.cc ha_archive.cc sql_partition.cc ha_partition.cc \
|
||||
sql_plugin.cc
|
||||
rpl_filter.cc sql_partition.cc handlerton.cc sql_plugin.cc
|
||||
|
||||
libmysqld_int_a_SOURCES= $(libmysqld_sources) $(libmysqlsources) $(sqlsources) $(sqlexamplessources)
|
||||
libmysqld_int_a_SOURCES= $(libmysqld_sources) $(libmysqlsources) $(sqlsources)
|
||||
EXTRA_libmysqld_a_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \
|
||||
ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
|
||||
ha_tina.cc ha_example.cc ha_partition.cc
|
||||
libmysqld_a_DEPENDENCIES= @mysql_se_objs@
|
||||
libmysqld_a_SOURCES=
|
||||
|
||||
sqlstoragesources = $(EXTRA_libmysqld_a_SOURCES)
|
||||
|
||||
# automake misses these
|
||||
sql_yacc.cc sql_yacc.h: $(top_srcdir)/sql/sql_yacc.yy
|
||||
|
||||
|
@ -80,13 +82,27 @@ INC_LIB= $(top_builddir)/regex/libregex.a \
|
|||
$(top_builddir)/storage/myisam/libmyisam.a \
|
||||
$(top_builddir)/storage/myisammrg/libmyisammrg.a \
|
||||
$(top_builddir)/storage/heap/libheap.a \
|
||||
@innodb_libs@ @bdb_libs_with_path@ \
|
||||
$(top_builddir)/mysys/libmysys.a \
|
||||
$(top_builddir)/strings/libmystrings.a \
|
||||
$(top_builddir)/dbug/libdbug.a \
|
||||
$(top_builddir)/vio/libvio.a \
|
||||
@mysql_se_libs@ \
|
||||
@yassl_libs_with_path@
|
||||
|
||||
|
||||
# Storage engine specific compilation options
|
||||
|
||||
ha_berkeley.o: ha_berkeley.cc
|
||||
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
|
||||
|
||||
ha_ndbcluster.o:ha_ndbcluster.cc
|
||||
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||
|
||||
# Until we can remove dependency on ha_ndbcluster.h
|
||||
handler.o: handler.cc
|
||||
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||
|
||||
|
||||
#
|
||||
# To make it easy for the end user to use the embedded library we
|
||||
# generate a total libmysqld.a from all library files,
|
||||
|
@ -97,7 +113,7 @@ INC_LIB= $(top_builddir)/regex/libregex.a \
|
|||
# need to add the same file twice to the library, so 'sort -u' save us
|
||||
# some time and spares unnecessary work.
|
||||
|
||||
libmysqld.a: libmysqld_int.a $(INC_LIB)
|
||||
libmysqld.a: libmysqld_int.a $(INC_LIB) $(libmysqld_a_DEPENDENCIES)
|
||||
if DARWIN_MWCC
|
||||
mwld -lib -o $@ libmysqld_int.a `echo $(INC_LIB) | sort -u`
|
||||
else
|
||||
|
@ -106,10 +122,11 @@ else
|
|||
then \
|
||||
$(libmysqld_a_AR) libmysqld.a libmysqld_int.a $(INC_LIB) ; \
|
||||
else \
|
||||
for arc in ./libmysqld_int.a $(INC_LIB); do \
|
||||
(for arc in ./libmysqld_int.a $(INC_LIB); do \
|
||||
arpath=`echo $$arc|sed 's|[^/]*$$||'`; \
|
||||
$(AR) t $$arc|sed "s|^|$$arpath|"; \
|
||||
done | sort -u | xargs $(AR) cq libmysqld.a ; \
|
||||
$(AR) t $$arc|xargs -n 1 find $$arpath -name; \
|
||||
$(AR) t $$arc|xargs -n 1 find `dirname $$arpath` -path \*/`basename $$arpath`/\* -name; \
|
||||
done; echo $(libmysqld_a_DEPENDENCIES) ) | sort -u | xargs $(AR) cq libmysqld.a ; \
|
||||
$(RANLIB) libmysqld.a ; \
|
||||
fi
|
||||
endif
|
||||
|
@ -133,16 +150,16 @@ link_sources:
|
|||
rm -f $(srcdir)/$$f; \
|
||||
@LN_CP_F@ $(srcdir)/../libmysql/$$f $(srcdir)/$$f; \
|
||||
done; \
|
||||
for f in $(sqlexamplessources); do \
|
||||
for f in $(sqlstoragesources); do \
|
||||
rm -f $(srcdir)/$$f; \
|
||||
@LN_CP_F@ $(srcdir)/../sql/examples/$$f $(srcdir)/$$f; \
|
||||
@LN_CP_F@ `find $(srcdir)/../sql -name $$f` $(srcdir)/$$f; \
|
||||
done; \
|
||||
rm -f $(srcdir)/client_settings.h; \
|
||||
@LN_CP_F@ $(srcdir)/../libmysql/client_settings.h $(srcdir)/client_settings.h;
|
||||
|
||||
|
||||
clean-local:
|
||||
rm -f `echo $(sqlsources) $(libmysqlsources) $(sqlexamplessources) | sed "s;\.lo;.c;g"` \
|
||||
rm -f `echo $(sqlsources) $(libmysqlsources) $(sqlstoragesources) | sed "s;\.lo;.c;g"` \
|
||||
$(top_srcdir)/linked_libmysqld_sources; \
|
||||
rm -f client_settings.h
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include -I$(srcdir) \
|
|||
-I$(top_srcdir) -I$(top_srcdir)/client -I$(top_srcdir)/regex \
|
||||
$(openssl_includes)
|
||||
LIBS = @LIBS@ @WRAPLIBS@ @CLIENT_LIBS@
|
||||
LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @innodb_system_libs@ @LIBDL@ $(CXXLDFLAGS)
|
||||
LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @LIBDL@ $(CXXLDFLAGS)
|
||||
|
||||
mysqltest_embedded_LINK = $(CXXLINK)
|
||||
mysqltest_embedded_SOURCES = mysqltest.c
|
||||
|
|
|
@ -17,18 +17,8 @@
|
|||
|
||||
## Process this file with automake to create Makefile.in
|
||||
|
||||
if HAVE_NDBCLUSTER_DB
|
||||
SUBDIRS = ndb
|
||||
DIST_SUBDIRS=ndb
|
||||
USE_NDBCLUSTER=\"--ndbcluster\"
|
||||
else
|
||||
# If one uses automake conditionals, automake will automatically
|
||||
# include all possible branches to DIST_SUBDIRS goal.
|
||||
# Reset DIST_SUBDIRS if we don't use NDB
|
||||
SUBDIRS=
|
||||
DIST_SUBDIRS=
|
||||
USE_NDBCLUSTER=\"\"
|
||||
endif
|
||||
|
||||
benchdir_root= $(prefix)
|
||||
testdir = $(benchdir_root)/mysql-test
|
||||
|
@ -126,7 +116,7 @@ SUFFIXES = .sh
|
|||
-e 's!@''MYSQL_TCP_PORT''@!@MYSQL_TCP_PORT@!' \
|
||||
-e 's!@''MYSQL_NO_DASH_VERSION''@!@MYSQL_NO_DASH_VERSION@!' \
|
||||
-e 's!@''MYSQL_SERVER_SUFFIX''@!@MYSQL_SERVER_SUFFIX@!' \
|
||||
-e 's!@''USE_NDBCLUSTER''@!$(USE_NDBCLUSTER)!g' \
|
||||
-e 's!@''USE_NDBCLUSTER''@!@TEST_NDBCLUSTER@!g' \
|
||||
$< > $@-t
|
||||
@CHMOD@ +x $@-t
|
||||
@MV@ $@-t $@
|
||||
|
|
|
@ -320,21 +320,6 @@ prepare stmt4 from ' show errors limit 20 ';
|
|||
ERROR HY000: This command is not supported in the prepared statement protocol yet
|
||||
prepare stmt4 from ' show storage engines ';
|
||||
execute stmt4;
|
||||
Engine Support Comment
|
||||
MyISAM YES/NO Default engine as of MySQL 3.23 with great performance
|
||||
MEMORY YES/NO Hash based, stored in memory, useful for temporary tables
|
||||
InnoDB YES/NO Supports transactions, row-level locking, and foreign keys
|
||||
BerkeleyDB YES/NO Supports transactions and page-level locking
|
||||
BLACKHOLE YES/NO /dev/null storage engine (anything you write to it disappears)
|
||||
EXAMPLE YES/NO Example storage engine
|
||||
ARCHIVE YES/NO Archive storage engine
|
||||
CSV YES/NO CSV storage engine
|
||||
ndbcluster YES/NO Clustered, fault-tolerant, memory-based tables
|
||||
FEDERATED YES/NO Federated MySQL storage engine
|
||||
MRG_MYISAM YES/NO Collection of identical MyISAM tables
|
||||
binlog YES/NO This is a meta storage engine to represent the binlog in a transaction
|
||||
ISAM YES/NO Obsolete storage engine
|
||||
partition YES/NO Partition engine
|
||||
drop table if exists t5;
|
||||
prepare stmt1 from ' drop table if exists t5 ' ;
|
||||
execute stmt1 ;
|
||||
|
|
|
@ -518,7 +518,7 @@ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length I
|
|||
t1 NULL NULL NULL NULL # # # # NULL NULL NULL NULL NULL NULL NULL NULL Incorrect information in file: './test/t1.frm'
|
||||
show create table t1;
|
||||
ERROR HY000: Incorrect information in file: './test/t1.frm'
|
||||
drop table t1;
|
||||
drop table if exists t1;
|
||||
CREATE TABLE txt1(a int);
|
||||
CREATE TABLE tyt2(a int);
|
||||
CREATE TABLE urkunde(a int);
|
||||
|
|
|
@ -423,24 +423,6 @@ a\b a\"b a'\b a'\"b
|
|||
SELECT "a\\b", "a\\\'b", "a""\\b", "a""\\\'b";
|
||||
a\b a\'b a"\b a"\'b
|
||||
a\b a\'b a"\b a"\'b
|
||||
set session sql_mode = 'NO_ENGINE_SUBSTITUTION';
|
||||
create table t1 (a int) engine=isam;
|
||||
ERROR HY000: The 'ISAM' feature is disabled; you need MySQL built with 'ISAM' to have it working
|
||||
show create table t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist
|
||||
drop table if exists t1;
|
||||
Warnings:
|
||||
Note 1051 Unknown table 't1'
|
||||
set session sql_mode = '';
|
||||
create table t1 (a int) engine=isam;
|
||||
Warnings:
|
||||
Warning 1266 Using storage engine MyISAM for table 't1'
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` int(11) default NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
drop table t1;
|
||||
SET @@SQL_MODE='';
|
||||
create function `foo` () returns int return 5;
|
||||
show create function `foo`;
|
||||
|
|
|
@ -166,13 +166,6 @@ show variables like 'max_error_count';
|
|||
Variable_name Value
|
||||
max_error_count 10
|
||||
drop table t1;
|
||||
create table t1 (id int) engine=isam;
|
||||
Warnings:
|
||||
Warning 1266 Using storage engine MyISAM for table 't1'
|
||||
alter table t1 engine=isam;
|
||||
Warnings:
|
||||
Warning 1266 Using storage engine MyISAM for table 't1'
|
||||
drop table t1;
|
||||
create table t1 (id int) type=heap;
|
||||
Warnings:
|
||||
Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead
|
||||
|
|
|
@ -342,8 +342,11 @@ prepare stmt4 from ' show warnings limit 20 ';
|
|||
--error 1295
|
||||
prepare stmt4 from ' show errors limit 20 ';
|
||||
prepare stmt4 from ' show storage engines ';
|
||||
--replace_column 2 YES/NO
|
||||
# The output depends upon the precise order in which
|
||||
# storage engines are registered, so we switch off the output.
|
||||
--disable_result_log
|
||||
execute stmt4;
|
||||
--enable_result_log
|
||||
|
||||
################ MISC STUFF ################
|
||||
## get a warning and an error
|
||||
|
|
|
@ -396,7 +396,8 @@ system echo "this is a junk file for test" >> var/master-data/test/t1.frm ;
|
|||
SHOW TABLE STATUS like 't1';
|
||||
--error 1033
|
||||
show create table t1;
|
||||
drop table t1;
|
||||
drop table if exists t1;
|
||||
system rm -f var/master-data/test/t1.frm ;
|
||||
|
||||
|
||||
# End of 4.1 tests
|
||||
|
|
|
@ -209,18 +209,18 @@ SELECT "a\\b", "a\\\'b", "a""\\b", "a""\\\'b";
|
|||
# is not available
|
||||
#
|
||||
|
||||
set session sql_mode = 'NO_ENGINE_SUBSTITUTION';
|
||||
--error 1289
|
||||
create table t1 (a int) engine=isam;
|
||||
--error 1146
|
||||
show create table t1;
|
||||
drop table if exists t1;
|
||||
|
||||
# for comparison, lets see the warnings...
|
||||
set session sql_mode = '';
|
||||
create table t1 (a int) engine=isam;
|
||||
show create table t1;
|
||||
drop table t1;
|
||||
#set session sql_mode = 'NO_ENGINE_SUBSTITUTION';
|
||||
#--error 1289
|
||||
#create table t1 (a int) engine=isam;
|
||||
#--error 1146
|
||||
#show create table t1;
|
||||
#drop table if exists t1;
|
||||
#
|
||||
## for comparison, lets see the warnings...
|
||||
#set session sql_mode = '';
|
||||
#create table t1 (a int) engine=isam;
|
||||
#show create table t1;
|
||||
#drop table t1;
|
||||
|
||||
#
|
||||
# Bug #6903: ANSI_QUOTES does not come into play with SHOW CREATE FUNCTION
|
||||
|
|
|
mysql-test/t/system_mysql_db_fix.test
@@ -34,7 +34,7 @@ CREATE TABLE db (
PRIMARY KEY Host (Host,Db,User),
KEY User (User)
)
type=ISAM;
type=MyISAM;
--enable-warnings

INSERT INTO db VALUES ('%','test', '','Y','Y','Y','Y','Y','Y');
@@ -52,7 +52,7 @@ CREATE TABLE host (
Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL,
PRIMARY KEY Host (Host,Db)
)
type=ISAM;
type=MyISAM;
--enable-warnings

--disable_warnings
@@ -71,7 +71,7 @@ CREATE TABLE user (
Process_priv enum('N','Y') DEFAULT 'N' NOT NULL,
PRIMARY KEY Host (Host,User)
)
type=ISAM;
type=MyISAM;
--enable-warnings

INSERT INTO user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y');

mysql-test/t/view.test
@@ -1804,7 +1804,9 @@ drop table t1;
# underlying tables (BUG#6443)
#
set sql_mode='strict_all_tables';
--disable_warnings
CREATE TABLE t1 (col1 INT NOT NULL, col2 INT NOT NULL) ENGINE = INNODB;
--enable_warnings
CREATE VIEW v1 (vcol1) AS SELECT col1 FROM t1;
CREATE VIEW v2 (vcol1) AS SELECT col1 FROM t1 WHERE col2 > 2;
-- error 1364
@@ -1860,7 +1862,9 @@ drop table t1;
#
# Test for bug #11771: wrong query_id in SELECT * FROM <view>
#
--disable_warnings
CREATE TABLE t1 (f1 char) ENGINE = innodb;
--enable_warnings
INSERT INTO t1 VALUES ('A');
CREATE VIEW v1 AS SELECT * FROM t1;

mysql-test/t/warnings.test
@@ -113,9 +113,9 @@ show variables like 'max_error_count';
# Test for handler type
#
drop table t1;
create table t1 (id int) engine=isam;
alter table t1 engine=isam;
drop table t1;
#create table t1 (id int) engine=isam;
#alter table t1 engine=isam;
#drop table t1;

#
# Test for deprecated TYPE= syntax

sql/Makefile.am
@@ -21,7 +21,6 @@ MYSQLSHAREdir = $(pkgdatadir)
MYSQLBASEdir= $(prefix)
MYSQLLIBdir= $(pkglibdir)
INCLUDES = @ZLIB_INCLUDES@ \
@bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \
-I$(top_builddir)/include -I$(top_srcdir)/include \
-I$(top_srcdir)/regex -I$(srcdir) $(yassl_includes) \
$(openssl_includes)
@@ -38,12 +37,11 @@ LDADD = $(top_builddir)/storage/myisam/libmyisam.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/regex/libregex.a \
$(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ @NDB_SCI_LIBS@
$(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@

mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@bdb_libs@ @innodb_libs@ @pstack_libs@ \
@innodb_system_libs@ \
@ndbcluster_libs@ @ndbcluster_system_libs@ \
@pstack_libs@ \
@mysql_se_objs@ @mysql_se_libs@ \
$(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \
@yassl_libs@ @openssl_libs@
noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
@@ -53,9 +51,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
procedure.h sql_class.h sql_lex.h sql_list.h \
sql_manager.h sql_map.h sql_string.h unireg.h \
sql_error.h field.h handler.h mysqld_suffix.h \
ha_myisammrg.h\
ha_heap.h ha_myisam.h ha_berkeley.h ha_innodb.h \
ha_ndbcluster.h opt_range.h protocol.h \
ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \
opt_range.h protocol.h \
sql_select.h structs.h table.h sql_udf.h hash_filo.h\
lex.h lex_symbol.h sql_acl.h sql_crypt.h \
log_event.h sql_repl.h slave.h rpl_filter.h \
@@ -65,11 +62,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \
parse_file.h sql_view.h sql_trigger.h \
sql_array.h sql_cursor.h \
examples/ha_example.h ha_archive.h \
examples/ha_tina.h ha_blackhole.h \
ha_federated.h ha_partition.h \
sql_plugin.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
thr_malloc.cc item_create.cc item_subselect.cc \
@@ -88,9 +82,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
unireg.cc des_key_file.cc \
discover.cc time.cc opt_range.cc opt_sum.cc \
records.cc filesort.cc handler.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
ha_berkeley.cc ha_innodb.cc \
ha_ndbcluster.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
@@ -103,11 +95,13 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
tztime.cc my_time.c my_decimal.cc\
sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \
sp_cache.cc parse_file.cc sql_trigger.cc \
examples/ha_example.cc ha_archive.cc \
examples/ha_tina.cc ha_blackhole.cc \
ha_partition.cc sql_partition.cc \
ha_federated.cc \
sql_plugin.cc
sql_plugin.cc\
handlerton.cc
EXTRA_mysqld_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \
ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
ha_partition.cc \
examples/ha_tina.cc examples/ha_example.cc
mysqld_DEPENDENCIES = @mysql_se_objs@
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc
@@ -156,6 +150,16 @@ sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS)
lex_hash.h: gen_lex_hash$(EXEEXT)
./gen_lex_hash$(EXEEXT) > $@

ha_berkeley.o: ha_berkeley.cc ha_berkeley.h
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<

ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<

#Until we can get rid of dependencies on ha_ndbcluster.h
handler.o: handler.cc ha_ndbcluster.h
$(CXXCOMPILE) @ndbcluster_includes@ $(CXXFLAGS) -c $<

# For testing of udf_example.so; Works on platforms with gcc
# (This is not part of our build process but only provided as an example)
udf_example.so: udf_example.cc

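One consequence of the Makefile change worth spelling out: engine sources now sit in EXTRA_mysqld_SOURCES and are linked only through @mysql_se_objs@ when configure selects them, and the matching source-side convention in this commit is a per-engine WITH_<ENGINE>_STORAGE_ENGINE define (see the WITH_NDBCLUSTER_STORAGE_ENGINE and WITH_PARTITION_STORAGE_ENGINE guards in the handler.cc hunks further down). A minimal compilable sketch of that guard, using a hypothetical engine name FOO — not part of the commit itself:

#include <cstdio>

/* WITH_FOO_STORAGE_ENGINE would normally be defined by configure when the
   hypothetical engine "foo" is selected; ha_foo.h would be its header. */
#ifdef WITH_FOO_STORAGE_ENGINE
static const int foo_enabled= 1;   /* ha_foo.o is compiled and linked in */
#else
static const int foo_enabled= 0;   /* engine left out, server still builds */
#endif

int main()
{
  std::printf("foo storage engine %s\n", foo_enabled ? "built in" : "not built");
  return 0;
}
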
sql/examples/ha_example.cc
@@ -69,9 +69,9 @@

#include "../mysql_priv.h"

#ifdef HAVE_EXAMPLE_DB
#include "ha_example.h"

static handler* example_create_handler(TABLE *table);

handlerton example_hton= {
"EXAMPLE",
@@ -94,6 +94,15 @@ handlerton example_hton= {
NULL, /* create_cursor_read_view */
NULL, /* set_cursor_read_view */
NULL, /* close_cursor_read_view */
example_create_handler, /* Create a new handler */
NULL, /* Drop a database */
NULL, /* Panic call */
NULL, /* Release temporary latches */
NULL, /* Update Statistics */
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
NULL, /* Replication Report Sent Binlog */
HTON_CAN_RECREATE
};

@@ -204,6 +213,12 @@ static int free_share(EXAMPLE_SHARE *share)
}


static handler* example_create_handler(TABLE *table)
{
return new ha_example(table);
}


ha_example::ha_example(TABLE *table_arg)
:handler(&example_hton, table_arg)
{}
@@ -696,4 +711,3 @@ int ha_example::create(const char *name, TABLE *table_arg,
/* This is not implemented but we want someone to be able that it works. */
DBUG_RETURN(0);
}
#endif /* HAVE_EXAMPLE_DB */

sql/examples/ha_example.h
@@ -152,3 +152,4 @@ public:
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type); //required
};

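The ha_example hunks above are the template every engine in this commit follows: a file-static factory, a handlerton whose new tail slots (create, drop database, panic, release temporary latches, update statistics, start consistent snapshot, flush logs, show status, replication report, flags) either point at engine callbacks or stay NULL, and a handler constructor that passes its own handlerton to the base class. Below is a compact stand-alone model of that pattern, not part of the commit: TABLE, handler and handlerton are simplified stand-ins rather than the real server types, and "toy" is a hypothetical engine name.

#include <cstdio>

struct handlerton;                       /* forward declaration, as in handler.h */
struct TABLE {};

struct handler
{
  const handlerton *hton;                /* which engine created this handler */
  TABLE *table;
  handler(const handlerton *h, TABLE *t) : hton(h), table(t) {}
  virtual ~handler() {}
};

struct handlerton
{
  const char *name;
  handler *(*create)(TABLE *table);      /* "Create a new handler" slot */
  int (*panic)(int flag);                /* "Panic call" slot, NULL if unused */
  unsigned long flags;                   /* e.g. HTON_CAN_RECREATE */
};

static handler *toy_create_handler(TABLE *table);

static const handlerton toy_hton=
{
  "TOY",
  toy_create_handler,                    /* Create a new handler */
  NULL,                                  /* Panic call */
  0                                      /* flags */
};

struct ha_toy : public handler
{
  ha_toy(TABLE *t) : handler(&toy_hton, t) {}   /* constructor records its hton */
};

static handler *toy_create_handler(TABLE *table)
{
  return new ha_toy(table);              /* mirrors example_create_handler() */
}

int main()
{
  TABLE t;
  handler *h= toy_hton.create(&t);       /* generic code never names ha_toy directly */
  std::printf("handler created by engine %s\n", h->hton->name);
  delete h;
  return 0;
}
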
sql/examples/ha_tina.cc
@@ -48,8 +48,6 @@ TODO:

#include "mysql_priv.h"

#ifdef HAVE_CSV_DB

#include "ha_tina.h"
#include <sys/mman.h>

@@ -57,6 +55,7 @@ TODO:
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
static int tina_init= 0;
static handler* tina_create_handler(TABLE *table);

handlerton tina_hton= {
"CSV",
@@ -79,6 +78,15 @@ handlerton tina_hton= {
NULL, /* create_cursor_read_view */
NULL, /* set_cursor_read_view */
NULL, /* close_cursor_read_view */
tina_create_handler, /* Create a new handler */
NULL, /* Drop a database */
tina_end, /* Panic call */
NULL, /* Release temporary latches */
NULL, /* Update Statistics */
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
NULL, /* Replication Report Sent Binlog */
HTON_CAN_RECREATE
};

@@ -247,7 +255,7 @@ static int free_share(TINA_SHARE *share)
DBUG_RETURN(result_code);
}

bool tina_end()
int tina_end(ha_panic_function type)
{
if (tina_init)
{
@@ -255,7 +263,7 @@ bool tina_end()
VOID(pthread_mutex_destroy(&tina_mutex));
}
tina_init= 0;
return FALSE;
return 0;
}

/*
@@ -272,6 +280,12 @@ byte * find_eoln(byte *data, off_t begin, off_t end)
}


static handler* tina_create_handler(TABLE *table)
{
return new ha_tina(table);
}


ha_tina::ha_tina(TABLE *table_arg)
:handler(&tina_hton, table_arg),
/*
@@ -909,4 +923,3 @@ int ha_tina::create(const char *name, TABLE *table_arg,
DBUG_RETURN(0);
}

#endif /* enable CSV */

sql/examples/ha_tina.h
@@ -125,5 +125,5 @@ public:
int chain_append();
};

bool tina_end();
int tina_end(ha_panic_function type);

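The tina_end() change above — bool tina_end(void) becoming int tina_end(ha_panic_function) — is the signature every engine shutdown hook adopts in this commit so it can sit directly in the handlerton's panic slot. A minimal sketch of that shape, not part of the commit; the enum only loosely mirrors the server's ha_panic_function and "toy" is a hypothetical engine:

#include <cstdio>

enum ha_panic_function { HA_PANIC_CLOSE, HA_PANIC_WRITE, HA_PANIC_READ };

static bool toy_inited= true;

static int toy_end(ha_panic_function type)
{
  (void) type;                  /* this simple engine ignores the reason */
  if (toy_inited)
  {
    /* free caches, destroy mutexes, close files ... */
    toy_inited= false;
  }
  return 0;                     /* 0 on success; non-zero is ORed into ha_panic()'s result */
}

int main()
{
  return toy_end(HA_PANIC_CLOSE);
}
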
sql/ha_archive.cc
@@ -20,7 +20,6 @@

#include "mysql_priv.h"

#ifdef HAVE_ARCHIVE_DB
#include "ha_archive.h"
#include <my_dir.h>

@@ -135,6 +134,10 @@ static HASH archive_open_tables;
#define DATA_BUFFER_SIZE 2 // Size of the data used in the data file
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption

/* Static declarations for handerton */
static handler *archive_create_handler(TABLE *table);


/* dummy handlerton - only to have something to return from archive_db_init */
handlerton archive_hton = {
"ARCHIVE",
@@ -157,9 +160,22 @@ handlerton archive_hton = {
NULL, /* create_cursor_read_view */
NULL, /* set_cursor_read_view */
NULL, /* close_cursor_read_view */
archive_create_handler, /* Create a new handler */
NULL, /* Drop a database */
archive_db_end, /* Panic call */
NULL, /* Release temporary latches */
NULL, /* Update Statistics */
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
NULL, /* Replication Report Sent Binlog */
HTON_NO_FLAGS
};

static handler *archive_create_handler(TABLE *table)
{
return new ha_archive(table);
}

/*
Used for hash table that tracks open tables.
@@ -215,7 +231,7 @@ error:
FALSE OK
*/

bool archive_db_end()
int archive_db_end(ha_panic_function type)
{
if (archive_inited)
{
@@ -223,7 +239,7 @@ bool archive_db_end()
VOID(pthread_mutex_destroy(&archive_mutex));
}
archive_inited= 0;
return FALSE;
return 0;
}

ha_archive::ha_archive(TABLE *table_arg)
@@ -1129,4 +1145,3 @@ bool ha_archive::check_and_repair(THD *thd)
DBUG_RETURN(HA_ADMIN_OK);
}
}
#endif /* HAVE_ARCHIVE_DB */

sql/ha_archive.h
@@ -109,5 +109,5 @@ public:
};

bool archive_db_init(void);
bool archive_db_end(void);
int archive_db_end(ha_panic_function type);

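Like the other engines touched here, ha_archive's constructor now hands &archive_hton to the base handler. The point of carrying that pointer is that generic code can get from any handler object back to its engine's handlerton, for example to its slot index for per-connection data or to its capability flags. A simplified stand-alone sketch of that idea, not part of the commit; every type here is a mock-up of the real server type:

#include <cstdio>

struct handlerton;            /* forward declaration */
struct TABLE {};

struct handlerton
{
  const char  *name;
  unsigned int slot;          /* index into a per-connection ha_data[] array */
  unsigned int flags;
};

struct handler
{
  const handlerton *ht;
  TABLE *table;
  handler(const handlerton *h, TABLE *t) : ht(h), table(t) {}
};

static const handlerton toy_hton= { "TOY", 3, 0 };    /* slot picked at init time */

struct THD { void *ha_data[8]; };                     /* mock connection object */

static void *engine_data_for(THD *thd, const handler *h)
{
  /* any handler can lead generic code back to its engine's private data */
  return thd->ha_data[h->ht->slot];
}

int main()
{
  THD thd= {};
  TABLE t;
  handler h(&toy_hton, &t);                           /* ctor passes its hton, like ha_archive */
  thd.ha_data[toy_hton.slot]= (void *) "per-connection state";
  std::printf("%s -> %s\n", h.ht->name, (const char *) engine_data_for(&thd, &h));
  return 0;
}
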
@ -53,7 +53,6 @@
|
|||
|
||||
#include "mysql_priv.h"
|
||||
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#include <m_ctype.h>
|
||||
#include <myisampack.h>
|
||||
#include <hash.h>
|
||||
|
@ -72,6 +71,9 @@
|
|||
#define STATUS_ROW_COUNT_INIT 2
|
||||
#define STATUS_BDB_ANALYZE 4
|
||||
|
||||
const u_int32_t bdb_DB_TXN_NOSYNC= DB_TXN_NOSYNC;
|
||||
const u_int32_t bdb_DB_RECOVER= DB_RECOVER;
|
||||
const u_int32_t bdb_DB_PRIVATE= DB_PRIVATE;
|
||||
const char *ha_berkeley_ext=".db";
|
||||
bool berkeley_shared_data=0;
|
||||
u_int32_t berkeley_init_flags= DB_PRIVATE | DB_RECOVER, berkeley_env_flags=0,
|
||||
|
@ -107,6 +109,7 @@ static void berkeley_noticecall(DB_ENV *db_env, db_notices notice);
|
|||
static int berkeley_close_connection(THD *thd);
|
||||
static int berkeley_commit(THD *thd, bool all);
|
||||
static int berkeley_rollback(THD *thd, bool all);
|
||||
static handler *berkeley_create_handler(TABLE *table);
|
||||
|
||||
handlerton berkeley_hton = {
|
||||
"BerkeleyDB",
|
||||
|
@ -129,9 +132,23 @@ handlerton berkeley_hton = {
|
|||
NULL, /* create_cursor_read_view */
|
||||
NULL, /* set_cursor_read_view */
|
||||
NULL, /* close_cursor_read_view */
|
||||
HTON_CLOSE_CURSORS_AT_COMMIT
|
||||
berkeley_create_handler, /* Create a new handler */
|
||||
NULL, /* Drop a database */
|
||||
berkeley_end, /* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
berkeley_flush_logs, /* Flush logs */
|
||||
berkeley_show_status, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME
|
||||
};
|
||||
|
||||
handler *berkeley_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_berkeley(table);
|
||||
}
|
||||
|
||||
typedef struct st_berkeley_trx_data {
|
||||
DB_TXN *all;
|
||||
DB_TXN *stmt;
|
||||
|
@ -215,18 +232,19 @@ error:
|
|||
}
|
||||
|
||||
|
||||
bool berkeley_end(void)
|
||||
int berkeley_end(ha_panic_function type)
|
||||
{
|
||||
int error;
|
||||
int error= 0;
|
||||
DBUG_ENTER("berkeley_end");
|
||||
if (!db_env)
|
||||
return 1; /* purecov: tested */
|
||||
berkeley_cleanup_log_files();
|
||||
error=db_env->close(db_env,0); // Error is logged
|
||||
db_env=0;
|
||||
hash_free(&bdb_open_tables);
|
||||
pthread_mutex_destroy(&bdb_mutex);
|
||||
DBUG_RETURN(error != 0);
|
||||
if (db_env)
|
||||
{
|
||||
berkeley_cleanup_log_files();
|
||||
error= db_env->close(db_env,0); // Error is logged
|
||||
db_env= 0;
|
||||
hash_free(&bdb_open_tables);
|
||||
pthread_mutex_destroy(&bdb_mutex);
|
||||
}
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
static int berkeley_close_connection(THD *thd)
|
||||
|
@ -280,7 +298,7 @@ static int berkeley_rollback(THD *thd, bool all)
|
|||
}
|
||||
|
||||
|
||||
int berkeley_show_logs(Protocol *protocol)
|
||||
static bool berkeley_show_logs(THD *thd, stat_print_fn *stat_print)
|
||||
{
|
||||
char **all_logs, **free_logs, **a, **f;
|
||||
int error=1;
|
||||
|
@ -307,21 +325,19 @@ int berkeley_show_logs(Protocol *protocol)
|
|||
{
|
||||
for (a = all_logs, f = free_logs; *a; ++a)
|
||||
{
|
||||
protocol->prepare_for_resend();
|
||||
protocol->store(*a, system_charset_info);
|
||||
protocol->store("BDB", 3, system_charset_info);
|
||||
const char *status;
|
||||
if (f && *f && strcmp(*a, *f) == 0)
|
||||
{
|
||||
f++;
|
||||
protocol->store(SHOW_LOG_STATUS_FREE, system_charset_info);
|
||||
f++;
|
||||
status= SHOW_LOG_STATUS_FREE;
|
||||
}
|
||||
else
|
||||
protocol->store(SHOW_LOG_STATUS_INUSE, system_charset_info);
|
||||
|
||||
if (protocol->write())
|
||||
status= SHOW_LOG_STATUS_INUSE;
|
||||
|
||||
if (stat_print(thd, berkeley_hton.name, *a, status))
|
||||
{
|
||||
error=1;
|
||||
goto err;
|
||||
error=1;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -331,6 +347,16 @@ err:
|
|||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
bool berkeley_show_status(THD *thd, stat_print_fn *stat_print,
|
||||
enum ha_stat_type stat_type)
|
||||
{
|
||||
switch (stat_type) {
|
||||
case HA_ENGINE_LOGS:
|
||||
return berkeley_show_logs(thd, stat_print);
|
||||
default:
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
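The BerkeleyDB hunks above replace Protocol-based result rows with a stat_print callback supplied by the server; the same conversion is applied to InnoDB and NDB later in this diff. A stand-alone sketch of that callback style follows, not part of the commit; THD, the stat_print_fn typedef and the "toy" engine are simplified stand-ins that only match how the callback is invoked in these hunks:

#include <cstdio>

struct THD {};

typedef bool stat_print_fn(THD *thd, const char *engine,
                           const char *name, const char *status);

enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX };

static bool toy_show_logs(THD *thd, stat_print_fn *stat_print)
{
  const char *logs[]= { "log.0000000001", "log.0000000002" };
  for (unsigned i= 0; i < 2; i++)
    if (stat_print(thd, "TOY", logs[i], i == 0 ? "FREE" : "IN USE"))
      return true;              /* sending a row failed */
  return false;
}

static bool toy_show_status(THD *thd, stat_print_fn *stat_print,
                            enum ha_stat_type stat_type)
{
  switch (stat_type) {
  case HA_ENGINE_LOGS:
    return toy_show_logs(thd, stat_print);
  default:
    return false;               /* nothing to report for other requests */
  }
}

/* Trivial printer standing in for the server's side of the callback. */
static bool print_row(THD *, const char *engine, const char *name,
                      const char *status)
{
  std::printf("%-8s %-16s %s\n", engine, name, status);
  return false;
}

int main()
{
  THD thd;
  return toy_show_status(&thd, print_row, HA_ENGINE_LOGS) ? 1 : 0;
}
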
|
||||
static void berkeley_print_error(const DB_ENV *db_env, const char *db_errpfx,
|
||||
const char *buffer)
|
||||
|
@ -344,9 +370,7 @@ static void berkeley_noticecall(DB_ENV *db_env, db_notices notice)
|
|||
switch (notice)
|
||||
{
|
||||
case DB_NOTICE_LOGFILE_CHANGED: /* purecov: tested */
|
||||
pthread_mutex_lock(&LOCK_manager);
|
||||
manager_status |= MANAGER_BERKELEY_LOG_CLEANUP;
|
||||
pthread_mutex_unlock(&LOCK_manager);
|
||||
mysql_manager_submit(berkeley_cleanup_log_files);
|
||||
pthread_cond_signal(&COND_manager);
|
||||
break;
|
||||
}
|
||||
|
@ -2669,4 +2693,3 @@ bool ha_berkeley::check_if_incompatible_data(HA_CREATE_INFO *info,
|
|||
}
|
||||
|
||||
|
||||
#endif /* HAVE_BERKELEY_DB */
|
||||
|
|
|
@ -157,6 +157,9 @@ class ha_berkeley: public handler
|
|||
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
|
||||
};
|
||||
|
||||
extern const u_int32_t bdb_DB_TXN_NOSYNC;
|
||||
extern const u_int32_t bdb_DB_RECOVER;
|
||||
extern const u_int32_t bdb_DB_PRIVATE;
|
||||
extern bool berkeley_shared_data;
|
||||
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
|
||||
berkeley_lock_types[];
|
||||
|
@ -166,6 +169,6 @@ extern long berkeley_lock_scan_time;
|
|||
extern TYPELIB berkeley_lock_typelib;
|
||||
|
||||
bool berkeley_init(void);
|
||||
bool berkeley_end(void);
|
||||
int berkeley_end(ha_panic_function type);
|
||||
bool berkeley_flush_logs(void);
|
||||
int berkeley_show_logs(Protocol *protocol);
|
||||
bool berkeley_show_status(THD *thd, stat_print_fn *print, enum ha_stat_type);
|
||||
|
|
|
@ -20,9 +20,12 @@
|
|||
#endif
|
||||
|
||||
#include "mysql_priv.h"
|
||||
#ifdef HAVE_BLACKHOLE_DB
|
||||
#include "ha_blackhole.h"
|
||||
|
||||
/* Static declarations for handlerton */
|
||||
|
||||
static handler *blackhole_create_handler(TABLE *table);
|
||||
|
||||
|
||||
/* Blackhole storage engine handlerton */
|
||||
|
||||
|
@ -47,9 +50,25 @@ handlerton blackhole_hton= {
|
|||
NULL, /* create_cursor_read_view */
|
||||
NULL, /* set_cursor_read_view */
|
||||
NULL, /* close_cursor_read_view */
|
||||
blackhole_create_handler, /* Create a new handler */
|
||||
NULL, /* Drop a database */
|
||||
NULL, /* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
NULL, /* Flush logs */
|
||||
NULL, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_CAN_RECREATE
|
||||
};
|
||||
|
||||
|
||||
static handler *blackhole_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_blackhole(table);
|
||||
}
|
||||
|
||||
|
||||
/*****************************************************************************
|
||||
** BLACKHOLE tables
|
||||
*****************************************************************************/
|
||||
|
@ -227,4 +246,3 @@ int ha_blackhole::index_last(byte * buf)
|
|||
DBUG_RETURN(HA_ERR_END_OF_FILE);
|
||||
}
|
||||
|
||||
#endif /* HAVE_BLACKHOLE_DB */
|
||||
|
|
|
@ -351,7 +351,6 @@
|
|||
#pragma implementation // gcc: Class implementation
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_FEDERATED_DB
|
||||
#include "ha_federated.h"
|
||||
|
||||
#include "m_string.h"
|
||||
|
@ -363,6 +362,11 @@ pthread_mutex_t federated_mutex; // This is the mutex we use to
|
|||
static int federated_init= FALSE; // Variable for checking the
|
||||
// init state of hash
|
||||
|
||||
/* Static declaration for handerton */
|
||||
|
||||
static handler *federated_create_handler(TABLE *table);
|
||||
|
||||
|
||||
/* Federated storage engine handlerton */
|
||||
|
||||
handlerton federated_hton= {
|
||||
|
@ -386,10 +390,25 @@ handlerton federated_hton= {
|
|||
NULL, /* create_cursor_read_view */
|
||||
NULL, /* set_cursor_read_view */
|
||||
NULL, /* close_cursor_read_view */
|
||||
federated_create_handler, /* Create a new handler */
|
||||
NULL, /* Drop a database */
|
||||
federated_db_end, /* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
NULL, /* Flush logs */
|
||||
NULL, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_ALTER_NOT_SUPPORTED
|
||||
};
|
||||
|
||||
|
||||
static handler *federated_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_federated(table);
|
||||
}
|
||||
|
||||
|
||||
/* Function we use in the creation of our hash to get key. */
|
||||
|
||||
static byte *federated_get_key(FEDERATED_SHARE *share, uint *length,
|
||||
|
@ -443,7 +462,7 @@ error:
|
|||
FALSE OK
|
||||
*/
|
||||
|
||||
bool federated_db_end()
|
||||
int federated_db_end(ha_panic_function type)
|
||||
{
|
||||
if (federated_init)
|
||||
{
|
||||
|
@ -451,7 +470,7 @@ bool federated_db_end()
|
|||
VOID(pthread_mutex_destroy(&federated_mutex));
|
||||
}
|
||||
federated_init= 0;
|
||||
return FALSE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2614,4 +2633,3 @@ bool ha_federated::get_error_message(int error, String* buf)
|
|||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
#endif /* HAVE_FEDERATED_DB */
|
||||
|
|
|
@ -301,4 +301,4 @@ public:
|
|||
};
|
||||
|
||||
bool federated_db_init(void);
|
||||
bool federated_db_end(void);
|
||||
int federated_db_end(ha_panic_function type);
|
||||
|
|
|
@ -23,6 +23,9 @@
|
|||
#include <myisampack.h>
|
||||
#include "ha_heap.h"
|
||||
|
||||
|
||||
static handler *heap_create_handler(TABLE *table);
|
||||
|
||||
handlerton heap_hton= {
|
||||
"MEMORY",
|
||||
SHOW_OPTION_YES,
|
||||
|
@ -44,9 +47,24 @@ handlerton heap_hton= {
|
|||
NULL, /* create_cursor_read_view */
|
||||
NULL, /* set_cursor_read_view */
|
||||
NULL, /* close_cursor_read_view */
|
||||
heap_create_handler, /* Create a new handler */
|
||||
NULL, /* Drop a database */
|
||||
heap_panic, /* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
NULL, /* Flush logs */
|
||||
NULL, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_CAN_RECREATE
|
||||
};
|
||||
|
||||
static handler *heap_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_heap(table);
|
||||
}
|
||||
|
||||
|
||||
/*****************************************************************************
|
||||
** HEAP tables
|
||||
*****************************************************************************/
|
||||
|
|
sql/ha_innodb.cc (148 changes)
@ -34,7 +34,6 @@ have disables the InnoDB inlining in this file. */
|
|||
#include "mysql_priv.h"
|
||||
#include "slave.h"
|
||||
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#include <m_ctype.h>
|
||||
#include <hash.h>
|
||||
#include <myisampack.h>
|
||||
|
@ -205,6 +204,7 @@ static int innobase_rollback(THD* thd, bool all);
|
|||
static int innobase_rollback_to_savepoint(THD* thd, void *savepoint);
|
||||
static int innobase_savepoint(THD* thd, void *savepoint);
|
||||
static int innobase_release_savepoint(THD* thd, void *savepoint);
|
||||
static handler *innobase_create_handler(TABLE *table);
|
||||
|
||||
handlerton innobase_hton = {
|
||||
"InnoDB",
|
||||
|
@ -227,9 +227,29 @@ handlerton innobase_hton = {
|
|||
innobase_create_cursor_view,
|
||||
innobase_set_cursor_view,
|
||||
innobase_close_cursor_view,
|
||||
innobase_create_handler, /* Create a new handler */
|
||||
innobase_drop_database, /* Drop a database */
|
||||
innobase_end, /* Panic call */
|
||||
innobase_release_temporary_latches, /* Release temporary latches */
|
||||
innodb_export_status, /* Update Statistics */
|
||||
innobase_start_trx_and_assign_read_view, /* Start Consistent Snapshot */
|
||||
innobase_flush_logs, /* Flush logs */
|
||||
innobase_show_status, /* Show status */
|
||||
#ifdef HAVE_REPLICATION
|
||||
innobase_repl_report_sent_binlog, /* Replication Report Sent Binlog */
|
||||
#else
|
||||
NULL,
|
||||
#endif
|
||||
HTON_NO_FLAGS
|
||||
};
|
||||
|
||||
|
||||
static handler *innobase_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_innobase(table);
|
||||
}
|
||||
|
||||
|
||||
/*********************************************************************
|
||||
Commits a transaction in an InnoDB database. */
|
||||
|
||||
|
@ -390,7 +410,7 @@ Call this function when mysqld passes control to the client. That is to
|
|||
avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more
|
||||
documentation, see handler.cc. */
|
||||
|
||||
void
|
||||
int
|
||||
innobase_release_temporary_latches(
|
||||
/*===============================*/
|
||||
THD *thd)
|
||||
|
@ -399,7 +419,7 @@ innobase_release_temporary_latches(
|
|||
|
||||
if (!innodb_inited) {
|
||||
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
trx = (trx_t*) thd->ha_data[innobase_hton.slot];
|
||||
|
@ -407,6 +427,7 @@ innobase_release_temporary_latches(
|
|||
if (trx) {
|
||||
innobase_release_stat_resources(trx);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
|
@ -1430,8 +1451,8 @@ error:
|
|||
/***********************************************************************
|
||||
Closes an InnoDB database. */
|
||||
|
||||
bool
|
||||
innobase_end(void)
|
||||
int
|
||||
innobase_end(ha_panic_function type)
|
||||
/*==============*/
|
||||
/* out: TRUE if error */
|
||||
{
|
||||
|
@ -5051,7 +5072,7 @@ ha_innobase::delete_table(
|
|||
/*********************************************************************
|
||||
Removes all tables in the named database inside InnoDB. */
|
||||
|
||||
int
|
||||
void
|
||||
innobase_drop_database(
|
||||
/*===================*/
|
||||
/* out: error number */
|
||||
|
@ -5117,10 +5138,13 @@ innobase_drop_database(
|
|||
|
||||
innobase_commit_low(trx);
|
||||
trx_free_for_mysql(trx);
|
||||
|
||||
#ifdef NO_LONGER_INTERESTED_IN_DROP_DB_ERROR
|
||||
error = convert_error_code_to_mysql(error, NULL);
|
||||
|
||||
return(error);
|
||||
#else
|
||||
return;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
|
@ -6425,11 +6449,12 @@ ha_innobase::transactional_table_lock(
|
|||
/****************************************************************************
|
||||
Here we export InnoDB status variables to MySQL. */
|
||||
|
||||
void
|
||||
int
|
||||
innodb_export_status(void)
|
||||
/*======================*/
|
||||
{
|
||||
srv_export_innodb_status();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
|
@ -6439,9 +6464,9 @@ Monitor to the client. */
|
|||
bool
|
||||
innodb_show_status(
|
||||
/*===============*/
|
||||
THD* thd) /* in: the MySQL query thread of the caller */
|
||||
THD* thd, /* in: the MySQL query thread of the caller */
|
||||
stat_print_fn *stat_print)
|
||||
{
|
||||
Protocol* protocol = thd->protocol;
|
||||
trx_t* trx;
|
||||
static const char truncated_msg[] = "... truncated...\n";
|
||||
const long MAX_STATUS_SIZE = 64000;
|
||||
|
@ -6451,10 +6476,7 @@ innodb_show_status(
|
|||
DBUG_ENTER("innodb_show_status");
|
||||
|
||||
if (have_innodb != SHOW_OPTION_YES) {
|
||||
my_message(ER_NOT_SUPPORTED_YET,
|
||||
"Cannot call SHOW INNODB STATUS because skip-innodb is defined",
|
||||
MYF(0));
|
||||
DBUG_RETURN(TRUE);
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
trx = check_trx_exists(thd);
|
||||
|
@ -6516,28 +6538,14 @@ innodb_show_status(
|
|||
|
||||
mutex_exit_noninline(&srv_monitor_file_mutex);
|
||||
|
||||
List<Item> field_list;
|
||||
bool result = FALSE;
|
||||
|
||||
field_list.push_back(new Item_empty_string("Status", flen));
|
||||
|
||||
if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
|
||||
Protocol::SEND_EOF)) {
|
||||
my_free(str, MYF(0));
|
||||
|
||||
DBUG_RETURN(TRUE);
|
||||
if (stat_print(thd, innobase_hton.name, "", str)) {
|
||||
result= TRUE;
|
||||
}
|
||||
my_free(str, MYF(0));
|
||||
|
||||
protocol->prepare_for_resend();
|
||||
protocol->store(str, flen, system_charset_info);
|
||||
my_free(str, MYF(0));
|
||||
|
||||
if (protocol->write()) {
|
||||
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
send_eof(thd);
|
||||
|
||||
DBUG_RETURN(FALSE);
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
|
@ -6546,10 +6554,10 @@ Implements the SHOW MUTEX STATUS command. . */
|
|||
bool
|
||||
innodb_mutex_show_status(
|
||||
/*===============*/
|
||||
THD* thd) /* in: the MySQL query thread of the caller */
|
||||
THD* thd, /* in: the MySQL query thread of the caller */
|
||||
stat_print_fn *stat_print)
|
||||
{
|
||||
Protocol *protocol= thd->protocol;
|
||||
List<Item> field_list;
|
||||
char buf1[IO_SIZE], buf2[IO_SIZE];
|
||||
mutex_t* mutex;
|
||||
ulint rw_lock_count= 0;
|
||||
ulint rw_lock_count_spin_loop= 0;
|
||||
|
@ -6559,19 +6567,6 @@ innodb_mutex_show_status(
|
|||
ulonglong rw_lock_wait_time= 0;
|
||||
DBUG_ENTER("innodb_mutex_show_status");
|
||||
|
||||
field_list.push_back(new Item_empty_string("Mutex", FN_REFLEN));
|
||||
field_list.push_back(new Item_empty_string("Module", FN_REFLEN));
|
||||
field_list.push_back(new Item_uint("Count", 21));
|
||||
field_list.push_back(new Item_uint("Spin_waits", 21));
|
||||
field_list.push_back(new Item_uint("Spin_rounds", 21));
|
||||
field_list.push_back(new Item_uint("OS_waits", 21));
|
||||
field_list.push_back(new Item_uint("OS_yields", 21));
|
||||
field_list.push_back(new Item_uint("OS_waits_time", 21));
|
||||
|
||||
if (protocol->send_fields(&field_list,
|
||||
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
#ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER
|
||||
mutex_enter(&mutex_list_mutex);
|
||||
#endif
|
||||
|
@ -6584,17 +6579,16 @@ innodb_mutex_show_status(
|
|||
{
|
||||
if (mutex->count_using > 0)
|
||||
{
|
||||
protocol->prepare_for_resend();
|
||||
protocol->store(mutex->cmutex_name, system_charset_info);
|
||||
protocol->store(mutex->cfile_name, system_charset_info);
|
||||
protocol->store((ulonglong)mutex->count_using);
|
||||
protocol->store((ulonglong)mutex->count_spin_loop);
|
||||
protocol->store((ulonglong)mutex->count_spin_rounds);
|
||||
protocol->store((ulonglong)mutex->count_os_wait);
|
||||
protocol->store((ulonglong)mutex->count_os_yield);
|
||||
protocol->store((ulonglong)mutex->lspent_time/1000);
|
||||
|
||||
if (protocol->write())
|
||||
my_snprintf(buf1, sizeof(buf1), "%s:%s",
|
||||
mutex->cmutex_name, mutex->cfile_name);
|
||||
my_snprintf(buf2, sizeof(buf2),
|
||||
"count=%lu, spin_waits=%lu, spin_rounds=%lu, "
|
||||
"os_waits=%lu, os_yields=%lu, os_wait_times=%lu",
|
||||
mutex->count_using, mutex->count_spin_loop,
|
||||
mutex->count_spin_rounds,
|
||||
mutex->count_os_wait, mutex->count_os_yield,
|
||||
mutex->lspent_time/1000);
|
||||
if (stat_print(thd, innobase_hton.name, buf1, buf2))
|
||||
{
|
||||
#ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER
|
||||
mutex_exit(&mutex_list_mutex);
|
||||
|
@ -6616,17 +6610,15 @@ innodb_mutex_show_status(
|
|||
mutex = UT_LIST_GET_NEXT(list, mutex);
|
||||
}
|
||||
|
||||
protocol->prepare_for_resend();
|
||||
protocol->store("rw_lock_mutexes", system_charset_info);
|
||||
protocol->store("", system_charset_info);
|
||||
protocol->store((ulonglong)rw_lock_count);
|
||||
protocol->store((ulonglong)rw_lock_count_spin_loop);
|
||||
protocol->store((ulonglong)rw_lock_count_spin_rounds);
|
||||
protocol->store((ulonglong)rw_lock_count_os_wait);
|
||||
protocol->store((ulonglong)rw_lock_count_os_yield);
|
||||
protocol->store((ulonglong)rw_lock_wait_time/1000);
|
||||
my_snprintf(buf2, sizeof(buf2),
|
||||
"count=%lu, spin_waits=%lu, spin_rounds=%lu, "
|
||||
"os_waits=%lu, os_yields=%lu, os_wait_times=%lu",
|
||||
rw_lock_count, rw_lock_count_spin_loop,
|
||||
rw_lock_count_spin_rounds,
|
||||
rw_lock_count_os_wait, rw_lock_count_os_yield,
|
||||
rw_lock_wait_time/1000);
|
||||
|
||||
if (protocol->write())
|
||||
if (stat_print(thd, innobase_hton.name, "rw_lock_mutexes", buf2))
|
||||
{
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
@ -6634,10 +6626,23 @@ innodb_mutex_show_status(
|
|||
#ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER
|
||||
mutex_exit(&mutex_list_mutex);
|
||||
#endif
|
||||
send_eof(thd);
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
bool innobase_show_status(THD* thd, stat_print_fn* stat_print,
|
||||
enum ha_stat_type stat_type)
|
||||
{
|
||||
switch (stat_type) {
|
||||
case HA_ENGINE_STATUS:
|
||||
return innodb_show_status(thd, stat_print);
|
||||
case HA_ENGINE_MUTEX:
|
||||
return innodb_mutex_show_status(thd, stat_print);
|
||||
default:
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/****************************************************************************
|
||||
Handling the shared INNOBASE_SHARE structure that is needed to provide table
|
||||
locking.
|
||||
|
@ -7470,4 +7475,3 @@ bool ha_innobase::check_if_incompatible_data(HA_CREATE_INFO *info,
|
|||
return COMPATIBLE_DATA_YES;
|
||||
}
|
||||
|
||||
#endif /* HAVE_INNOBASE_DB */
|
||||
|
|
|
@ -254,7 +254,7 @@ extern ulong srv_commit_concurrency;
|
|||
extern TYPELIB innobase_lock_typelib;
|
||||
|
||||
bool innobase_init(void);
|
||||
bool innobase_end(void);
|
||||
int innobase_end(ha_panic_function type);
|
||||
bool innobase_flush_logs(void);
|
||||
uint innobase_get_free_space(void);
|
||||
|
||||
|
@ -272,12 +272,11 @@ int innobase_commit_complete(void* trx_handle);
|
|||
void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
|
||||
#endif
|
||||
|
||||
int innobase_drop_database(char *path);
|
||||
bool innodb_show_status(THD* thd);
|
||||
bool innodb_mutex_show_status(THD* thd);
|
||||
void innodb_export_status(void);
|
||||
void innobase_drop_database(char *path);
|
||||
bool innobase_show_status(THD* thd, stat_print_fn*, enum ha_stat_type);
|
||||
int innodb_export_status(void);
|
||||
|
||||
void innobase_release_temporary_latches(THD *thd);
|
||||
int innobase_release_temporary_latches(THD *thd);
|
||||
|
||||
void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
|
||||
|
||||
|
|
|
@ -50,6 +50,8 @@ TYPELIB myisam_stats_method_typelib= {
|
|||
** MyISAM tables
|
||||
*****************************************************************************/
|
||||
|
||||
static handler *myisam_create_handler(TABLE *table);
|
||||
|
||||
/* MyISAM handlerton */
|
||||
|
||||
handlerton myisam_hton= {
|
||||
|
@ -77,9 +79,25 @@ handlerton myisam_hton= {
|
|||
MyISAM doesn't support transactions and doesn't have
|
||||
transaction-dependent context: cursors can survive a commit.
|
||||
*/
|
||||
myisam_create_handler, /* Create a new handler */
|
||||
NULL, /* Drop a database */
|
||||
mi_panic,/* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
NULL, /* Flush logs */
|
||||
NULL, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_CAN_RECREATE
|
||||
};
|
||||
|
||||
|
||||
static handler *myisam_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_myisam(table);
|
||||
}
|
||||
|
||||
|
||||
// collect errors printed by mi_check routines
|
||||
|
||||
static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
|
||||
|
|
|
@ -32,6 +32,8 @@
|
|||
** MyISAM MERGE tables
|
||||
*****************************************************************************/
|
||||
|
||||
static handler *myisammrg_create_handler(TABLE *table);
|
||||
|
||||
/* MyISAM MERGE handlerton */
|
||||
|
||||
handlerton myisammrg_hton= {
|
||||
|
@ -55,9 +57,23 @@ handlerton myisammrg_hton= {
|
|||
NULL, /* create_cursor_read_view */
|
||||
NULL, /* set_cursor_read_view */
|
||||
NULL, /* close_cursor_read_view */
|
||||
myisammrg_create_handler, /* Create a new handler */
|
||||
NULL, /* Drop a database */
|
||||
myrg_panic, /* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
NULL, /* Flush logs */
|
||||
NULL, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_CAN_RECREATE
|
||||
};
|
||||
|
||||
static handler *myisammrg_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_myisammrg(table);
|
||||
}
|
||||
|
||||
|
||||
ha_myisammrg::ha_myisammrg(TABLE *table_arg)
|
||||
:handler(&myisammrg_hton, table_arg), file(0)
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
|
||||
#include "mysql_priv.h"
|
||||
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#include <my_dir.h>
|
||||
#include "ha_ndbcluster.h"
|
||||
#include <ndbapi/NdbApi.hpp>
|
||||
|
@ -35,9 +34,14 @@
|
|||
|
||||
// options from from mysqld.cc
|
||||
extern my_bool opt_ndb_optimized_node_selection;
|
||||
extern enum ndb_distribution opt_ndb_distribution_id;
|
||||
extern const char *opt_ndbcluster_connectstring;
|
||||
|
||||
const char *ndb_distribution_names[]= {"KEYHASH", "LINHASH", NullS};
|
||||
TYPELIB ndb_distribution_typelib= { array_elements(ndb_distribution_names)-1,
|
||||
"", ndb_distribution_names, NULL };
|
||||
const char *opt_ndb_distribution= ndb_distribution_names[ND_KEYHASH];
|
||||
enum ndb_distribution opt_ndb_distribution_id= ND_KEYHASH;
|
||||
|
||||
// Default value for parallelism
|
||||
static const int parallelism= 0;
|
||||
|
||||
|
@ -51,6 +55,7 @@ static const char share_prefix[]= "./";
|
|||
static int ndbcluster_close_connection(THD *thd);
|
||||
static int ndbcluster_commit(THD *thd, bool all);
|
||||
static int ndbcluster_rollback(THD *thd, bool all);
|
||||
static handler* ndbcluster_create_handler(TABLE *table);
|
||||
|
||||
handlerton ndbcluster_hton = {
|
||||
"ndbcluster",
|
||||
|
@ -73,9 +78,23 @@ handlerton ndbcluster_hton = {
|
|||
NULL, /* create_cursor_read_view */
|
||||
NULL, /* set_cursor_read_view */
|
||||
NULL, /* close_cursor_read_view */
|
||||
ndbcluster_create_handler, /* Create a new handler */
|
||||
ndbcluster_drop_database, /* Drop a database */
|
||||
ndbcluster_end, /* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
NULL, /* Flush logs */
|
||||
ndbcluster_show_status, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_NO_FLAGS
|
||||
};
|
||||
|
||||
static handler *ndbcluster_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_ndbcluster(table);
|
||||
}
|
||||
|
||||
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
|
||||
|
||||
#define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0
|
||||
|
@ -4629,9 +4648,10 @@ extern "C" byte* tables_get_key(const char *entry, uint *length,
|
|||
|
||||
/*
|
||||
Drop a database in NDB Cluster
|
||||
*/
|
||||
NOTE add a dummy void function, since stupid handlerton is returning void instead of int...
|
||||
*/
|
||||
|
||||
int ndbcluster_drop_database(const char *path)
|
||||
int ndbcluster_drop_database_impl(const char *path)
|
||||
{
|
||||
DBUG_ENTER("ndbcluster_drop_database");
|
||||
THD *thd= current_thd;
|
||||
|
@ -4646,13 +4666,13 @@ int ndbcluster_drop_database(const char *path)
|
|||
DBUG_PRINT("enter", ("db: %s", dbname));
|
||||
|
||||
if (!(ndb= check_ndb_in_thd(thd)))
|
||||
DBUG_RETURN(HA_ERR_NO_CONNECTION);
|
||||
DBUG_RETURN(-1);
|
||||
|
||||
// List tables in NDB
|
||||
NDBDICT *dict= ndb->getDictionary();
|
||||
if (dict->listObjects(list,
|
||||
NdbDictionary::Object::UserTable) != 0)
|
||||
ERR_RETURN(dict->getNdbError());
|
||||
DBUG_RETURN(-1);
|
||||
for (i= 0 ; i < list.count ; i++)
|
||||
{
|
||||
NdbDictionary::Dictionary::List::Element& elmt= list.elements[i];
|
||||
|
@ -4685,6 +4705,10 @@ int ndbcluster_drop_database(const char *path)
|
|||
DBUG_RETURN(ret);
|
||||
}
|
||||
|
||||
void ndbcluster_drop_database(char *path)
|
||||
{
|
||||
ndbcluster_drop_database_impl(path);
|
||||
}
|
||||
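The wrapper just added above exists because the drop_database slot in this revision of handlerton returns void while the NDB implementation reports an error code. A tiny sketch of the same adapter pattern with hypothetical "toy" names, not part of the commit:

#include <cstdio>

static int toy_drop_database_impl(const char *path)
{
  std::printf("dropping all TOY tables under %s\n", path);
  return 0;                                 /* error code kept for callers that want it */
}

static void toy_drop_database(char *path)   /* matches the void slot signature */
{
  (void) toy_drop_database_impl(path);      /* result deliberately discarded */
}

int main()
{
  char path[]= "./test/";
  toy_drop_database(path);
  return 0;
}
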
/*
|
||||
find all tables in ndb and discover those needed
|
||||
*/
|
||||
|
@ -5057,7 +5081,7 @@ ndbcluster_init_error:
|
|||
ndbcluster_init()
|
||||
*/
|
||||
|
||||
bool ndbcluster_end()
|
||||
int ndbcluster_end(ha_panic_function type)
|
||||
{
|
||||
DBUG_ENTER("ndbcluster_end");
|
||||
|
||||
|
@ -7941,29 +7965,21 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack,
|
|||
/*
|
||||
Implements the SHOW NDB STATUS command.
|
||||
*/
|
||||
int
|
||||
ndbcluster_show_status(THD* thd)
|
||||
bool
|
||||
ndbcluster_show_status(THD* thd, stat_print_fn *stat_print,
|
||||
enum ha_stat_type stat_type)
|
||||
{
|
||||
Protocol *protocol= thd->protocol;
|
||||
|
||||
char buf[IO_SIZE];
|
||||
DBUG_ENTER("ndbcluster_show_status");
|
||||
|
||||
if (have_ndbcluster != SHOW_OPTION_YES)
|
||||
{
|
||||
my_message(ER_NOT_SUPPORTED_YET,
|
||||
"Cannot call SHOW NDBCLUSTER STATUS because skip-ndbcluster is defined",
|
||||
MYF(0));
|
||||
DBUG_RETURN(TRUE);
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
if (stat_type != HA_ENGINE_STATUS)
|
||||
{
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
List<Item> field_list;
|
||||
field_list.push_back(new Item_empty_string("free_list", 255));
|
||||
field_list.push_back(new Item_return_int("created", 10,MYSQL_TYPE_LONG));
|
||||
field_list.push_back(new Item_return_int("free", 10,MYSQL_TYPE_LONG));
|
||||
field_list.push_back(new Item_return_int("sizeof", 10,MYSQL_TYPE_LONG));
|
||||
|
||||
if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb)
|
||||
{
|
||||
|
@ -7971,14 +7987,11 @@ ndbcluster_show_status(THD* thd)
|
|||
Ndb::Free_list_usage tmp; tmp.m_name= 0;
|
||||
while (ndb->get_free_list_usage(&tmp))
|
||||
{
|
||||
protocol->prepare_for_resend();
|
||||
|
||||
protocol->store(tmp.m_name, &my_charset_bin);
|
||||
protocol->store((uint)tmp.m_created);
|
||||
protocol->store((uint)tmp.m_free);
|
||||
protocol->store((uint)tmp.m_sizeof);
|
||||
if (protocol->write())
|
||||
DBUG_RETURN(TRUE);
|
||||
my_snprintf(buf, sizeof(buf),
|
||||
"created=%u, free=%u, sizeof=%u",
|
||||
tmp.m_created, tmp.m_free, tmp.m_sizeof);
|
||||
if (stat_print(thd, ndbcluster_hton.name, tmp.m_name, buf))
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
}
|
||||
send_eof(thd);
|
||||
|
@ -8192,4 +8205,3 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
|
|||
return COMPATIBLE_DATA_YES;
|
||||
}
|
||||
|
||||
#endif /* HAVE_NDBCLUSTER_DB */
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
#pragma interface /* gcc class implementation */
|
||||
#endif
|
||||
|
||||
#include <ndbapi/NdbApi.hpp>
|
||||
#include <NdbApi.hpp>
|
||||
#include <ndbapi_limits.h>
|
||||
|
||||
class Ndb; // Forward declaration
|
||||
|
@ -136,7 +136,6 @@ struct negated_function_mapping
|
|||
NDB_FUNC_TYPE neg_fun;
|
||||
};
|
||||
|
||||
enum ndb_distribution { ND_KEYHASH= 0, ND_LINHASH= 1 };
|
||||
|
||||
/*
|
||||
Define what functions can be negated in condition pushdown.
|
||||
|
@ -615,7 +614,7 @@ static void set_tabname(const char *pathname, char *tabname);
|
|||
const char *tabname, bool global);
|
||||
|
||||
private:
|
||||
friend int ndbcluster_drop_database(const char *path);
|
||||
friend int ndbcluster_drop_database_impl(const char *path);
|
||||
int alter_table_name(const char *to);
|
||||
static int delete_table(ha_ndbcluster *h, Ndb *ndb,
|
||||
const char *path,
|
||||
|
@ -772,7 +771,7 @@ private:
|
|||
extern struct show_var_st ndb_status_variables[];
|
||||
|
||||
bool ndbcluster_init(void);
|
||||
bool ndbcluster_end(void);
|
||||
int ndbcluster_end(ha_panic_function flag);
|
||||
|
||||
int ndbcluster_discover(THD* thd, const char* dbname, const char* name,
|
||||
const void** frmblob, uint* frmlen);
|
||||
|
@ -780,9 +779,9 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
|
|||
const char *wild, bool dir, List<char> *files);
|
||||
int ndbcluster_table_exists_in_engine(THD* thd,
|
||||
const char *db, const char *name);
|
||||
int ndbcluster_drop_database(const char* path);
|
||||
void ndbcluster_drop_database(char* path);
|
||||
|
||||
void ndbcluster_print_error(int error, const NdbOperation *error_op);
|
||||
|
||||
int ndbcluster_show_status(THD*);
|
||||
bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type);
|
||||
|
||||
|
|
|
@ -54,7 +54,6 @@
|
|||
|
||||
#include <mysql_priv.h>
|
||||
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
#include "ha_partition.h"
|
||||
|
||||
static const char *ha_par_ext= ".par";
|
||||
|
@ -67,12 +66,14 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
|
|||
MODULE create/delete handler object
|
||||
****************************************************************************/
|
||||
|
||||
static handler* partition_create_handler(TABLE *table);
|
||||
|
||||
handlerton partition_hton = {
|
||||
"partition",
|
||||
SHOW_OPTION_YES,
|
||||
"Partition engine", /* A comment used by SHOW to describe an engine */
|
||||
"Partition Storage Engine Helper", /* A comment used by SHOW to describe an engine */
|
||||
DB_TYPE_PARTITION_DB,
|
||||
0, /* Method that initizlizes a storage engine */
|
||||
0, /* Method that initializes a storage engine */
|
||||
0, /* slot */
|
||||
0, /* savepoint size */
|
||||
NULL /*ndbcluster_close_connection*/,
|
||||
|
@ -88,9 +89,23 @@ handlerton partition_hton = {
|
|||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
HTON_NO_FLAGS
|
||||
partition_create_handler, /* Create a new handler */
|
||||
NULL, /* Drop a database */
|
||||
NULL, /* Panic call */
|
||||
NULL, /* Release temporary latches */
|
||||
NULL, /* Update Statistics */
|
||||
NULL, /* Start Consistent Snapshot */
|
||||
NULL, /* Flush logs */
|
||||
NULL, /* Show status */
|
||||
NULL, /* Replication Report Sent Binlog */
|
||||
HTON_NOT_USER_SELECTABLE
|
||||
};
|
||||
|
||||
static handler* partition_create_handler(TABLE *table)
|
||||
{
|
||||
return new ha_partition(table);
|
||||
}
|
||||
|
||||
ha_partition::ha_partition(TABLE *table)
|
||||
:handler(&partition_hton, table), m_part_info(NULL), m_create_handler(FALSE),
|
||||
m_is_sub_partitioned(0)
|
||||
|
@ -947,6 +962,8 @@ int ha_partition::close(void)
|
|||
{
|
||||
handler **file;
|
||||
DBUG_ENTER("ha_partition::close");
|
||||
|
||||
delete_queue(&queue);
|
||||
file= m_file;
|
||||
do
|
||||
{
|
||||
|
@ -3252,4 +3269,3 @@ static int free_share(PARTITION_SHARE *share)
|
|||
return 0;
|
||||
}
|
||||
#endif /* NOT_USED */
|
||||
#endif /* HAVE_PARTITION_DB */
|
||||
|
|
sql/handler.cc (445 changes)
@ -27,112 +27,19 @@
|
|||
#include "ha_myisammrg.h"
|
||||
|
||||
|
||||
/*
|
||||
We have dummy hanldertons in case the handler has not been compiled
|
||||
in. This will be removed in 5.1.
|
||||
*/
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#include "ha_berkeley.h"
|
||||
extern handlerton berkeley_hton;
|
||||
#else
|
||||
handlerton berkeley_hton = { "BerkeleyDB", SHOW_OPTION_NO,
|
||||
"Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB, NULL,
|
||||
0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_BLACKHOLE_DB
|
||||
#include "ha_blackhole.h"
|
||||
extern handlerton blackhole_hton;
|
||||
#else
|
||||
handlerton blackhole_hton = { "BLACKHOLE", SHOW_OPTION_NO,
|
||||
"/dev/null storage engine (anything you write to it disappears)",
|
||||
DB_TYPE_BLACKHOLE_DB, NULL, 0, 0, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_EXAMPLE_DB
|
||||
#include "examples/ha_example.h"
|
||||
extern handlerton example_hton;
|
||||
#else
|
||||
handlerton example_hton = { "EXAMPLE", SHOW_OPTION_NO,
|
||||
"Example storage engine",
|
||||
DB_TYPE_EXAMPLE_DB, NULL, 0, 0, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
#include "ha_partition.h"
|
||||
extern handlerton partition_hton;
|
||||
#else
|
||||
handlerton partition_hton = { "partition", SHOW_OPTION_NO,
|
||||
"Partition engine",
|
||||
DB_TYPE_EXAMPLE_DB, NULL, 0, 0, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_ARCHIVE_DB
|
||||
#include "ha_archive.h"
|
||||
extern handlerton archive_hton;
|
||||
#else
|
||||
handlerton archive_hton = { "ARCHIVE", SHOW_OPTION_NO,
|
||||
"Archive storage engine", DB_TYPE_ARCHIVE_DB, NULL, 0, 0, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_CSV_DB
|
||||
#include "examples/ha_tina.h"
|
||||
extern handlerton tina_hton;
|
||||
#else
|
||||
handlerton tina_hton = { "CSV", SHOW_OPTION_NO, "CSV storage engine",
|
||||
DB_TYPE_CSV_DB, NULL, 0, 0, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#include "ha_innodb.h"
|
||||
extern handlerton innobase_hton;
|
||||
#else
|
||||
handlerton innobase_hton = { "InnoDB", SHOW_OPTION_NO,
|
||||
"Supports transactions, row-level locking, and foreign keys",
|
||||
DB_TYPE_INNODB, NULL, 0, 0, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#include "ha_ndbcluster.h"
|
||||
extern handlerton ndbcluster_hton;
|
||||
#else
|
||||
handlerton ndbcluster_hton = { "ndbcluster", SHOW_OPTION_NO,
|
||||
"Clustered, fault-tolerant, memory-based tables",
|
||||
DB_TYPE_NDBCLUSTER, NULL, 0, 0, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#ifdef HAVE_FEDERATED_DB
|
||||
#include "ha_federated.h"
|
||||
extern handlerton federated_hton;
|
||||
#else
|
||||
handlerton federated_hton = { "FEDERATED", SHOW_OPTION_NO,
|
||||
"Federated MySQL storage engine", DB_TYPE_FEDERATED_DB, NULL, 0, 0, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
HTON_NO_FLAGS };
|
||||
#endif
|
||||
#include <myisampack.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
#define NDB_MAX_ATTRIBUTES_IN_TABLE 128
|
||||
#include "ha_ndbcluster.h"
|
||||
#endif
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
#include "ha_partition.h"
|
||||
#endif
|
||||
|
||||
extern handlerton myisam_hton;
|
||||
extern handlerton myisammrg_hton;
|
||||
extern handlerton heap_hton;
|
||||
extern handlerton binlog_hton;
|
||||
|
||||
/*
|
||||
Obsolete
|
||||
*/
|
||||
handlerton isam_hton = { "ISAM", SHOW_OPTION_NO, "Obsolete storage engine",
|
||||
DB_TYPE_ISAM, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, HTON_NO_FLAGS };
|
||||
|
||||
|
||||
extern handlerton *sys_table_types[];
|
||||
|
||||
/* static functions defined in this file */
|
||||
|
||||
static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES;
|
||||
|
@ -144,28 +51,6 @@ ulong total_ha_2pc;
|
|||
/* size of savepoint storage area (see ha_init) */
|
||||
ulong savepoint_alloc_size;
|
||||
|
||||
/*
|
||||
This array is used for processing compiled in engines.
|
||||
*/
|
||||
handlerton *sys_table_types[]=
|
||||
{
|
||||
&myisam_hton,
|
||||
&heap_hton,
|
||||
&innobase_hton,
|
||||
&berkeley_hton,
|
||||
&blackhole_hton,
|
||||
&example_hton,
|
||||
&archive_hton,
|
||||
&tina_hton,
|
||||
&ndbcluster_hton,
|
||||
&federated_hton,
|
||||
&myisammrg_hton,
|
||||
&binlog_hton,
|
||||
&isam_hton,
|
||||
&partition_hton,
|
||||
NULL
|
||||
};
|
||||
|
||||
struct show_table_alias_st sys_table_aliases[]=
|
||||
{
|
||||
{"INNOBASE", "InnoDB"},
|
||||
|
@ -203,9 +88,11 @@ enum db_type ha_resolve_by_name(const char *name, uint namelen)
|
|||
retest:
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if (!my_strnncoll(&my_charset_latin1,
|
||||
(const uchar *)name, namelen,
|
||||
(const uchar *)(*types)->name, strlen((*types)->name)))
|
||||
if ((!my_strnncoll(&my_charset_latin1,
|
||||
(const uchar *)name, namelen,
|
||||
(const uchar *)(*types)->name,
|
||||
strlen((*types)->name))) &&
|
||||
!((*types)->flags & HTON_NOT_USER_SELECTABLE))
|
||||
return (enum db_type) (*types)->db_type;
|
||||
}
|
||||
|
||||
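The extra condition added to ha_resolve_by_name() above is what keeps internal-only engines from being picked with ENGINE=name: the partition helper, for example, now carries HTON_NOT_USER_SELECTABLE. A simplified stand-alone model of that check, not part of the commit; the flag value, db_type numbers and engine table are made up, and the real server also does a charset-aware comparison and an alias pass that are omitted here:

#include <cstdio>
#include <cstring>

static const unsigned HTON_NOT_USER_SELECTABLE_MODEL= 1U << 0;

struct engine_entry { const char *name; unsigned flags; int db_type; };

static const engine_entry engines[]=
{
  { "MyISAM",    0,                              1 },
  { "partition", HTON_NOT_USER_SELECTABLE_MODEL, 2 },  /* helper, hidden from ENGINE= */
};

static int resolve_by_name(const char *name)
{
  for (unsigned i= 0; i < sizeof(engines) / sizeof(engines[0]); i++)
    if (std::strcmp(name, engines[i].name) == 0 &&
        !(engines[i].flags & HTON_NOT_USER_SELECTABLE_MODEL))
      return engines[i].db_type;
  return 0;                                            /* unknown engine */
}

int main()
{
  std::printf("MyISAM -> %d, partition -> %d\n",
              resolve_by_name("MyISAM"), resolve_by_name("partition"));
  return 0;
}
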
|
@ -258,9 +145,8 @@ my_bool ha_storage_engine_is_enabled(enum db_type database_type)
|
|||
handlerton **types;
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((database_type == (*types)->db_type) &&
|
||||
((*types)->state == SHOW_OPTION_YES))
|
||||
return TRUE;
|
||||
if (database_type == (*types)->db_type)
|
||||
return ((*types)->state == SHOW_OPTION_YES) ? TRUE : FALSE;
|
||||
}
|
||||
return FALSE;
|
||||
}
|
||||
|
@ -273,13 +159,6 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type,
|
|||
{
|
||||
if (ha_storage_engine_is_enabled(database_type))
|
||||
return database_type;
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
/*
|
||||
Partition handler is not in the list of handlers shown since it is an internal handler
|
||||
*/
|
||||
if (database_type == DB_TYPE_PARTITION_DB)
|
||||
return database_type;
|
||||
#endif
|
||||
if (no_substitute)
|
||||
{
|
||||
if (report_error)
|
||||
|
@ -312,81 +191,33 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type,
|
|||
|
||||
handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type)
|
||||
{
|
||||
handler *file;
|
||||
switch (db_type) {
|
||||
#ifndef NO_HASH
|
||||
case DB_TYPE_HASH:
|
||||
file= new (alloc) ha_hash(table);
|
||||
break;
|
||||
#endif
|
||||
case DB_TYPE_MRG_ISAM:
|
||||
file= new (alloc) ha_myisammrg(table);
|
||||
break;
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
case DB_TYPE_BERKELEY_DB:
|
||||
file= new (alloc) ha_berkeley(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
case DB_TYPE_INNODB:
|
||||
file= new (alloc) ha_innobase(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_EXAMPLE_DB
|
||||
case DB_TYPE_EXAMPLE_DB:
|
||||
file= new ha_example(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
case DB_TYPE_PARTITION_DB:
|
||||
handler *file= NULL;
|
||||
handlerton **types;
|
||||
/*
|
||||
handlers are allocated with new in the handlerton create() function
|
||||
we need to set the thd mem_root for these to be allocated correctly
|
||||
*/
|
||||
THD *thd= current_thd;
|
||||
MEM_ROOT *thd_save_mem_root= thd->mem_root;
|
||||
thd->mem_root= alloc;
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
file= new (alloc) ha_partition(table);
|
||||
break;
|
||||
if (db_type == (*types)->db_type && (*types)->create)
|
||||
{
|
||||
file= ((*types)->state == SHOW_OPTION_YES) ?
|
||||
(*types)->create(table) : NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#ifdef HAVE_ARCHIVE_DB
|
||||
case DB_TYPE_ARCHIVE_DB:
|
||||
file= new (alloc) ha_archive(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_BLACKHOLE_DB
|
||||
case DB_TYPE_BLACKHOLE_DB:
|
||||
file= new (alloc) ha_blackhole(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_FEDERATED_DB
|
||||
case DB_TYPE_FEDERATED_DB:
|
||||
file= new (alloc) ha_federated(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_CSV_DB
|
||||
case DB_TYPE_CSV_DB:
|
||||
file= new (alloc) ha_tina(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
case DB_TYPE_NDBCLUSTER:
|
||||
file= new (alloc) ha_ndbcluster(table);
|
||||
break;
|
||||
#endif
|
||||
case DB_TYPE_HEAP:
|
||||
file= new (alloc) ha_heap(table);
|
||||
break;
|
||||
default: // should never happen
|
||||
thd->mem_root= thd_save_mem_root;
|
||||
|
||||
if (!file)
|
||||
{
|
||||
enum db_type def=(enum db_type) current_thd->variables.table_type;
|
||||
/* Try first with 'default table type' */
|
||||
if (db_type != def)
|
||||
return get_new_handler(table, alloc, def);
|
||||
}
|
||||
/* Fall back to MyISAM */
|
||||
case DB_TYPE_MYISAM:
|
||||
file= new (alloc) ha_myisam(table);
|
||||
break;
|
||||
case DB_TYPE_MRG_MYISAM:
|
||||
file= new (alloc) ha_myisammrg(table);
|
||||
break;
|
||||
}
|
||||
if (file)
|
||||
{
|
||||
if (file->ha_initialise())
|
||||
|
@ -399,7 +230,7 @@ handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type)
|
|||
}
|
||||
|
||||
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
handler *get_ha_partition(partition_info *part_info)
|
||||
{
|
||||
ha_partition *partition;
|
||||
|
@ -557,40 +388,13 @@ int ha_init()
|
|||
int ha_panic(enum ha_panic_function flag)
|
||||
{
|
||||
int error=0;
|
||||
#ifndef NO_HASH
|
||||
error|=h_panic(flag); /* fix hash */
|
||||
#endif
|
||||
#ifdef HAVE_ISAM
|
||||
error|=mrg_panic(flag);
|
||||
error|=nisam_panic(flag);
|
||||
#endif
|
||||
error|=heap_panic(flag);
|
||||
error|=mi_panic(flag);
|
||||
error|=myrg_panic(flag);
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
if (have_berkeley_db == SHOW_OPTION_YES)
|
||||
error|=berkeley_end();
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
if (have_innodb == SHOW_OPTION_YES)
|
||||
error|=innobase_end();
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
if (have_ndbcluster == SHOW_OPTION_YES)
|
||||
error|=ndbcluster_end();
|
||||
#endif
|
||||
#ifdef HAVE_FEDERATED_DB
|
||||
if (have_federated_db == SHOW_OPTION_YES)
|
||||
error|= federated_db_end();
|
||||
#endif
|
||||
#ifdef HAVE_ARCHIVE_DB
|
||||
if (have_archive_db == SHOW_OPTION_YES)
|
||||
error|= archive_db_end();
|
||||
#endif
|
||||
#ifdef HAVE_CSV_DB
|
||||
if (have_csv_db == SHOW_OPTION_YES)
|
||||
error|= tina_end();
|
||||
#endif
|
||||
handlerton **types;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES && (*types)->panic)
|
||||
error|= (*types)->panic(flag);
|
||||
}
|
||||
if (ha_finish_errors())
|
||||
error= 1;
|
||||
return error;
|
||||
|
@ -598,14 +402,13 @@ int ha_panic(enum ha_panic_function flag)
|
|||
|
||||
void ha_drop_database(char* path)
|
||||
{
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
if (have_innodb == SHOW_OPTION_YES)
|
||||
innobase_drop_database(path);
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
if (have_ndbcluster == SHOW_OPTION_YES)
|
||||
ndbcluster_drop_database(path);
|
||||
#endif
|
||||
handlerton **types;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES && (*types)->drop_database)
|
||||
(*types)->drop_database(path);
|
||||
}
|
||||
}
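ha_panic() and ha_drop_database() now walk sys_table_types instead of naming engines; an engine opts in simply by filling the corresponding handlerton slots. A sketch of those engine-side entry points, with illustrative names but the slot signatures taken from handler.h in this change:

/* candidates for handlerton::panic and handlerton::drop_database */
static int example_panic(enum ha_panic_function flag)
{
  /* flush and close any engine-global state; return 0 on success */
  return 0;
}

static void example_drop_database(char *path)
{
  /* remove engine-private files that live under 'path' */
}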
|
||||
|
||||
/* don't bother to rollback here, it's done already */
|
||||
|
@ -613,7 +416,8 @@ void ha_close_connection(THD* thd)
|
|||
{
|
||||
handlerton **types;
|
||||
for (types= sys_table_types; *types; types++)
|
||||
if (thd->ha_data[(*types)->slot])
|
||||
/* XXX Maybe do a rollback if close_connection == NULL ? */
|
||||
if (thd->ha_data[(*types)->slot] && (*types)->close_connection)
|
||||
(*types)->close_connection(thd);
|
||||
}
|
||||
|
||||
|
@ -1190,10 +994,14 @@ bool mysql_xa_recover(THD *thd)
|
|||
|
||||
int ha_release_temporary_latches(THD *thd)
|
||||
{
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
if (opt_innodb)
|
||||
innobase_release_temporary_latches(thd);
|
||||
#endif
|
||||
handlerton **types;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES &&
|
||||
(*types)->release_temporary_latches)
|
||||
(*types)->release_temporary_latches(thd);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1205,10 +1013,13 @@ int ha_release_temporary_latches(THD *thd)
|
|||
|
||||
int ha_update_statistics()
|
||||
{
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
if (opt_innodb)
|
||||
innodb_export_status();
|
||||
#endif
|
||||
handlerton **types;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES && (*types)->update_statistics)
|
||||
(*types)->update_statistics();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1317,35 +1128,45 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
|
|||
|
||||
int ha_start_consistent_snapshot(THD *thd)
|
||||
{
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
if ((have_innodb == SHOW_OPTION_YES) &&
|
||||
!innobase_start_trx_and_assign_read_view(thd))
|
||||
return 0;
|
||||
#endif
|
||||
bool warn= true;
|
||||
handlerton **types;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES &&
|
||||
(*types)->start_consistent_snapshot)
|
||||
{
|
||||
(*types)->start_consistent_snapshot(thd);
|
||||
warn= false; /* hope user is using engine */
|
||||
}
|
||||
}
|
||||
/*
|
||||
Same idea as when one wants to CREATE TABLE in one engine which does not
|
||||
exist:
|
||||
*/
|
||||
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
|
||||
"This MySQL server does not support any "
|
||||
"consistent-read capable storage engine");
|
||||
if (warn)
|
||||
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
|
||||
"This MySQL server does not support any "
|
||||
"consistent-read capable storage engine");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
bool ha_flush_logs()
|
||||
bool ha_flush_logs(enum db_type db_type)
|
||||
{
|
||||
bool result=0;
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
if ((have_berkeley_db == SHOW_OPTION_YES) &&
|
||||
berkeley_flush_logs())
|
||||
result=1;
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
if ((have_innodb == SHOW_OPTION_YES) &&
|
||||
innobase_flush_logs())
|
||||
result=1;
|
||||
#endif
|
||||
handlerton **types;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES &&
|
||||
(db_type == DB_TYPE_DEFAULT || db_type == (*types)->db_type) &&
|
||||
(*types)->flush_logs)
|
||||
{
|
||||
if ((*types)->flush_logs())
|
||||
result= 1;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
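ha_flush_logs() now takes an optional db_type (defaulting to DB_TYPE_DEFAULT), so a caller can flush one engine's logs or all of them with the same call. A small usage sketch, assuming the new signature from handler.h:

static void flush_logs_example(void)
{
  /* flush every enabled engine that implements flush_logs */
  bool any_failed= ha_flush_logs();

  /* or restrict the flush to a single engine, picked by db_type */
  bool bdb_failed= ha_flush_logs(DB_TYPE_BERKELEY_DB);

  (void) any_failed; (void) bdb_failed;
}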
|
||||
|
||||
|
@ -2326,7 +2147,7 @@ int ha_discover(THD *thd, const char *db, const char *name,
|
|||
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
|
||||
if (is_prefix(name,tmp_file_prefix)) /* skip temporary tables */
|
||||
DBUG_RETURN(error);
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
if (have_ndbcluster == SHOW_OPTION_YES)
|
||||
error= ndbcluster_discover(thd, db, name, frmblob, frmlen);
|
||||
#endif
|
||||
|
@ -2350,7 +2171,7 @@ ha_find_files(THD *thd,const char *db,const char *path,
|
|||
DBUG_ENTER("ha_find_files");
|
||||
DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d",
|
||||
db, path, wild, dir));
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
if (have_ndbcluster == SHOW_OPTION_YES)
|
||||
error= ndbcluster_find_files(thd, db, path, wild, dir, files);
|
||||
#endif
|
||||
|
@ -2372,7 +2193,7 @@ int ha_table_exists_in_engine(THD* thd, const char* db, const char* name)
|
|||
int error= 0;
|
||||
DBUG_ENTER("ha_table_exists_in_engine");
|
||||
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
if (have_ndbcluster == SHOW_OPTION_YES)
|
||||
error= ndbcluster_table_exists_in_engine(thd, db, name);
|
||||
#endif
|
||||
|
@ -2699,6 +2520,54 @@ TYPELIB *ha_known_exts(void)
|
|||
return &known_extensions;
|
||||
}
|
||||
|
||||
static bool stat_print(THD *thd, const char *type, const char *file,
|
||||
const char *status)
|
||||
{
|
||||
Protocol *protocol= thd->protocol;
|
||||
protocol->prepare_for_resend();
|
||||
protocol->store(type, system_charset_info);
|
||||
protocol->store(file, system_charset_info);
|
||||
protocol->store(status, system_charset_info);
|
||||
if (protocol->write())
|
||||
return TRUE;
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
bool ha_show_status(THD *thd, enum db_type db_type, enum ha_stat_type stat)
|
||||
{
|
||||
handlerton **types;
|
||||
List<Item> field_list;
|
||||
Protocol *protocol= thd->protocol;
|
||||
|
||||
field_list.push_back(new Item_empty_string("Type",10));
|
||||
field_list.push_back(new Item_empty_string("Name",FN_REFLEN));
|
||||
field_list.push_back(new Item_empty_string("Status",10));
|
||||
|
||||
if (protocol->send_fields(&field_list,
|
||||
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
|
||||
return TRUE;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES &&
|
||||
(db_type == DB_TYPE_DEFAULT || db_type == (*types)->db_type) &&
|
||||
(*types)->show_status)
|
||||
{
|
||||
if ((*types)->show_status(thd, stat_print, stat))
|
||||
return TRUE;
|
||||
}
|
||||
else if (db_type == (*types)->db_type &&
|
||||
(*types)->state != SHOW_OPTION_YES)
|
||||
{
|
||||
if (stat_print(thd, (*types)->name, "", "DISABLED"))
|
||||
return TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
send_eof(thd);
|
||||
return FALSE;
|
||||
}
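ha_show_status() hands each engine's show_status callback the stat_print function above, so SHOW ENGINE output is produced row by row without the engine touching the Protocol directly. A sketch of an engine-side callback, with an illustrative name but the stat_print_fn and ha_stat_type signatures from handler.h:

static bool example_show_status(THD *thd, stat_print_fn *print,
                                enum ha_stat_type stat)
{
  if (stat != HA_ENGINE_STATUS)
    return FALSE;                      /* nothing to report for LOGS/MUTEX */
  /* emit one row: Type, Name, Status */
  return print(thd, "EXAMPLE", "", "up and running");
}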
|
||||
|
||||
|
||||
#ifdef HAVE_REPLICATION
|
||||
/*
|
||||
|
@ -2722,11 +2591,19 @@ TYPELIB *ha_known_exts(void)
|
|||
int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,
|
||||
my_off_t end_offset)
|
||||
{
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
return innobase_repl_report_sent_binlog(thd,log_file_name,end_offset);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
int result= 0;
|
||||
handlerton **types;
|
||||
|
||||
for (types= sys_table_types; *types; types++)
|
||||
{
|
||||
if ((*types)->state == SHOW_OPTION_YES &&
|
||||
(*types)->repl_report_sent_binlog)
|
||||
{
|
||||
(*types)->repl_report_sent_binlog(thd,log_file_name,end_offset);
|
||||
result= 0;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -28,10 +28,7 @@
|
|||
#define NO_HASH /* Not yet implemented */
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_BERKELEY_DB) || defined(HAVE_INNOBASE_DB) || \
|
||||
defined(HAVE_NDBCLUSTER_DB)
|
||||
#define USING_TRANSACTIONS
|
||||
#endif
|
||||
|
||||
// the following is for checking tables
|
||||
|
||||
|
@ -191,6 +188,7 @@ enum db_type
|
|||
DB_TYPE_FEDERATED_DB,
|
||||
DB_TYPE_BLACKHOLE_DB,
|
||||
DB_TYPE_PARTITION_DB,
|
||||
DB_TYPE_BINLOG,
|
||||
DB_TYPE_DEFAULT // Must be last
|
||||
};
|
||||
|
||||
|
@ -308,6 +306,16 @@ typedef struct xid_t XID;
|
|||
#define MAX_XID_LIST_SIZE (1024*128)
|
||||
#endif
|
||||
|
||||
/* The handler for a table type. Will be included in the TABLE structure */
|
||||
|
||||
struct st_table;
|
||||
typedef struct st_table TABLE;
|
||||
struct st_foreign_key_info;
|
||||
typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
|
||||
typedef bool (stat_print_fn)(THD *thd, const char *type, const char *file,
|
||||
const char *status);
|
||||
enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX };
|
||||
|
||||
/*
|
||||
handlerton is a singleton structure - one instance per storage engine -
|
||||
to provide access to storage engine functionality that works on the
|
||||
|
@ -402,6 +410,16 @@ typedef struct
|
|||
void *(*create_cursor_read_view)();
|
||||
void (*set_cursor_read_view)(void *);
|
||||
void (*close_cursor_read_view)(void *);
|
||||
handler *(*create)(TABLE *table);
|
||||
void (*drop_database)(char* path);
|
||||
int (*panic)(enum ha_panic_function flag);
|
||||
int (*release_temporary_latches)(THD *thd);
|
||||
int (*update_statistics)();
|
||||
int (*start_consistent_snapshot)(THD *thd);
|
||||
bool (*flush_logs)();
|
||||
bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
|
||||
int (*repl_report_sent_binlog)(THD *thd, char *log_file_name,
|
||||
my_off_t end_offset);
|
||||
uint32 flags; /* global handler flags */
|
||||
} handlerton;
|
||||
|
||||
|
@ -415,6 +433,8 @@ struct show_table_alias_st {
|
|||
#define HTON_CLOSE_CURSORS_AT_COMMIT (1 << 0)
|
||||
#define HTON_ALTER_NOT_SUPPORTED (1 << 1)
|
||||
#define HTON_CAN_RECREATE (1 << 2)
|
||||
#define HTON_FLUSH_AFTER_RENAME (1 << 3)
|
||||
#define HTON_NOT_USER_SELECTABLE (1 << 4)
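The flags member at the end of handlerton is a plain bitmask of these HTON_* values; an engine ORs together whatever applies to it. An illustrative combination only:

static const uint32 example_hton_flags=
  HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME;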
|
||||
|
||||
typedef struct st_thd_trans
|
||||
{
|
||||
|
@ -430,6 +450,8 @@ enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
|
|||
ISO_REPEATABLE_READ, ISO_SERIALIZABLE};
|
||||
|
||||
|
||||
enum ndb_distribution { ND_KEYHASH= 0, ND_LINHASH= 1 };
|
||||
|
||||
typedef struct {
|
||||
uint32 start_part;
|
||||
uint32 end_part;
|
||||
|
@ -608,7 +630,7 @@ public:
|
|||
};
|
||||
|
||||
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
/*
|
||||
Answers the question if subpartitioning is used for a certain table
|
||||
SYNOPSIS
|
||||
|
@ -670,12 +692,6 @@ typedef struct st_ha_create_information
|
|||
} HA_CREATE_INFO;
|
||||
|
||||
|
||||
/* The handler for a table type. Will be included in the TABLE structure */
|
||||
|
||||
struct st_table;
|
||||
typedef struct st_table TABLE;
|
||||
struct st_foreign_key_info;
|
||||
typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
|
||||
|
||||
typedef struct st_savepoint SAVEPOINT;
|
||||
extern ulong savepoint_alloc_size;
|
||||
|
@ -693,7 +709,7 @@ typedef struct st_ha_check_opt
|
|||
} HA_CHECK_OPT;
|
||||
|
||||
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
bool is_partition_in_list(char *part_name, List<char> list_part_names);
|
||||
bool is_partitions_in_table(partition_info *new_part_info,
|
||||
partition_info *old_part_info);
|
||||
|
@ -743,7 +759,7 @@ typedef struct st_handler_buffer
|
|||
|
||||
class handler :public Sql_alloc
|
||||
{
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
friend class ha_partition;
|
||||
#endif
|
||||
protected:
|
||||
|
@ -1246,7 +1262,7 @@ public:
|
|||
virtual const char **bas_ext() const =0;
|
||||
virtual ulong table_flags(void) const =0;
|
||||
virtual ulong alter_table_flags(void) const { return 0; }
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
virtual ulong partition_flags(void) const { return 0;}
|
||||
virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
|
||||
#endif
|
||||
|
@ -1402,13 +1418,16 @@ int ha_panic(enum ha_panic_function flag);
|
|||
int ha_update_statistics();
|
||||
void ha_close_connection(THD* thd);
|
||||
my_bool ha_storage_engine_is_enabled(enum db_type database_type);
|
||||
bool ha_flush_logs(void);
|
||||
bool ha_flush_logs(enum db_type db_type=DB_TYPE_DEFAULT);
|
||||
void ha_drop_database(char* path);
|
||||
int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
|
||||
bool update_create_info);
|
||||
int ha_delete_table(THD *thd, enum db_type db_type, const char *path,
|
||||
const char *alias, bool generate_warning);
|
||||
|
||||
/* statistics and info */
|
||||
bool ha_show_status(THD *thd, enum db_type db_type, enum ha_stat_type stat);
|
||||
|
||||
/* discovery */
|
||||
int ha_create_table_from_engine(THD* thd, const char *db, const char *name);
|
||||
int ha_discover(THD* thd, const char* dbname, const char* name,
|
||||
|
sql/handlerton.cc.in  (new file, 14 lines)

@ -0,0 +1,14 @@
#include "mysql_priv.h"

extern handlerton heap_hton,myisam_hton,myisammrg_hton,
       binlog_hton@mysql_se_decls@;

/*
  This array is used for processing compiled in engines.
*/
handlerton *sys_table_types[]=
{
  &heap_hton,&myisam_hton@mysql_se_htons@,&myisammrg_hton,&binlog_hton,NULL
};
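handlerton.cc.in is filtered by configure: @mysql_se_decls@ and @mysql_se_htons@ expand to the extern declarations and array entries for whichever WITH_*_STORAGE_ENGINE options were selected. A purely illustrative expansion for a build with InnoDB and ARCHIVE compiled in; the real output depends on the configure run:

extern handlerton heap_hton,myisam_hton,myisammrg_hton,
       binlog_hton,innobase_hton,archive_hton;

handlerton *sys_table_types[]=
{
  &heap_hton,&myisam_hton,&innobase_hton,&archive_hton,&myisammrg_hton,
  &binlog_hton,NULL
};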
@ -373,9 +373,7 @@ static SYMBOL symbols[] = {
|
|||
{ "PACK_KEYS", SYM(PACK_KEYS_SYM)},
|
||||
{ "PARSER", SYM(PARSER_SYM)},
|
||||
{ "PARTIAL", SYM(PARTIAL)},
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
{ "PARTITION", SYM(PARTITION_SYM)},
|
||||
#endif
|
||||
{ "PARTITIONS", SYM(PARTITIONS_SYM)},
|
||||
{ "PASSWORD", SYM(PASSWORD)},
|
||||
{ "PHASE", SYM(PHASE_SYM)},
|
||||
|
sql/log.cc  (14 changed lines)

@ -51,7 +51,7 @@ handlerton binlog_hton = {
  "binlog",
  SHOW_OPTION_YES,
  "This is a meta storage engine to represent the binlog in a transaction",
  DB_TYPE_UNKNOWN, /* IGNORE for now */
  DB_TYPE_BINLOG, /* IGNORE for now */
  binlog_init,
  0,
  sizeof(my_off_t), /* savepoint size = binlog offset */

@ -68,9 +68,19 @@ handlerton binlog_hton = {
  NULL, /* create_cursor_read_view */
  NULL, /* set_cursor_read_view */
  NULL, /* close_cursor_read_view */
  HTON_NO_FLAGS
  NULL, /* Create a new handler */
  NULL, /* Drop a database */
  NULL, /* Panic call */
  NULL, /* Release temporary latches */
  NULL, /* Update Statistics */
  NULL, /* Start Consistent Snapshot */
  NULL, /* Flush logs */
  NULL, /* Show status */
  NULL, /* Replication Report Sent Binlog */
  HTON_NOT_USER_SELECTABLE
};
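Giving the binlog pseudo-engine a real db_type plus HTON_NOT_USER_SELECTABLE lets it sit in sys_table_types without ever being resolvable from user DDL. A sketch of the kind of filter a type resolver such as ha_checktype can apply; illustrative, not the literal code:

for (handlerton **types= sys_table_types; *types; types++)
{
  if ((*types)->flags & HTON_NOT_USER_SELECTABLE)
    continue;                          /* e.g. the binlog meta engine */
  /* ... normal db_type / name matching continues here ... */
}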
|
||||
|
||||
|
||||
/*
|
||||
this function is mostly a placeholder.
|
||||
conceptually, binlog initialization (now mostly done in MYSQL_LOG::open)
|
||||
|
|
|
@ -179,11 +179,6 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
|
|||
#define FLUSH_TIME 0 /* Don't flush tables */
|
||||
#define MAX_CONNECT_ERRORS 10 // errors before disabling host
|
||||
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#define IF_INNOBASE_DB(A, B) (A)
|
||||
#else
|
||||
#define IF_INNOBASE_DB(A, B) (B)
|
||||
#endif
|
||||
#ifdef __NETWARE__
|
||||
#define IF_NETWARE(A,B) (A)
|
||||
#else
|
||||
|
@ -1064,6 +1059,8 @@ extern ulong volatile manager_status;
|
|||
extern bool volatile manager_thread_in_use, mqh_used;
|
||||
extern pthread_t manager_thread;
|
||||
pthread_handler_t handle_manager(void *arg);
|
||||
bool mysql_manager_submit(void (*action)());
|
||||
|
||||
|
||||
/* sql_test.cc */
|
||||
#ifndef DBUG_OFF
|
||||
|
@ -1253,17 +1250,67 @@ extern const LEX_STRING view_type;
|
|||
|
||||
/* optional things, have_* variables */
|
||||
|
||||
extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db;
|
||||
extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db;
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
extern handlerton innobase_hton;
|
||||
#define have_innodb innobase_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_innodb;
|
||||
#endif
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
extern handlerton berkeley_hton;
|
||||
#define have_berkeley_db berkeley_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_berkeley_db;
|
||||
#endif
|
||||
#ifdef WITH_EXAMPLE_STORAGE_ENGINE
|
||||
extern handlerton example_hton;
|
||||
#define have_example_db example_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_example_db;
|
||||
#endif
|
||||
#ifdef WITH_ARCHIVE_STORAGE_ENGINE
|
||||
extern handlerton archive_hton;
|
||||
#define have_archive_db archive_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_archive_db;
|
||||
#endif
|
||||
#ifdef WITH_CSV_STORAGE_ENGINE
|
||||
extern handlerton tina_hton;
|
||||
#define have_csv_db tina_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_csv_db;
|
||||
#endif
|
||||
#ifdef WITH_FEDERATED_STORAGE_ENGINE
|
||||
extern handlerton federated_hton;
|
||||
#define have_federated_db federated_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_federated_db;
|
||||
#endif
|
||||
#ifdef WITH_BLACKHOLE_STORAGE_ENGINE
|
||||
extern handlerton blackhole_hton;
|
||||
#define have_blackhole_db blackhole_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_blackhole_db;
|
||||
#endif
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
extern handlerton ndbcluster_hton;
|
||||
#define have_ndbcluster ndbcluster_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_ndbcluster;
|
||||
#endif
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
extern handlerton partition_hton;
|
||||
#define have_partition_db partition_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_partition_db;
|
||||
#endif
|
||||
|
||||
extern SHOW_COMP_OPTION have_isam;
|
||||
extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink;
|
||||
extern SHOW_COMP_OPTION have_query_cache;
|
||||
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
|
||||
extern SHOW_COMP_OPTION have_crypt;
|
||||
extern SHOW_COMP_OPTION have_compress;
|
||||
extern SHOW_COMP_OPTION have_partition_db;
|
||||
|
||||
#ifndef __WIN__
|
||||
extern pthread_t signal_thread;
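For engines that are compiled in, have_innodb and friends become aliases for the engine handlerton's state field, so the many existing have_xxx == SHOW_OPTION_YES checks keep compiling while the handlerton stays the single source of truth. A minimal sketch of the effect, assuming WITH_INNOBASE_STORAGE_ENGINE is defined:

static bool innodb_is_enabled(void)
{
  /* expands to innobase_hton.state == SHOW_OPTION_YES when InnoDB is
     built in, and reads the fallback SHOW_COMP_OPTION otherwise */
  return have_innodb == SHOW_OPTION_YES;
}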
|
||||
|
sql/mysqld.cc  (274 changed lines)
@ -24,24 +24,16 @@
|
|||
#include "stacktrace.h"
|
||||
#include "mysqld_suffix.h"
|
||||
#include "mysys_err.h"
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#include "ha_berkeley.h"
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#include "ha_innodb.h"
|
||||
#endif
|
||||
#include "ha_myisam.h"
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#include "ha_ndbcluster.h"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#include "ha_myisam.h"
|
||||
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
#define OPT_INNODB_DEFAULT 1
|
||||
#else
|
||||
#define OPT_INNODB_DEFAULT 0
|
||||
#endif
|
||||
#define OPT_BDB_DEFAULT 0
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
#define OPT_NDBCLUSTER_DEFAULT 0
|
||||
#if defined(NOT_ENOUGH_TESTED) \
|
||||
&& defined(NDB_SHM_TRANSPORTER) && MYSQL_VERSION_ID >= 50000
|
||||
|
@ -330,7 +322,7 @@ static I_List<THD> thread_cache;
|
|||
|
||||
static pthread_cond_t COND_thread_cache, COND_flush_thread_cache;
|
||||
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
static my_bool opt_sync_bdb_logs;
|
||||
#endif
|
||||
|
||||
|
@ -355,7 +347,59 @@ my_bool opt_safe_user_create = 0, opt_no_mix_types = 0;
|
|||
my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0;
|
||||
my_bool opt_log_slave_updates= 0;
|
||||
my_bool opt_innodb;
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
extern struct show_var_st innodb_status_variables[];
|
||||
extern uint innobase_init_flags, innobase_lock_type;
|
||||
extern uint innobase_flush_log_at_trx_commit;
|
||||
extern ulong innobase_cache_size, innobase_fast_shutdown;
|
||||
extern ulong innobase_large_page_size;
|
||||
extern char *innobase_home, *innobase_tmpdir, *innobase_logdir;
|
||||
extern long innobase_lock_scan_time;
|
||||
extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
|
||||
extern long innobase_log_file_size, innobase_log_buffer_size;
|
||||
extern long innobase_buffer_pool_size, innobase_additional_mem_pool_size;
|
||||
extern long innobase_buffer_pool_awe_mem_mb;
|
||||
extern long innobase_file_io_threads, innobase_lock_wait_timeout;
|
||||
extern long innobase_force_recovery;
|
||||
extern long innobase_open_files;
|
||||
extern char *innobase_data_home_dir, *innobase_data_file_path;
|
||||
extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
|
||||
extern char *innobase_unix_file_flush_method;
|
||||
/* The following variables have to be my_bool for SHOW VARIABLES to work */
|
||||
extern my_bool innobase_log_archive,
|
||||
innobase_use_doublewrite,
|
||||
innobase_use_checksums,
|
||||
innobase_use_large_pages,
|
||||
innobase_use_native_aio,
|
||||
innobase_file_per_table, innobase_locks_unsafe_for_binlog,
|
||||
innobase_create_status_file;
|
||||
extern my_bool innobase_very_fast_shutdown; /* set this to 1 just before
|
||||
calling innobase_end() if you want
|
||||
InnoDB to shut down without
|
||||
flushing the buffer pool: this
|
||||
is equivalent to a 'crash' */
|
||||
extern "C" {
|
||||
extern ulong srv_max_buf_pool_modified_pct;
|
||||
extern ulong srv_max_purge_lag;
|
||||
extern ulong srv_auto_extend_increment;
|
||||
extern ulong srv_n_spin_wait_rounds;
|
||||
extern ulong srv_n_free_tickets_to_enter;
|
||||
extern ulong srv_thread_sleep_delay;
|
||||
extern ulong srv_thread_concurrency;
|
||||
extern ulong srv_commit_concurrency;
|
||||
}
|
||||
#endif
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
extern const u_int32_t bdb_DB_TXN_NOSYNC, bdb_DB_RECOVER, bdb_DB_PRIVATE;
|
||||
extern bool berkeley_shared_data;
|
||||
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
|
||||
berkeley_lock_types[];
|
||||
extern ulong berkeley_cache_size, berkeley_max_lock, berkeley_log_buffer_size;
|
||||
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
|
||||
extern long berkeley_lock_scan_time;
|
||||
extern TYPELIB berkeley_lock_typelib;
|
||||
#endif
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
const char *opt_ndbcluster_connectstring= 0;
|
||||
const char *opt_ndb_connectstring= 0;
|
||||
char opt_ndb_constrbuf[1024];
|
||||
|
@ -365,11 +409,11 @@ ulong opt_ndb_cache_check_time;
|
|||
const char *opt_ndb_mgmd;
|
||||
ulong opt_ndb_nodeid;
|
||||
|
||||
const char *ndb_distribution_names[]= {"KEYHASH", "LINHASH", NullS};
|
||||
TYPELIB ndb_distribution_typelib= { array_elements(ndb_distribution_names)-1,
|
||||
"", ndb_distribution_names, NULL };
|
||||
const char *opt_ndb_distribution= ndb_distribution_names[ND_KEYHASH];
|
||||
enum ndb_distribution opt_ndb_distribution_id= ND_KEYHASH;
|
||||
extern struct show_var_st ndb_status_variables[];
|
||||
extern const char *ndb_distribution_names[];
|
||||
extern TYPELIB ndb_distribution_typelib;
|
||||
extern const char *opt_ndb_distribution;
|
||||
extern enum ndb_distribution opt_ndb_distribution_id;
|
||||
#endif
|
||||
my_bool opt_readonly, use_temp_pool, relay_log_purge;
|
||||
my_bool opt_sync_frm, opt_allow_suspicious_udfs;
|
||||
|
@ -474,14 +518,9 @@ MY_BITMAP temp_pool;
|
|||
CHARSET_INFO *system_charset_info, *files_charset_info ;
|
||||
CHARSET_INFO *national_charset_info, *table_alias_charset;
|
||||
|
||||
SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster,
|
||||
have_example_db, have_archive_db, have_csv_db;
|
||||
SHOW_COMP_OPTION have_federated_db;
|
||||
SHOW_COMP_OPTION have_partition_db;
|
||||
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
|
||||
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
|
||||
SHOW_COMP_OPTION have_crypt, have_compress;
|
||||
SHOW_COMP_OPTION have_blackhole_db;
|
||||
|
||||
/* Thread specific variables */
|
||||
|
||||
|
@ -2465,7 +2504,7 @@ pthread_handler_t handle_shutdown(void *arg)
|
|||
|
||||
|
||||
static const char *load_default_groups[]= {
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
"mysql_cluster",
|
||||
#endif
|
||||
"mysqld","server", MYSQL_BASE_VERSION, 0, 0};
|
||||
|
@ -2585,7 +2624,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
|
|||
{
|
||||
my_use_large_pages= 1;
|
||||
my_large_page_size= opt_large_page_size;
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
innobase_use_large_pages= 1;
|
||||
innobase_large_page_size= opt_large_page_size;
|
||||
#endif
|
||||
|
@ -3130,7 +3169,7 @@ server.");
|
|||
static void create_maintenance_thread()
|
||||
{
|
||||
if (
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
(have_berkeley_db == SHOW_OPTION_YES) ||
|
||||
#endif
|
||||
(flush_time && flush_time != ~(ulong) 0L))
|
||||
|
@ -4629,7 +4668,7 @@ struct my_option my_long_options[] =
|
|||
Disable with --skip-bdb (will save memory).",
|
||||
(gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0,
|
||||
0, 0, 0},
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
{"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home,
|
||||
(gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-lock-detect", OPT_BDB_LOCK,
|
||||
|
@ -4650,7 +4689,7 @@ Disable with --skip-bdb (will save memory).",
|
|||
{"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.",
|
||||
(gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR,
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
#endif /* HAVE_BERKELEY_DB */
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
{"big-tables", OPT_BIG_TABLES,
|
||||
"Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).",
|
||||
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
|
@ -4786,7 +4825,7 @@ Disable with --skip-large-pages.",
|
|||
Disable with --skip-innodb (will save memory).",
|
||||
(gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, OPT_INNODB_DEFAULT, 0, 0,
|
||||
0, 0, 0},
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
{"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). \
|
||||
Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums,
|
||||
(gptr*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
|
||||
|
@ -4794,7 +4833,7 @@ Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums,
|
|||
{"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH,
|
||||
"Path to individual files and their sizes.",
|
||||
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
{"innodb_data_home_dir", OPT_INNODB_DATA_HOME_DIR,
|
||||
"The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir,
|
||||
(gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0,
|
||||
|
@ -4865,7 +4904,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
|
|||
(gptr*) &global_system_variables.innodb_support_xa,
|
||||
(gptr*) &global_system_variables.innodb_support_xa,
|
||||
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
|
||||
#endif /* End HAVE_INNOBASE_DB */
|
||||
#endif /* End WITH_INNOBASE_STORAGE_ENGINE */
|
||||
{"isam", OPT_ISAM, "Obsolete. ISAM storage engine is no longer supported.",
|
||||
(gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 0, 0, 0,
|
||||
0, 0, 0},
|
||||
|
@ -5025,7 +5064,7 @@ master-ssl",
|
|||
Disable with --skip-ndbcluster (will save memory).",
|
||||
(gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG,
|
||||
OPT_NDBCLUSTER_DEFAULT, 0, 0, 0, 0, 0},
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
{"ndb-connectstring", OPT_NDB_CONNECTSTRING,
|
||||
"Connect string for ndbcluster.",
|
||||
(gptr*) &opt_ndb_connectstring,
|
||||
|
@ -5356,7 +5395,7 @@ log and this option does nothing anymore.",
|
|||
"The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.",
|
||||
(gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG,
|
||||
REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 },
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
{ "bdb_cache_size", OPT_BDB_CACHE_SIZE,
|
||||
"The buffer that is allocated to cache index and rows for BDB tables.",
|
||||
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG,
|
||||
|
@ -5373,7 +5412,7 @@ log and this option does nothing anymore.",
|
|||
"The maximum number of locks you can have active on a BDB table.",
|
||||
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
|
||||
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
|
||||
#endif /* HAVE_BERKELEY_DB */
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
{"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
|
||||
"The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
|
||||
(gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
|
||||
|
@ -5449,7 +5488,7 @@ log and this option does nothing anymore.",
|
|||
(gptr*) &global_system_variables.group_concat_max_len,
|
||||
(gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG,
|
||||
REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0},
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
{"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
|
||||
"Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.",
|
||||
(gptr*) &innobase_additional_mem_pool_size,
|
||||
|
@ -5526,7 +5565,7 @@ log and this option does nothing anymore.",
|
|||
(gptr*) &srv_thread_sleep_delay,
|
||||
(gptr*) &srv_thread_sleep_delay,
|
||||
0, GET_LONG, REQUIRED_ARG, 10000L, 0L, ~0L, 0, 1L, 0},
|
||||
#endif /* HAVE_INNOBASE_DB */
|
||||
#endif /* WITH_INNOBASE_STORAGE_ENGINE */
|
||||
{"interactive_timeout", OPT_INTERACTIVE_TIMEOUT,
|
||||
"The number of seconds the server waits for activity on an interactive connection before closing it.",
|
||||
(gptr*) &global_system_variables.net_interactive_timeout,
|
||||
|
@ -5846,12 +5885,12 @@ The minimum value for this variable is 4096.",
|
|||
(gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG,
|
||||
MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD,
|
||||
1, 0},
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
{"sync-bdb-logs", OPT_BDB_SYNC,
|
||||
"Synchronously flush Berkeley DB logs. Enabled by default",
|
||||
(gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
|
||||
NO_ARG, 1, 0, 0, 0, 0, 0},
|
||||
#endif /* HAVE_BERKELEY_DB */
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
{"sync-binlog", OPT_SYNC_BINLOG,
|
||||
"Synchronously flush binary log to disk after every #th event. "
|
||||
"Use 0 (default) to disable synchronous flushing.",
|
||||
|
@ -6003,14 +6042,14 @@ struct show_var_st status_vars[]= {
|
|||
{"Com_show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS},
|
||||
{"Com_show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS},
|
||||
{"Com_show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS},
|
||||
{"Com_show_engine_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_LOGS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_engine_mutex", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_MUTEX]), SHOW_LONG_STATUS},
|
||||
{"Com_show_engine_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_STATUS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_innodb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_INNODB_STATUS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_LOGS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS},
|
||||
{"Com_show_ndb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NDBCLUSTER_STATUS]), SHOW_LONG_STATUS},
|
||||
{"Com_show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS},
|
||||
{"Com_show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS},
|
||||
{"Com_show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS},
|
||||
|
@ -6065,9 +6104,9 @@ struct show_var_st status_vars[]= {
|
|||
{"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS},
|
||||
{"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
|
||||
{"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
{"Innodb_", (char*) &innodb_status_variables, SHOW_VARS},
|
||||
#endif /*HAVE_INNOBASE_DB*/
|
||||
#endif /* WITH_INNOBASE_STORAGE_ENGINE */
|
||||
{"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, SHOW_KEY_CACHE_LONG},
|
||||
{"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, SHOW_KEY_CACHE_CONST_LONG},
|
||||
{"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, SHOW_KEY_CACHE_CONST_LONG},
|
||||
|
@ -6077,9 +6116,9 @@ struct show_var_st status_vars[]= {
|
|||
{"Key_writes", (char*) &dflt_key_cache_var.global_cache_write, SHOW_KEY_CACHE_LONGLONG},
|
||||
{"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
|
||||
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
{"Ndb_", (char*) &ndb_status_variables, SHOW_VARS},
|
||||
#endif /*HAVE_NDBCLUSTER_DB*/
|
||||
#endif /* WITH_NDBCLUSTER_STORAGE_ENGINE */
|
||||
{"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST},
|
||||
{"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST},
|
||||
{"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST},
|
||||
|
@ -6343,48 +6382,7 @@ static void mysql_init_variables(void)
|
|||
"d:t:i:o,/tmp/mysqld.trace");
|
||||
#endif
|
||||
opt_error_log= IF_WIN(1,0);
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
have_berkeley_db= SHOW_OPTION_YES;
|
||||
#else
|
||||
have_berkeley_db= SHOW_OPTION_NO;
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
have_innodb=SHOW_OPTION_YES;
|
||||
#else
|
||||
have_innodb=SHOW_OPTION_NO;
|
||||
#endif
|
||||
have_isam=SHOW_OPTION_NO;
|
||||
#ifdef HAVE_EXAMPLE_DB
|
||||
have_example_db= SHOW_OPTION_YES;
|
||||
#else
|
||||
have_example_db= SHOW_OPTION_NO;
|
||||
#endif
|
||||
#ifdef HAVE_PARTITION_DB
|
||||
have_partition_db= SHOW_OPTION_YES;
|
||||
#else
|
||||
have_partition_db= SHOW_OPTION_NO;
|
||||
#endif
|
||||
#ifdef HAVE_ARCHIVE_DB
|
||||
have_archive_db= SHOW_OPTION_YES;
|
||||
#else
|
||||
have_archive_db= SHOW_OPTION_NO;
|
||||
#endif
|
||||
#ifdef HAVE_BLACKHOLE_DB
|
||||
have_blackhole_db= SHOW_OPTION_YES;
|
||||
#else
|
||||
have_blackhole_db= SHOW_OPTION_NO;
|
||||
#endif
|
||||
#ifdef HAVE_FEDERATED_DB
|
||||
have_federated_db= SHOW_OPTION_YES;
|
||||
#else
|
||||
have_federated_db= SHOW_OPTION_NO;
|
||||
#endif
|
||||
#ifdef HAVE_CSV_DB
|
||||
have_csv_db= SHOW_OPTION_YES;
|
||||
#else
|
||||
have_csv_db= SHOW_OPTION_NO;
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
have_ndbcluster=SHOW_OPTION_DISABLED;
|
||||
global_system_variables.ndb_index_stat_enable=TRUE;
|
||||
max_system_variables.ndb_index_stat_enable=TRUE;
|
||||
|
@ -6803,19 +6801,19 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
|
|||
global_system_variables.tx_isolation= (type-1);
|
||||
break;
|
||||
}
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
case OPT_BDB_NOSYNC:
|
||||
/* Deprecated option */
|
||||
opt_sync_bdb_logs= 0;
|
||||
/* Fall through */
|
||||
case OPT_BDB_SYNC:
|
||||
if (!opt_sync_bdb_logs)
|
||||
berkeley_env_flags|= DB_TXN_NOSYNC;
|
||||
berkeley_env_flags|= bdb_DB_TXN_NOSYNC;
|
||||
else
|
||||
berkeley_env_flags&= ~DB_TXN_NOSYNC;
|
||||
berkeley_env_flags&= ~bdb_DB_TXN_NOSYNC;
|
||||
break;
|
||||
case OPT_BDB_NO_RECOVER:
|
||||
berkeley_init_flags&= ~(DB_RECOVER);
|
||||
berkeley_init_flags&= ~(bdb_DB_RECOVER);
|
||||
break;
|
||||
case OPT_BDB_LOCK:
|
||||
{
|
||||
|
@ -6839,12 +6837,12 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
|
|||
break;
|
||||
}
|
||||
case OPT_BDB_SHARED:
|
||||
berkeley_init_flags&= ~(DB_PRIVATE);
|
||||
berkeley_init_flags&= ~(bdb_DB_PRIVATE);
|
||||
berkeley_shared_data= 1;
|
||||
break;
|
||||
#endif /* HAVE_BERKELEY_DB */
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
case OPT_BDB:
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
if (opt_bdb)
|
||||
have_berkeley_db= SHOW_OPTION_YES;
|
||||
else
|
||||
|
@ -6852,14 +6850,14 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
|
|||
#endif
|
||||
break;
|
||||
case OPT_NDBCLUSTER:
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
if (opt_ndbcluster)
|
||||
have_ndbcluster= SHOW_OPTION_YES;
|
||||
else
|
||||
have_ndbcluster= SHOW_OPTION_DISABLED;
|
||||
#endif
|
||||
break;
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
case OPT_NDB_MGMD:
|
||||
case OPT_NDB_NODEID:
|
||||
{
|
||||
|
@ -6899,7 +6897,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
|
|||
break;
|
||||
#endif
|
||||
case OPT_INNODB:
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
if (opt_innodb)
|
||||
have_innodb= SHOW_OPTION_YES;
|
||||
else
|
||||
|
@ -6907,15 +6905,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
|
|||
#endif
|
||||
break;
|
||||
case OPT_INNODB_DATA_FILE_PATH:
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
innobase_data_file_path= argument;
|
||||
#endif
|
||||
break;
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
case OPT_INNODB_LOG_ARCHIVE:
|
||||
innobase_log_archive= argument ? test(atoi(argument)) : 1;
|
||||
break;
|
||||
#endif /* HAVE_INNOBASE_DB */
|
||||
#endif /* WITH_INNOBASE_STORAGE_ENGINE */
|
||||
case OPT_MYISAM_RECOVER:
|
||||
{
|
||||
if (!argument || !argument[0])
|
||||
|
@ -7061,19 +7059,19 @@ static void get_options(int argc,char **argv)
|
|||
get_one_option)))
|
||||
exit(ho_error);
|
||||
|
||||
#ifndef HAVE_NDBCLUSTER_DB
|
||||
#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
if (opt_ndbcluster)
|
||||
sql_print_warning("this binary does not contain NDBCLUSTER storage engine");
|
||||
#endif
|
||||
#ifndef HAVE_INNOBASE_DB
|
||||
#ifndef WITH_INNOBASE_STORAGE_ENGINE
|
||||
if (opt_innodb)
|
||||
sql_print_warning("this binary does not contain INNODB storage engine");
|
||||
#endif
|
||||
#ifndef HAVE_ISAM
|
||||
#ifndef WITH_ISAM_STORAGE_ENGINE
|
||||
if (opt_isam)
|
||||
sql_print_warning("this binary does not contain ISAM storage engine");
|
||||
#endif
|
||||
#ifndef HAVE_BERKELEY_DB
|
||||
#ifndef WITH_BERKELEY_STORAGE_ENGINE
|
||||
if (opt_bdb)
|
||||
sql_print_warning("this binary does not contain BDB storage engine");
|
||||
#endif
|
||||
|
@ -7385,6 +7383,70 @@ static void create_pid_file()
|
|||
}
|
||||
|
||||
|
||||
/*****************************************************************************
|
||||
Instantiate have_xyx for missing storage engines
|
||||
*****************************************************************************/
|
||||
#undef have_isam
|
||||
#undef have_berkeley_db
|
||||
#undef have_innodb
|
||||
#undef have_ndbcluster
|
||||
#undef have_example_db
|
||||
#undef have_archive_db
|
||||
#undef have_csv_db
|
||||
#undef have_federated_db
|
||||
#undef have_partition_db
|
||||
#undef have_blackhole_db
|
||||
|
||||
SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_isam= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_archive_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_csv_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO;
|
||||
|
||||
#ifndef WITH_BERKELEY_STORAGE_ENGINE
|
||||
bool berkeley_shared_data;
|
||||
ulong berkeley_cache_size, berkeley_max_lock, berkeley_log_buffer_size;
|
||||
char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
|
||||
#endif
|
||||
|
||||
#ifndef WITH_INNOBASE_STORAGE_ENGINE
|
||||
uint innobase_flush_log_at_trx_commit;
|
||||
ulong innobase_fast_shutdown;
|
||||
long innobase_mirrored_log_groups, innobase_log_files_in_group;
|
||||
long innobase_log_file_size, innobase_log_buffer_size;
|
||||
long innobase_buffer_pool_size, innobase_additional_mem_pool_size;
|
||||
long innobase_buffer_pool_awe_mem_mb;
|
||||
long innobase_file_io_threads, innobase_lock_wait_timeout;
|
||||
long innobase_force_recovery;
|
||||
long innobase_open_files;
|
||||
char *innobase_data_home_dir, *innobase_data_file_path;
|
||||
char *innobase_log_group_home_dir, *innobase_log_arch_dir;
|
||||
char *innobase_unix_file_flush_method;
|
||||
my_bool innobase_log_archive,
|
||||
innobase_use_doublewrite,
|
||||
innobase_use_checksums,
|
||||
innobase_file_per_table,
|
||||
innobase_locks_unsafe_for_binlog;
|
||||
|
||||
ulong srv_max_buf_pool_modified_pct;
|
||||
ulong srv_max_purge_lag;
|
||||
ulong srv_auto_extend_increment;
|
||||
ulong srv_n_spin_wait_rounds;
|
||||
ulong srv_n_free_tickets_to_enter;
|
||||
ulong srv_thread_sleep_delay;
|
||||
ulong srv_thread_concurrency;
|
||||
ulong srv_commit_concurrency;
|
||||
#endif
|
||||
|
||||
#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
ulong ndb_cache_check_time;
|
||||
#endif
|
||||
|
||||
/*****************************************************************************
|
||||
Instantiate templates
|
||||
*****************************************************************************/
|
||||
|
@ -7399,3 +7461,5 @@ template class I_List<NAMED_LIST>;
|
|||
template class I_List<Statement>;
|
||||
template class I_List_iterator<Statement>;
|
||||
#endif
|
||||
|
||||
|
||||
|
|
|
@ -58,15 +58,46 @@
|
|||
#include <my_getopt.h>
|
||||
#include <thr_alarm.h>
|
||||
#include <myisam.h>
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#include "ha_berkeley.h"
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#include "ha_innodb.h"
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#include "ha_ndbcluster.h"
|
||||
#endif
|
||||
|
||||
/* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
extern bool berkeley_shared_data;
|
||||
extern ulong berkeley_cache_size, berkeley_max_lock, berkeley_log_buffer_size;
|
||||
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
|
||||
|
||||
/* WITH_INNOBASE_STORAGE_ENGINE */
|
||||
extern uint innobase_flush_log_at_trx_commit;
|
||||
extern ulong innobase_fast_shutdown;
|
||||
extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
|
||||
extern long innobase_log_file_size, innobase_log_buffer_size;
|
||||
extern long innobase_buffer_pool_size, innobase_additional_mem_pool_size;
|
||||
extern long innobase_buffer_pool_awe_mem_mb;
|
||||
extern long innobase_file_io_threads, innobase_lock_wait_timeout;
|
||||
extern long innobase_force_recovery;
|
||||
extern long innobase_open_files;
|
||||
extern char *innobase_data_home_dir, *innobase_data_file_path;
|
||||
extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
|
||||
extern char *innobase_unix_file_flush_method;
|
||||
/* The following variables have to be my_bool for SHOW VARIABLES to work */
|
||||
extern my_bool innobase_log_archive,
|
||||
innobase_use_doublewrite,
|
||||
innobase_use_checksums,
|
||||
innobase_file_per_table,
|
||||
innobase_locks_unsafe_for_binlog;
|
||||
|
||||
extern ulong srv_max_buf_pool_modified_pct;
|
||||
extern ulong srv_max_purge_lag;
|
||||
extern ulong srv_auto_extend_increment;
|
||||
extern ulong srv_n_spin_wait_rounds;
|
||||
extern ulong srv_n_free_tickets_to_enter;
|
||||
extern ulong srv_thread_sleep_delay;
|
||||
extern ulong srv_thread_concurrency;
|
||||
extern ulong srv_commit_concurrency;
|
||||
|
||||
/* WITH_NDBCLUSTER_STORAGE_ENGINE */
|
||||
extern ulong ndb_cache_check_time;
|
||||
|
||||
|
||||
|
||||
|
||||
static HASH system_variable_hash;
|
||||
const char *bool_type_names[]= { "OFF", "ON", NullS };
|
||||
|
@ -398,7 +429,6 @@ sys_var_bool_ptr sys_timed_mutexes("timed_mutexes",
|
|||
sys_var_thd_ulong sys_net_wait_timeout("wait_timeout",
|
||||
&SV::net_wait_timeout);
|
||||
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
sys_var_long_ptr sys_innodb_fast_shutdown("innodb_fast_shutdown",
|
||||
&innobase_fast_shutdown);
|
||||
sys_var_long_ptr sys_innodb_max_dirty_pages_pct("innodb_max_dirty_pages_pct",
|
||||
|
@ -421,14 +451,12 @@ sys_var_long_ptr sys_innodb_thread_concurrency("innodb_thread_concurrency",
|
|||
&srv_thread_concurrency);
|
||||
sys_var_long_ptr sys_innodb_commit_concurrency("innodb_commit_concurrency",
|
||||
&srv_commit_concurrency);
|
||||
#endif
|
||||
|
||||
/* Condition pushdown to storage engine */
|
||||
sys_var_thd_bool
|
||||
sys_engine_condition_pushdown("engine_condition_pushdown",
|
||||
&SV::engine_condition_pushdown);
|
||||
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
/* ndb thread specific variable settings */
|
||||
sys_var_thd_ulong
|
||||
sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz",
|
||||
|
@ -450,7 +478,6 @@ sys_ndb_index_stat_cache_entries("ndb_index_stat_cache_entries",
|
|||
sys_var_thd_ulong
|
||||
sys_ndb_index_stat_update_freq("ndb_index_stat_update_freq",
|
||||
&SV::ndb_index_stat_update_freq);
|
||||
#endif
|
||||
|
||||
/* Time/date/datetime formats */
|
||||
|
||||
|
@ -593,7 +620,6 @@ struct show_var_st init_vars[]= {
|
|||
{sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS},
|
||||
{"back_log", (char*) &back_log, SHOW_LONG},
|
||||
{"basedir", mysql_home, SHOW_CHAR},
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
{"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONG},
|
||||
{"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
|
||||
{"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG},
|
||||
|
@ -601,7 +627,6 @@ struct show_var_st init_vars[]= {
|
|||
{"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG},
|
||||
{"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL},
|
||||
{"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR},
|
||||
#endif
|
||||
{sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS},
|
||||
{sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS},
|
||||
{sys_character_set_client.name,(char*) &sys_character_set_client, SHOW_SYS},
|
||||
|
@ -658,7 +683,6 @@ struct show_var_st init_vars[]= {
|
|||
{"init_connect", (char*) &sys_init_connect, SHOW_SYS},
|
||||
{"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR},
|
||||
{"init_slave", (char*) &sys_init_slave, SHOW_SYS},
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
{"innodb_additional_mem_pool_size", (char*) &innobase_additional_mem_pool_size, SHOW_LONG },
|
||||
{sys_innodb_autoextend_increment.name, (char*) &sys_innodb_autoextend_increment, SHOW_SYS},
|
||||
{"innodb_buffer_pool_awe_mem_mb", (char*) &innobase_buffer_pool_awe_mem_mb, SHOW_LONG },
|
||||
|
@ -692,7 +716,6 @@ struct show_var_st init_vars[]= {
|
|||
{sys_innodb_table_locks.name, (char*) &sys_innodb_table_locks, SHOW_SYS},
|
||||
{sys_innodb_thread_concurrency.name, (char*) &sys_innodb_thread_concurrency, SHOW_SYS},
|
||||
{sys_innodb_thread_sleep_delay.name, (char*) &sys_innodb_thread_sleep_delay, SHOW_SYS},
|
||||
#endif
|
||||
{sys_interactive_timeout.name,(char*) &sys_interactive_timeout, SHOW_SYS},
|
||||
{sys_join_buffer_size.name, (char*) &sys_join_buffer_size, SHOW_SYS},
|
||||
{sys_key_buffer_size.name, (char*) &sys_key_buffer_size, SHOW_SYS},
|
||||
|
@ -757,7 +780,6 @@ struct show_var_st init_vars[]= {
|
|||
#ifdef __NT__
|
||||
{"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL},
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
{sys_ndb_autoincrement_prefetch_sz.name,
|
||||
(char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS},
|
||||
{sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS},
|
||||
|
@ -767,7 +789,6 @@ struct show_var_st init_vars[]= {
|
|||
{sys_ndb_index_stat_update_freq.name, (char*) &sys_ndb_index_stat_update_freq, SHOW_SYS},
|
||||
{sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS},
|
||||
{sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS},
|
||||
#endif
|
||||
{sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS},
|
||||
{sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS},
|
||||
{sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS},
|
||||
|
@ -865,9 +886,6 @@ struct show_var_st init_vars[]= {
|
|||
{sys_updatable_views_with_limit.name,
|
||||
(char*) &sys_updatable_views_with_limit,SHOW_SYS},
|
||||
{"version", server_version, SHOW_CHAR},
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
{"version_bdb", (char*) DB_VERSION_STRING, SHOW_CHAR},
|
||||
#endif
|
||||
{"version_comment", (char*) MYSQL_COMPILATION_COMMENT, SHOW_CHAR},
|
||||
{"version_compile_machine", (char*) MACHINE_TYPE, SHOW_CHAR},
|
||||
{sys_os.name, (char*) &sys_os, SHOW_SYS},
|
||||
|
|
|
@ -87,11 +87,11 @@ sp_get_flags_for_command(LEX *lex)
|
|||
case SQLCOM_SHOW_ERRORS:
|
||||
case SQLCOM_SHOW_FIELDS:
|
||||
case SQLCOM_SHOW_GRANTS:
|
||||
case SQLCOM_SHOW_INNODB_STATUS:
|
||||
case SQLCOM_SHOW_ENGINE_STATUS:
|
||||
case SQLCOM_SHOW_ENGINE_LOGS:
|
||||
case SQLCOM_SHOW_ENGINE_MUTEX:
|
||||
case SQLCOM_SHOW_KEYS:
|
||||
case SQLCOM_SHOW_LOGS:
|
||||
case SQLCOM_SHOW_MASTER_STAT:
|
||||
case SQLCOM_SHOW_MUTEX_STATUS:
|
||||
case SQLCOM_SHOW_NEW_MASTER:
|
||||
case SQLCOM_SHOW_OPEN_TABLES:
|
||||
case SQLCOM_SHOW_PRIVILEGES:
|
||||
|
|
|
@ -287,7 +287,7 @@ void THD::init(void)
|
|||
variables.date_format);
|
||||
variables.datetime_format= date_time_format_copy((THD*) 0,
|
||||
variables.datetime_format);
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
variables.ndb_use_transactions= 1;
|
||||
#endif
|
||||
pthread_mutex_unlock(&LOCK_global_system_variables);
|
||||
|
@ -902,7 +902,7 @@ bool select_send::send_data(List<Item> &items)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
/*
|
||||
We may be passing the control from mysqld to the client: release the
|
||||
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
|
||||
|
@ -938,7 +938,7 @@ bool select_send::send_data(List<Item> &items)
|
|||
|
||||
bool select_send::send_eof()
|
||||
{
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
#ifdef WITH_INNOBASE_STORAGE_ENGINE
|
||||
/* We may be passing the control from mysqld to the client: release the
|
||||
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
|
||||
by thd */
|
||||
|
|
|
@ -563,11 +563,8 @@ struct system_variables
|
|||
ulong sync_replication_slave_id;
|
||||
ulong sync_replication_timeout;
|
||||
#endif /* HAVE_REPLICATION */
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
my_bool innodb_table_locks;
|
||||
my_bool innodb_support_xa;
|
||||
#endif /* HAVE_INNOBASE_DB */
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
ulong ndb_autoincrement_prefetch_sz;
|
||||
my_bool ndb_force_send;
|
||||
my_bool ndb_use_exact_count;
|
||||
|
@ -575,7 +572,6 @@ struct system_variables
|
|||
my_bool ndb_index_stat_enable;
|
||||
ulong ndb_index_stat_cache_entries;
|
||||
ulong ndb_index_stat_update_freq;
|
||||
#endif /* HAVE_NDBCLUSTER_DB */
|
||||
my_bool old_alter_table;
|
||||
my_bool old_passwords;
|
||||
|
||||
|
|
|
@ -53,8 +53,8 @@ enum enum_sql_command {
|
|||
SQLCOM_DELETE, SQLCOM_TRUNCATE, SQLCOM_DROP_TABLE, SQLCOM_DROP_INDEX,
|
||||
|
||||
SQLCOM_SHOW_DATABASES, SQLCOM_SHOW_TABLES, SQLCOM_SHOW_FIELDS,
|
||||
SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_LOGS, SQLCOM_SHOW_STATUS,
|
||||
SQLCOM_SHOW_INNODB_STATUS, SQLCOM_SHOW_NDBCLUSTER_STATUS, SQLCOM_SHOW_MUTEX_STATUS,
|
||||
SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_STATUS,
|
||||
SQLCOM_SHOW_ENGINE_LOGS, SQLCOM_SHOW_ENGINE_STATUS, SQLCOM_SHOW_ENGINE_MUTEX,
|
||||
SQLCOM_SHOW_PROCESSLIST, SQLCOM_SHOW_MASTER_STAT, SQLCOM_SHOW_SLAVE_STAT,
|
||||
SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, SQLCOM_SHOW_CHARSETS,
|
||||
SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, SQLCOM_SHOW_TABLE_STATUS,
|
||||
|
|
|
@ -32,12 +32,43 @@ pthread_t manager_thread;
|
|||
pthread_mutex_t LOCK_manager;
|
||||
pthread_cond_t COND_manager;
|
||||
|
||||
struct handler_cb {
|
||||
struct handler_cb *next;
|
||||
void (*action)(void);
|
||||
};
|
||||
|
||||
static struct handler_cb * volatile cb_list;
|
||||
|
||||
bool mysql_manager_submit(void (*action)())
|
||||
{
|
||||
bool result= FALSE;
|
||||
struct handler_cb * volatile *cb;
|
||||
pthread_mutex_lock(&LOCK_manager);
|
||||
cb= &cb_list;
|
||||
while (*cb && (*cb)->action != action)
|
||||
cb= &(*cb)->next;
|
||||
if (!*cb)
|
||||
{
|
||||
*cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME));
|
||||
if (!*cb)
|
||||
result= TRUE;
|
||||
else
|
||||
{
|
||||
(*cb)->next= NULL;
|
||||
(*cb)->action= action;
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&LOCK_manager);
|
||||
return result;
|
||||
}
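The manager thread no longer hard-codes the Berkeley DB log cleanup; engines queue periodic work through mysql_manager_submit() and handle_manager() runs the callbacks. A usage sketch of how the BDB code is presumably meant to schedule its cleanup now:

/* mysql_manager_submit() returns TRUE only if the callback could not be queued */
if (mysql_manager_submit(berkeley_cleanup_log_files))
  sql_print_warning("Could not submit BDB log file cleanup to the manager thread");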
|
||||
|
||||
pthread_handler_t handle_manager(void *arg __attribute__((unused)))
|
||||
{
|
||||
int error = 0;
|
||||
ulong status;
|
||||
struct timespec abstime;
|
||||
bool reset_flush_time = TRUE;
|
||||
struct handler_cb *cb= NULL;
|
||||
my_thread_init();
|
||||
DBUG_ENTER("handle_manager");
|
||||
|
||||
|
@ -68,6 +99,11 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
|
|||
}
|
||||
status = manager_status;
|
||||
manager_status = 0;
|
||||
if (cb == NULL)
|
||||
{
|
||||
cb= cb_list;
|
||||
cb_list= NULL;
|
||||
}
|
||||
pthread_mutex_unlock(&LOCK_manager);
|
||||
|
||||
if (abort_loop)
|
||||
|
@ -80,13 +116,13 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
|
|||
reset_flush_time = TRUE;
|
||||
}
|
||||
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
if (status & MANAGER_BERKELEY_LOG_CLEANUP)
|
||||
while (cb)
|
||||
{
|
||||
berkeley_cleanup_log_files();
|
||||
status &= ~MANAGER_BERKELEY_LOG_CLEANUP;
|
||||
struct handler_cb *next= cb->next;
|
||||
cb->action();
|
||||
my_free((gptr)cb, MYF(0));
|
||||
cb= next;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (status)
|
||||
DBUG_PRINT("error", ("manager did not handle something: %lx", status));
|
||||
|
|
|
@ -14,6 +14,6 @@
|
|||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
void berkeley_cleanup_log_files(void);
|
||||
#endif /* HAVE_BERKELEY_DB */
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
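After the hunks above, the manager thread has no engine knowledge left: the MANAGER_BERKELEY_LOG_CLEANUP bit is gone, and handle_manager() simply drains a list of callbacks queued through mysql_manager_submit(), which also filters duplicate submissions of the same action. A minimal sketch of how an engine could hand its periodic cleanup to the manager follows; the example_* names are hypothetical, and it is assumed that mysql_manager_submit() ends up declared in sql_manager.h.

/* Sketch only: queue engine-specific maintenance for the manager thread.
   example_cleanup_log_files() and example_request_cleanup() are made up. */
#include "mysql_priv.h"
#include "sql_manager.h"

static void example_cleanup_log_files(void)
{
  /* engine-specific work, e.g. deleting log files that are no longer needed */
}

static void example_request_cleanup(void)
{
  /* mysql_manager_submit() returns TRUE only if the callback could not be
     queued (out of memory); a duplicate submission is silently ignored */
  if (mysql_manager_submit(example_cleanup_log_files))
    sql_print_warning("example: could not queue log cleanup");
}
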
@@ -22,14 +22,6 @@
 #include <myisam.h>
 #include <my_dir.h>

-#ifdef HAVE_INNOBASE_DB
-#include "ha_innodb.h"
-#endif
-
-#ifdef HAVE_NDBCLUSTER_DB
-#include "ha_ndbcluster.h"
-#endif
-
 #include "sp_head.h"
 #include "sp.h"
 #include "sp_cache.h"

@@ -1767,8 +1759,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
       TABLE_LIST table_list;
       LEX_STRING conv_name;
       /* Saved variable value */
-      my_bool old_innodb_table_locks=
-        IF_INNOBASE_DB(thd->variables.innodb_table_locks, FALSE);
+      my_bool old_innodb_table_locks= thd->variables.innodb_table_locks;
+

       /* used as fields initializator */
       lex_start(thd, 0, 0);

@@ -2685,29 +2678,20 @@ mysql_execute_command(THD *thd)
     res = load_master_data(thd);
     break;
 #endif /* HAVE_REPLICATION */
-#ifdef HAVE_NDBCLUSTER_DB
-  case SQLCOM_SHOW_NDBCLUSTER_STATUS:
-    {
-      res = ndbcluster_show_status(thd);
-      break;
-    }
-#endif
-#ifdef HAVE_INNOBASE_DB
-  case SQLCOM_SHOW_INNODB_STATUS:
-    {
-      if (check_global_access(thd, SUPER_ACL))
-        goto error;
-      res = innodb_show_status(thd);
-      break;
-    }
-  case SQLCOM_SHOW_MUTEX_STATUS:
+  case SQLCOM_SHOW_ENGINE_STATUS:
     {
       if (check_global_access(thd, SUPER_ACL))
         goto error;
-      res = innodb_mutex_show_status(thd);
+      res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_STATUS);
       break;
     }
+  case SQLCOM_SHOW_ENGINE_MUTEX:
+    {
+      if (check_global_access(thd, SUPER_ACL))
+        goto error;
+      res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_MUTEX);
+      break;
+    }
-#endif
 #ifdef HAVE_REPLICATION
   case SQLCOM_LOAD_MASTER_TABLE:
   {
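The engine-specific SHOW cases above collapse into two generic commands: the parser stores the target engine in lex->create_info.db_type and execution calls ha_show_status() with HA_ENGINE_STATUS or HA_ENGINE_MUTEX. The handler.cc side is not part of this hunk, so the sketch below only illustrates the kind of handlerton dispatch this implies; the show_status hook, its signature, the enum type name, and the lookup helper are assumptions, not the committed API.

/* Sketch only: route SHOW ENGINE ... STATUS/MUTEX/LOGS to one engine.
   find_handlerton() and hton->show_status are placeholders. */
bool ha_show_status(THD *thd, enum db_type db_type, enum ha_stat_type stat)
{
  handlerton *hton= find_handlerton(db_type);       /* hypothetical lookup */
  if (hton == NULL || hton->show_status == NULL)
  {
    my_error(ER_NOT_SUPPORTED_YET, MYF(0), "STATUS");
    return TRUE;                                    /* error already reported */
  }
  return hton->show_status(thd, stat);              /* engine builds the result set */
}
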
@@ -3431,7 +3415,7 @@ end_with_restore_list:
   case SQLCOM_SHOW_COLUMN_TYPES:
     res= mysqld_show_column_types(thd);
     break;
-  case SQLCOM_SHOW_LOGS:
+  case SQLCOM_SHOW_ENGINE_LOGS:
 #ifdef DONT_ALLOW_SHOW_COMMANDS
     my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND),
                MYF(0)); /* purecov: inspected */

@@ -3440,7 +3424,7 @@ end_with_restore_list:
   {
     if (grant_option && check_access(thd, FILE_ACL, any_db,0,0,0,0))
       goto error;
-    res= mysqld_show_logs(thd);
+    res= ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_LOGS);
     break;
   }
 #endif
@@ -38,7 +38,7 @@
 #include <m_ctype.h>
 #include "md5.h"

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
 /*
   Partition related functions declarations and some static constants;
 */

@@ -101,8 +101,8 @@ uint32 get_partition_id_linear_key_sub(partition_info *part_info);
     TRUE   Yes, it is part of a management partition command
     FALSE  No, not a management partition command
   DESCRIPTION
-    This needs to be outside of HAVE_PARTITION_DB since it is used from the
-    sql parser that doesn't have any #ifdef's
+    This needs to be outside of WITH_PARTITION_STORAGE_ENGINE since it is
+    used from the sql parser that doesn't have any #ifdef's
 */

 my_bool is_partition_management(LEX *lex)

@@ -112,7 +112,7 @@ my_bool is_partition_management(LEX *lex)
           lex->alter_info.flags == ALTER_REORGANISE_PARTITION));
 }

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
 /*
   A support function to check if a partition name is in a list of strings
   SYNOPSIS

@@ -1726,7 +1726,9 @@ static bool check_prepared_statement(Prepared_statement *stmt,
   case SQLCOM_SHOW_COLUMN_TYPES:
   case SQLCOM_SHOW_STATUS:
   case SQLCOM_SHOW_VARIABLES:
-  case SQLCOM_SHOW_LOGS:
+  case SQLCOM_SHOW_ENGINE_LOGS:
+  case SQLCOM_SHOW_ENGINE_STATUS:
+  case SQLCOM_SHOW_ENGINE_MUTEX:
   case SQLCOM_SHOW_TABLES:
   case SQLCOM_SHOW_OPEN_TABLES:
   case SQLCOM_SHOW_CHARSETS:
@@ -25,9 +25,6 @@
 #include "sql_trigger.h"
 #include <my_dir.h>

-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h"               // For berkeley_show_logs
-#endif

 static const char *grant_names[]={
   "select","insert","update","delete","create","drop","reload","shutdown",

@@ -512,29 +509,6 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
   DBUG_RETURN(FALSE);
 }

-bool
-mysqld_show_logs(THD *thd)
-{
-  List<Item> field_list;
-  Protocol *protocol= thd->protocol;
-  DBUG_ENTER("mysqld_show_logs");
-
-  field_list.push_back(new Item_empty_string("File",FN_REFLEN));
-  field_list.push_back(new Item_empty_string("Type",10));
-  field_list.push_back(new Item_empty_string("Status",10));
-
-  if (protocol->send_fields(&field_list,
-                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
-    DBUG_RETURN(TRUE);
-
-#ifdef HAVE_BERKELEY_DB
-  if ((have_berkeley_db == SHOW_OPTION_YES) && berkeley_show_logs(protocol))
-    DBUG_RETURN(TRUE);
-#endif
-
-  send_eof(thd);
-  DBUG_RETURN(FALSE);
-}
-
-
 /****************************************************************************

@@ -974,7 +948,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
     packet->append(" TYPE=", 6);
   else
     packet->append(" ENGINE=", 8);
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (table->s->part_info)
     packet->append(ha_get_storage_engine(
                    table->s->part_info->default_engine_type));

@@ -1054,7 +1028,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
       append_directory(thd, packet, "DATA",  create_info.data_file_name);
       append_directory(thd, packet, "INDEX", create_info.index_file_name);
   }
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   {
     /*
       Partition syntax for CREATE TABLE is at the end of the syntax.
@@ -17,9 +17,6 @@
 /* drop and alter of tables */

 #include "mysql_priv.h"
-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h"
-#endif
 #include <hash.h>
 #include <myisam.h>
 #include <my_dir.h>

@@ -1607,7 +1604,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
     my_error(ER_OUTOFMEMORY, MYF(0), 128);//128 bytes invented
     DBUG_RETURN(TRUE);
   }
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   partition_info *part_info= thd->lex->part_info;
   if (part_info)
   {

@@ -3392,7 +3389,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
   uint db_create_options, used_fields;
   enum db_type old_db_type,new_db_type;
   uint need_copy_table= 0;
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   bool online_add_empty_partition= FALSE;
   bool online_drop_partition= FALSE;
   bool partition_changed= FALSE;

@@ -3474,7 +3471,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
   if (create_info->db_type == DB_TYPE_DEFAULT)
     create_info->db_type= old_db_type;

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   /*
     We need to handle both partition management command such as Add Partition
     and others here as well as an ALTER TABLE that completely changes the

@@ -4251,7 +4248,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,

   set_table_default_charset(thd, create_info, db);

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (thd->variables.old_alter_table || partition_changed)
 #else
   if (thd->variables.old_alter_table)

@@ -4270,7 +4267,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
   if (!need_copy_table)
     create_info->frm_only= 1;

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (partition_changed)
   {
     if (online_drop_partition)

@@ -4626,12 +4623,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
     write_bin_log(thd, TRUE);
     VOID(pthread_cond_broadcast(&COND_refresh));
     VOID(pthread_mutex_unlock(&LOCK_open));
-#ifdef HAVE_BERKELEY_DB
   /*
     TODO RONM: This problem needs to handled for Berkeley DB partitions
     as well
   */
-  if (old_db_type == DB_TYPE_BERKELEY_DB)
+  if (ha_check_storage_engine_flag(old_db_type,HTON_FLUSH_AFTER_RENAME))
   {
     /*
       For the alter table to be properly flushed to the logs, we

@@ -4647,11 +4643,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
       my_free((char*) table, MYF(0));
     }
     else
-      sql_print_warning("Could not open BDB table %s.%s after rename\n",
+      sql_print_warning("Could not open table %s.%s after rename\n",
                         new_db,table_name);
-    (void) berkeley_flush_logs();
+    ha_flush_logs(old_db_type);
   }
-#endif
   table_list->table=0;                  // For query cache
   query_cache_invalidate3(thd, table_list, 0);
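The BDB-specific test above becomes a capability query: ALTER TABLE asks whether the old engine wants its logs flushed after a rename via ha_check_storage_engine_flag(old_db_type, HTON_FLUSH_AFTER_RENAME) and, if so, calls the generic ha_flush_logs(). The handlerton side is not shown in this hunk; the sketch below assumes a flags word in the handlerton and a lookup helper, both illustrative only.

/* Sketch only: what the capability query amounts to. An engine such as BDB
   would set HTON_FLUSH_AFTER_RENAME in its handlerton flags; generic code
   then tests the bit without naming the engine. ha_handlerton_by_type() is
   a hypothetical lookup helper. */
uint ha_check_storage_engine_flag(enum db_type db_type, uint flag)
{
  handlerton *hton= ha_handlerton_by_type(db_type);
  return hton ? (hton->flags & flag) : 0;
}
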
@@ -291,7 +291,7 @@ int mysql_update(THD *thd,
     used_key_is_modified= check_if_key_used(table, used_index, fields);
   }

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (used_key_is_modified || order ||
       partition_key_modified(table, fields))
 #else
@@ -3255,7 +3255,9 @@ storage_engines:
         ident_or_text
         {
           $$ = ha_resolve_by_name($1.str,$1.length);
-          if ($$ == DB_TYPE_UNKNOWN) {
+          if ($$ == DB_TYPE_UNKNOWN &&
+              test(YYTHD->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION))
+          {
             my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str);
             YYABORT;
           }
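With this change an unknown engine name is only an error when NO_ENGINE_SUBSTITUTION is part of sql_mode; otherwise ha_resolve_by_name() leaves $$ as DB_TYPE_UNKNOWN and a usable engine is expected to be substituted later, wherever the parsed create_info.db_type is consumed. That substitution step is not in this hunk; the sketch below is only an illustration, and both the helper name and the thd->variables.table_type fallback are assumptions.

/* Sketch only: map DB_TYPE_UNKNOWN to the session default table type.
   pick_engine_or_default() is made up; table_type as the default-engine
   session variable is an assumption. */
static enum db_type pick_engine_or_default(THD *thd, enum db_type requested)
{
  if (requested != DB_TYPE_UNKNOWN)
    return requested;                               /* engine was recognised */
  return (enum db_type) thd->variables.table_type;  /* fall back to the default */
}
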
@@ -7110,6 +7112,9 @@ show_param:
         | ENGINE_SYM storage_engines
           { Lex->create_info.db_type= $2; }
           show_engine_param
+        | ENGINE_SYM ALL
+          { Lex->create_info.db_type= DB_TYPE_DEFAULT; }
+          show_engine_param
         | opt_full COLUMNS from_or_in table_ident opt_db wild_and_where
           {
             LEX *lex= Lex;

@@ -7192,9 +7197,19 @@ show_param:
               YYABORT;
           }
         | INNOBASE_SYM STATUS_SYM
-          { Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS"); }
+          {
+            LEX *lex= Lex;
+            lex->sql_command = SQLCOM_SHOW_ENGINE_STATUS;
+            lex->create_info.db_type= DB_TYPE_INNODB;
+            WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS");
+          }
         | MUTEX_SYM STATUS_SYM
-          { Lex->sql_command = SQLCOM_SHOW_MUTEX_STATUS; }
+          {
+            LEX *lex= Lex;
+            lex->sql_command = SQLCOM_SHOW_ENGINE_MUTEX;
+            lex->create_info.db_type= DB_TYPE_INNODB;
+            WARN_DEPRECATED("SHOW MUTEX STATUS", "SHOW ENGINE INNODB MUTEX");
+          }
         | opt_full PROCESSLIST_SYM
           { Lex->sql_command= SQLCOM_SHOW_PROCESSLIST;}
         | opt_var_type VARIABLES wild_and_where

@@ -7223,9 +7238,19 @@ show_param:
               YYABORT;
           }
         | BERKELEY_DB_SYM LOGS_SYM
-          { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); }
+          {
+            LEX *lex= Lex;
+            lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
+            lex->create_info.db_type= DB_TYPE_BERKELEY_DB;
+            WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS");
+          }
         | LOGS_SYM
-          { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW LOGS", "SHOW ENGINE BDB LOGS"); }
+          {
+            LEX *lex= Lex;
+            lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
+            lex->create_info.db_type= DB_TYPE_BERKELEY_DB;
+            WARN_DEPRECATED("SHOW LOGS", "SHOW ENGINE BDB LOGS");
+          }
         | GRANTS
           {
             LEX *lex=Lex;

@@ -7324,30 +7349,11 @@ show_param:

 show_engine_param:
         STATUS_SYM
-          {
-            switch (Lex->create_info.db_type) {
-            case DB_TYPE_NDBCLUSTER:
-              Lex->sql_command = SQLCOM_SHOW_NDBCLUSTER_STATUS;
-              break;
-            case DB_TYPE_INNODB:
-              Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS;
-              break;
-            default:
-              my_error(ER_NOT_SUPPORTED_YET, MYF(0), "STATUS");
-              YYABORT;
-            }
-          }
+          { Lex->sql_command= SQLCOM_SHOW_ENGINE_STATUS; }
+        | MUTEX_SYM
+          { Lex->sql_command= SQLCOM_SHOW_ENGINE_MUTEX; }
         | LOGS_SYM
-          {
-            switch (Lex->create_info.db_type) {
-            case DB_TYPE_BERKELEY_DB:
-              Lex->sql_command = SQLCOM_SHOW_LOGS;
-              break;
-            default:
-              my_error(ER_NOT_SUPPORTED_YET, MYF(0), "LOGS");
-              YYABORT;
-            }
-          };
+          { Lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; };

 master_or_binary:
         MASTER_SYM
sql/table.cc

@@ -341,7 +341,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
                           str_db_type_length, next_chunk + 2,
                           share->db_type));
     }
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
     else
     {
       if (!strncmp(next_chunk + 2, "partition", str_db_type_length))

@@ -361,7 +361,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
     part_info_len= uint4korr(next_chunk);
     if (part_info_len > 0)
     {
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
       if (mysql_unpack_partition(thd, (uchar *)(next_chunk + 4),
                                  part_info_len, outparam,
                                  default_part_db_type))

@@ -974,7 +974,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,

   /* Fix the partition functions and ensure they are not constant functions*/
   if (part_info_len > 0)
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
     if (fix_partition_func(thd,name,outparam))
 #endif
       goto err;

@@ -1044,7 +1044,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
   if (! error_reported)
     frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG, errarg);
   delete outparam->file;
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (outparam->s->part_info)
   {
     free_items(outparam->s->part_info->item_free_list);

@@ -1088,7 +1088,7 @@ int closefrm(register TABLE *table)
     table->field= 0;
   }
   delete table->file;
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (table->s->part_info)
   {
     free_items(table->s->part_info->item_free_list);

@@ -107,7 +107,7 @@ class Table_triggers_list;

 typedef struct st_table_share
 {
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   partition_info *part_info;            /* Partition related information */
 #endif
   /* hash of field names (contains pointers to elements of field array) */
@@ -85,11 +85,11 @@ bool mysql_create_frm(THD *thd, my_string file_name,
   TYPELIB formnames;
   uchar *screen_buff;
   char buff[5];
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   partition_info *part_info= thd->lex->part_info;
 #endif
   DBUG_ENTER("mysql_create_frm");
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   thd->lex->part_info= NULL;
 #endif

@@ -132,7 +132,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
                              2 + create_info->connect_string.length);
   /* Partition */
   create_info->extra_size+= 5;
-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (part_info)
     create_info->extra_size+= part_info->part_info_len;
 #endif

@@ -166,7 +166,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
                       60);
   forminfo[46]=(uchar) strlen((char*)forminfo+47);      // Length of comment

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (part_info)
     fileinfo[61]= (uchar) part_info->default_engine_type;
 #endif

@@ -194,7 +194,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
                         str_db_type.length, MYF(MY_NABP)))
     goto err;

-#ifdef HAVE_PARTITION_DB
+#ifdef WITH_PARTITION_STORAGE_ENGINE
   if (part_info)
   {
     int4store(buff, part_info->part_info_len);
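In mysql_create_frm() the partition clause travels in the .frm extra segment: extra_size is bumped by five bytes plus the clause length, the default engine type is stamped into fileinfo[61], and the length is written with int4store() so that openfrm() (see the sql/table.cc hunks above) can read it back with uint4korr() and hand next_chunk + 4 to mysql_unpack_partition(). A minimal sketch of that length-prefixed framing follows; the helper names are made up and buffer management is simplified.

#include <my_global.h>   /* uchar, ulong, int4store, uint4korr */
#include <string.h>

/* Sketch only: write and read a length-prefixed blob the way the partition
   clause is framed in the .frm extra segment. */
static uchar *example_store_blob(uchar *pos, const char *data, ulong len)
{
  int4store(pos, len);             /* 4-byte little-endian length */
  memcpy(pos + 4, data, len);      /* clause text follows the length */
  return pos + 4 + len;
}

static const uchar *example_read_blob(const uchar *pos, ulong *len)
{
  *len= uint4korr(pos);            /* mirrors the int4store() above */
  return pos + 4;                  /* caller consumes *len bytes from here */
}
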
@@ -17,6 +17,7 @@
 #ifndef NDB_KERNEL_TYPES_H
 #define NDB_KERNEL_TYPES_H

+#include <my_config.h>
 #include <ndb_types.h>

 typedef Uint16 NodeId;

@@ -17,6 +17,7 @@
 #ifndef NDB_GLOBAL_H
 #define NDB_GLOBAL_H

+#include <my_config.h>
 #include <ndb_types.h>

 #define NDB_PORT "@ndb_port@"

@@ -21,6 +21,8 @@
 #ifndef NDB_TYPES_H
 #define NDB_TYPES_H

+#include <my_config.h>
+
 #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(_WIN64)
 #define NDB_SIZEOF_CHARP SIZEOF_CHARP
 #define NDB_SIZEOF_CHAR SIZEOF_CHAR

@@ -31,12 +33,12 @@
 typedef unsigned __int64 Uint64;
 typedef signed __int64 Int64;
 #else
-#define NDB_SIZEOF_CHARP @NDB_SIZEOF_CHARP@
-#define NDB_SIZEOF_CHAR @NDB_SIZEOF_CHAR@
-#define NDB_SIZEOF_INT @NDB_SIZEOF_INT@
-#define NDB_SIZEOF_SHORT @NDB_SIZEOF_SHORT@
-#define NDB_SIZEOF_LONG @NDB_SIZEOF_LONG@
-#define NDB_SIZEOF_LONG_LONG @NDB_SIZEOF_LONG_LONG@
+#define NDB_SIZEOF_CHARP SIZEOF_CHARP
+#define NDB_SIZEOF_CHAR SIZEOF_CHAR
+#define NDB_SIZEOF_INT SIZEOF_INT
+#define NDB_SIZEOF_SHORT SIZEOF_SHORT
+#define NDB_SIZEOF_LONG SIZEOF_LONG
+#define NDB_SIZEOF_LONG_LONG SIZEOF_LONG_LONG
 typedef unsigned long long Uint64;
 typedef signed long long Int64;
 #endif