Mirror of https://github.com/MariaDB/server.git

Merge 10.3 into 10.4

commit 5008171b05
115 changed files with 1750 additions and 573 deletions
Changed files:
  CMakeLists.txt
  cmake
  debian
  extra/mariabackup
    backup_copy.cc, backup_mysql.cc, datasink.h, ds_archive.cc, ds_buffer.cc,
    ds_compress.cc, ds_local.cc, ds_stdout.cc, ds_tmpfile.cc, ds_xbstream.cc,
    innobackupex.cc, xtrabackup.cc, xtrabackup.h
  mysql-test
    lib
    main
      derived_opt.result, derived_opt.test, mysqldump-nl.test, parser_not_embedded.test,
      ps.result, ps.test, selectivity_innodb.result, userstat.result, userstat.test
    mysql-test-run.pl
    suite
      galera
      gcol
      innodb (r, t)
      innodb_fts
      mariabackup
        error_during_copyback.result, error_during_copyback.test,
        innodb_force_recovery.result, innodb_force_recovery.test
      rpl/t
      versioning
  plugin/userstat
  scripts
  sql
    handler.cc, item.cc, item.h, item_subselect.h, spatial.cc, sql_base.cc,
    sql_lex.cc, sql_lex.h, sql_select.cc, sql_table.cc, sql_update.cc
  storage/innobase
    btr, buf, dict, fts, gis, handler, ibuf, include, lock, page, rem, row
CMakeLists.txt

@@ -1,5 +1,5 @@
# Copyright (c) 2006, 2017, Oracle and/or its affiliates.
# Copyright (c) 2008, 2020, MariaDB Corporation.
# Copyright (c) 2008, 2021, MariaDB Corporation.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

@@ -132,7 +132,7 @@ ENDIF()
# NUMA
SET(WITH_NUMA "AUTO" CACHE STRING "Build with non-uniform memory access, allowing --innodb-numa-interleave. Options are ON|OFF|AUTO. ON = enabled (requires NUMA library), OFF = disabled, AUTO = enabled if NUMA library found.")

SET(MYSQL_MAINTAINER_MODE "AUTO" CACHE STRING "MySQL maintainer-specific development environment. Options are: ON OFF AUTO.")
SET(MYSQL_MAINTAINER_MODE "AUTO" CACHE STRING "Enable MariaDB maintainer-specific warnings. One of: NO (warnings are disabled) WARN (warnings are enabled) ERR (warnings are errors) AUTO (warnings are errors in Debug only)")

# Packaging
IF (NOT CPACK_GENERATOR)

cmake (maintainer-mode flags)

@@ -1,5 +1,5 @@
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2020, MariaDB
# Copyright (c) 2011, 2021, MariaDB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

@@ -14,7 +14,7 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA

IF(MSVC)
IF(MYSQL_MAINTAINER_MODE STREQUAL "NO")
RETURN()
ENDIF()

@@ -46,7 +46,7 @@ IF(CMAKE_COMPILER_IS_GNUCC AND CMAKE_C_COMPILER_VERSION VERSION_LESS "6.0.0")
SET(MY_ERROR_FLAGS ${MY_ERROR_FLAGS} -Wno-error=maybe-uninitialized)
ENDIF()

IF(MYSQL_MAINTAINER_MODE MATCHES "OFF")
IF(MYSQL_MAINTAINER_MODE MATCHES "OFF|WARN")
RETURN()
ELSEIF(MYSQL_MAINTAINER_MODE MATCHES "AUTO")
SET(WHERE DEBUG)
debian/autobake-deb.sh | 9 (vendored)

@@ -111,6 +111,15 @@ then
sed '/Package: mariadb-plugin-cassandra/,/^$/d' -i debian/control
fi

# From Debian Stretch/Ubuntu Bionic onwards dh-systemd is just an empty
# transitional metapackage and the functionality was merged into debhelper.
# In Ubuntu Hirsute is was completely removed, so it can't be referenced anymore.
# Keep using it only on Debian Jessie and Ubuntu Xenial.
if apt-cache madison dh-systemd | grep 'dh-systemd' >/dev/null 2>&1
then
sed 's/debhelper (>= 9.20160709~),/debhelper (>= 9), dh-systemd,/' -i debian/control
fi

# Mroonga, TokuDB never built on Travis CI anyway, see build flags above
if [[ $TRAVIS ]]
then

debian/control | 2 (vendored)

@@ -5,7 +5,7 @@ Maintainer: MariaDB Developers <maria-developers@lists.launchpad.net>
Build-Depends: bison,
chrpath,
cmake (>= 2.7),
debhelper (>= 9),
debhelper (>= 9.20160709~),
dh-apparmor,
dh-exec,
dh-systemd,

debian/mariadb-server-10.4.postinst | 4 (vendored)

@@ -165,8 +165,8 @@ EOF
;;

triggered)
if [ -x "$(command -v systemctl)" ]; then
systemctl daemon-reload
if [ -d /run/systemd/system ]; then
systemctl --system daemon-reload
fi
invoke restart
;;
extra/mariabackup/backup_copy.cc

@@ -1062,6 +1062,7 @@ copy_file(ds_ctxt_t *datasink,
ds_file_t *dstfile = NULL;
datafile_cur_t cursor;
xb_fil_cur_result_t res;
DBUG_ASSERT(datasink->datasink->remove);
const char *dst_path =
(xtrabackup_copy_back || xtrabackup_move_back)?
dst_file_path : trim_dotslash(dst_file_path);

@@ -1087,6 +1088,7 @@ copy_file(ds_ctxt_t *datasink,
if (ds_write(dstfile, cursor.buf, cursor.buf_read)) {
goto error;
}
DBUG_EXECUTE_IF("copy_file_error", errno=ENOSPC;goto error;);
}

if (res == XB_FIL_CUR_ERROR) {

@@ -1108,6 +1110,7 @@ copy_file(ds_ctxt_t *datasink,
error:
datafile_close(&cursor);
if (dstfile != NULL) {
datasink->datasink->remove(dstfile->path);
ds_close(dstfile);
}

@@ -1152,17 +1155,18 @@ move_file(ds_ctxt_t *datasink,

if (my_rename(src_file_path, dst_file_path_abs, MYF(0)) != 0) {
if (my_errno == EXDEV) {
bool ret;
ret = copy_file(datasink, src_file_path,
dst_file_path, thread_n);
/* Fallback to copy/unlink */
if(!copy_file(datasink, src_file_path,
dst_file_path, thread_n))
return false;
msg(thread_n,"Removing %s", src_file_path);
if (unlink(src_file_path) != 0) {
my_strerror(errbuf, sizeof(errbuf), errno);
msg("Error: unlink %s failed: %s",
msg("Warning: unlink %s failed: %s",
src_file_path,
errbuf);
}
return(ret);
return true;
}
my_strerror(errbuf, sizeof(errbuf), my_errno);
msg("Can not move file %s to %s: %s",

extra/mariabackup/backup_mysql.cc

@@ -44,6 +44,7 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <mysql.h>
#include <mysqld.h>
#include <my_sys.h>
#include <stdlib.h>
#include <string.h>
#include <limits>
#include "common.h"

@@ -108,6 +109,13 @@ xb_mysql_connect()
return(NULL);
}

#if !defined(DONT_USE_MYSQL_PWD)
if (!opt_password)
{
opt_password=getenv("MYSQL_PWD");
}
#endif

if (!opt_secure_auth) {
mysql_options(connection, MYSQL_SECURE_AUTH,
(char *) &opt_secure_auth);
extra/mariabackup/datasink.h

@@ -50,9 +50,15 @@ struct datasink_struct {
ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);
int (*write)(ds_file_t *file, const unsigned char *buf, size_t len);
int (*close)(ds_file_t *file);
int (*remove)(const char *path);
void (*deinit)(ds_ctxt_t *ctxt);
};


static inline int dummy_remove(const char *) {
return 0;
}

/* Supported datasink types */
typedef enum {
DS_TYPE_STDOUT,
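The hunk above adds a remove callback to the datasink vtable so that a failed copy can delete its partially written target, with dummy_remove as the no-op default for sinks that never materialize a file. The following is a minimal standalone sketch of that pattern in plain C; the names and types are illustrative only and are not the actual mariabackup datasink API.

/* Sketch of a "vtable with an optional remove hook"; the real datasink_t
   in datasink.h has more members (init/open/write/close/deinit). */
#include <stdio.h>
#include <unistd.h>     /* unlink() */

typedef struct sink_vtable {
    int (*write_file)(const char *path, const void *buf, size_t len);
    int (*remove)(const char *path);  /* undo a partially written file */
} sink_vtable;

/* Sinks that never create a local file (stdout, stream filters) point
   "remove" at a no-op, so callers can invoke it unconditionally. */
static int dummy_remove(const char *path) { (void)path; return 0; }

static int local_remove(const char *path) { return unlink(path); }

static int local_write(const char *path, const void *buf, size_t len)
{
    FILE *f = fopen(path, "wb");
    if (!f) return 1;
    int failed = fwrite(buf, 1, len, f) != len;
    return fclose(f) != 0 || failed;
}

static int stdout_write(const char *path, const void *buf, size_t len)
{
    (void)path;
    return fwrite(buf, 1, len, stdout) != len;
}

static const sink_vtable local_sink  = { local_write,  local_remove };
static const sink_vtable stdout_sink = { stdout_write, dummy_remove };

static int copy_to(const sink_vtable *sink, const char *dst,
                   const void *buf, size_t len)
{
    if (sink->write_file(dst, buf, len)) {
        sink->remove(dst);  /* clean up on error, as copy_file() now does */
        return 1;
    }
    return 0;
}

int main(void)
{
    const char msg[] = "hello\n";
    int rc = copy_to(&local_sink, "/tmp/sink_demo.txt", msg, sizeof msg - 1);
    rc |= copy_to(&stdout_sink, NULL, msg, sizeof msg - 1);
    local_sink.remove("/tmp/sink_demo.txt");
    return rc;
}

In the actual patch, copy_file() asserts that the sink's remove is set and calls it in its error path, and move_file() falls back to copy plus unlink when renaming across filesystems.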
extra/mariabackup/ds_archive.cc

@@ -57,6 +57,7 @@ datasink_t datasink_archive = {
&archive_open,
&archive_write,
&archive_close,
&dummy_remove,
&archive_deinit
};

extra/mariabackup/ds_buffer.cc

@@ -54,6 +54,7 @@ datasink_t datasink_buffer = {
&buffer_open,
&buffer_write,
&buffer_close,
&dummy_remove,
&buffer_deinit
};

extra/mariabackup/ds_compress.cc

@@ -75,6 +75,7 @@ datasink_t datasink_compress = {
&compress_open,
&compress_write,
&compress_close,
&dummy_remove,
&compress_deinit
};

extra/mariabackup/ds_local.cc

@@ -43,12 +43,18 @@ static int local_write(ds_file_t *file, const uchar *buf, size_t len);
static int local_close(ds_file_t *file);
static void local_deinit(ds_ctxt_t *ctxt);

static int local_remove(const char *path)
{
return unlink(path);
}

extern "C" {
datasink_t datasink_local = {
&local_init,
&local_open,
&local_write,
&local_close,
&local_remove,
&local_deinit
};
}

extra/mariabackup/ds_stdout.cc

@@ -40,6 +40,7 @@ datasink_t datasink_stdout = {
&stdout_open,
&stdout_write,
&stdout_close,
&dummy_remove,
&stdout_deinit
};

extra/mariabackup/ds_tmpfile.cc

@@ -51,6 +51,7 @@ datasink_t datasink_tmpfile = {
&tmpfile_open,
&tmpfile_write,
&tmpfile_close,
&dummy_remove,
&tmpfile_deinit
};

extra/mariabackup/ds_xbstream.cc

@@ -50,6 +50,7 @@ datasink_t datasink_xbstream = {
&xbstream_open,
&xbstream_write,
&xbstream_close,
&dummy_remove,
&xbstream_deinit
};
extra/mariabackup/innobackupex.cc

@@ -208,7 +208,8 @@ enum innobackupex_options
OPT_STREAM,
OPT_TABLES_FILE,
OPT_THROTTLE,
OPT_USE_MEMORY
OPT_USE_MEMORY,
OPT_INNODB_FORCE_RECOVERY,
};

ibx_mode_t ibx_mode = IBX_MODE_BACKUP;

@@ -626,6 +627,16 @@ static struct my_option ibx_long_options[] =
0, GET_LL, REQUIRED_ARG, 100*1024*1024L, 1024*1024L, LONGLONG_MAX, 0,
1024*1024L, 0},

{"innodb-force-recovery", OPT_INNODB_FORCE_RECOVERY,
"This option starts up the embedded InnoDB instance in crash "
"recovery mode to ignore page corruption; should be used "
"with the \"--apply-log\" option, in emergencies only. The "
"default value is 0. Refer to \"innodb_force_recovery\" server "
"system variable documentation for more details.",
(uchar*)&xtrabackup_innodb_force_recovery,
(uchar*)&xtrabackup_innodb_force_recovery,
0, GET_ULONG, OPT_ARG, 0, 0, SRV_FORCE_IGNORE_CORRUPT, 0, 0, 0},

{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};

@@ -671,6 +682,7 @@ innobackupex [--compress] [--compress-threads=NUMBER-OF-THREADS] [--compress-chu
innobackupex --apply-log [--use-memory=B]\n\
[--defaults-file=MY.CNF]\n\
[--export] [--ibbackup=IBBACKUP-BINARY]\n\
[--innodb-force-recovery=1]\n\
BACKUP-DIR\n\
\n\
innobackupex --copy-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] BACKUP-DIR\n\
extra/mariabackup/xtrabackup.cc

@@ -4,7 +4,7 @@ MariaBackup: hot backup tool for InnoDB
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
(c) 2017, 2020, MariaDB Corporation.
(c) 2017, 2021, MariaDB Corporation.
Portions written by Marko Mäkelä.

This program is free software; you can redistribute it and/or modify

@@ -274,6 +274,12 @@ static char *xtrabackup_debug_sync = NULL;

my_bool xtrabackup_incremental_force_scan = FALSE;

/*
* Ignore corrupt pages (disabled by default; used
* by "innobackupex" as a command line argument).
*/
ulong xtrabackup_innodb_force_recovery = 0;

/* The flushed lsn which is read from data files */
lsn_t flushed_lsn= 0;

@@ -1050,7 +1056,8 @@ enum options_xtrabackup
OPT_BACKUP_ROCKSDB,
OPT_XTRA_CHECK_PRIVILEGES,
OPT_XTRA_MYSQLD_ARGS,
OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION
OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION,
OPT_INNODB_FORCE_RECOVERY
};

struct my_option xb_client_options[]= {

@@ -1677,6 +1684,13 @@ struct my_option xb_server_options[] =
&opt_check_privileges, &opt_check_privileges,
0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 },

{"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
"(for --prepare): Crash recovery mode (ignores "
"page corruption; for emergencies only).",
(G_PTR*)&srv_force_recovery,
(G_PTR*)&srv_force_recovery,
0, GET_ULONG, OPT_ARG, 0, 0, SRV_FORCE_IGNORE_CORRUPT, 0, 0, 0},

{"mysqld-args", OPT_XTRA_MYSQLD_ARGS,
"All arguments that follow this argument are considered as server "
"options, and if some of them are not supported by mariabackup, they "
@@ -1813,31 +1827,33 @@ static int prepare_export()

// Process defaults-file , it can have some --lc-language stuff,
// which is* unfortunately* still necessary to get mysqld up
if (strncmp(orig_argv1,"--defaults-file=",16) == 0)
if (strncmp(orig_argv1,"--defaults-file=", 16) == 0)
{
snprintf(cmdline, sizeof cmdline,
IF_WIN("\"","") "\"%s\" --mysqld \"%s\" "
IF_WIN("\"","") "\"%s\" --mysqld \"%s\""
" --defaults-extra-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=."
" --innodb --innodb-fast-shutdown=0 --loose-partition"
" --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu"
" --console --skip-log-error --skip-log-bin --bootstrap < "
" --console --skip-log-error --skip-log-bin --bootstrap %s< "
BOOTSTRAP_FILENAME IF_WIN("\"",""),
mariabackup_exe,
mariabackup_exe,
orig_argv1, (my_defaults_group_suffix?my_defaults_group_suffix:""),
xtrabackup_use_memory);
xtrabackup_use_memory,
(srv_force_recovery ? "--innodb-force-recovery=1 " : ""));
}
else
{
sprintf(cmdline,
IF_WIN("\"","") "\"%s\" --mysqld"
snprintf(cmdline, sizeof cmdline,
IF_WIN("\"","") "\"%s\" --mysqld"
" --defaults-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=."
" --innodb --innodb-fast-shutdown=0 --loose-partition"
" --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu"
" --console --log-error= --skip-log-bin --bootstrap < "
" --console --log-error= --skip-log-bin --bootstrap %s< "
BOOTSTRAP_FILENAME IF_WIN("\"",""),
mariabackup_exe,
(my_defaults_group_suffix?my_defaults_group_suffix:""),
xtrabackup_use_memory);
xtrabackup_use_memory,
(srv_force_recovery ? "--innodb-force-recovery=1 " : ""));
}

msg("Prepare export : executing %s\n", cmdline);
@@ -1985,6 +2001,13 @@ xb_get_one_option(int optid,
ADD_PRINT_PARAM_OPT(innobase_buffer_pool_filename);
break;

case OPT_INNODB_FORCE_RECOVERY:

if (srv_force_recovery) {
ADD_PRINT_PARAM_OPT(srv_force_recovery);
}
break;

case OPT_XTRA_TARGET_DIR:
strmake(xtrabackup_real_target_dir,argument, sizeof(xtrabackup_real_target_dir)-1);
xtrabackup_target_dir= xtrabackup_real_target_dir;
@@ -2234,6 +2257,29 @@ static bool innodb_init_param()
srv_undo_dir = (char*) ".";
}

compile_time_assert(SRV_FORCE_IGNORE_CORRUPT == 1);

/*
* This option can be read both from the command line, and the
* defaults file. The assignment should account for both cases,
* and for "--innobackupex". Since the command line argument is
* parsed after the defaults file, it takes precedence.
*/
if (xtrabackup_innodb_force_recovery) {
srv_force_recovery = xtrabackup_innodb_force_recovery;
}

if (srv_force_recovery >= SRV_FORCE_IGNORE_CORRUPT) {
if (!xtrabackup_prepare) {
msg("mariabackup: The option \"innodb_force_recovery\""
" should only be used with \"%s\".",
(innobackupex_mode ? "--apply-log" : "--prepare"));
goto error;
} else {
msg("innodb_force_recovery = %lu", srv_force_recovery);
}
}

#ifdef _WIN32
srv_use_native_aio = TRUE;
#endif

@@ -6616,6 +6662,8 @@ int main(int argc, char **argv)
char **client_defaults;
char **backup_defaults;

my_getopt_prefix_matching= 0;

if (get_exepath(mariabackup_exe,FN_REFLEN, argv[0]))
strncpy(mariabackup_exe,argv[0], FN_REFLEN-1);
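Taken together, these hunks make --innodb-force-recovery usable only for the prepare/apply-log phase, cap it at SRV_FORCE_IGNORE_CORRUPT (1), and let a command-line value override one read from backup-my.cnf. A hedged usage sketch follows; the directory paths are placeholders and not part of the patch.

# Illustrative only: back up normally, then prepare while ignoring page
# corruption. The option is rejected during --backup, and values above 1
# are clamped to 1 (SRV_FORCE_IGNORE_CORRUPT).
mariabackup --backup  --target-dir=/backups/full
mariabackup --prepare --innodb-force-recovery=1 --target-dir=/backups/full

# The innobackupex compatibility mode uses --apply-log instead of --prepare.
innobackupex --apply-log --innodb-force-recovery=1 /backups/full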
extra/mariabackup/xtrabackup.h

@@ -174,6 +174,8 @@ enum binlog_info_enum { BINLOG_INFO_OFF, BINLOG_INFO_ON,

extern ulong opt_binlog_info;

extern ulong xtrabackup_innodb_force_recovery;

void xtrabackup_io_throttling(void);
my_bool xb_write_delta_metadata(const char *filename,
const xb_delta_info_t *info);

mysql-test/lib/mtr_report.pm

@@ -497,23 +497,21 @@ sub mtr_report_stats ($$$$) {
$test_time = sprintf("%.3f", $test->{timer} / 1000);
$test->{'name'} =~ s/$current_suite\.//;

my $test_result;

# if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
if ($test->{'retries'} > 0) {
$test_result = "MTR_RES_FAILED";
my $combinations;
if (defined($test->{combinations})){
$combinations = join ',', sort @{$test->{combinations}};
} else {
$test_result = $test->{'result'};
$combinations = "";
}

$xml_report .= qq(\t\t<testcase assertions="" classname="$current_suite" name="$test->{'name'}" status="$test_result" time="$test_time");
$xml_report .= qq(\t\t<testcase assertions="" classname="$current_suite" name="$test->{'name'}" ).
qq(status="$test->{'result'}" time="$test_time" combinations="$combinations");

my $comment = $test->{'comment'};
$comment =~ s/[\"]//g;
my $comment= replace_special_symbols($test->{'comment'});

# if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'} > 0) {
if ($test->{'result'} eq "MTR_RES_FAILED") {
my $logcontents = $test->{'logfile-failed'} || $test->{'logfile'};
$logcontents= $logcontents.$test->{'warnings'}."\n";
# remove any double ] that would end the cdata
$logcontents =~ s/]]/\x{fffd}/g;
# replace wide characters that aren't allowed in XML 1.0

@@ -576,6 +574,16 @@ sub mtr_print_line () {
print '-' x 74 . "\n";
}

sub replace_special_symbols($) {
my $text= shift;
$text =~ s/&/&amp;/g;
$text =~ s/'/&apos;/g;
$text =~ s/"/&quot;/g;
$text =~ s/</&lt;/g;
$text =~ s/>/&gt;/g;
return $text;
}

sub mtr_print_thick_line {
my $char= shift || '=';

mysql-test/main/derived_opt.result

@@ -540,4 +540,31 @@ id select_type table type possible_keys key key_len ref rows Extra
set join_cache_level=default;
set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1,t2;
set @save_optimizer_switch= @@optimizer_switch;
set optimizer_switch="derived_merge=on";
CREATE TABLE t1 (id int, d2 datetime, id1 int) ;
insert into t1 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',3);
CREATE TABLE t2 (id int, d1 datetime, id1 int) ;
insert into t2 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',2);
prepare stmt from "
SELECT * from
(SELECT min(d2) AS d2, min(d1) AS d1 FROM
(SELECT t1.d2 AS d2, (SELECT t2.d1
FROM t2 WHERE t1.id1 = t2.id1
ORDER BY t2.id DESC LIMIT 1) AS d1
FROM t1
) dt2
) ca
ORDER BY ca.d2;";
execute stmt;
d2 d1
2020-01-01 10:10:10 2020-01-01 10:10:10
execute stmt;
d2 d1
2020-01-01 10:10:10 2020-01-01 10:10:10
set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1, t2;
#
# End of 10.3 tests
#
set optimizer_switch=@exit_optimizer_switch;

mysql-test/main/derived_opt.test

@@ -406,5 +406,38 @@ set optimizer_switch= @save_optimizer_switch;

DROP TABLE t1,t2;

#
# MDEV-25182: Complex query in Store procedure corrupts results
#
set @save_optimizer_switch= @@optimizer_switch;
set optimizer_switch="derived_merge=on";

CREATE TABLE t1 (id int, d2 datetime, id1 int) ;
insert into t1 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',3);

CREATE TABLE t2 (id int, d1 datetime, id1 int) ;
insert into t2 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',2);

prepare stmt from "
SELECT * from
(SELECT min(d2) AS d2, min(d1) AS d1 FROM
(SELECT t1.d2 AS d2, (SELECT t2.d1
FROM t2 WHERE t1.id1 = t2.id1
ORDER BY t2.id DESC LIMIT 1) AS d1
FROM t1
) dt2
) ca
ORDER BY ca.d2;";

execute stmt;
execute stmt;

set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1, t2;

--echo #
--echo # End of 10.3 tests
--echo #

# The following command must be the last one the file
set optimizer_switch=@exit_optimizer_switch;

mysql-test/main/mysqldump-nl.test

@@ -26,10 +26,10 @@ create procedure sp() select * from `v1
flush tables;
use test;

exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1
exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
1tsetlqsym';

exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1
exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
1tsetlqsym' | $MYSQL;

show tables from `mysqltest1

@@ -45,11 +45,11 @@ create database `test\``

show databases like 'test%';

exec $MYSQL_DUMP --compact --comment --add-drop-database --databases 'test`' 'test\`
exec $MYSQL_DUMP --compact --comments --add-drop-database --databases 'test`' 'test\`
\! ls
#';

exec $MYSQL_DUMP --compact --comment --add-drop-database --databases 'test`' 'test\`
exec $MYSQL_DUMP --compact --comments --add-drop-database --databases 'test`' 'test\`
\! ls
#' | $MYSQL;

mysql-test/main/parser_not_embedded.test

@@ -21,7 +21,7 @@ select 7 as expected, /*!01000 1 + /*!01000 8 + /*!01000 error */ 16 + */ 2 + */
select 4 as expected, /* 1 + /*!01000 8 + */ 2 + */ 4;
EOF

--exec $MYSQL --comment --force --table test <$MYSQLTEST_VARDIR/tmp/bug39559.sql
--exec $MYSQL --comments --force --table test <$MYSQLTEST_VARDIR/tmp/bug39559.sql
--remove_file $MYSQLTEST_VARDIR/tmp/bug39559.sql

--echo # Bug#46527 "COMMIT AND CHAIN RELEASE does not make sense"

mysql-test/main/ps.result

@@ -5518,6 +5518,43 @@ id select_type table type possible_keys key key_len ref rows Extra
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
#
# MDEV-25108: Running of the EXPLAIN EXTENDED statement produces extra warning
# in case it is executed in PS (prepared statement) mode
#
CREATE TABLE t1 (c int);
CREATE TABLE t2 (d int);
# EXPLAIN EXTENDED in regular way (not PS mode)
EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 Const row not found
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
SHOW WARNINGS;
Level Code Message
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
# Now run the same EXPLAIN EXTENDED in PS mode. Number of warnings
# and their content must be the same as in case running the statement
# in regular way
PREPARE stmt FROM "EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1";
Warnings:
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 Const row not found
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
SHOW WARNINGS;
Level Code Message
Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;
#
# End of 10.2 tests
#
#

mysql-test/main/ps.test

@@ -4955,6 +4955,26 @@ EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

--echo #
--echo # MDEV-25108: Running of the EXPLAIN EXTENDED statement produces extra warning
--echo # in case it is executed in PS (prepared statement) mode
--echo #
CREATE TABLE t1 (c int);
CREATE TABLE t2 (d int);

--echo # EXPLAIN EXTENDED in regular way (not PS mode)
EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1;
SHOW WARNINGS;

--echo # Now run the same EXPLAIN EXTENDED in PS mode. Number of warnings
--echo # and their content must be the same as in case running the statement
--echo # in regular way
PREPARE stmt FROM "EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1";
EXECUTE stmt;
SHOW WARNINGS;

DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;
--echo #
--echo # End of 10.2 tests
--echo #

mysql-test/main/selectivity_innodb.result

@@ -1443,7 +1443,7 @@ EXPLAIN EXTENDED
SELECT * FROM t1, t2
WHERE a <> 'USARussian' AND b IS NULL;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref PRIMARY,b b 5 const 2 66.67 Using where; Using index
1 SIMPLE t1 ref PRIMARY,b b 5 const 1 100.00 Using where; Using index
1 SIMPLE t2 ALL NULL NULL NULL NULL 14 100.00 Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`i` AS `i` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` <> 'USARussian' and `test`.`t1`.`b` is null

mysql-test/main/userstat.result

@@ -243,6 +243,8 @@ create function f() returns int return (select 1 from performance_schema.threads
set global userstat= 1;
select f() from information_schema.table_statistics;
ERROR 21000: Subquery returns more than 1 row
select f() from information_schema.index_statistics;
ERROR 21000: Subquery returns more than 1 row
set global userstat= 0;
drop function f;
#

mysql-test/main/userstat.test

@@ -121,6 +121,8 @@ create function f() returns int return (select 1 from performance_schema.threads
set global userstat= 1;
--error ER_SUBQUERY_NO_1_ROW
select f() from information_schema.table_statistics;
--error ER_SUBQUERY_NO_1_ROW
select f() from information_schema.index_statistics;
set global userstat= 0;
drop function f;

mysql-test/mysql-test-run.pl

@@ -728,9 +728,13 @@ sub run_test_server ($$$) {

rename $log_file_name, $log_file_name.".failed";
}
delete($result->{result});
$result->{retries}= $retries+1;
$result->write_test($sock, 'TESTCASE');
{
local @$result{'retries', 'result'};
delete $result->{result};
$result->{retries}= $retries+1;
$result->write_test($sock, 'TESTCASE');
}
push(@$completed, $result);
next;
}
}

mysql-test/suite/galera/disabled.def

@@ -21,7 +21,6 @@ galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid
galera_bf_abort_at_after_statement : MDEV-21557: galera_bf_abort_at_after_statement MTR failed: query 'reap' succeeded - should have failed with errno 1213
galera_bf_abort_group_commit : MDEV-18282 Galera test failure on galera.galera_bf_abort_group_commit
galera_bf_lock_wait : MDEV-21597 wsrep::transaction::start_transaction(): Assertion `active() == false' failed
galera_binlog_stmt_autoinc : MDEV-19959 Galera test failure on galera_binlog_stmt_autoinc
galera_encrypt_tmp_files : Get error failed to enable encryption of temporary files
galera_ftwrl : MDEV-21525 galera.galera_ftwrl
galera_gcache_recover_manytrx : MDEV-18834 Galera test failure

@@ -33,7 +32,6 @@ galera_pc_ignore_sb : MDEV-20888 galera.galera_pc_ignore_sb
galera_pc_recovery : MDEV-25199 cluster fails to start up
galera_shutdown_nonprim : MDEV-21493 galera.galera_shutdown_nonprim
galera_ssl_upgrade : MDEV-19950 Galera test failure on galera_ssl_upgrade
galera_sst_mariabackup_encrypt_with_key : MDEV-21484 galera_sst_mariabackup_encrypt_with_key
galera_toi_ddl_nonconflicting : MDEV-21518 galera.galera_toi_ddl_nonconflicting
galera_toi_truncate : MDEV-22996 Hang on galera_toi_truncate test case
galera_var_ignore_apply_errors : MDEV-20451: Lock wait timeout exceeded in galera_var_ignore_apply_errors

mysql-test/suite/galera/r (wsrep_sst_auth result)

@@ -1,5 +1,7 @@
connection node_2;
connection node_1;
connection node_1;
connection node_2;
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
********
mysql-test/suite/galera/r/galera_virtual_blob.result | 21 (new file)

@@ -0,0 +1,21 @@
connection node_2;
connection node_1;
CREATE TABLE t (f INT GENERATED ALWAYS AS (a+b)VIRTUAL,a INT,b INT,h BLOB);
INSERT INTO t (a,b)VALUES(0,0), (0,0), (0,0), (0,0), (0,0);
SELECT * from t;
f a b h
0 0 0 NULL
0 0 0 NULL
0 0 0 NULL
0 0 0 NULL
0 0 0 NULL
connection node_2;
SELECT * from t;
f a b h
0 0 0 NULL
0 0 0 NULL
0 0 0 NULL
0 0 0 NULL
0 0 0 NULL
connection node_1;
DROP TABLE t;
mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.cnf | 7 (new file)

@@ -0,0 +1,7 @@
!include ../galera_2nodes.cnf

[mysqld.1]
auto_increment_offset=1

[mysqld.2]
auto_increment_offset=2

mysql-test/suite/galera/t (wsrep_sst_auth .cnf)

@@ -5,8 +5,3 @@ wsrep_sst_auth=root:

[mysqld.2]
wsrep_sst_auth=root:

mysql-test/suite/galera/t (wsrep_sst_auth test)

@@ -1,6 +1,11 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc

# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc

#
# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
#

@@ -30,4 +35,5 @@ SELECT @@global.wsrep_sst_auth;
--source include/wait_condition.inc
SELECT @@global.wsrep_sst_auth;

# Restore original auto_increment_offset values.
--source include/auto_increment_offset_restore.inc
mysql-test/suite/galera/t/galera_virtual_blob.test | 10 (new file)

@@ -0,0 +1,10 @@
--source include/galera_cluster.inc

CREATE TABLE t (f INT GENERATED ALWAYS AS (a+b)VIRTUAL,a INT,b INT,h BLOB);
INSERT INTO t (a,b)VALUES(0,0), (0,0), (0,0), (0,0), (0,0);
SELECT * from t;

--connection node_2
SELECT * from t;
--connection node_1
DROP TABLE t;
mysql-test/suite/gcol/r/virtual_index_drop.result | 69 (new file)

@@ -0,0 +1,69 @@
#
# MDEV-24971 InnoDB access freed virtual column
# after rollback of secondary index
#
CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=EXCLUSIVE;
ERROR 23000: Duplicate entry '3' for key 'f2'
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL,
`f2` int(11) GENERATED ALWAYS AS (`f1` + 2) VIRTUAL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=SHARED;
ERROR 23000: Duplicate entry '3' for key 'f2'
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL,
`f2` int(11) GENERATED ALWAYS AS (`f1` + 2) VIRTUAL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
ALTER TABLE t1 ADD COLUMN f3 INT AS (f1) VIRTUAL, ADD INDEX(f2, f3);
connect con1,localhost,root,,,;
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
SELECT * FROM t1;
f1 f2
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
connection con1;
rollback;
connection default;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL,
`f2` int(11) GENERATED ALWAYS AS (`f1`) VIRTUAL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
ALTER TABLE t1 ADD INDEX(f2);
connection con1;
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
INSERT INTO t1(f1) VALUES(1);
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
connection con1;
rollback;
connection default;
disconnect con1;
DROP TABLE t1;
CREATE TABLE t1(f1 CHAR(100), f2 CHAR(100) as (f1) VIRTUAL)ENGINE=InnoDB;
ALTER TABLE t1 ADD COLUMN f3 CHAR(100) AS (f2) VIRTUAL, ADD INDEX(f3(10), f1, f3(12));
ERROR 42S21: Duplicate column name 'f3'
DROP TABLE t1;
SET DEBUG_SYNC=RESET;
mysql-test/suite/gcol/t/virtual_index_drop.test | 71 (new file)

@@ -0,0 +1,71 @@
--source include/have_innodb.inc
--source include/have_debug.inc

--echo #
--echo # MDEV-24971 InnoDB access freed virtual column
--echo # after rollback of secondary index
--echo #

# Exclusive lock must not defer the index removal

CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
--error ER_DUP_ENTRY
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=EXCLUSIVE;
SHOW CREATE TABLE t1;
DROP TABLE t1;

# If Shared lock and table doesn't have any other open handle
# then InnoDB must not defer the index removal

CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
INSERT INTO t1(f1) VALUES(1), (1);
--error ER_DUP_ENTRY
ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=SHARED;
SHOW CREATE TABLE t1;
DROP TABLE t1;

# InnoDB should store the newly dropped virtual column into
# new_vcol_info in index when rollback of alter happens

CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
SEND ALTER TABLE t1 ADD COLUMN f3 INT AS (f1) VIRTUAL, ADD INDEX(f2, f3);
connect(con1,localhost,root,,,);
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
SELECT * FROM t1;
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
--error ER_DUP_ENTRY
reap;
connection con1;
rollback;
connection default;
SHOW CREATE TABLE t1;
DROP TABLE t1;

CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
SET DEBUG_DBUG="+d,create_index_fail";
SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
send ALTER TABLE t1 ADD INDEX(f2);
connection con1;
SET DEBUG_SYNC="now WAIT_FOR con1_go";
BEGIN;
INSERT INTO t1(f1) VALUES(1);
SET DEBUG_SYNC="now SIGNAL alter_signal";
connection default;
--error ER_DUP_ENTRY
reap;
connection con1;
rollback;
connection default;
disconnect con1;
DROP TABLE t1;

CREATE TABLE t1(f1 CHAR(100), f2 CHAR(100) as (f1) VIRTUAL)ENGINE=InnoDB;
--error ER_DUP_FIELDNAME
ALTER TABLE t1 ADD COLUMN f3 CHAR(100) AS (f2) VIRTUAL, ADD INDEX(f3(10), f1, f3(12));
DROP TABLE t1;
SET DEBUG_SYNC=RESET;
mysql-test/suite/innodb/r/innodb_buffer_pool_fail.result | 8 (new file)

@@ -0,0 +1,8 @@
call mtr.add_suppression("InnoDB: Cannot allocate memory for the buffer pool");
call mtr.add_suppression("InnoDB: Plugin initialization aborted at srv0start.cc.*");
call mtr.add_suppression("Plugin 'InnoDB' init function returned error.");
call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed.");
#
# MDEV-25019 memory allocation failures during startup cause server failure in different, confusing ways
#
# restart: --debug_dbug=+d,ib_buf_chunk_init_fails

mysql-test/suite/innodb/r (instant ALTER debug result)

@@ -368,6 +368,34 @@ SELECT * FROM t1 WHERE c<>1 ORDER BY c DESC;
c d
DROP TABLE t1;
SET GLOBAL innodb_limit_optimistic_insert_debug = @saved_limit;
#
# MDEV-24620 ASAN heap-buffer-overflow in btr_pcur_restore_position()
#
CREATE TABLE t1 (a VARCHAR(1) PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connect stop_purge,localhost,root,,;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
ALTER TABLE t1 ADD c INT;
BEGIN;
DELETE FROM t1;
connect dml,localhost,root,,test;
SET DEBUG_SYNC='row_mysql_handle_errors SIGNAL s1 WAIT_FOR s2';
UPDATE t1 SET c=1;
connection default;
SET DEBUG_SYNC='now WAIT_FOR s1';
COMMIT;
connection stop_purge;
COMMIT;
disconnect stop_purge;
connection default;
InnoDB 0 transactions not purged
SET DEBUG_SYNC='now SIGNAL s2';
connection dml;
disconnect dml;
connection default;
SET DEBUG_SYNC=RESET;
DROP TABLE t1;
# End of 10.3 tests
#
# MDEV-17899 Assertion failures on rollback of instant ADD/DROP

@@ -421,4 +449,4 @@ SELECT variable_value-@old_instant instants
FROM information_schema.global_status
WHERE variable_name = 'innodb_instant_alter_column';
instants
31
32
mysql-test/suite/innodb/t/innodb_buffer_pool_fail.test | 11 (new file)

@@ -0,0 +1,11 @@
--source include/have_innodb.inc
--source include/have_debug.inc
call mtr.add_suppression("InnoDB: Cannot allocate memory for the buffer pool");
call mtr.add_suppression("InnoDB: Plugin initialization aborted at srv0start.cc.*");
call mtr.add_suppression("Plugin 'InnoDB' init function returned error.");
call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed.");
--echo #
--echo # MDEV-25019 memory allocation failures during startup cause server failure in different, confusing ways
--echo #
let restart_parameters=--debug_dbug="+d,ib_buf_chunk_init_fails";
--source include/restart_mysqld.inc

mysql-test/suite/innodb/t (instant ALTER debug test)

@@ -424,6 +424,44 @@ DROP TABLE t1;

SET GLOBAL innodb_limit_optimistic_insert_debug = @saved_limit;

--echo #
--echo # MDEV-24620 ASAN heap-buffer-overflow in btr_pcur_restore_position()
--echo #

CREATE TABLE t1 (a VARCHAR(1) PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connect (stop_purge,localhost,root,,);
START TRANSACTION WITH CONSISTENT SNAPSHOT;

connection default;
ALTER TABLE t1 ADD c INT;
BEGIN;
DELETE FROM t1;

connect (dml,localhost,root,,test);
SET DEBUG_SYNC='row_mysql_handle_errors SIGNAL s1 WAIT_FOR s2';
send UPDATE t1 SET c=1;

connection default;
SET DEBUG_SYNC='now WAIT_FOR s1';
COMMIT;

connection stop_purge;
COMMIT;
disconnect stop_purge;

connection default;
--source include/wait_all_purged.inc
SET DEBUG_SYNC='now SIGNAL s2';

connection dml;
reap;
disconnect dml;

connection default;
SET DEBUG_SYNC=RESET;
DROP TABLE t1;

--echo # End of 10.3 tests

--echo #

mysql-test/suite/innodb_fts (result)

@@ -690,6 +690,19 @@ FTS_DOC_ID t
3 foo
DROP TABLE t;
#
# MDEV-25295 Aborted FTS_DOC_ID_INDEX considered as
# existing FTS_DOC_ID_INDEX during DDL
#
SET sql_mode='';
CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED NOT NULL,title CHAR(1),body TEXT)engine=innodb;
INSERT INTO t1 (FTS_DOC_ID,title,body)VALUES(1,0,0), (1,0,0);
CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
DROP TABLE t1;
SET sql_mode = DEFAULT;
#
# MDEV-25070 SIGSEGV in fts_create_in_mem_aux_table
#
CREATE TABLE t1 (a CHAR, FULLTEXT KEY(a)) ENGINE=InnoDB;

@@ -705,3 +718,4 @@ t1 CREATE TABLE `t1` (
FULLTEXT KEY `a_2` (`a`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
# End of 10.3 tests

mysql-test/suite/innodb_fts (test)

@@ -718,6 +718,20 @@ while ($N)

DROP TABLE t;

--echo #
--echo # MDEV-25295 Aborted FTS_DOC_ID_INDEX considered as
--echo # existing FTS_DOC_ID_INDEX during DDL
--echo #
SET sql_mode='';
CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED NOT NULL,title CHAR(1),body TEXT)engine=innodb;
INSERT INTO t1 (FTS_DOC_ID,title,body)VALUES(1,0,0), (1,0,0);
--error ER_DUP_ENTRY
CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
--error ER_DUP_ENTRY
CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
DROP TABLE t1;
SET sql_mode = DEFAULT;

--echo #
--echo # MDEV-25070 SIGSEGV in fts_create_in_mem_aux_table
--echo #

@@ -726,3 +740,5 @@ ALTER TABLE t1 DISCARD TABLESPACE;
ALTER TABLE t1 ADD FULLTEXT INDEX (a);
SHOW CREATE TABLE t1;
DROP TABLE t1;

--echo # End of 10.3 tests
mysql-test/suite/mariabackup/error_during_copyback.result | 10 (new file)

@@ -0,0 +1,10 @@
CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);
# xtrabackup backup
# xtrabackup prepare
# restart server
# restart
SELECT * FROM t;
i
1
DROP TABLE t;
mysql-test/suite/mariabackup/error_during_copyback.test | 25 (new file)

@@ -0,0 +1,25 @@
--source include/have_debug.inc
CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);
echo # xtrabackup backup;
let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
--disable_result_log
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
--enable_result_log
echo # xtrabackup prepare;
--disable_result_log
exec $XTRABACKUP --prepare --target-dir=$targetdir;
let $_datadir= `SELECT @@datadir`;
--source include/shutdown_mysqld.inc
rmdir $_datadir;
error 1;
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --dbug=+d,copy_file_error;
list_files $_datadir;
rmdir $_datadir;
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir;
echo # restart server;
--source include/start_mysqld.inc
SELECT * FROM t;
DROP TABLE t;
rmdir $targetdir;
mysql-test/suite/mariabackup/innodb_force_recovery.result | 26 (new file)

@@ -0,0 +1,26 @@
CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);
# "innodb_force_recovery=1" should be allowed with "--prepare" only (mariabackup)
FOUND 1 /should only be used with "--prepare"/ in backup.log
# "innodb_force_recovery=1" should be allowed with "--apply-log" only (innobackupex)
FOUND 1 /should only be used with "--apply-log"/ in backup.log
# "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (mariabackup)
FOUND 1 /innodb_force_recovery = 1/ in backup.log
# "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (innobackupex)
FOUND 1 /innodb_force_recovery = 1/ in backup.log
# "innodb_force_recovery" should be read from "backup-my.cnf" (mariabackup)
FOUND 1 /innodb_force_recovery = 1/ in backup.log
# "innodb_force_recovery=1" should be read from "backup-my.cnf" (innobackupex)
FOUND 1 /innodb_force_recovery = 1/ in backup.log
# "innodb_force_recovery" from the command line should override "backup-my.cnf" (mariabackup)
NOT FOUND /innodb_force_recovery = 1/ in backup.log
# "innodb_force_recovery" from the command line should override "backup-my.cnf" (innobackupex)
NOT FOUND /innodb_force_recovery = 1/ in backup.log
# shutdown server
# remove datadir
# xtrabackup move back
# restart
SELECT * FROM t;
i
1
DROP TABLE t;
mysql-test/suite/mariabackup/innodb_force_recovery.test | 138 (new file)

@@ -0,0 +1,138 @@
# This test checks if "innodb_force_recovery" is only allowed with "--prepare"
# (for mariabackup) and "--apply-log" (for innobackupex), and is limited to
# "SRV_FORCE_IGNORE_CORRUPT" only.

# Setup.
--source include/have_innodb.inc

--let targetdir=$MYSQLTEST_VARDIR/tmp/backup
--let backuplog=$MYSQLTEST_VARDIR/tmp/backup.log

CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);

# Check for command line arguments.
--echo # "innodb_force_recovery=1" should be allowed with "--prepare" only (mariabackup)
--disable_result_log
--error 1
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --innodb-force-recovery=1 --target-dir=$targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=should only be used with "--prepare"
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc

--echo # "innodb_force_recovery=1" should be allowed with "--apply-log" only (innobackupex)
--disable_result_log
--error 1
exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp --innodb-force-recovery=1 $targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=should only be used with "--apply-log"
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc

--disable_result_log
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
--enable_result_log
--echo # "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (mariabackup)
--disable_result_log
exec $XTRABACKUP --prepare --innodb-force-recovery=2 --target-dir=$targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=innodb_force_recovery = 1
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc
rmdir $targetdir;

--disable_result_log
exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp $targetdir;
--enable_result_log
--echo # "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (innobackupex)
--disable_result_log
exec $XTRABACKUP --innobackupex --apply-log --innodb-force-recovery=2 $targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=innodb_force_recovery = 1
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc
rmdir $targetdir;

# Check for default file ("backup-my.cnf").
--disable_result_log
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
--enable_result_log
perl;
my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
open(my $fd, '>>', "$cfg_path");
print $fd "innodb_force_recovery=1\n";
close $fd;
EOF
--echo # "innodb_force_recovery" should be read from "backup-my.cnf" (mariabackup)
--disable_result_log
exec $XTRABACKUP --defaults-file=$targetdir/backup-my.cnf --prepare --export --target-dir=$targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=innodb_force_recovery = 1
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc
rmdir $targetdir;

--disable_result_log
exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp $targetdir;
--enable_result_log
perl;
my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
open(my $fd, '>>', "$cfg_path");
print $fd "innodb_force_recovery=2\n";
close $fd;
EOF
--echo # "innodb_force_recovery=1" should be read from "backup-my.cnf" (innobackupex)
--disable_result_log
exec $XTRABACKUP --innobackupex --defaults-file=$targetdir/backup-my.cnf --apply-log --export $targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=innodb_force_recovery = 1
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc
rmdir $targetdir;

# Check for command line argument precedence.
--disable_result_log
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
--enable_result_log
perl;
my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
open(my $fd, '>>', "$cfg_path");
print $fd "innodb_force_recovery=1\n";
close $fd;
EOF
--echo # "innodb_force_recovery" from the command line should override "backup-my.cnf" (mariabackup)
--disable_result_log
exec $XTRABACKUP --defaults-file=$targetdir/backup-my.cnf --prepare --innodb-force-recovery=0 --target-dir=$targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=innodb_force_recovery = 1
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc
rmdir $targetdir;

--disable_result_log
exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp $targetdir;
--enable_result_log
perl;
my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
open(my $fd, '>>', "$cfg_path");
print $fd "innodb_force_recovery=2\n";
close $fd;
EOF
--echo # "innodb_force_recovery" from the command line should override "backup-my.cnf" (innobackupex)
--disable_result_log
exec $XTRABACKUP --innobackupex --defaults-file=$targetdir/backup-my.cnf --apply-log --innodb-force-recovery=0 --export $targetdir >$backuplog;
--enable_result_log
--let SEARCH_PATTERN=innodb_force_recovery = 1
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc

--source include/restart_and_restore.inc

# Check for restore.
SELECT * FROM t;

# Clean-up.
DROP TABLE t;
--rmdir $targetdir
--remove_file $backuplog
@ -329,7 +329,7 @@ while($ntables)
|
|||
-- echo ### detect failure. Before the patch mysqlbinlog would find
|
||||
-- echo ### a corrupted event, thence would fail.
|
||||
-- let $MYSQLD_DATADIR= `SELECT @@datadir`
|
||||
-- exec $MYSQL_BINLOG -v --hex $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug50018.binlog
|
||||
-- exec $MYSQL_BINLOG -v --hexdump $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug50018.binlog
|
||||
|
||||
## clean up
|
||||
## For debugging purposes you might want not to remove these
|
||||
|
|
|
@ -80,7 +80,7 @@ t CREATE TABLE `t` (
|
|||
`a` int(11) DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1
|
||||
alter table t add column trx_start timestamp(6) as row start;
|
||||
ERROR HY000: Duplicate ROW START column `trx_start`
|
||||
ERROR HY000: Table `t` is not system-versioned
|
||||
alter table t add system versioning;
|
||||
show create table t;
|
||||
Table Create Table
|
||||
|
@ -696,3 +696,52 @@ delete from t1;
set statement system_versioning_alter_history=keep for
alter table t1 drop system versioning, modify column a tinyint;
drop table t1;
#
# MDEV-24690 Dropping primary key column from versioned table always fails with 1072
#
create table t1 (a int, b int primary key) with system versioning;
alter table t1 drop column b;
create or replace table t1 (
a int, b int primary key,
row_start timestamp(6) as row start,
row_end timestamp(6) as row end,
period for system_time(row_start, row_end)
) with system versioning;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`row_start` timestamp(6) GENERATED ALWAYS AS ROW START,
`row_end` timestamp(6) GENERATED ALWAYS AS ROW END,
PRIMARY KEY (`b`,`row_end`),
PERIOD FOR SYSTEM_TIME (`row_start`, `row_end`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING
alter table t1 drop column b;
ERROR 42000: Key column 'b' doesn't exist in table
create or replace table t1 (
a int, b int primary key,
row_start timestamp(6) as row start invisible,
row_end timestamp(6) as row end invisible,
period for system_time(row_start, row_end)
) with system versioning;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`row_start` timestamp(6) GENERATED ALWAYS AS ROW START INVISIBLE,
`row_end` timestamp(6) GENERATED ALWAYS AS ROW END INVISIBLE,
PRIMARY KEY (`b`,`row_end`),
PERIOD FOR SYSTEM_TIME (`row_start`, `row_end`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING
alter table t1 drop column b;
ERROR 42000: Key column 'b' doesn't exist in table
drop table t1;
#
# MDEV-25172 Wrong error message for ADD COLUMN .. AS ROW START
#
create or replace table t1 (x int);
alter table t1 add column y timestamp(6) as row start;
ERROR HY000: Table `t1` is not system-versioned
drop table t1;

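
The server-side logic behind the results above lands further down in the sql_table.cc hunk (the new user_keyparts flag in mysql_prepare_alter_table). As a rough sketch of that rule — simplified, with invented structures, not the actual MariaDB code — a dropped keypart is only reported as ER_KEY_COLUMN_DOES_NOT_EXITS while user-visible keyparts remain; when only server-generated parts such as the implicit invisible row_end survive, the whole key is silently dropped instead:

```cpp
// Hedged sketch of the MDEV-24690 rule; KeyPart and its flags are invented
// stand-ins for the real ALTER TABLE structures.
#include <iostream>
#include <string>
#include <vector>

struct KeyPart
{
  std::string column;
  bool system_generated;   // e.g. the invisible row_end of a versioned PK
  bool dropped;            // this ALTER TABLE drops the column
};

// Returns the column name to report as missing, or "" when the key can
// simply be removed because no user-visible keypart is left.
static std::string broken_key_column(const std::vector<KeyPart>& parts)
{
  std::string dropped_part;
  bool user_keyparts_left= false;
  for (const KeyPart& p : parts)
  {
    if (p.dropped)
      dropped_part= p.column;
    else if (!p.system_generated)
      user_keyparts_left= true;
  }
  return user_keyparts_left ? dropped_part : std::string();
}

int main()
{
  // PRIMARY KEY (b, row_end) with an implicit row_end: dropping b succeeds.
  std::vector<KeyPart> implicit{{"b", false, true}, {"row_end", true, false}};
  // Same key, but row_end was declared by the user: dropping b is an error.
  std::vector<KeyPart> declared{{"b", false, true}, {"row_end", false, false}};
  std::cout << "implicit: '" << broken_key_column(implicit) << "'\n";  // ''
  std::cout << "declared: '" << broken_key_column(declared) << "'\n";  // 'b'
}
```
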
@ -63,3 +63,13 @@ A x y x y
1 7 17 7 17
drop table t1;
drop table t2;
#
# MDEV-22562 Assertion `next_insert_id == 0' upon UPDATE on system-versioned table
#
create table t1 (pk integer auto_increment primary key) engine=myisam with system versioning;
insert delayed into t1 (pk) values (1);
lock tables t1 write;
update t1 set pk= 0;
update t1 set pk= 0;
unlock tables;
drop table t1;

@ -68,7 +68,7 @@ select row_start from t;
alter table t drop system versioning;
show create table t;

--error ER_VERS_DUPLICATE_ROW_START_END
--error ER_VERS_NOT_VERSIONED
alter table t add column trx_start timestamp(6) as row start;

alter table t add system versioning;

@ -593,3 +593,41 @@ alter table t1 drop system versioning, modify column a tinyint;

# cleanup
drop table t1;

--echo #
--echo # MDEV-24690 Dropping primary key column from versioned table always fails with 1072
--echo #
create table t1 (a int, b int primary key) with system versioning;
alter table t1 drop column b;

create or replace table t1 (
a int, b int primary key,
row_start timestamp(6) as row start,
row_end timestamp(6) as row end,
period for system_time(row_start, row_end)
) with system versioning;
show create table t1;
--error ER_KEY_COLUMN_DOES_NOT_EXITS
alter table t1 drop column b;

create or replace table t1 (
a int, b int primary key,
row_start timestamp(6) as row start invisible,
row_end timestamp(6) as row end invisible,
period for system_time(row_start, row_end)
) with system versioning;
show create table t1;
--error ER_KEY_COLUMN_DOES_NOT_EXITS
alter table t1 drop column b;

# cleanup
drop table t1;

--echo #
--echo # MDEV-25172 Wrong error message for ADD COLUMN .. AS ROW START
--echo #
create or replace table t1 (x int);
--error ER_VERS_NOT_VERSIONED
alter table t1 add column y timestamp(6) as row start;
# cleanup
drop table t1;

@ -47,4 +47,17 @@ select t1.x = t2.x and t1.y = t2.y as A, t1.x, t1.y, t2.x, t2.y from t1 inner jo
drop table t1;
drop table t2;

--echo #
--echo # MDEV-22562 Assertion `next_insert_id == 0' upon UPDATE on system-versioned table
--echo #
create table t1 (pk integer auto_increment primary key) engine=myisam with system versioning;
insert delayed into t1 (pk) values (1);
lock tables t1 write;
update t1 set pk= 0;
update t1 set pk= 0;
unlock tables;

# cleanup
drop table t1;

-- source suite/versioning/common_finish.inc

@ -28,7 +28,7 @@ static int index_stats_fill(THD *thd, TABLE_LIST *tables, COND *cond)
tmp_table.grant.privilege= 0;
if (check_access(thd, SELECT_ACL, tmp_table.db.str,
&tmp_table.grant.privilege, NULL, 0, 1) ||
check_grant(thd, SELECT_ACL, &tmp_table, 1, UINT_MAX, 1))
check_grant(thd, SELECT_ACL, &tmp_table, 1, 1, 1))
continue;

index_name= tmp_table.table_name.str + tmp_table.table_name.length + 1;

@ -1,6 +1,6 @@
#!/bin/bash -ue
# Copyright (C) 2013 Percona Inc
# Copyright (C) 2017-2020 MariaDB
# Copyright (C) 2017-2021 MariaDB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -851,7 +851,7 @@ then
-z $(parse_cnf --mysqld tmpdir "") && \
-z $(parse_cnf xtrabackup tmpdir "") ]]; then
xtmpdir=$(mktemp -d)
tmpopts=" --tmpdir=$xtmpdir"
tmpopts="--tmpdir=$xtmpdir"
wsrep_log_info "Using $xtmpdir as xtrabackup temporary directory"
fi

@ -3343,25 +3343,27 @@ int handler::update_auto_increment()
DBUG_RETURN(0);
}

// ALTER TABLE ... ADD COLUMN ... AUTO_INCREMENT
if (thd->lex->sql_command == SQLCOM_ALTER_TABLE)
if (table->versioned())
{
if (table->versioned())
Field *end= table->vers_end_field();
DBUG_ASSERT(end);
bitmap_set_bit(table->read_set, end->field_index);
if (!end->is_max())
{
Field *end= table->vers_end_field();
DBUG_ASSERT(end);
bitmap_set_bit(table->read_set, end->field_index);
if (!end->is_max())
if (thd->lex->sql_command == SQLCOM_ALTER_TABLE)
{
if (!table->next_number_field->real_maybe_null())
DBUG_RETURN(HA_ERR_UNSUPPORTED);
table->next_number_field->set_null();
DBUG_RETURN(0);
}
DBUG_RETURN(0);
}
table->next_number_field->set_notnull();
}

// ALTER TABLE ... ADD COLUMN ... AUTO_INCREMENT
if (thd->lex->sql_command == SQLCOM_ALTER_TABLE)
table->next_number_field->set_notnull();

if ((nr= next_insert_id) >= auto_inc_interval_for_cur_row.maximum())
{
/* next_insert_id is beyond what is reserved, so we reserve more. */
|
|||
{
|
||||
if (f->flags & VERS_SYSTEM_FIELD)
|
||||
{
|
||||
if (!table->versioned())
|
||||
{
|
||||
my_error(ER_VERS_NOT_VERSIONED, MYF(0), table->s->table_name.str);
|
||||
return true;
|
||||
}
|
||||
my_error(ER_VERS_DUPLICATE_ROW_START_END, MYF(0),
|
||||
f->flags & VERS_SYS_START_FLAG ? "START" : "END", f->field_name.str);
|
||||
return true;
|
||||
|
|
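
This hunk is the server half of MDEV-25172 seen in the result files earlier: when ALTER TABLE adds a column declared AS ROW START or AS ROW END to a table without system versioning, the "not system-versioned" diagnostic must win over the "duplicate ROW START/END" one. A minimal, hypothetical restatement of that precedence (only the ordering mirrors the hunk; the enum and function are invented):

```cpp
// Hedged sketch of the diagnostic precedence introduced for MDEV-25172.
#include <cassert>

enum VersAlterError { VERS_OK, VERS_NOT_VERSIONED, VERS_DUPLICATE_ROW_START_END };

static VersAlterError check_added_system_field(bool adds_row_start_or_end,
                                               bool table_is_versioned)
{
  if (!adds_row_start_or_end)
    return VERS_OK;
  if (!table_is_versioned)              // checked first after this change
    return VERS_NOT_VERSIONED;
  return VERS_DUPLICATE_ROW_START_END;  // a versioned table already has them
}

int main()
{
  assert(check_added_system_field(true, false) == VERS_NOT_VERSIONED);
  assert(check_added_system_field(true, true) == VERS_DUPLICATE_ROW_START_END);
  return 0;
}
```
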
sql/item.cc

@ -4978,13 +4978,19 @@ bool Item_ref_null_helper::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuz
|
|||
@param resolved_item item which was resolved in outer SELECT(for warning)
|
||||
@param mark_item item which should be marked (can be differ in case of
|
||||
substitution)
|
||||
@param suppress_warning_output flag specifying whether to suppress output of
|
||||
a warning message
|
||||
*/
|
||||
|
||||
static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
|
||||
Item_ident *resolved_item,
|
||||
Item_ident *mark_item)
|
||||
Item_ident *mark_item,
|
||||
bool suppress_warning_output)
|
||||
{
|
||||
DBUG_ENTER("mark_as_dependent");
|
||||
DBUG_PRINT("info", ("current select: %d (%p) last: %d (%p)",
|
||||
current->select_number, current,
|
||||
(last ? last->select_number : 0), last));
|
||||
|
||||
/* store pointer on SELECT_LEX from which item is dependent */
|
||||
if (mark_item && mark_item->can_be_depended)
|
||||
|
@ -4995,7 +5001,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
|
|||
if (current->mark_as_dependent(thd, last,
|
||||
/** resolved_item psergey-thu **/ mark_item))
|
||||
DBUG_RETURN(TRUE);
|
||||
if (thd->lex->describe & DESCRIBE_EXTENDED)
|
||||
if ((thd->lex->describe & DESCRIBE_EXTENDED) && !suppress_warning_output)
|
||||
{
|
||||
const char *db_name= (resolved_item->db_name ?
|
||||
resolved_item->db_name : "");
|
||||
|
@ -5024,6 +5030,8 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
|
|||
@param found_item Item which was found during resolving (if resolved
|
||||
identifier belongs to VIEW)
|
||||
@param resolved_item Identifier which was resolved
|
||||
@param suppress_warning_output flag specifying whether to suppress output of
|
||||
a warning message
|
||||
|
||||
@note
|
||||
We have to mark all items between current_sel (including) and
|
||||
|
@ -5037,7 +5045,8 @@ void mark_select_range_as_dependent(THD *thd,
|
|||
SELECT_LEX *last_select,
|
||||
SELECT_LEX *current_sel,
|
||||
Field *found_field, Item *found_item,
|
||||
Item_ident *resolved_item)
|
||||
Item_ident *resolved_item,
|
||||
bool suppress_warning_output)
|
||||
{
|
||||
/*
|
||||
Go from current SELECT to SELECT where field was resolved (it
|
||||
|
@ -5072,7 +5081,7 @@ void mark_select_range_as_dependent(THD *thd,
|
|||
found_field->table->map;
|
||||
prev_subselect_item->const_item_cache= 0;
|
||||
mark_as_dependent(thd, last_select, current_sel, resolved_item,
|
||||
dependent);
|
||||
dependent, suppress_warning_output);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -5539,7 +5548,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
|
|||
context->select_lex, this,
|
||||
((ref_type == REF_ITEM ||
|
||||
ref_type == FIELD_ITEM) ?
|
||||
(Item_ident*) (*reference) : 0));
|
||||
(Item_ident*) (*reference) : 0), false);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -5551,7 +5560,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
|
|||
context->select_lex, this,
|
||||
((ref_type == REF_ITEM || ref_type == FIELD_ITEM) ?
|
||||
(Item_ident*) (*reference) :
|
||||
0));
|
||||
0), false);
|
||||
if (thd->lex->in_sum_func &&
|
||||
thd->lex->in_sum_func->nest_level >= select->nest_level)
|
||||
{
|
||||
|
@ -5665,7 +5674,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
|
|||
set_max_sum_func_level(thd, select);
|
||||
mark_as_dependent(thd, last_checked_context->select_lex,
|
||||
context->select_lex, rf,
|
||||
rf);
|
||||
rf, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -5678,7 +5687,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
|
|||
set_max_sum_func_level(thd, select);
|
||||
mark_as_dependent(thd, last_checked_context->select_lex,
|
||||
context->select_lex,
|
||||
this, (Item_ident*)*reference);
|
||||
this, (Item_ident*)*reference, false);
|
||||
if (last_checked_context->select_lex->having_fix_field)
|
||||
{
|
||||
Item_ref *rf;
|
||||
|
@ -7649,7 +7658,7 @@ public:
|
|||
if (tbl->table == item->field->table)
|
||||
{
|
||||
if (sel != current_select)
|
||||
mark_as_dependent(thd, sel, current_select, item, item);
|
||||
mark_as_dependent(thd, sel, current_select, item, item, false);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -7845,7 +7854,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
|
|||
((refer_type == REF_ITEM ||
|
||||
refer_type == FIELD_ITEM) ?
|
||||
(Item_ident*) (*reference) :
|
||||
0));
|
||||
0), false);
|
||||
/*
|
||||
view reference found, we substituted it instead of this
|
||||
Item, so can quit
|
||||
|
@ -7895,7 +7904,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
|
|||
goto error;
|
||||
thd->change_item_tree(reference, fld);
|
||||
mark_as_dependent(thd, last_checked_context->select_lex,
|
||||
current_sel, fld, fld);
|
||||
current_sel, fld, fld, false);
|
||||
/*
|
||||
A reference is resolved to a nest level that's outer or the same as
|
||||
the nest level of the enclosing set function : adjust the value of
|
||||
|
@ -7918,7 +7927,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
|
|||
/* Should be checked in resolve_ref_in_select_and_group(). */
|
||||
DBUG_ASSERT(*ref && (*ref)->is_fixed());
|
||||
mark_as_dependent(thd, last_checked_context->select_lex,
|
||||
context->select_lex, this, this);
|
||||
context->select_lex, this, this, false);
|
||||
/*
|
||||
A reference is resolved to a nest level that's outer or the same as
|
||||
the nest level of the enclosing set function : adjust the value of
|
||||
|
|
|
@ -7400,7 +7400,8 @@ void mark_select_range_as_dependent(THD *thd,
|
|||
st_select_lex *last_select,
|
||||
st_select_lex *current_sel,
|
||||
Field *found_field, Item *found_item,
|
||||
Item_ident *resolved_item);
|
||||
Item_ident *resolved_item,
|
||||
bool suppress_warning_output);
|
||||
|
||||
extern Cached_item *new_Cached_item(THD *thd, Item *item,
|
||||
bool pass_through_ref);
|
||||
|
|
|
@ -291,7 +291,8 @@ public:
|
|||
friend bool Item_ref::fix_fields(THD *, Item **);
|
||||
friend void mark_select_range_as_dependent(THD*,
|
||||
st_select_lex*, st_select_lex*,
|
||||
Field*, Item*, Item_ident*);
|
||||
Field*, Item*, Item_ident*,
|
||||
bool);
|
||||
friend bool convert_join_subqueries_to_semijoins(JOIN *join);
|
||||
};
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/*
|
||||
Copyright (c) 2002, 2013, Oracle and/or its affiliates.
|
||||
Copyright (c) 2011, 2013, Monty Program Ab.
|
||||
Copyright (c) 2011, 2021, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -1050,7 +1050,7 @@ double Gis_point::calculate_haversine(const Geometry *g,
|
|||
int *error)
|
||||
{
|
||||
DBUG_ASSERT(sphere_radius > 0);
|
||||
double x1r, x2r, y1r, y2r, dlong, dlat, res;
|
||||
double x1r, x2r, y1r, y2r;
|
||||
|
||||
// This check is done only for optimization purposes where we know it will
|
||||
// be one and only one point in Multipoint
|
||||
|
@ -1067,31 +1067,39 @@ double Gis_point::calculate_haversine(const Geometry *g,
|
|||
Geometry *gg= Geometry::construct(&gbuff, point_temp, point_size-1);
|
||||
DBUG_ASSERT(gg);
|
||||
if (static_cast<Gis_point *>(gg)->get_xy_radian(&x2r, &y2r))
|
||||
{
|
||||
DBUG_ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (static_cast<const Gis_point *>(g)->get_xy_radian(&x2r, &y2r))
|
||||
{
|
||||
DBUG_ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if (this->get_xy_radian(&x1r, &y1r))
|
||||
{
|
||||
DBUG_ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
// Check boundary conditions: longitude[-180,180]
if (!((x2r >= -M_PI && x2r <= M_PI) && (x1r >= -M_PI && x1r <= M_PI)))
{
*error=1;
return -1;
}
// Check boundary conditions: lattitude[-90,90]
// Check boundary conditions: latitude[-90,90]
if (!((y2r >= -M_PI/2 && y2r <= M_PI/2) && (y1r >= -M_PI/2 && y1r <= M_PI/2)))
{
*error=-1;
return -1;
}
dlat= sin((y2r - y1r)/2)*sin((y2r - y1r)/2);
dlong= sin((x2r - x1r)/2)*sin((x2r - x1r)/2);
res= 2*sphere_radius*asin((sqrt(dlat + cos(y1r)*cos(y2r)*dlong)));
return res;
double dlat= sin((y2r - y1r)/2)*sin((y2r - y1r)/2);
double dlong= sin((x2r - x1r)/2)*sin((x2r - x1r)/2);
return 2*sphere_radius*asin((sqrt(dlat + cos(y1r)*cos(y2r)*dlong)));
}
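
For reference, the expression the tidied-up code returns is the standard haversine great-circle distance. With latitudes phi_1, phi_2 (the y values, in radians), longitudes lambda_1, lambda_2 (the x values) and sphere radius R:

```latex
d = 2R \arcsin\sqrt{\sin^2\frac{\varphi_2-\varphi_1}{2}
                    + \cos\varphi_1\,\cos\varphi_2\,
                      \sin^2\frac{\lambda_2-\lambda_1}{2}}
```

which is exactly 2*sphere_radius*asin(sqrt(dlat + cos(y1r)*cos(y2r)*dlong)) with dlat and dlong holding the squared half-angle sines of the latitude and longitude differences.
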
|
||||
|
||||
|
||||
|
|
|
@ -6486,7 +6486,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
|
|||
if (!all_merged && current_sel != last_select)
|
||||
{
|
||||
mark_select_range_as_dependent(thd, last_select, current_sel,
|
||||
found, *ref, item);
|
||||
found, *ref, item, true);
|
||||
}
|
||||
}
|
||||
return found;
|
||||
|
|
|
@ -2806,7 +2806,7 @@ void st_select_lex_unit::exclude_tree()
|
|||
*/
|
||||
|
||||
bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
|
||||
Item *dependency)
|
||||
Item_ident *dependency)
|
||||
{
|
||||
|
||||
DBUG_ASSERT(this != last);
|
||||
|
@ -2814,10 +2814,14 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
|
|||
/*
|
||||
Mark all selects from resolved to 1 before select where was
|
||||
found table as depended (of select where was found table)
|
||||
|
||||
We move by name resolution context, because during a merge some selects
can be excluded from the SELECT tree
|
||||
*/
|
||||
SELECT_LEX *s= this;
|
||||
Name_resolution_context *c= &this->context;
|
||||
do
|
||||
{
|
||||
SELECT_LEX *s= c->select_lex;
|
||||
if (!(s->uncacheable & UNCACHEABLE_DEPENDENT_GENERATED))
|
||||
{
|
||||
// Select is dependent of outer select
|
||||
|
@ -2839,7 +2843,7 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
|
|||
if (subquery_expr && subquery_expr->mark_as_dependent(thd, last,
|
||||
dependency))
|
||||
return TRUE;
|
||||
} while ((s= s->outer_select()) != last && s != 0);
|
||||
} while ((c= c->outer_context) != NULL && (c->select_lex != last));
|
||||
is_correlated= TRUE;
|
||||
this->master_unit()->item->is_correlated= TRUE;
|
||||
return FALSE;
|
||||
|
|
|
@ -1336,7 +1336,8 @@ public:
|
|||
}
|
||||
inline bool is_subquery_function() { return master_unit()->item != 0; }
|
||||
|
||||
bool mark_as_dependent(THD *thd, st_select_lex *last, Item *dependency);
|
||||
bool mark_as_dependent(THD *thd, st_select_lex *last,
|
||||
Item_ident *dependency);
|
||||
|
||||
void set_braces(bool value)
|
||||
{
|
||||
|
|
|
@ -13545,10 +13545,12 @@ ha_rows JOIN_TAB::get_examined_rows()
|
|||
bool JOIN_TAB::preread_init()
|
||||
{
|
||||
TABLE_LIST *derived= table->pos_in_table_list;
|
||||
DBUG_ENTER("JOIN_TAB::preread_init");
|
||||
|
||||
if (!derived || !derived->is_materialized_derived())
|
||||
{
|
||||
preread_init_done= TRUE;
|
||||
return FALSE;
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
/* Materialize derived table/view. */
|
||||
|
@ -13557,7 +13559,7 @@ bool JOIN_TAB::preread_init()
|
|||
derived->get_unit()->uncacheable) &&
|
||||
mysql_handle_single_derived(join->thd->lex,
|
||||
derived, DT_CREATE | DT_FILL))
|
||||
return TRUE;
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
|
||||
derived->is_nonrecursive_derived_with_rec_ref())
|
||||
|
@ -13575,9 +13577,9 @@ bool JOIN_TAB::preread_init()
|
|||
/* init ftfuns for just initialized derived table */
|
||||
if (table->fulltext_searched)
|
||||
if (init_ftfuncs(join->thd, join->select_lex, MY_TEST(join->order)))
|
||||
return TRUE;
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
return FALSE;
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -8451,6 +8451,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
|
|||
long_hash_key= true;
|
||||
}
|
||||
const char *dropped_key_part= NULL;
|
||||
bool user_keyparts= false; // some user-defined keyparts left
|
||||
KEY_PART_INFO *key_part= key_info->key_part;
|
||||
key_parts.empty();
|
||||
bool delete_index_stat= FALSE;
|
||||
|
@ -8526,6 +8527,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
|
|||
key_parts.push_back(new (thd->mem_root) Key_part_spec(&cfield->field_name,
|
||||
key_part_length, true),
|
||||
thd->mem_root);
|
||||
if (cfield->invisible < INVISIBLE_SYSTEM)
|
||||
user_keyparts= true;
|
||||
}
|
||||
if (table->s->tmp_table == NO_TMP_TABLE)
|
||||
{
|
||||
|
@ -8571,7 +8574,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
|
|||
key_type= Key::PRIMARY;
|
||||
else
|
||||
key_type= Key::UNIQUE;
|
||||
if (dropped_key_part)
|
||||
if (dropped_key_part && user_keyparts)
|
||||
{
|
||||
my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), dropped_key_part);
|
||||
if (long_hash_key)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
|
||||
Copyright (c) 2011, 2020, MariaDB
|
||||
Copyright (c) 2011, 2021, MariaDB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -2489,10 +2489,10 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
{
|
||||
TABLE_LIST *cur_table;
|
||||
DBUG_ENTER("multi_update::send_data");
|
||||
int error= 0;
|
||||
|
||||
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
|
||||
{
|
||||
int error= 0;
|
||||
TABLE *table= cur_table->table;
|
||||
uint offset= cur_table->shared;
|
||||
/*
|
||||
|
@ -2562,21 +2562,7 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
updated--;
|
||||
if (!ignore ||
|
||||
table->file->is_fatal_error(error, HA_CHECK_ALL))
|
||||
{
|
||||
error:
|
||||
/*
|
||||
If (ignore && error == is ignorable) we don't have to
|
||||
do anything; otherwise...
|
||||
*/
|
||||
myf flags= 0;
|
||||
|
||||
if (table->file->is_fatal_error(error, HA_CHECK_ALL))
|
||||
flags|= ME_FATAL; /* Other handler errors are fatal */
|
||||
|
||||
prepare_record_for_error_message(error, table);
|
||||
table->file->print_error(error,MYF(flags));
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
goto error;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -2653,7 +2639,22 @@ error:
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
continue;
|
||||
error:
|
||||
DBUG_ASSERT(error > 0);
|
||||
/*
|
||||
If (ignore && error == is ignorable) we don't have to
|
||||
do anything; otherwise...
|
||||
*/
|
||||
myf flags= 0;
|
||||
|
||||
if (table->file->is_fatal_error(error, HA_CHECK_ALL))
|
||||
flags|= ME_FATAL; /* Other handler errors are fatal */
|
||||
|
||||
prepare_record_for_error_message(error, table);
|
||||
table->file->print_error(error,MYF(flags));
|
||||
DBUG_RETURN(1);
|
||||
} // for (cur_table)
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
|
|
|
@ -898,7 +898,7 @@ btr_page_get_father_node_ptr_func(
|
|||
|
||||
node_ptr = btr_cur_get_rec(cursor);
|
||||
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, false,
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
if (btr_node_ptr_get_child_page_no(node_ptr, offsets) != page_no) {
|
||||
|
@ -915,10 +915,11 @@ btr_page_get_father_node_ptr_func(
|
|||
print_rec = page_rec_get_next(
|
||||
page_get_infimum_rec(page_align(user_rec)));
|
||||
offsets = rec_get_offsets(print_rec, index, offsets,
|
||||
page_rec_is_leaf(user_rec),
|
||||
page_rec_is_leaf(user_rec)
|
||||
? index->n_core_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
page_rec_print(print_rec, offsets);
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, false,
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
page_rec_print(node_ptr, offsets);
|
||||
|
||||
|
@ -2284,7 +2285,9 @@ btr_page_get_split_rec(
|
|||
incl_data += insert_size;
|
||||
} else {
|
||||
offsets = rec_get_offsets(rec, cursor->index, offsets,
|
||||
page_is_leaf(page),
|
||||
page_is_leaf(page)
|
||||
? cursor->index->n_core_fields
|
||||
: 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
incl_data += rec_offs_size(offsets);
|
||||
}
|
||||
|
@ -2393,7 +2396,9 @@ btr_page_insert_fits(
|
|||
space after rec is removed from page. */
|
||||
|
||||
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
|
||||
page_is_leaf(page),
|
||||
page_is_leaf(page)
|
||||
? cursor->index->n_core_fields
|
||||
: 0,
|
||||
ULINT_UNDEFINED, heap);
|
||||
|
||||
total_data -= rec_offs_size(*offsets);
|
||||
|
@ -2680,7 +2685,8 @@ btr_page_tuple_smaller(
|
|||
first_rec = page_cur_get_rec(&pcur);
|
||||
|
||||
*offsets = rec_get_offsets(
|
||||
first_rec, cursor->index, *offsets, page_is_leaf(block->frame),
|
||||
first_rec, cursor->index, *offsets,
|
||||
page_is_leaf(block->frame) ? cursor->index->n_core_fields : 0,
|
||||
n_uniq, heap);
|
||||
|
||||
return(cmp_dtuple_rec(tuple, first_rec, *offsets) < 0);
|
||||
|
@ -2964,7 +2970,9 @@ func_start:
|
|||
first_rec = move_limit = split_rec;
|
||||
|
||||
*offsets = rec_get_offsets(split_rec, cursor->index, *offsets,
|
||||
page_is_leaf(page), n_uniq, heap);
|
||||
page_is_leaf(page)
|
||||
? cursor->index->n_core_fields : 0,
|
||||
n_uniq, heap);
|
||||
|
||||
insert_left = !tuple
|
||||
|| cmp_dtuple_rec(tuple, split_rec, *offsets) < 0;
|
||||
|
@ -3730,7 +3738,7 @@ retry:
|
|||
rec_offs* offsets2 = NULL;
|
||||
|
||||
/* For rtree, we need to update father's mbr. */
|
||||
if (dict_index_is_spatial(index)) {
|
||||
if (index->is_spatial()) {
|
||||
/* We only support merge pages with the same parent
|
||||
page */
|
||||
if (!rtr_check_same_block(
|
||||
|
@ -3748,7 +3756,8 @@ retry:
|
|||
|
||||
offsets2 = rec_get_offsets(
|
||||
btr_cur_get_rec(&cursor2), index, NULL,
|
||||
page_is_leaf(cursor2.page_cur.block->frame),
|
||||
page_is_leaf(cursor2.page_cur.block->frame)
|
||||
? index->n_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
/* Check if parent entry needs to be updated */
|
||||
|
@ -3922,13 +3931,14 @@ retry:
|
|||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/* For rtree, we need to update father's mbr. */
|
||||
if (dict_index_is_spatial(index)) {
|
||||
if (index->is_spatial()) {
|
||||
rec_offs* offsets2;
|
||||
ulint rec_info;
|
||||
|
||||
offsets2 = rec_get_offsets(
|
||||
btr_cur_get_rec(&cursor2), index, NULL,
|
||||
page_is_leaf(cursor2.page_cur.block->frame),
|
||||
page_is_leaf(cursor2.page_cur.block->frame)
|
||||
? index->n_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
ut_ad(btr_node_ptr_get_child_page_no(
|
||||
|
@ -4151,13 +4161,14 @@ btr_discard_only_page_on_level(
|
|||
}
|
||||
#endif /* UNIV_BTR_DEBUG */
|
||||
|
||||
mem_heap_t* heap = NULL;
|
||||
const rec_t* rec = NULL;
|
||||
rec_offs* offsets = NULL;
|
||||
mem_heap_t* heap = nullptr;
|
||||
const rec_t* rec = nullptr;
|
||||
rec_offs* offsets = nullptr;
|
||||
if (index->table->instant) {
|
||||
if (rec_is_alter_metadata(r, *index)) {
|
||||
heap = mem_heap_create(srv_page_size);
|
||||
offsets = rec_get_offsets(r, index, NULL, true,
|
||||
offsets = rec_get_offsets(r, index, nullptr,
|
||||
index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
rec = rec_copy(mem_heap_alloc(heap,
|
||||
rec_offs_size(offsets)),
|
||||
|
@ -4431,7 +4442,7 @@ btr_print_recursive(
|
|||
node_ptr = page_cur_get_rec(&cursor);
|
||||
|
||||
*offsets = rec_get_offsets(
|
||||
node_ptr, index, *offsets, false,
|
||||
node_ptr, index, *offsets, 0,
|
||||
ULINT_UNDEFINED, heap);
|
||||
btr_print_recursive(index,
|
||||
btr_node_ptr_get_child(node_ptr,
|
||||
|
@ -4580,7 +4591,9 @@ btr_index_rec_validate(
|
|||
|
||||
page = page_align(rec);
|
||||
|
||||
if (dict_index_is_ibuf(index)) {
|
||||
ut_ad(index->n_core_fields);
|
||||
|
||||
if (index->is_ibuf()) {
|
||||
/* The insert buffer index tree can contain records from any
|
||||
other index: we cannot check the number of fields or
|
||||
their length */
|
||||
|
@ -4644,7 +4657,8 @@ n_field_mismatch:
|
|||
}
|
||||
}
|
||||
|
||||
offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page),
|
||||
offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page)
|
||||
? index->n_core_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
const dict_field_t* field = index->fields;
|
||||
ut_ad(rec_offs_n_fields(offsets)
|
||||
|
@ -4901,7 +4915,7 @@ btr_validate_level(
|
|||
page_cur_move_to_next(&cursor);
|
||||
|
||||
node_ptr = page_cur_get_rec(&cursor);
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, false,
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
savepoint2 = mtr_set_savepoint(&mtr);
|
||||
|
@ -5025,10 +5039,12 @@ loop:
|
|||
right_rec = page_rec_get_next(page_get_infimum_rec(
|
||||
right_page));
|
||||
offsets = rec_get_offsets(rec, index, offsets,
|
||||
page_is_leaf(page),
|
||||
page_is_leaf(page)
|
||||
? index->n_core_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
offsets2 = rec_get_offsets(right_rec, index, offsets2,
|
||||
page_is_leaf(right_page),
|
||||
page_is_leaf(right_page)
|
||||
? index->n_core_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
/* For spatial index, we cannot guarantee the key ordering
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2014, 2019, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2017, 2020, MariaDB Corporation.
|
||||
Copyright (c) 2017, 2021, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -193,7 +193,8 @@ PageBulk::insert(
|
|||
if (!page_rec_is_infimum_low(page_offset(m_cur_rec))) {
|
||||
rec_t* old_rec = m_cur_rec;
|
||||
rec_offs* old_offsets = rec_get_offsets(
|
||||
old_rec, m_index, NULL, is_leaf,
|
||||
old_rec, m_index, NULL, is_leaf
|
||||
? m_index->n_core_fields : 0,
|
||||
ULINT_UNDEFINED, &m_heap);
|
||||
|
||||
ut_ad(cmp_rec_rec(rec, old_rec, offsets, old_offsets, m_index)
|
||||
|
@ -447,6 +448,7 @@ PageBulk::getSplitRec()
|
|||
|
||||
ut_ad(m_page_zip != NULL);
|
||||
ut_ad(m_rec_no >= 2);
|
||||
ut_ad(!m_index->is_instant());
|
||||
|
||||
ut_ad(page_get_free_space_of_empty(m_is_comp) > m_free_space);
|
||||
total_used_size = page_get_free_space_of_empty(m_is_comp)
|
||||
|
@ -456,13 +458,13 @@ PageBulk::getSplitRec()
|
|||
n_recs = 0;
|
||||
offsets = NULL;
|
||||
rec = page_get_infimum_rec(m_page);
|
||||
const ulint n_core = page_is_leaf(m_page) ? m_index->n_core_fields : 0;
|
||||
|
||||
do {
|
||||
rec = page_rec_get_next(rec);
|
||||
ut_ad(page_rec_is_user_rec(rec));
|
||||
|
||||
offsets = rec_get_offsets(rec, m_index, offsets,
|
||||
page_is_leaf(m_page),
|
||||
offsets = rec_get_offsets(rec, m_index, offsets, n_core,
|
||||
ULINT_UNDEFINED, &m_heap);
|
||||
total_recs_size += rec_offs_size(offsets);
|
||||
n_recs++;
|
||||
|
@ -491,9 +493,11 @@ PageBulk::copyIn(
|
|||
ut_ad(m_rec_no == 0);
|
||||
ut_ad(page_rec_is_user_rec(rec));
|
||||
|
||||
const ulint n_core = page_rec_is_leaf(rec)
|
||||
? m_index->n_core_fields : 0;
|
||||
|
||||
do {
|
||||
offsets = rec_get_offsets(rec, m_index, offsets,
|
||||
page_rec_is_leaf(split_rec),
|
||||
offsets = rec_get_offsets(rec, m_index, offsets, n_core,
|
||||
ULINT_UNDEFINED, &m_heap);
|
||||
|
||||
insert(rec, offsets);
|
||||
|
@ -534,8 +538,10 @@ PageBulk::copyOut(
|
|||
/* Set last record's next in page */
|
||||
rec_offs* offsets = NULL;
|
||||
rec = page_rec_get_prev(split_rec);
|
||||
offsets = rec_get_offsets(rec, m_index, offsets,
|
||||
page_rec_is_leaf(split_rec),
|
||||
const ulint n_core = page_rec_is_leaf(split_rec)
|
||||
? m_index->n_core_fields : 0;
|
||||
|
||||
offsets = rec_get_offsets(rec, m_index, offsets, n_core,
|
||||
ULINT_UNDEFINED, &m_heap);
|
||||
page_rec_set_next(rec, page_get_supremum_rec(m_page));
|
||||
|
||||
|
@ -543,8 +549,7 @@ PageBulk::copyOut(
|
|||
m_cur_rec = rec;
|
||||
m_heap_top = rec_get_end(rec, offsets);
|
||||
|
||||
offsets = rec_get_offsets(last_rec, m_index, offsets,
|
||||
page_rec_is_leaf(split_rec),
|
||||
offsets = rec_get_offsets(last_rec, m_index, offsets, n_core,
|
||||
ULINT_UNDEFINED, &m_heap);
|
||||
|
||||
m_free_space += ulint(rec_get_end(last_rec, offsets) - m_heap_top)
|
||||
|
@ -976,7 +981,8 @@ BtrBulk::insert(
|
|||
/* Convert tuple to rec. */
|
||||
rec = rec_convert_dtuple_to_rec(static_cast<byte*>(mem_heap_alloc(
|
||||
page_bulk->m_heap, rec_size)), m_index, tuple, n_ext);
|
||||
offsets = rec_get_offsets(rec, m_index, offsets, !level,
|
||||
offsets = rec_get_offsets(rec, m_index, offsets, level
|
||||
? 0 : m_index->n_core_fields,
|
||||
ULINT_UNDEFINED, &page_bulk->m_heap);
|
||||
|
||||
page_bulk->insert(rec, offsets);
|
||||
|
|
|
@ -595,7 +595,8 @@ incompatible:
|
|||
}
|
||||
|
||||
mem_heap_t* heap = NULL;
|
||||
rec_offs* offsets = rec_get_offsets(rec, index, NULL, true,
|
||||
rec_offs* offsets = rec_get_offsets(rec, index, NULL,
|
||||
index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
if (rec_offs_any_default(offsets)) {
|
||||
inconsistent:
|
||||
|
@ -2049,7 +2050,7 @@ retry_page_get:
|
|||
|
||||
node_ptr = page_cur_get_rec(page_cursor);
|
||||
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, false,
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
/* If the rec is the first or last in the page for
|
||||
|
@ -2180,7 +2181,7 @@ need_opposite_intention:
|
|||
|
||||
offsets2 = rec_get_offsets(
|
||||
first_rec, index, offsets2,
|
||||
false, ULINT_UNDEFINED, &heap);
|
||||
0, ULINT_UNDEFINED, &heap);
|
||||
cmp_rec_rec(node_ptr, first_rec,
|
||||
offsets, offsets2, index, false,
|
||||
&matched_fields);
|
||||
|
@ -2198,7 +2199,7 @@ need_opposite_intention:
|
|||
|
||||
offsets2 = rec_get_offsets(
|
||||
last_rec, index, offsets2,
|
||||
false, ULINT_UNDEFINED, &heap);
|
||||
0, ULINT_UNDEFINED, &heap);
|
||||
cmp_rec_rec(
|
||||
node_ptr, last_rec,
|
||||
offsets, offsets2, index,
|
||||
|
@ -2367,7 +2368,7 @@ need_opposite_intention:
|
|||
|
||||
offsets = rec_get_offsets(
|
||||
my_node_ptr, index, offsets,
|
||||
false, ULINT_UNDEFINED, &heap);
|
||||
0, ULINT_UNDEFINED, &heap);
|
||||
|
||||
ulint my_page_no
|
||||
= btr_node_ptr_get_child_page_no(
|
||||
|
@ -2820,7 +2821,7 @@ btr_cur_open_at_index_side_func(
|
|||
|
||||
node_ptr = page_cur_get_rec(page_cursor);
|
||||
offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
|
||||
false, ULINT_UNDEFINED, &heap);
|
||||
0, ULINT_UNDEFINED, &heap);
|
||||
|
||||
/* If the rec is the first or last in the page for
|
||||
pessimistic delete intention, it might cause node_ptr insert
|
||||
|
@ -3115,7 +3116,7 @@ btr_cur_open_at_rnd_pos_func(
|
|||
|
||||
node_ptr = page_cur_get_rec(page_cursor);
|
||||
offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
|
||||
false, ULINT_UNDEFINED, &heap);
|
||||
0, ULINT_UNDEFINED, &heap);
|
||||
|
||||
/* If the rec is the first or last in the page for
|
||||
pessimistic delete intention, it might cause node_ptr insert
|
||||
|
@ -4117,7 +4118,8 @@ btr_cur_parse_update_in_place(
|
|||
flags != (BTR_NO_UNDO_LOG_FLAG
|
||||
| BTR_NO_LOCKING_FLAG
|
||||
| BTR_KEEP_SYS_FLAG)
|
||||
|| page_is_leaf(page),
|
||||
|| page_is_leaf(page)
|
||||
? index->n_core_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
if (!(flags & BTR_KEEP_SYS_FLAG)) {
|
||||
|
@ -4575,7 +4577,7 @@ btr_cur_optimistic_update(
|
|||
ut_ad(fil_page_index_page_check(page));
|
||||
ut_ad(btr_page_get_index_id(page) == index->id);
|
||||
|
||||
*offsets = rec_get_offsets(rec, index, *offsets, true,
|
||||
*offsets = rec_get_offsets(rec, index, *offsets, index->n_core_fields,
|
||||
ULINT_UNDEFINED, heap);
|
||||
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
|
||||
ut_a(!rec_offs_any_null_extern(rec, *offsets)
|
||||
|
@ -5433,7 +5435,8 @@ btr_cur_parse_del_mark_set_clust_rec(
|
|||
if (!(flags & BTR_KEEP_SYS_FLAG)) {
|
||||
row_upd_rec_sys_fields_in_recovery(
|
||||
rec, page_zip,
|
||||
rec_get_offsets(rec, index, offsets, true,
|
||||
rec_get_offsets(rec, index, offsets,
|
||||
index->n_core_fields,
|
||||
pos + 2, &heap),
|
||||
pos, trx_id, roll_ptr);
|
||||
} else {
|
||||
|
@ -5442,7 +5445,8 @@ btr_cur_parse_del_mark_set_clust_rec(
|
|||
ut_ad(memcmp(rec_get_nth_field(
|
||||
rec,
|
||||
rec_get_offsets(rec, index,
|
||||
offsets, true,
|
||||
offsets, index
|
||||
->n_core_fields,
|
||||
pos, &heap),
|
||||
pos, &offset),
|
||||
field_ref_zero, DATA_TRX_ID_LEN));
|
||||
|
@ -5777,7 +5781,8 @@ btr_cur_optimistic_delete_func(
|
|||
|
||||
rec = btr_cur_get_rec(cursor);
|
||||
|
||||
offsets = rec_get_offsets(rec, cursor->index, offsets, true,
|
||||
offsets = rec_get_offsets(rec, cursor->index, offsets,
|
||||
cursor->index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
const ibool no_compress_needed = !rec_offs_any_extern(offsets)
|
||||
|
@ -5985,7 +5990,8 @@ btr_cur_pessimistic_delete(
|
|||
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
|
||||
#endif /* UNIV_ZIP_DEBUG */
|
||||
|
||||
offsets = rec_get_offsets(rec, index, NULL, page_is_leaf(page),
|
||||
offsets = rec_get_offsets(rec, index, NULL, page_is_leaf(page)
|
||||
? index->n_core_fields : 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
if (rec_offs_any_extern(offsets)) {
|
||||
|
@ -6085,7 +6091,7 @@ discard_page:
|
|||
pointer as the predefined minimum record */
|
||||
|
||||
min_mark_next_rec = true;
|
||||
} else if (dict_index_is_spatial(index)) {
|
||||
} else if (index->is_spatial()) {
|
||||
/* For rtree, if delete the leftmost node pointer,
|
||||
we need to update parent page. */
|
||||
rtr_mbr_t father_mbr;
|
||||
|
@ -6100,7 +6106,7 @@ discard_page:
|
|||
&father_cursor);
|
||||
offsets = rec_get_offsets(
|
||||
btr_cur_get_rec(&father_cursor), index, NULL,
|
||||
false, ULINT_UNDEFINED, &heap);
|
||||
0, ULINT_UNDEFINED, &heap);
|
||||
|
||||
father_rec = btr_cur_get_rec(&father_cursor);
|
||||
rtr_read_mbr(rec_get_nth_field(
|
||||
|
@ -7022,12 +7028,13 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index)
|
|||
page = btr_cur_get_page(&cursor);
|
||||
|
||||
rec = page_rec_get_next(page_get_infimum_rec(page));
|
||||
const bool is_leaf = page_is_leaf(page);
|
||||
const ulint n_core = page_is_leaf(page)
|
||||
? index->n_core_fields : 0;
|
||||
|
||||
if (!page_rec_is_supremum(rec)) {
|
||||
not_empty_flag = 1;
|
||||
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
|
||||
is_leaf,
|
||||
n_core,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
if (n_not_null != NULL) {
|
||||
|
@ -7048,7 +7055,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index)
|
|||
|
||||
offsets_next_rec = rec_get_offsets(next_rec, index,
|
||||
offsets_next_rec,
|
||||
is_leaf,
|
||||
n_core,
|
||||
ULINT_UNDEFINED,
|
||||
&heap);
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (C) 2012, 2014 Facebook, Inc. All Rights Reserved.
|
||||
Copyright (C) 2014, 2019, MariaDB Corporation.
|
||||
Copyright (C) 2014, 2021, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -340,12 +340,12 @@ btr_defragment_calc_n_recs_for_size(
|
|||
ulint size = 0;
|
||||
page_cur_t cur;
|
||||
|
||||
const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
|
||||
page_cur_set_before_first(block, &cur);
|
||||
page_cur_move_to_next(&cur);
|
||||
while (page_cur_get_rec(&cur) != page_get_supremum_rec(page)) {
|
||||
rec_t* cur_rec = page_cur_get_rec(&cur);
|
||||
offsets = rec_get_offsets(cur_rec, index, offsets,
|
||||
page_is_leaf(page),
|
||||
offsets = rec_get_offsets(cur_rec, index, offsets, n_core,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
ulint rec_size = rec_offs_size(offsets);
|
||||
size += rec_size;
|
||||
|
@ -357,6 +357,9 @@ btr_defragment_calc_n_recs_for_size(
|
|||
page_cur_move_to_next(&cur);
|
||||
}
|
||||
*n_recs_size = size;
|
||||
if (UNIV_LIKELY_NULL(heap)) {
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
return n_recs;
|
||||
}
|
||||
|
||||
|
|
|
@ -61,6 +61,7 @@ btr_pcur_reset(
|
|||
cursor->btr_cur.index = NULL;
|
||||
cursor->btr_cur.page_cur.rec = NULL;
|
||||
cursor->old_rec = NULL;
|
||||
cursor->old_n_core_fields = 0;
|
||||
cursor->old_n_fields = 0;
|
||||
cursor->old_stored = false;
|
||||
|
||||
|
@ -179,19 +180,21 @@ before_first:
|
|||
|
||||
if (index->is_ibuf()) {
|
||||
ut_ad(!index->table->not_redundant());
|
||||
cursor->old_n_fields = rec_get_n_fields_old(rec);
|
||||
} else if (page_rec_is_leaf(rec)) {
|
||||
cursor->old_n_fields = dict_index_get_n_unique_in_tree(index);
|
||||
} else if (index->is_spatial()) {
|
||||
ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
|
||||
== DICT_INDEX_SPATIAL_NODEPTR_SIZE);
|
||||
/* For R-tree, we have to compare
|
||||
the child page numbers as well. */
|
||||
cursor->old_n_fields = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
|
||||
cursor->old_n_fields = uint16_t(rec_get_n_fields_old(rec));
|
||||
} else {
|
||||
cursor->old_n_fields = dict_index_get_n_unique_in_tree(index);
|
||||
cursor->old_n_fields = static_cast<uint16>(
|
||||
dict_index_get_n_unique_in_tree(index));
|
||||
if (index->is_spatial() && !page_rec_is_leaf(rec)) {
|
||||
ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
|
||||
== DICT_INDEX_SPATIAL_NODEPTR_SIZE);
|
||||
/* For R-tree, we have to compare
|
||||
the child page numbers as well. */
|
||||
cursor->old_n_fields
|
||||
= DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
|
||||
}
|
||||
}
|
||||
|
||||
cursor->old_n_core_fields = index->n_core_fields;
|
||||
cursor->old_rec = rec_copy_prefix_to_buf(rec, index,
|
||||
cursor->old_n_fields,
|
||||
&cursor->old_rec_buf,
|
||||
|
@ -226,6 +229,7 @@ btr_pcur_copy_stored_position(
|
|||
+ (pcur_donate->old_rec - pcur_donate->old_rec_buf);
|
||||
}
|
||||
|
||||
pcur_receive->old_n_core_fields = pcur_donate->old_n_core_fields;
|
||||
pcur_receive->old_n_fields = pcur_donate->old_n_fields;
|
||||
}
|
||||
|
||||
|
@ -317,6 +321,8 @@ btr_pcur_restore_position_func(
|
|||
}
|
||||
|
||||
ut_a(cursor->old_rec);
|
||||
ut_a(cursor->old_n_core_fields);
|
||||
ut_a(cursor->old_n_core_fields <= index->n_core_fields);
|
||||
ut_a(cursor->old_n_fields);
|
||||
|
||||
switch (latch_mode) {
|
||||
|
@ -350,11 +356,16 @@ btr_pcur_restore_position_func(
|
|||
rec_offs_init(offsets2_);
|
||||
|
||||
heap = mem_heap_create(256);
|
||||
ut_ad(cursor->old_n_core_fields
|
||||
== index->n_core_fields);
|
||||
|
||||
offsets1 = rec_get_offsets(
|
||||
cursor->old_rec, index, offsets1, true,
|
||||
cursor->old_rec, index, offsets1,
|
||||
cursor->old_n_core_fields,
|
||||
cursor->old_n_fields, &heap);
|
||||
offsets2 = rec_get_offsets(
|
||||
rec, index, offsets2, true,
|
||||
rec, index, offsets2,
|
||||
index->n_core_fields,
|
||||
cursor->old_n_fields, &heap);
|
||||
|
||||
ut_ad(!cmp_rec_rec(cursor->old_rec,
|
||||
|
@ -379,8 +390,14 @@ btr_pcur_restore_position_func(
|
|||
|
||||
heap = mem_heap_create(256);
|
||||
|
||||
tuple = dict_index_build_data_tuple(cursor->old_rec, index, true,
|
||||
cursor->old_n_fields, heap);
|
||||
tuple = dtuple_create(heap, cursor->old_n_fields);
|
||||
|
||||
dict_index_copy_types(tuple, index, cursor->old_n_fields);
|
||||
|
||||
rec_copy_prefix_to_dtuple(tuple, cursor->old_rec, index,
|
||||
cursor->old_n_core_fields,
|
||||
cursor->old_n_fields, heap);
|
||||
ut_ad(dtuple_check_typed(tuple));
|
||||
|
||||
/* Save the old search mode of the cursor */
|
||||
old_mode = cursor->search_mode;
|
||||
|
@ -419,7 +436,8 @@ btr_pcur_restore_position_func(
|
|||
&& btr_pcur_is_on_user_rec(cursor)
|
||||
&& !cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
|
||||
rec_get_offsets(btr_pcur_get_rec(cursor),
|
||||
index, offsets, true,
|
||||
index, offsets,
|
||||
index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap))) {
|
||||
|
||||
/* We have to store the NEW value for the modify clock,
|
||||
|
|
|
@ -696,7 +696,8 @@ btr_search_update_hash_ref(
|
|||
|
||||
ulint fold = rec_fold(
|
||||
rec,
|
||||
rec_get_offsets(rec, index, offsets_, true,
|
||||
rec_get_offsets(rec, index, offsets_,
|
||||
index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap),
|
||||
block->curr_n_fields,
|
||||
block->curr_n_bytes, index->id);
|
||||
|
@ -755,7 +756,8 @@ btr_search_check_guess(
|
|||
|
||||
match = 0;
|
||||
|
||||
offsets = rec_get_offsets(rec, cursor->index, offsets, true,
|
||||
offsets = rec_get_offsets(rec, cursor->index, offsets,
|
||||
cursor->index->n_core_fields,
|
||||
n_unique, &heap);
|
||||
cmp = cmp_dtuple_rec_with_match(tuple, rec, offsets, &match);
|
||||
|
||||
|
@ -806,7 +808,8 @@ btr_search_check_guess(
|
|||
}
|
||||
|
||||
offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
|
||||
true, n_unique, &heap);
|
||||
cursor->index->n_core_fields,
|
||||
n_unique, &heap);
|
||||
cmp = cmp_dtuple_rec_with_match(
|
||||
tuple, prev_rec, offsets, &match);
|
||||
if (mode == PAGE_CUR_GE) {
|
||||
|
@ -829,7 +832,8 @@ btr_search_check_guess(
|
|||
}
|
||||
|
||||
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
|
||||
true, n_unique, &heap);
|
||||
cursor->index->n_core_fields,
|
||||
n_unique, &heap);
|
||||
cmp = cmp_dtuple_rec_with_match(
|
||||
tuple, next_rec, offsets, &match);
|
||||
if (mode == PAGE_CUR_LE) {
|
||||
|
@ -1195,7 +1199,7 @@ retry:
|
|||
|
||||
while (!page_rec_is_supremum(rec)) {
|
||||
offsets = rec_get_offsets(
|
||||
rec, index, offsets, true,
|
||||
rec, index, offsets, index->n_core_fields,
|
||||
btr_search_get_n_fields(n_fields, n_bytes),
|
||||
&heap);
|
||||
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
|
||||
|
@ -1421,7 +1425,7 @@ btr_search_build_page_hash_index(
|
|||
ut_a(index->id == btr_page_get_index_id(page));
|
||||
|
||||
offsets = rec_get_offsets(
|
||||
rec, index, offsets, true,
|
||||
rec, index, offsets, index->n_core_fields,
|
||||
btr_search_get_n_fields(n_fields, n_bytes),
|
||||
&heap);
|
||||
ut_ad(page_rec_is_supremum(rec)
|
||||
|
@ -1452,7 +1456,7 @@ btr_search_build_page_hash_index(
|
|||
}
|
||||
|
||||
offsets = rec_get_offsets(
|
||||
next_rec, index, offsets, true,
|
||||
next_rec, index, offsets, index->n_core_fields,
|
||||
btr_search_get_n_fields(n_fields, n_bytes), &heap);
|
||||
next_fold = rec_fold(next_rec, offsets, n_fields,
|
||||
n_bytes, index->id);
|
||||
|
@ -1692,7 +1696,8 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)
|
|||
|
||||
rec = btr_cur_get_rec(cursor);
|
||||
|
||||
fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_, true,
|
||||
fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
|
||||
index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap),
|
||||
block->curr_n_fields, block->curr_n_bytes, index->id);
|
||||
if (UNIV_LIKELY_NULL(heap)) {
|
||||
|
@ -1869,13 +1874,14 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
|
|||
ins_rec = page_rec_get_next_const(rec);
|
||||
next_rec = page_rec_get_next_const(ins_rec);
|
||||
|
||||
offsets = rec_get_offsets(ins_rec, index, offsets, true,
|
||||
offsets = rec_get_offsets(ins_rec, index, offsets,
|
||||
index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
|
||||
|
||||
if (!page_rec_is_supremum(next_rec)) {
|
||||
offsets = rec_get_offsets(
|
||||
next_rec, index, offsets, true,
|
||||
next_rec, index, offsets, index->n_core_fields,
|
||||
btr_search_get_n_fields(n_fields, n_bytes), &heap);
|
||||
next_fold = rec_fold(next_rec, offsets, n_fields,
|
||||
n_bytes, index->id);
|
||||
|
@ -1887,7 +1893,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
|
|||
|
||||
if (!page_rec_is_infimum(rec) && !rec_is_metadata(rec, *index)) {
|
||||
offsets = rec_get_offsets(
|
||||
rec, index, offsets, true,
|
||||
rec, index, offsets, index->n_core_fields,
|
||||
btr_search_get_n_fields(n_fields, n_bytes), &heap);
|
||||
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
|
||||
} else {
|
||||
|
@ -2093,7 +2099,8 @@ btr_search_hash_table_validate(ulint hash_table_id)
|
|||
page_index_id = btr_page_get_index_id(block->frame);
|
||||
|
||||
offsets = rec_get_offsets(
|
||||
node->data, block->index, offsets, true,
|
||||
node->data, block->index, offsets,
|
||||
block->index->n_core_fields,
|
||||
btr_search_get_n_fields(block->curr_n_fields,
|
||||
block->curr_n_bytes),
|
||||
&heap);
|
||||
|
|
|
@ -1911,6 +1911,10 @@ buf_pool_init_instance(
|
|||
ut_free(buf_pool->chunks);
|
||||
buf_pool_mutex_exit(buf_pool);
|
||||
|
||||
/* InnoDB should free the mutex which was
|
||||
created so far before freeing the instance */
|
||||
mutex_free(&buf_pool->mutex);
|
||||
mutex_free(&buf_pool->zip_mutex);
|
||||
return(DB_ERROR);
|
||||
}
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2012, Facebook Inc.
|
||||
Copyright (c) 2013, 2020, MariaDB Corporation.
|
||||
Copyright (c) 2013, 2021, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -270,7 +270,7 @@ dict_table_try_drop_aborted(
|
|||
&& !UT_LIST_GET_FIRST(table->locks)) {
|
||||
/* Silence a debug assertion in row_merge_drop_indexes(). */
|
||||
ut_d(table->acquire());
|
||||
row_merge_drop_indexes(trx, table, TRUE);
|
||||
row_merge_drop_indexes(trx, table, true);
|
||||
ut_d(table->release());
|
||||
ut_ad(table->get_ref_count() == ref_count);
|
||||
trx_commit_for_mysql(trx);
|
||||
|
@ -4849,7 +4849,9 @@ dict_index_build_node_ptr(
|
|||
|
||||
dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4);
|
||||
|
||||
rec_copy_prefix_to_dtuple(tuple, rec, index, !level, n_unique, heap);
|
||||
rec_copy_prefix_to_dtuple(tuple, rec, index,
|
||||
level ? 0 : index->n_core_fields,
|
||||
n_unique, heap);
|
||||
dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple)
|
||||
| REC_STATUS_NODE_PTR);
|
||||
|
||||
|
@ -4873,11 +4875,14 @@ dict_index_build_data_tuple(
|
|||
ulint n_fields,
|
||||
mem_heap_t* heap)
|
||||
{
|
||||
ut_ad(!index->is_clust());
|
||||
|
||||
dtuple_t* tuple = dtuple_create(heap, n_fields);
|
||||
|
||||
dict_index_copy_types(tuple, index, n_fields);
|
||||
|
||||
rec_copy_prefix_to_dtuple(tuple, rec, index, leaf, n_fields, heap);
|
||||
rec_copy_prefix_to_dtuple(tuple, rec, index,
|
||||
leaf ? n_fields : 0, n_fields, heap);
|
||||
|
||||
ut_ad(dtuple_check_typed(tuple));
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2012, Facebook Inc.
|
||||
Copyright (c) 2013, 2020, MariaDB Corporation.
|
||||
Copyright (c) 2013, 2021, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -939,7 +939,7 @@ dict_mem_fill_vcol_from_v_indexes(
|
|||
Later virtual column set will be
|
||||
refreshed during loading of table. */
|
||||
if (!dict_index_has_virtual(index)
|
||||
|| index->has_new_v_col) {
|
||||
|| index->has_new_v_col()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -1375,7 +1375,8 @@ dict_index_t::vers_history_row(
|
|||
rec_t* clust_rec =
|
||||
row_get_clust_rec(BTR_SEARCH_LEAF, rec, this, &clust_index, &mtr);
|
||||
if (clust_rec) {
|
||||
offsets = rec_get_offsets(clust_rec, clust_index, offsets, true,
|
||||
offsets = rec_get_offsets(clust_rec, clust_index, offsets,
|
||||
clust_index->n_core_fields,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
history_row = clust_index->vers_history_row(clust_rec, offsets);
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2009, 2019, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2015, 2020, MariaDB Corporation.
|
||||
Copyright (c) 2015, 2021, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -1157,7 +1157,7 @@ dict_stats_analyze_index_level(
|
|||
|
||||
prev_rec_offsets = rec_get_offsets(
|
||||
prev_rec, index, prev_rec_offsets,
|
||||
true,
|
||||
index->n_core_fields,
|
||||
n_uniq, &heap);
|
||||
|
||||
prev_rec = rec_copy_prefix_to_buf(
|
||||
|
@ -1169,8 +1169,9 @@ dict_stats_analyze_index_level(
|
|||
|
||||
continue;
|
||||
}
|
||||
rec_offsets = rec_get_offsets(
|
||||
rec, index, rec_offsets, !level, n_uniq, &heap);
|
||||
rec_offsets = rec_get_offsets(rec, index, rec_offsets,
|
||||
level ? 0 : index->n_core_fields,
|
||||
n_uniq, &heap);
|
||||
|
||||
(*total_recs)++;
|
||||
|
||||
|
@ -1178,7 +1179,8 @@ dict_stats_analyze_index_level(
|
|||
ulint matched_fields;
|
||||
|
||||
prev_rec_offsets = rec_get_offsets(
|
||||
prev_rec, index, prev_rec_offsets, !level,
|
||||
prev_rec, index, prev_rec_offsets,
|
||||
level ? 0 : index->n_core_fields,
|
||||
n_uniq, &heap);
|
||||
|
||||
cmp_rec_rec(prev_rec, rec,
|
||||
|
@ -1332,7 +1334,7 @@ be big enough)
|
|||
@param[in] index index of the page
|
||||
@param[in] page the page to scan
|
||||
@param[in] n_prefix look at the first n_prefix columns
|
||||
@param[in] is_leaf whether this is the leaf page
|
||||
@param[in] n_core 0, or index->n_core_fields for leaf
|
||||
@param[out] n_diff number of distinct records encountered
|
||||
@param[out] n_external_pages if this is non-NULL then it will be set
|
||||
to the number of externally stored pages which were encountered
|
||||
|
@ -1347,7 +1349,7 @@ dict_stats_scan_page(
|
|||
const dict_index_t* index,
|
||||
const page_t* page,
|
||||
ulint n_prefix,
|
||||
bool is_leaf,
|
||||
ulint n_core,
|
||||
ib_uint64_t* n_diff,
|
||||
ib_uint64_t* n_external_pages)
|
||||
{
|
||||
|
@ -1359,9 +1361,9 @@ dict_stats_scan_page(
|
|||
Because offsets1,offsets2 should be big enough,
|
||||
this memory heap should never be used. */
|
||||
mem_heap_t* heap = NULL;
|
||||
ut_ad(is_leaf == page_is_leaf(page));
|
||||
ut_ad(!!n_core == page_is_leaf(page));
|
||||
const rec_t* (*get_next)(const rec_t*)
|
||||
= !is_leaf || srv_stats_include_delete_marked
|
||||
= !n_core || srv_stats_include_delete_marked
|
||||
? page_rec_get_next_const
|
||||
: page_rec_get_next_non_del_marked;
|
||||
|
||||
|
@ -1380,7 +1382,7 @@ dict_stats_scan_page(
|
|||
return(NULL);
|
||||
}
|
||||
|
||||
offsets_rec = rec_get_offsets(rec, index, offsets_rec, is_leaf,
|
||||
offsets_rec = rec_get_offsets(rec, index, offsets_rec, n_core,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
if (should_count_external_pages) {
|
||||
|
@ -1397,7 +1399,7 @@ dict_stats_scan_page(
|
|||
ulint matched_fields;
|
||||
|
||||
offsets_next_rec = rec_get_offsets(next_rec, index,
|
||||
offsets_next_rec, is_leaf,
|
||||
offsets_next_rec, n_core,
|
||||
ULINT_UNDEFINED,
|
||||
&heap);
|
||||
|
||||
|
@ -1411,7 +1413,7 @@ dict_stats_scan_page(
|
|||
|
||||
(*n_diff)++;
|
||||
|
||||
if (!is_leaf) {
|
||||
if (!n_core) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1497,7 +1499,7 @@ dict_stats_analyze_index_below_cur(
|
|||
rec = btr_cur_get_rec(cur);
|
||||
ut_ad(!page_rec_is_leaf(rec));
|
||||
|
||||
offsets_rec = rec_get_offsets(rec, index, offsets1, false,
|
||||
offsets_rec = rec_get_offsets(rec, index, offsets1, 0,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
page_id_t page_id(index->table->space_id,
|
||||
|
@ -1531,7 +1533,7 @@ dict_stats_analyze_index_below_cur(
|
|||
/* search for the first non-boring record on the page */
|
||||
offsets_rec = dict_stats_scan_page(
|
||||
&rec, offsets1, offsets2, index, page, n_prefix,
|
||||
false, n_diff, NULL);
|
||||
0, n_diff, NULL);
|
||||
|
||||
/* pages on level > 0 are not allowed to be empty */
|
||||
ut_a(offsets_rec != NULL);
|
||||
|
@ -1576,7 +1578,7 @@ dict_stats_analyze_index_below_cur(
|
|||
|
||||
offsets_rec = dict_stats_scan_page(
|
||||
&rec, offsets1, offsets2, index, page, n_prefix,
|
||||
true, n_diff,
|
||||
index->n_core_fields, n_diff,
|
||||
n_external_pages);
|
||||
|
||||
#if 0
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2017, 2020, MariaDB Corporation.
|
||||
Copyright (c) 2017, 2021, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -156,9 +156,24 @@ schedule new estimates for table and index statistics to be calculated.
|
|||
void dict_stats_update_if_needed_func(dict_table_t *table)
|
||||
#endif
|
||||
{
|
||||
ut_ad(table->stat_initialized);
|
||||
ut_ad(!mutex_own(&dict_sys.mutex));
|
||||
|
||||
if (UNIV_UNLIKELY(!table->stat_initialized)) {
|
||||
/* The table may have been evicted from dict_sys
|
||||
and reloaded internally by InnoDB for FOREIGN KEY
|
||||
processing, but not reloaded by the SQL layer.
|
||||
|
||||
We can (re)compute the transient statistics when the
|
||||
table is actually loaded by the SQL layer.
|
||||
|
||||
Note: If InnoDB persistent statistics are enabled,
|
||||
we will skip the updates. We must do this, because
|
||||
dict_table_get_n_rows() below assumes that the
|
||||
statistics have been initialized. The DBA may have
|
||||
to execute ANALYZE TABLE. */
|
||||
return;
|
||||
}
|
||||
|
||||
ulonglong counter = table->stat_modified_counter++;
|
||||
ulonglong n_rows = dict_table_get_n_rows(table);
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2020, MariaDB Corporation.
Copyright (c) 2016, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -2518,7 +2518,8 @@ fts_get_max_cache_size(
}
} else {
ib::error() << "(" << error << ") reading max"
" cache config value from config table";
" cache config value from config table "
<< fts_table->table->name;
}

ut_free(value.f_str);

@ -2691,7 +2692,8 @@ func_exit:
} else {
*doc_id = 0;

ib::error() << "(" << error << ") while getting next doc id.";
ib::error() << "(" << error << ") while getting next doc id "
"for table " << table->name;
fts_sql_rollback(trx);

if (error == DB_DEADLOCK) {

@ -2771,7 +2773,8 @@ fts_update_sync_doc_id(
cache->synced_doc_id = doc_id;
} else {
ib::error() << "(" << error << ") while"
" updating last doc id.";
" updating last doc id for table"
<< table->name;

fts_sql_rollback(trx);
}

@ -3482,7 +3485,8 @@ fts_add_doc_by_id(

}

offsets = rec_get_offsets(clust_rec, clust_index, NULL, true,
offsets = rec_get_offsets(clust_rec, clust_index, NULL,
clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);

for (ulint i = 0; i < num_idx; ++i) {

@ -3996,7 +4000,8 @@ fts_sync_write_words(

if (UNIV_UNLIKELY(error != DB_SUCCESS) && !print_error) {
ib::error() << "(" << error << ") writing"
" word node to FTS auxiliary index table.";
" word node to FTS auxiliary index table "
<< table->name;
print_error = TRUE;
}
}

@ -4151,7 +4156,8 @@ fts_sync_commit(
fts_sql_commit(trx);
} else {
fts_sql_rollback(trx);
ib::error() << "(" << error << ") during SYNC.";
ib::error() << "(" << error << ") during SYNC of "
"table " << sync->table->name;
}

if (UNIV_UNLIKELY(fts_enable_diag_print) && elapsed_time) {

@ -4922,7 +4928,8 @@ fts_get_rows_count(
trx->error_state = DB_SUCCESS;
} else {
ib::error() << "(" << error
<< ") while reading FTS table.";
<< ") while reading FTS table "
<< table_name;

break; /* Exit the loop. */
}
@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2020, MariaDB Corporation.
Copyright (c) 2018, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -87,8 +87,9 @@ rtr_page_split_initialize_nodes(
stop = task + n_recs;

rec = page_rec_get_next(page_get_infimum_rec(page));
const bool is_leaf = page_is_leaf(page);
*offsets = rec_get_offsets(rec, cursor->index, *offsets, is_leaf,
const ulint n_core = page_is_leaf(page)
? cursor->index->n_core_fields : 0;
*offsets = rec_get_offsets(rec, cursor->index, *offsets, n_core,
n_uniq, &heap);

source_cur = rec_get_nth_field(rec, *offsets, 0, &len);

@ -101,7 +102,7 @@ rtr_page_split_initialize_nodes(

rec = page_rec_get_next(rec);
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
is_leaf, n_uniq, &heap);
n_core, n_uniq, &heap);
source_cur = rec_get_nth_field(rec, *offsets, 0, &len);
}

@ -308,7 +309,8 @@ rtr_update_mbr_field(
page_zip = buf_block_get_page_zip(block);

child = btr_node_ptr_get_child_page_no(rec, offsets);
const bool is_leaf = page_is_leaf(block->frame);
const ulint n_core = page_is_leaf(block->frame)
? index->n_core_fields : 0;

if (new_rec) {
child_rec = new_rec;

@ -324,7 +326,7 @@ rtr_update_mbr_field(
if (cursor2) {
rec_t* del_rec = btr_cur_get_rec(cursor2);
offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2),
index, NULL, false,
index, NULL, 0,
ULINT_UNDEFINED, &heap);
del_page_no = btr_node_ptr_get_child_page_no(del_rec, offsets2);
cur2_pos = page_rec_get_n_recs_before(btr_cur_get_rec(cursor2));

@ -389,7 +391,7 @@ rtr_update_mbr_field(
= page_rec_get_nth(page, cur2_pos);
}
offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2),
index, NULL, false,
index, NULL, 0,
ULINT_UNDEFINED, &heap);
ut_ad(del_page_no == btr_node_ptr_get_child_page_no(
cursor2->page_cur.rec,

@ -427,7 +429,7 @@ rtr_update_mbr_field(
ut_ad(old_rec != insert_rec);

page_cur_position(old_rec, block, &page_cur);
offsets2 = rec_get_offsets(old_rec, index, NULL, is_leaf,
offsets2 = rec_get_offsets(old_rec, index, NULL, n_core,
ULINT_UNDEFINED, &heap);
page_cur_delete_rec(&page_cur, index, offsets2, mtr);

@ -457,7 +459,7 @@ update_mbr:

cur2_rec = cursor2->page_cur.rec;
offsets2 = rec_get_offsets(cur2_rec, index, NULL,
is_leaf,
n_core,
ULINT_UNDEFINED, &heap);

cur2_rec_info = rec_get_info_bits(cur2_rec,

@ -517,7 +519,7 @@ update_mbr:
if (ins_suc) {
btr_cur_position(index, insert_rec, block, cursor);
offsets = rec_get_offsets(insert_rec,
index, offsets, is_leaf,
index, offsets, n_core,
ULINT_UNDEFINED, &heap);
}

@ -532,7 +534,7 @@ update_mbr:
cur2_rec = btr_cur_get_rec(cursor2);

offsets2 = rec_get_offsets(cur2_rec, index, NULL,
is_leaf,
n_core,
ULINT_UNDEFINED, &heap);

/* If the cursor2 position is on a wrong rec, we

@ -546,7 +548,7 @@ update_mbr:
while (!page_rec_is_supremum(cur2_rec)) {
offsets2 = rec_get_offsets(cur2_rec, index,
NULL,
is_leaf,
n_core,
ULINT_UNDEFINED,
&heap);
cur2_pno = btr_node_ptr_get_child_page_no(

@ -836,7 +838,8 @@ rtr_split_page_move_rec_list(
rec_move = static_cast<rtr_rec_move_t*>(mem_heap_alloc(
heap,
sizeof (*rec_move) * max_to_move));
const bool is_leaf = page_is_leaf(page);
const ulint n_core = page_is_leaf(page)
? index->n_core_fields : 0;

/* Insert the recs in group 2 to new page. */
for (cur_split_node = node_array;

@ -846,10 +849,10 @@ rtr_split_page_move_rec_list(
block, cur_split_node->key);

offsets = rec_get_offsets(cur_split_node->key,
index, offsets, is_leaf,
index, offsets, n_core,
ULINT_UNDEFINED, &heap);

ut_ad(!is_leaf || cur_split_node->key != first_rec);
ut_ad(!n_core || cur_split_node->key != first_rec);

rec = page_cur_insert_rec_low(
page_cur_get_rec(&new_page_cursor),

@ -884,7 +887,7 @@ rtr_split_page_move_rec_list(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it not required
for MVCC. */
if (is_leaf && !index->table->is_temporary()) {
if (n_core && !index->table->is_temporary()) {
page_update_max_trx_id(new_block, NULL,
page_get_max_trx_id(page),
mtr);

@ -937,7 +940,7 @@ rtr_split_page_move_rec_list(
block, &page_cursor);
offsets = rec_get_offsets(
page_cur_get_rec(&page_cursor), index,
offsets, is_leaf, ULINT_UNDEFINED,
offsets, n_core, ULINT_UNDEFINED,
&heap);
page_cur_delete_rec(&page_cursor,
index, offsets, mtr);

@ -1136,6 +1139,9 @@ func_start:
/* Update the lock table */
lock_rtr_move_rec_list(new_block, block, rec_move, moved);

const ulint n_core = page_level
? 0 : cursor->index->n_core_fields;

/* Delete recs in first group from the new page. */
for (cur_split_node = rtr_split_node_array;
cur_split_node < end_split_node - 1; ++cur_split_node) {

@ -1154,7 +1160,7 @@ func_start:

*offsets = rec_get_offsets(
page_cur_get_rec(page_cursor),
cursor->index, *offsets, !page_level,
cursor->index, *offsets, n_core,
ULINT_UNDEFINED, heap);

page_cur_delete_rec(page_cursor,

@ -1171,7 +1177,7 @@ func_start:
block, page_cursor);
*offsets = rec_get_offsets(
page_cur_get_rec(page_cursor),
cursor->index, *offsets, !page_level,
cursor->index, *offsets, n_core,
ULINT_UNDEFINED, heap);
page_cur_delete_rec(page_cursor,
cursor->index, *offsets, mtr);

@ -1400,7 +1406,8 @@ rtr_page_copy_rec_list_end_no_locks(
rec_offs offsets_2[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets2 = offsets_2;
ulint moved = 0;
bool is_leaf = page_is_leaf(new_page);
const ulint n_core = page_is_leaf(new_page)
? index->n_core_fields : 0;

rec_offs_init(offsets_1);
rec_offs_init(offsets_2);

@ -1429,14 +1436,14 @@ rtr_page_copy_rec_list_end_no_locks(
cur_rec = page_rec_get_next(cur_rec);
}

offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);
while (!page_rec_is_supremum(cur_rec)) {
ulint cur_matched_fields = 0;
int cmp;

offsets2 = rec_get_offsets(cur_rec, index, offsets2,
is_leaf,
n_core,
ULINT_UNDEFINED, &heap);
cmp = cmp_rec_rec(cur1_rec, cur_rec,
offsets1, offsets2, index, false,

@ -1448,7 +1455,7 @@ rtr_page_copy_rec_list_end_no_locks(
/* Skip small recs. */
page_cur_move_to_next(&page_cur);
cur_rec = page_cur_get_rec(&page_cur);
} else if (is_leaf) {
} else if (n_core) {
if (rec_get_deleted_flag(cur1_rec,
dict_table_is_comp(index->table))) {
goto next;

@ -1471,7 +1478,7 @@ rtr_page_copy_rec_list_end_no_locks(

cur_rec = page_cur_get_rec(&page_cur);

offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);

ins_rec = page_cur_insert_rec_low(cur_rec, index,

@ -1527,7 +1534,8 @@ rtr_page_copy_rec_list_start_no_locks(
rec_offs* offsets2 = offsets_2;
page_cur_t page_cur;
ulint moved = 0;
bool is_leaf = page_is_leaf(buf_block_get_frame(block));
const ulint n_core = page_is_leaf(buf_block_get_frame(block))
? index->n_core_fields : 0;

rec_offs_init(offsets_1);
rec_offs_init(offsets_2);

@ -1547,14 +1555,14 @@ rtr_page_copy_rec_list_start_no_locks(
cur_rec = page_rec_get_next(cur_rec);
}

offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);

while (!page_rec_is_supremum(cur_rec)) {
ulint cur_matched_fields = 0;

offsets2 = rec_get_offsets(cur_rec, index, offsets2,
is_leaf,
n_core,
ULINT_UNDEFINED, &heap);
int cmp = cmp_rec_rec(cur1_rec, cur_rec,
offsets1, offsets2, index, false,

@ -1567,7 +1575,7 @@ rtr_page_copy_rec_list_start_no_locks(
/* Skip small recs. */
page_cur_move_to_next(&page_cur);
cur_rec = page_cur_get_rec(&page_cur);
} else if (is_leaf) {
} else if (n_core) {
if (rec_get_deleted_flag(
cur1_rec,
dict_table_is_comp(index->table))) {

@ -1591,7 +1599,7 @@ rtr_page_copy_rec_list_start_no_locks(

cur_rec = page_cur_get_rec(&page_cur);

offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);

ins_rec = page_cur_insert_rec_low(cur_rec, index,

@ -1745,7 +1753,7 @@ rtr_check_same_block(

while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(
rec, index, NULL, false, ULINT_UNDEFINED, &heap);
rec, index, NULL, 0, ULINT_UNDEFINED, &heap);

if (btr_node_ptr_get_child_page_no(rec, offsets) == page_no) {
btr_cur_position(index, rec, parentb, cursor);
@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -530,8 +530,7 @@ rtr_compare_cursor_rec(

rec = btr_cur_get_rec(cursor);

offsets = rec_get_offsets(
rec, index, NULL, false, ULINT_UNDEFINED, heap);
offsets = rec_get_offsets(rec, index, NULL, 0, ULINT_UNDEFINED, heap);

return(btr_node_ptr_get_child_page_no(rec, offsets) == page_no);
}

@ -836,7 +835,8 @@ rtr_page_get_father_node_ptr(
user_rec = btr_cur_get_rec(cursor);
ut_a(page_rec_is_user_rec(user_rec));

offsets = rec_get_offsets(user_rec, index, offsets, !level,
offsets = rec_get_offsets(user_rec, index, offsets,
level ? 0 : index->n_fields,
ULINT_UNDEFINED, &heap);
rtr_get_mbr_from_rec(user_rec, offsets, &mbr);

@ -853,7 +853,7 @@ rtr_page_get_father_node_ptr(
node_ptr = btr_cur_get_rec(cursor);
ut_ad(!page_rec_is_comp(node_ptr)
|| rec_get_status(node_ptr) == REC_STATUS_NODE_PTR);
offsets = rec_get_offsets(node_ptr, index, offsets, false,
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);

ulint child_page = btr_node_ptr_get_child_page_no(node_ptr, offsets);

@ -871,13 +871,14 @@ rtr_page_get_father_node_ptr(
print_rec = page_rec_get_next(
page_get_infimum_rec(page_align(user_rec)));
offsets = rec_get_offsets(print_rec, index, offsets,
page_rec_is_leaf(user_rec),
page_rec_is_leaf(user_rec)
? index->n_fields : 0,
ULINT_UNDEFINED, &heap);
error << "; child ";
rec_print(error.m_oss, print_rec,
rec_get_info_bits(print_rec, rec_offs_comp(offsets)),
offsets);
offsets = rec_get_offsets(node_ptr, index, offsets, false,
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
error << "; parent ";
rec_print(error.m_oss, print_rec,

@ -1309,10 +1310,12 @@ rtr_cur_restore_position(

heap = mem_heap_create(256);
offsets1 = rec_get_offsets(
r_cursor->old_rec, index, NULL, !level,
r_cursor->old_rec, index, NULL,
level ? 0 : r_cursor->old_n_fields,
r_cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(
rec, index, NULL, !level,
rec, index, NULL,
level ? 0 : r_cursor->old_n_fields,
r_cursor->old_n_fields, &heap);

comp = rec_offs_comp(offsets1);

@ -1379,12 +1382,12 @@ search_again:

rec = btr_pcur_get_rec(r_cursor);

offsets1 = rec_get_offsets(
r_cursor->old_rec, index, NULL, !level,
r_cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(
rec, index, NULL, !level,
r_cursor->old_n_fields, &heap);
offsets1 = rec_get_offsets(r_cursor->old_rec, index, NULL,
level ? 0 : r_cursor->old_n_fields,
r_cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(rec, index, NULL,
level ? 0 : r_cursor->old_n_fields,
r_cursor->old_n_fields, &heap);

comp = rec_offs_comp(offsets1);

@ -1673,7 +1676,7 @@ rtr_cur_search_with_match(
page = buf_block_get_frame(block);

const ulint level = btr_page_get_level(page);
const bool is_leaf = !level;
const ulint n_core = level ? 0 : index->n_fields;

if (mode == PAGE_CUR_RTREE_LOCATE) {
ut_ad(level != 0);

@ -1695,7 +1698,7 @@ rtr_cur_search_with_match(

ulint new_rec_size = rec_get_converted_size(index, tuple, 0);

offsets = rec_get_offsets(rec, index, offsets, is_leaf,
offsets = rec_get_offsets(rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple),
&heap);

@ -1716,10 +1719,10 @@ rtr_cur_search_with_match(
}

while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(rec, index, offsets, is_leaf,
offsets = rec_get_offsets(rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple),
&heap);
if (!is_leaf) {
if (!n_core) {
switch (mode) {
case PAGE_CUR_CONTAIN:
case PAGE_CUR_INTERSECT:

@ -1800,7 +1803,7 @@ rtr_cur_search_with_match(
to rtr_info->path for non-leaf nodes, or
rtr_info->matches for leaf nodes */
if (rtr_info && mode != PAGE_CUR_RTREE_INSERT) {
if (!is_leaf) {
if (!n_core) {
ulint page_no;
node_seq_t new_seq;
bool is_loc;

@ -1811,7 +1814,7 @@ rtr_cur_search_with_match(
== PAGE_CUR_RTREE_GET_FATHER);

offsets = rec_get_offsets(
rec, index, offsets, false,
rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);

page_no = btr_node_ptr_get_child_page_no(

@ -1860,7 +1863,8 @@ rtr_cur_search_with_match(

/* Collect matched records on page */
offsets = rec_get_offsets(
rec, index, offsets, true,
rec, index, offsets,
index->n_fields,
ULINT_UNDEFINED, &heap);
rtr_leaf_push_match_rec(
rec, rtr_info, offsets,

@ -1883,7 +1887,7 @@ rtr_cur_search_with_match(

/* All records on page are searched */
if (page_rec_is_supremum(rec)) {
if (!is_leaf) {
if (!n_core) {
if (!found) {
/* No match case, if it is for insertion,
then we select the record that result in

@ -1893,7 +1897,7 @@ rtr_cur_search_with_match(
ut_ad(least_inc < DBL_MAX);
offsets = rec_get_offsets(
best_rec, index, offsets,
false, ULINT_UNDEFINED, &heap);
0, ULINT_UNDEFINED, &heap);
child_no =
btr_node_ptr_get_child_page_no(
best_rec, offsets);

@ -1945,11 +1949,11 @@ rtr_cur_search_with_match(
/* Verify the record to be positioned is the same
as the last record in matched_rec vector */
offsets2 = rec_get_offsets(test_rec.r_rec, index,
offsets2, true,
offsets2, index->n_fields,
ULINT_UNDEFINED, &heap);

offsets = rec_get_offsets(last_match_rec, index,
offsets, true,
offsets, index->n_fields,
ULINT_UNDEFINED, &heap);

ut_ad(cmp_rec_rec(test_rec.r_rec, last_match_rec,

@ -1966,9 +1970,8 @@ rtr_cur_search_with_match(
ulint child_no;
ut_ad(!last_match_rec && rec);

offsets = rec_get_offsets(
rec, index, offsets, false,
ULINT_UNDEFINED, &heap);
offsets = rec_get_offsets(rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);

child_no = btr_node_ptr_get_child_page_no(rec, offsets);

@ -1976,7 +1979,7 @@ rtr_cur_search_with_match(
index, rtr_info->parent_path, level, child_no,
block, rec, 0);

} else if (rtr_info && found && !is_leaf) {
} else if (rtr_info && found && !n_core) {
rec = last_match_rec;
}

@ -1986,11 +1989,11 @@ rtr_cur_search_with_match(
#ifdef UNIV_DEBUG
/* Verify that we are positioned at the same child page as pushed in
the path stack */
if (!is_leaf && (!page_rec_is_supremum(rec) || found)
if (!n_core && (!page_rec_is_supremum(rec) || found)
&& mode != PAGE_CUR_RTREE_INSERT) {
ulint page_no;

offsets = rec_get_offsets(rec, index, offsets, false,
offsets = rec_get_offsets(rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);
page_no = btr_node_ptr_get_child_page_no(rec, offsets);
@ -8743,6 +8743,8 @@ wsrep_calc_row_hash(
for (uint i = 0; i < table->s->fields; i++) {
byte null_byte=0;
byte true_byte=1;
ulint col_type;
ulint is_unsigned;

const Field* field = table->field[i];
if (!field->stored_in_db()) {

@ -8751,8 +8753,9 @@ wsrep_calc_row_hash(

ptr = (const byte*) row + get_field_offset(table, field);
len = field->pack_length();
col_type = get_innobase_type_from_mysql_type(&is_unsigned, field);

switch (prebuilt->table->cols[i].mtype) {
switch (col_type) {

case DATA_BLOB:
ptr = row_mysql_read_blob_ref(&len, ptr, len);
@ -1060,13 +1060,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
@return whether the table will be rebuilt */
bool need_rebuild () const { return(old_table != new_table); }

/** Clear uncommmitted added indexes after a failed operation. */
void clear_added_indexes()
{
for (ulint i= 0; i < num_to_add_index; i++)
add_index[i]->detach_columns(true);
}

/** Convert table-rebuilding ALTER to instant ALTER. */
void prepare_instant()
{

@ -1164,6 +1157,42 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
}
}

/** @return whether the given column is being added */
bool is_new_vcol(const dict_v_col_t &v_col) const
{
for (ulint i= 0; i < num_to_add_vcol; i++)
if (&add_vcol[i] == &v_col)
return true;
return false;
}

/** During rollback, make newly added indexes point to
newly added virtual columns. */
void clean_new_vcol_index()
{
ut_ad(old_table == new_table);
const dict_index_t *index= dict_table_get_first_index(old_table);
while ((index= dict_table_get_next_index(index)) != NULL)
{
if (!index->has_virtual() || index->is_committed())
continue;
ulint n_drop_new_vcol= index->get_new_n_vcol();
for (ulint i= 0; n_drop_new_vcol && i < index->n_fields; i++)
{
dict_col_t *col= index->fields[i].col;
/* Skip the non-virtual and old virtual columns */
if (!col->is_virtual())
continue;
dict_v_col_t *vcol= reinterpret_cast<dict_v_col_t*>(col);
if (!is_new_vcol(*vcol))
continue;

index->fields[i].col= &index->new_vcol_info->
add_drop_v_col(index->heap, vcol, --n_drop_new_vcol)->m_col;
}
}
}

private:
// Disable copying
ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&);

@ -3791,9 +3820,11 @@ innobase_fts_check_doc_id_index(
for (index = dict_table_get_first_index(table);
index; index = dict_table_get_next_index(index)) {

/* Check if there exists a unique index with the name of
FTS_DOC_ID_INDEX_NAME */
if (innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME)) {
FTS_DOC_ID_INDEX_NAME and ignore the corrupted index */
if (index->type & DICT_CORRUPT
|| innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME)) {
continue;
}

@ -4101,7 +4132,7 @@ online_retry_drop_indexes_low(
ut_ad(table->get_ref_count() >= 1);

if (table->drop_aborted) {
row_merge_drop_indexes(trx, table, TRUE);
row_merge_drop_indexes(trx, table, true);
}
}

@ -5980,7 +6011,7 @@ add_all_virtual:

offsets = rec_get_offsets(
btr_pcur_get_rec(&pcur), index, offsets,
true, ULINT_UNDEFINED, &offsets_heap);
index->n_core_fields, ULINT_UNDEFINED, &offsets_heap);
if (big_rec) {
if (err == DB_SUCCESS) {
err = btr_store_big_rec_extern_fields(

@ -6897,7 +6928,7 @@ new_table_failed:

for (ulint a = 0; a < ctx->num_to_add_index; a++) {
dict_index_t* index = ctx->add_index[a];
const bool has_new_v_col = index->has_new_v_col;
const ulint n_v_col = index->get_new_n_vcol();
index = create_index_dict(ctx->trx, index, add_v);
error = ctx->trx->error_state;
if (error != DB_SUCCESS) {

@ -6927,7 +6958,9 @@ error_handling_drop_uncached_1:
goto error_handling_drop_uncached_1;
}
index->parser = index_defs[a].parser;
index->has_new_v_col = has_new_v_col;
if (n_v_col) {
index->assign_new_v_col(n_v_col);
}
/* Note the id of the transaction that created this
index, we use it to restrict readers from accessing
this index, to ensure read consistency. */

@ -6997,7 +7030,7 @@ error_handling_drop_uncached_1:

for (ulint a = 0; a < ctx->num_to_add_index; a++) {
dict_index_t* index = ctx->add_index[a];
const bool has_new_v_col = index->has_new_v_col;
const ulint n_v_col = index->get_new_n_vcol();
DBUG_EXECUTE_IF(
"create_index_metadata_fail",
if (a + 1 == ctx->num_to_add_index) {

@ -7029,7 +7062,9 @@ error_handling_drop_uncached:
}

index->parser = index_defs[a].parser;
index->has_new_v_col = has_new_v_col;
if (n_v_col) {
index->assign_new_v_col(n_v_col);
}
/* Note the id of the transaction that created this
index, we use it to restrict readers from accessing
this index, to ensure read consistency. */

@ -7254,7 +7289,7 @@ error_handled:
online_retry_drop_indexes_with_trx(user_table, ctx->trx);
} else {
ut_ad(!ctx->need_rebuild());
row_merge_drop_indexes(ctx->trx, user_table, TRUE);
row_merge_drop_indexes(ctx->trx, user_table, true);
trx_commit_for_mysql(ctx->trx);
}

@ -8617,7 +8652,6 @@ oom:
that we hold at most a shared lock on the table. */
m_prebuilt->trx->error_info = NULL;
ctx->trx->error_state = DB_SUCCESS;
ctx->clear_added_indexes();

DBUG_RETURN(true);
}

@ -8709,17 +8743,18 @@ temparary index prefix
@param table the TABLE
@param locked TRUE=table locked, FALSE=may need to do a lazy drop
@param trx the transaction
*/
static MY_ATTRIBUTE((nonnull))
@param alter_trx transaction which takes S-lock on the table
while creating the index */
static
void
innobase_rollback_sec_index(
/*========================*/
dict_table_t* user_table,
const TABLE* table,
ibool locked,
trx_t* trx)
dict_table_t* user_table,
const TABLE* table,
bool locked,
trx_t* trx,
const trx_t* alter_trx=NULL)
{
row_merge_drop_indexes(trx, user_table, locked);
row_merge_drop_indexes(trx, user_table, locked, alter_trx);

/* Free the table->fts only if there is no FTS_DOC_ID
in the table */

@ -8814,7 +8849,12 @@ rollback_inplace_alter_table(
DBUG_ASSERT(ctx->new_table == prebuilt->table);

innobase_rollback_sec_index(
prebuilt->table, table, FALSE, ctx->trx);
prebuilt->table, table,
(ha_alter_info->alter_info->requested_lock
== Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE),
ctx->trx, prebuilt->trx);

ctx->clean_new_vcol_index();
}

trx_commit_for_mysql(ctx->trx);
@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2020, MariaDB Corporation.
Copyright (c) 2016, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -3870,7 +3870,7 @@ dump:
row_ins_sec_index_entry_by_modify(BTR_MODIFY_LEAF). */
ut_ad(rec_get_deleted_flag(rec, page_is_comp(page)));

offsets = rec_get_offsets(rec, index, NULL, true,
offsets = rec_get_offsets(rec, index, NULL, index->n_fields,
ULINT_UNDEFINED, &heap);
update = row_upd_build_sec_rec_difference_binary(
rec, index, offsets, entry, heap);

@ -4043,7 +4043,8 @@ ibuf_delete(

ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
ut_ad(!dict_index_is_spatial(index));
ut_ad(!index->is_spatial());
ut_ad(!index->is_clust());

low_match = page_cur_search(block, index, entry, &page_cur);

@ -4062,8 +4063,8 @@ ibuf_delete(

rec_offs_init(offsets_);

offsets = rec_get_offsets(
rec, index, offsets, true, ULINT_UNDEFINED, &heap);
offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
ULINT_UNDEFINED, &heap);

if (page_get_n_recs(page) <= 1
|| !(REC_INFO_DELETED_FLAG

@ -4858,6 +4859,13 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
bitmap_page = ibuf_bitmap_get_map_page(
page_id_t(space->id, page_no), zip_size, &mtr);

if (!bitmap_page) {
mutex_exit(&ibuf_mutex);
ibuf_exit(&mtr);
mtr_commit(&mtr);
return DB_CORRUPTION;
}

if (buf_is_zeroes(span<const byte>(bitmap_page,
physical_size))) {
/* This means we got all-zero page instead of

@ -4881,11 +4889,6 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
continue;
}

if (!bitmap_page) {
mutex_exit(&ibuf_mutex);
return DB_CORRUPTION;
}

for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size; i++) {
const ulint offset = page_no + i;
const page_id_t cur_page_id(space->id, offset);
@ -326,6 +326,8 @@ public:
/** Re-latch all latches */
void latch();

table_name_t table_name() { return m_index->table->name; }

private:
/** Insert a tuple to a page in a level
@param[in] tuple tuple to insert
@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -497,8 +497,10 @@ struct btr_pcur_t{
/** if cursor position is stored, contains an initial segment of the
latest record cursor was positioned either on, before or after */
rec_t* old_rec;
/** btr_cur.index->n_core_fields when old_rec was copied */
uint16 old_n_core_fields;
/** number of fields in old_rec */
ulint old_n_fields;
uint16 old_n_fields;
/** BTR_PCUR_ON, BTR_PCUR_BEFORE, or BTR_PCUR_AFTER, depending on
whether cursor was on, before, or after the old_rec record */
enum btr_pcur_pos_t rel_pos;
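The new btr_pcur_t::old_n_core_fields member records btr_cur.index->n_core_fields at the time old_rec was copied, so that the saved record prefix can later be interpreted with the n_core value that was valid when it was stored, even if the index has been instantly altered in the meantime. A minimal sketch of that call pattern, assuming a restore path similar to the rtr_cur_restore_position() hunk above (the names cursor, offsets and heap are illustrative only and not taken from this diff):

	/* Interpret the saved prefix with the n_core value recorded at
	copy time rather than the index's current n_core_fields. */
	offsets = rec_get_offsets(cursor->old_rec, index, offsets,
				  cursor->old_n_core_fields,
				  cursor->old_n_fields, &heap);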
@ -2,7 +2,7 @@

Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2020, MariaDB Corporation.
Copyright (c) 2013, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -2,7 +2,7 @@

Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2020, MariaDB Corporation.
Copyright (c) 2013, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -795,6 +795,35 @@ struct dict_v_col_t{
}
};

/** Data structure for newly added virtual column in a index.
It is used only during rollback_inplace_alter_table() of
addition of index depending on newly added virtual columns
and uses index heap. Should be freed when index is being
removed from cache. */
struct dict_add_v_col_info
{
ulint n_v_col;
dict_v_col_t *v_col;

/** Add the newly added virtual column while rollbacking
the index which contains new virtual columns
@param col virtual column to be duplicated
@param offset offset where to duplicate virtual column */
dict_v_col_t* add_drop_v_col(mem_heap_t *heap, dict_v_col_t *col,
ulint offset)
{
ut_ad(n_v_col);
ut_ad(offset < n_v_col);
if (!v_col)
v_col= static_cast<dict_v_col_t*>
(mem_heap_alloc(heap, n_v_col * sizeof *v_col));
new (&v_col[offset]) dict_v_col_t();
v_col[offset].m_col= col->m_col;
v_col[offset].v_pos= col->v_pos;
return &v_col[offset];
}
};

/** Data structure for newly added virtual column in a table */
struct dict_add_v_col_t{
/** number of new virtual column */

@ -1039,9 +1068,13 @@ struct dict_index_t {
dict_field_t* fields; /*!< array of field descriptions */
st_mysql_ftparser*
parser; /*!< fulltext parser plugin */
bool has_new_v_col;
/*!< whether it has a newly added virtual
column in ALTER */

/** It just indicates whether newly added virtual column
during alter. It stores column in case of alter failure.
It should use heap from dict_index_t. It should be freed
while removing the index from table. */
dict_add_v_col_info* new_vcol_info;

bool index_fts_syncing;/*!< Whether the fts index is
still syncing in the background;
FIXME: remove this and use MDL */

@ -1198,9 +1231,8 @@ public:
/** @return whether the index is corrupted */
inline bool is_corrupted() const;

/** Detach the virtual columns from the index that is to be removed.
@param whether to reset fields[].col */
void detach_columns(bool clear= false)
/** Detach the virtual columns from the index that is to be removed. */
void detach_columns()
{
if (!has_virtual() || !cached)
return;

@ -1210,8 +1242,6 @@ public:
if (!col || !col->is_virtual())
continue;
col->detach(*this);
if (clear)
fields[i].col= nullptr;
}
}

@ -1274,6 +1304,23 @@ public:
bool
vers_history_row(const rec_t* rec, bool &history_row);

/** Assign the number of new column to be added as a part
of the index
@param n_vcol number of virtual columns to be added */
void assign_new_v_col(ulint n_vcol)
{
new_vcol_info= static_cast<dict_add_v_col_info*>
(mem_heap_zalloc(heap, sizeof *new_vcol_info));
new_vcol_info->n_v_col= n_vcol;
}

/* @return whether index has new virtual column */
bool has_new_v_col() const { return new_vcol_info; }

/* @return number of newly added virtual column */
ulint get_new_n_vcol() const
{ return new_vcol_info ? new_vcol_info->n_v_col : 0; }

/** Reconstruct the clustered index fields. */
inline void reconstruct_fields();

@ -2286,6 +2333,17 @@ public:
/** mysql_row_templ_t for base columns used for compute the virtual
columns */
dict_vcol_templ_t* vc_templ;

/* @return whether the table has any other transcation lock
other than the given transaction */
bool has_lock_other_than(const trx_t *trx) const
{
for (lock_t *lock= UT_LIST_GET_FIRST(locks); lock;
lock= UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock))
if (lock->trx != trx)
return true;
return false;
}
};

inline void dict_index_t::set_modified(mtr_t& mtr) const
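The new dict_table_t::has_lock_other_than() walks the table's lock list and reports whether any transaction other than the given one currently holds a table lock. A minimal usage sketch with a hypothetical call site (the actual callers are not part of this hunk):

	/* Proceed with a single-owner code path only when trx is the
	sole lock holder on the table. */
	if (!table->has_lock_other_than(trx)) {
		/* no other transaction holds a lock on this table */
	}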
@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -57,7 +57,8 @@ rtr_page_cal_mbr(
page = buf_block_get_frame(block);

rec = page_rec_get_next(page_get_infimum_rec(page));
offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page),
offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page)
? index->n_fields : 0,
ULINT_UNDEFINED, &heap);

do {
@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2018, MariaDB Corporation.
Copyright (c) 2015, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -273,7 +273,8 @@ page_cur_tuple_insert(
index, tuple, n_ext);

*offsets = rec_get_offsets(rec, index, *offsets,
page_is_leaf(cursor->block->frame),
page_is_leaf(cursor->block->frame)
? index->n_core_fields : 0,
ULINT_UNDEFINED, heap);
ut_ad(size == rec_offs_size(*offsets));
@ -543,7 +543,7 @@ rec_get_n_extern_new(
@param[in] index the index that the record belongs to
@param[in,out] offsets array comprising offsets[0] allocated elements,
or an array from rec_get_offsets(), or NULL
@param[in] leaf whether this is a leaf-page record
@param[in] n_core 0, or index->n_core_fields for leaf page
@param[in] n_fields maximum number of offsets to compute
(ULINT_UNDEFINED to compute all offsets)
@param[in,out] heap memory heap

@ -553,7 +553,7 @@ rec_get_offsets_func(
const rec_t* rec,
const dict_index_t* index,
rec_offs* offsets,
bool leaf,
ulint n_core,
ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */

@ -1179,7 +1179,9 @@ rec_get_converted_size(
The fields are copied into the memory heap.
@param[out] tuple data tuple
@param[in] rec index record, or a copy thereof
@param[in] is_leaf whether rec is a leaf page record
@param[in] index index of rec
@param[in] n_core index->n_core_fields at the time rec was
copied, or 0 if non-leaf page record
@param[in] n_fields number of fields to copy
@param[in,out] heap memory heap */
void

@ -1187,7 +1189,7 @@ rec_copy_prefix_to_dtuple(
dtuple_t* tuple,
const rec_t* rec,
const dict_index_t* index,
bool is_leaf,
ulint n_core,
ulint n_fields,
mem_heap_t* heap)
MY_ATTRIBUTE((nonnull));
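This hunk documents the central API change of the merge: rec_get_offsets() and rec_copy_prefix_to_dtuple() now take a ulint n_core in place of the old bool leaf flag, where 0 denotes a node-pointer (non-leaf) record and index->n_core_fields denotes a leaf record (some call sites in this diff pass index->n_fields instead, apparently where the two are interchangeable for that index type). A short sketch of the resulting call convention, mirroring the call sites updated throughout this diff (rec, node_ptr, page, offsets and heap are illustrative names):

	/* Leaf-page record: pass the number of core fields. */
	offsets = rec_get_offsets(rec, index, offsets,
				  index->n_core_fields,
				  ULINT_UNDEFINED, &heap);

	/* Node-pointer (non-leaf) record: pass 0. */
	offsets = rec_get_offsets(node_ptr, index, offsets, 0,
				  ULINT_UNDEFINED, &heap);

	/* When the page may be either, derive n_core from the page type. */
	const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
	offsets = rec_get_offsets(rec, index, offsets, n_core,
				  ULINT_UNDEFINED, &heap);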
@ -167,18 +167,20 @@ row_merge_drop_indexes_dict(
table_id_t table_id)/*!< in: table identifier */
MY_ATTRIBUTE((nonnull));

/*********************************************************************//**
Drop those indexes which were created before an error occurred.
/** Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
because the transaction will not be committed. */
because the transaction will not be committed.
@param trx dictionary transaction
@param table table containing the indexes
@param locked True if table is locked,
false - may need to do lazy drop
@param alter_trx Alter table transaction */
void
row_merge_drop_indexes(
/*===================*/
trx_t* trx, /*!< in/out: transaction */
dict_table_t* table, /*!< in/out: table containing the indexes */
ibool locked) /*!< in: TRUE=table locked,
FALSE=may need to do a lazy drop */
MY_ATTRIBUTE((nonnull));
trx_t* trx,
dict_table_t* table,
bool locked,
const trx_t* alter_trx=NULL);

/*********************************************************************//**
Drop all partially created indexes during crash recovery. */
@ -4481,7 +4481,8 @@ static void lock_rec_print(FILE* file, const lock_t* lock, mtr_t& mtr)
ut_ad(!page_rec_is_metadata(rec));

offsets = rec_get_offsets(
rec, lock->index, offsets, true,
rec, lock->index, offsets,
lock->index->n_core_fields,
ULINT_UNDEFINED, &heap);

putc(' ', file);

@ -5027,8 +5028,8 @@ loop:
ut_ad(!lock_rec_get_nth_bit(lock, i)
|| page_rec_is_leaf(rec));
offsets = rec_get_offsets(rec, lock->index, offsets,
true, ULINT_UNDEFINED,
&heap);
lock->index->n_core_fields,
ULINT_UNDEFINED, &heap);

/* If this thread is holding the file space
latch (fil_space_t::latch), the following

@ -5359,7 +5360,8 @@ lock_rec_insert_check_and_lock(
const rec_offs* offsets;
rec_offs_init(offsets_);

offsets = rec_get_offsets(next_rec, index, offsets_, true,
offsets = rec_get_offsets(next_rec, index, offsets_,
index->n_core_fields,
ULINT_UNDEFINED, &heap);

ut_ad(lock_rec_queue_validate(

@ -5699,7 +5701,8 @@ lock_sec_rec_modify_check_and_lock(
const rec_offs* offsets;
rec_offs_init(offsets_);

offsets = rec_get_offsets(rec, index, offsets_, true,
offsets = rec_get_offsets(rec, index, offsets_,
index->n_core_fields,
ULINT_UNDEFINED, &heap);

ut_ad(lock_rec_queue_validate(

@ -5911,7 +5914,7 @@ lock_clust_rec_read_check_and_lock_alt(
rec_offs_init(offsets_);

ut_ad(page_rec_is_leaf(rec));
offsets = rec_get_offsets(rec, index, offsets, true,
offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &tmp_heap);
err = lock_clust_rec_read_check_and_lock(flags, block, rec, index,
offsets, mode, gap_mode, thr);
@ -2,7 +2,7 @@

Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2018, 2020, MariaDB Corporation.
Copyright (c) 2018, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -75,7 +75,7 @@ page_cur_try_search_shortcut(
ut_ad(page_is_leaf(page));

rec = page_header_get_ptr(page, PAGE_LAST_INSERT);
offsets = rec_get_offsets(rec, index, offsets, true,
offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);

ut_ad(rec);

@ -90,7 +90,8 @@ page_cur_try_search_shortcut(

next_rec = page_rec_get_next_const(rec);
if (!page_rec_is_supremum(next_rec)) {
offsets = rec_get_offsets(next_rec, index, offsets, true,
offsets = rec_get_offsets(next_rec, index, offsets,
index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);

if (cmp_dtuple_rec_with_match(tuple, next_rec, offsets,

@ -159,7 +160,7 @@ page_cur_try_search_shortcut_bytes(
ut_ad(page_is_leaf(page));

rec = page_header_get_ptr(page, PAGE_LAST_INSERT);
offsets = rec_get_offsets(rec, index, offsets, true,
offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);

ut_ad(rec);

@ -180,7 +181,8 @@ page_cur_try_search_shortcut_bytes(

next_rec = page_rec_get_next_const(rec);
if (!page_rec_is_supremum(next_rec)) {
offsets = rec_get_offsets(next_rec, index, offsets, true,
offsets = rec_get_offsets(next_rec, index, offsets,
index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);

if (cmp_dtuple_rec_with_match_bytes(

@ -321,14 +323,14 @@ page_cur_search_with_match(
#endif /* UNIV_ZIP_DEBUG */

ut_d(page_check_dir(page));
const bool is_leaf = page_is_leaf(page);
const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;

#ifdef BTR_CUR_HASH_ADAPT
if (is_leaf
if (n_core
&& page_get_direction(page) == PAGE_RIGHT
&& page_header_get_offs(page, PAGE_LAST_INSERT)
&& mode == PAGE_CUR_LE
&& !dict_index_is_spatial(index)
&& !index->is_spatial()
&& page_header_get_field(page, PAGE_N_DIRECTION) > 3
&& page_cur_try_search_shortcut(
block, index, tuple,

@ -344,10 +346,10 @@ page_cur_search_with_match(

/* If the mode is for R-tree indexes, use the special MBR
related compare functions */
if (dict_index_is_spatial(index) && mode > PAGE_CUR_LE) {
if (index->is_spatial() && mode > PAGE_CUR_LE) {
/* For leaf level insert, we still use the traditional
compare function for now */
if (mode == PAGE_CUR_RTREE_INSERT && is_leaf) {
if (mode == PAGE_CUR_RTREE_INSERT && n_core) {
mode = PAGE_CUR_LE;
} else {
rtr_cur_search_with_match(

@ -392,7 +394,7 @@ page_cur_search_with_match(

offsets = offsets_;
offsets = rec_get_offsets(
mid_rec, index, offsets, is_leaf,
mid_rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);

cmp = cmp_dtuple_rec_with_match(

@ -446,7 +448,7 @@ up_slot_match:

offsets = offsets_;
offsets = rec_get_offsets(
mid_rec, index, offsets, is_leaf,
mid_rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);

cmp = cmp_dtuple_rec_with_match(

@ -627,7 +629,7 @@ page_cur_search_with_match_bytes(

/* Perform binary search until the lower and upper limit directory
slots come to the distance 1 of each other */
const bool is_leaf = page_is_leaf(page);
const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;

while (up - low > 1) {
mid = (low + up) / 2;

@ -639,7 +641,7 @@ page_cur_search_with_match_bytes(
up_matched_fields, up_matched_bytes);

offsets = rec_get_offsets(
mid_rec, index, offsets_, is_leaf,
mid_rec, index, offsets_, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);

cmp = cmp_dtuple_rec_with_match_bytes(

@ -707,7 +709,7 @@ up_slot_match:
}

offsets = rec_get_offsets(
mid_rec, index, offsets_, is_leaf,
mid_rec, index, offsets_, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);

cmp = cmp_dtuple_rec_with_match_bytes(

@ -817,7 +819,8 @@ page_cur_insert_rec_write_log(
ut_ad(!page_rec_is_comp(insert_rec)
== !dict_table_is_comp(index->table));

const bool is_leaf = page_rec_is_leaf(cursor_rec);
const ulint n_core = page_rec_is_leaf(cursor_rec)
? index->n_core_fields : 0;

{
mem_heap_t* heap = NULL;

@ -831,9 +834,9 @@ page_cur_insert_rec_write_log(
rec_offs_init(ins_offs_);

cur_offs = rec_get_offsets(cursor_rec, index, cur_offs_,
is_leaf, ULINT_UNDEFINED, &heap);
n_core, ULINT_UNDEFINED, &heap);
ins_offs = rec_get_offsets(insert_rec, index, ins_offs_,
is_leaf, ULINT_UNDEFINED, &heap);
n_core, ULINT_UNDEFINED, &heap);

extra_size = rec_offs_extra_size(ins_offs);
cur_extra_size = rec_offs_extra_size(cur_offs);

@ -1091,9 +1094,9 @@ page_cur_parse_insert_rec(
/* Read from the log the inserted index record end segment which
differs from the cursor record */

const bool is_leaf = page_is_leaf(page);
const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;

offsets = rec_get_offsets(cursor_rec, index, offsets, is_leaf,
offsets = rec_get_offsets(cursor_rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);

if (!(end_seg_len & 0x1UL)) {

@ -1142,7 +1145,7 @@ page_cur_parse_insert_rec(
page_cur_position(cursor_rec, block, &cursor);

offsets = rec_get_offsets(buf + origin_offset, index, offsets,
is_leaf, ULINT_UNDEFINED, &heap);
n_core, ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(!page_cur_rec_insert(&cursor,
buf + origin_offset,
index, offsets, mtr))) {

@ -1323,7 +1326,8 @@ page_cur_insert_rec_low(
rec_offs_init(foffsets_);

foffsets = rec_get_offsets(
free_rec, index, foffsets, page_is_leaf(page),
free_rec, index, foffsets,
page_is_leaf(page) ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
if (rec_offs_size(foffsets) < rec_size) {
if (UNIV_LIKELY_NULL(heap)) {

@ -1736,7 +1740,8 @@ page_cur_insert_rec_zip(
rec_offs_init(foffsets_);

foffsets = rec_get_offsets(free_rec, index, foffsets,
page_rec_is_leaf(free_rec),
page_rec_is_leaf(free_rec)
? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
if (rec_offs_size(foffsets) < rec_size) {
too_small:

@ -2097,10 +2102,11 @@ page_copy_rec_list_end_to_created_page(
slot_index = 0;
n_recs = 0;

const bool is_leaf = page_is_leaf(new_page);
const ulint n_core = page_is_leaf(new_page)
? index->n_core_fields : 0;

do {
offsets = rec_get_offsets(rec, index, offsets, is_leaf,
offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
insert_rec = rec_copy(heap_top, rec, offsets);

@ -2142,7 +2148,7 @@ page_copy_rec_list_end_to_created_page(

heap_top += rec_size;

rec_offs_make_valid(insert_rec, index, is_leaf, offsets);
rec_offs_make_valid(insert_rec, index, n_core != 0, offsets);
page_cur_insert_rec_write_log(insert_rec, rec_size, prev_rec,
index, mtr);
prev_rec = insert_rec;

@ -2279,7 +2285,8 @@ page_cur_parse_delete_rec(

page_cur_delete_rec(&cursor, index,
rec_get_offsets(rec, index, offsets_,
page_rec_is_leaf(rec),
page_rec_is_leaf(rec)
? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap),
mtr);
if (UNIV_LIKELY_NULL(heap)) {
@ -2,7 +2,7 @@

Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@ -540,7 +540,8 @@ page_copy_rec_list_end_no_locks(
ut_a(page_is_comp(new_page) == page_rec_is_comp(rec));
ut_a(mach_read_from_2(new_page + srv_page_size - 10) == (ulint)
(page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM));
const bool is_leaf = page_is_leaf(block->frame);
const ulint n_core = page_is_leaf(block->frame)
? index->n_core_fields : 0;

cur2 = page_get_infimum_rec(buf_block_get_frame(new_block));

@ -548,7 +549,7 @@ page_copy_rec_list_end_no_locks(

while (!page_cur_is_after_last(&cur1)) {
rec_t* ins_rec;
offsets = rec_get_offsets(cur1.rec, index, offsets, is_leaf,
offsets = rec_get_offsets(cur1.rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur2, index,
cur1.rec, offsets, mtr);

@ -777,7 +778,7 @@ page_copy_rec_list_start(

cur2 = ret;

const bool is_leaf = page_rec_is_leaf(rec);
const ulint n_core = page_rec_is_leaf(rec) ? index->n_core_fields : 0;

/* Copy records from the original page to the new page */
if (index->is_spatial()) {

@ -799,7 +800,7 @@ page_copy_rec_list_start(
} else {
while (page_cur_get_rec(&cur1) != rec) {
offsets = rec_get_offsets(cur1.rec, index, offsets,
is_leaf,
n_core,
ULINT_UNDEFINED, &heap);
cur2 = page_cur_insert_rec_low(cur2, index,
cur1.rec, offsets, mtr);

@ -819,7 +820,7 @@ page_copy_rec_list_start(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it not required
for MVCC. */
if (is_leaf && dict_index_is_sec_or_ibuf(index)
if (n_core && dict_index_is_sec_or_ibuf(index)
&& !index->table->is_temporary()) {
page_update_max_trx_id(new_block, NULL,
page_get_max_trx_id(page_align(rec)),

@ -1050,7 +1051,7 @@ delete_all:
? MLOG_COMP_LIST_END_DELETE
: MLOG_LIST_END_DELETE, mtr);

const bool is_leaf = page_is_leaf(page);
const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;

if (page_zip) {
mtr_log_t log_mode;

@ -1064,7 +1065,7 @@ delete_all:
page_cur_t cur;
page_cur_position(rec, block, &cur);

offsets = rec_get_offsets(rec, index, offsets, is_leaf,
offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
rec = rec_get_next_ptr(rec, TRUE);
#ifdef UNIV_ZIP_DEBUG

@ -1097,8 +1098,7 @@ delete_all:

do {
ulint s;
offsets = rec_get_offsets(rec2, index, offsets,
is_leaf,
offsets = rec_get_offsets(rec2, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
s = rec_offs_size(offsets);
ut_ad(ulint(rec2 - page) + s

@ -1244,11 +1244,12 @@ page_delete_rec_list_start(
/* Individual deletes are not logged */

mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
const bool is_leaf = page_rec_is_leaf(rec);
const ulint n_core = page_rec_is_leaf(rec)
? index->n_core_fields : 0;

while (page_cur_get_rec(&cur1) != rec) {
offsets = rec_get_offsets(page_cur_get_rec(&cur1), index,
offsets, is_leaf,
offsets, n_core,
ULINT_UNDEFINED, &heap);
page_cur_delete_rec(&cur1, index, offsets, mtr);
}

@ -2461,9 +2462,10 @@ wrong_page_type:

rec = page_get_infimum_rec(page);

const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;

for (;;) {
offsets = rec_get_offsets(rec, index, offsets,
page_is_leaf(page),
offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);

if (page_is_comp(page) && page_rec_is_user_rec(rec)

@ -2709,8 +2711,7 @@ n_owned_zero:
rec = page_header_get_ptr(page, PAGE_FREE);

while (rec != NULL) {
offsets = rec_get_offsets(rec, index, offsets,
page_is_leaf(page),
offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(!page_rec_validate(rec, offsets))) {
ret = FALSE;
@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
- Copyright (c) 2014, 2020, MariaDB Corporation.
+ Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -877,7 +877,7 @@ page_zip_compress_node_ptrs(
do {
const rec_t* rec = *recs++;
- offsets = rec_get_offsets(rec, index, offsets, false,
+ offsets = rec_get_offsets(rec, index, offsets, 0,
			  ULINT_UNDEFINED, &heap);
/* Only leaf nodes may contain externally stored columns. */
ut_ad(!rec_offs_any_extern(offsets));

@@ -1126,7 +1126,7 @@ page_zip_compress_clust(
do {
const rec_t* rec = *recs++;
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
			  ULINT_UNDEFINED, &heap);
ut_ad(rec_offs_n_fields(offsets)
      == dict_index_get_n_fields(index));

@@ -2005,7 +2005,7 @@ page_zip_apply_log(
		sorted by address (indexed by
		heap_no - PAGE_HEAP_NO_USER_LOW) */
ulint n_dense,/*!< in: size of recs[] */
- bool is_leaf,/*!< in: whether this is a leaf page */
+ ulint n_core, /*!< in: index->n_fields, or 0 for non-leaf */
ulint trx_id_col,/*!< in: column number of trx_id in the index,
		or ULINT_UNDEFINED if none */
ulint heap_status,

@@ -2081,7 +2081,7 @@ page_zip_apply_log(
/* Clear the data bytes of the record. */
mem_heap_t* heap = NULL;
rec_offs* offs;
- offs = rec_get_offsets(rec, index, offsets, is_leaf,
+ offs = rec_get_offsets(rec, index, offsets, n_core,
		       ULINT_UNDEFINED, &heap);
memset(rec, 0, rec_offs_data_size(offs));

@@ -2099,7 +2099,7 @@ page_zip_apply_log(
This will be overwritten in page_zip_set_extra_bytes(),
called by page_zip_decompress_low(). */
ut_d(rec[-REC_NEW_INFO_BITS] = 0);
- rec_offs_make_valid(rec, index, is_leaf, offsets);
+ rec_offs_make_valid(rec, index, n_core != 0, offsets);
/* Copy the extra bytes (backwards). */
{

@@ -2279,7 +2279,7 @@ page_zip_decompress_node_ptrs(
}
/* Read the offsets. The status bits are needed here. */
- offsets = rec_get_offsets(rec, index, offsets, false,
+ offsets = rec_get_offsets(rec, index, offsets, 0,
			  ULINT_UNDEFINED, &heap);
/* Non-leaf nodes should not have any externally

@@ -2366,7 +2366,7 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
				 d_stream->avail_in + 1,
-				 recs, n_dense, false,
+				 recs, n_dense, 0,
				 ULINT_UNDEFINED, heap_status,
				 index, offsets);

@@ -2397,7 +2397,7 @@ zlib_done:
for (slot = 0; slot < n_dense; slot++) {
rec_t* rec = recs[slot];
- offsets = rec_get_offsets(rec, index, offsets, false,
+ offsets = rec_get_offsets(rec, index, offsets, 0,
			  ULINT_UNDEFINED, &heap);
/* Non-leaf nodes should not have any externally
stored columns. */

@@ -2519,7 +2519,8 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
				 d_stream->avail_in + 1,
-				 recs, n_dense, true,
+				 recs, n_dense,
+				 index->n_fields,
				 ULINT_UNDEFINED, heap_status,
				 index, offsets);

@@ -2722,7 +2723,7 @@ page_zip_decompress_clust(
}
/* Read the offsets. The status bits are needed here. */
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
			  ULINT_UNDEFINED, &heap);
/* This is a leaf page in a clustered index. */

@@ -2849,7 +2850,8 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
				 d_stream->avail_in + 1,
-				 recs, n_dense, true,
+				 recs, n_dense,
+				 index->n_fields,
				 trx_id_col, heap_status,
				 index, offsets);

@@ -2885,7 +2887,7 @@ zlib_done:
rec_t* rec = recs[slot];
bool exists = !page_zip_dir_find_free(
	page_zip, page_offset(rec));
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
			  ULINT_UNDEFINED, &heap);
dst = rec_get_nth_field(rec, offsets,

@@ -3409,7 +3411,7 @@ page_zip_validate_low(
	page + PAGE_NEW_INFIMUM, TRUE);
trec = page_rec_get_next_low(
	temp_page + PAGE_NEW_INFIMUM, TRUE);
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page) ? index->n_fields : 0;
do {
if (page_offset(rec) != page_offset(trec)) {

@@ -3424,7 +3426,7 @@ page_zip_validate_low(
if (index) {
/* Compare the data. */
offsets = rec_get_offsets(
-	rec, index, offsets, is_leaf,
+	rec, index, offsets, n_core,
	ULINT_UNDEFINED, &heap);
if (memcmp(rec - rec_offs_extra_size(offsets),
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
- Copyright (c) 2017, 2020, MariaDB Corporation.
+ Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -273,9 +273,9 @@ rec_init_offsets_comp_ordinary(
ulint n_fields = n_core;
ulint null_mask = 1;
- ut_ad(index->n_core_fields >= n_core);
+ ut_ad(n_core > 0);
- ut_ad(index->n_fields >= n_core);
+ ut_ad(index->n_core_fields >= n_core);
+ ut_ad(index->n_fields >= index->n_core_fields);
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
ut_ad(format == REC_LEAF_TEMP || format == REC_LEAF_TEMP_INSTANT
      || dict_table_is_comp(index->table));

@@ -283,6 +283,11 @@ rec_init_offsets_comp_ordinary(
      || index->n_fields == rec_offs_n_fields(offsets));
ut_d(ulint n_null= 0);
+ const unsigned n_core_null_bytes = UNIV_UNLIKELY(index->n_core_fields
+						  != n_core)
+	? UT_BITS_IN_BYTES(unsigned(index->get_n_nullable(n_core)))
+	: index->n_core_null_bytes;
if (mblob) {
ut_ad(index->is_dummy || index->table->instant);
ut_ad(index->is_dummy || index->is_instant());

@@ -297,7 +302,7 @@ rec_init_offsets_comp_ordinary(
const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable);
ut_d(n_null = n_nullable);
ut_ad(n_null <= index->n_nullable);
- ut_ad(n_null_bytes >= index->n_core_null_bytes
+ ut_ad(n_null_bytes >= n_core_null_bytes
      || n_core < index->n_core_fields);
lens = --nulls - n_null_bytes;
goto start;

@@ -314,10 +319,10 @@ rec_init_offsets_comp_ordinary(
case REC_LEAF_ORDINARY:
nulls -= REC_N_NEW_EXTRA_BYTES;
ordinary:
- lens = --nulls - index->n_core_null_bytes;
+ lens = --nulls - n_core_null_bytes;
- ut_d(n_null = std::min<uint>(index->n_core_null_bytes * 8U,
-			     index->n_nullable));
+ ut_d(n_null = std::min(n_core_null_bytes * 8U,
+		       index->n_nullable));
break;
case REC_LEAF_INSTANT:
nulls -= REC_N_NEW_EXTRA_BYTES;

@@ -330,7 +335,7 @@ ordinary:
const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable);
ut_d(n_null = n_nullable);
ut_ad(n_null <= index->n_nullable);
- ut_ad(n_null_bytes >= index->n_core_null_bytes
+ ut_ad(n_null_bytes >= n_core_null_bytes
      || n_core < index->n_core_fields);
lens = --nulls - n_null_bytes;
}

@@ -584,14 +589,14 @@ is (SQL_NULL), the field i is NULL. When the type of the offset at [i+1]
is (STORED_OFFPAGE), the field i is stored externally.
@param[in] rec record
@param[in] index the index that the record belongs in
- @param[in] leaf whether the record resides in a leaf page
+ @param[in] n_core 0, or index->n_core_fields for leaf page
@param[in,out] offsets array of offsets, with valid rec_offs_n_fields() */
static
void
rec_init_offsets(
const rec_t* rec,
const dict_index_t* index,
- bool leaf,
+ ulint n_core,
rec_offs* offsets)
{
ulint i = 0;

@@ -606,6 +611,8 @@ rec_init_offsets(
      || index->in_instant_init);
ut_d(memcpy(&offsets[RECORD_OFFSET], &rec, sizeof(rec)));
ut_d(memcpy(&offsets[INDEX_OFFSET], &index, sizeof(index)));
+ ut_ad(index->n_fields >= n_core);
+ ut_ad(index->n_core_fields >= n_core);
if (dict_table_is_comp(index->table)) {
const byte* nulls;

@@ -624,23 +631,21 @@ rec_init_offsets(
rec_offs_base(offsets)[1] = 8;
return;
case REC_STATUS_NODE_PTR:
- ut_ad(!leaf);
+ ut_ad(!n_core);
n_node_ptr_field
	= dict_index_get_n_unique_in_tree_nonleaf(
		index);
break;
case REC_STATUS_INSTANT:
- ut_ad(leaf);
ut_ad(index->is_instant());
rec_init_offsets_comp_ordinary(rec, index, offsets,
-			       index->n_core_fields,
+			       n_core,
			       NULL,
			       REC_LEAF_INSTANT);
return;
case REC_STATUS_ORDINARY:
- ut_ad(leaf);
rec_init_offsets_comp_ordinary(rec, index, offsets,
-			       index->n_core_fields,
+			       n_core,
			       NULL,
			       REC_LEAF_ORDINARY);
return;

@@ -797,7 +802,7 @@ resolved:
@param[in] index the index that the record belongs to
@param[in,out] offsets array comprising offsets[0] allocated elements,
		or an array from rec_get_offsets(), or NULL
- @param[in] leaf whether this is a leaf-page record
+ @param[in] n_core 0, or index->n_core_fields for leaf page
@param[in] n_fields maximum number of offsets to compute
		(ULINT_UNDEFINED to compute all offsets)
@param[in,out] heap memory heap

@@ -807,7 +812,7 @@ rec_get_offsets_func(
const rec_t* rec,
const dict_index_t* index,
rec_offs* offsets,
- bool leaf,
+ ulint n_core,
ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */

@@ -819,6 +824,15 @@ rec_get_offsets_func(
ulint size;
bool alter_metadata = false;
+ ut_ad(index->n_core_fields >= n_core);
+ /* This assertion was relaxed for the btr_cur_open_at_index_side()
+ call in btr_cur_instant_init_low(). We cannot invoke
+ index->is_instant(), because the same assertion would fail there
+ until btr_cur_instant_init_low() has invoked
+ dict_table_t::deserialise_columns(). */
+ ut_ad(index->n_fields >= index->n_core_fields
+       || index->in_instant_init);
if (dict_table_is_comp(index->table)) {
switch (UNIV_EXPECT(rec_get_status(rec),
		    REC_STATUS_ORDINARY)) {

@@ -826,14 +840,14 @@ rec_get_offsets_func(
alter_metadata = rec_is_alter_metadata(rec, true);
/* fall through */
case REC_STATUS_ORDINARY:
- ut_ad(leaf);
+ ut_ad(n_core);
n = dict_index_get_n_fields(index) + alter_metadata;
break;
case REC_STATUS_NODE_PTR:
/* Node pointer records consist of the
uniquely identifying fields of the record
followed by a child page number field. */
- ut_ad(!leaf);
+ ut_ad(!n_core);
n = dict_index_get_n_unique_in_tree_nonleaf(index) + 1;
break;
case REC_STATUS_INFIMUM:

@@ -862,19 +876,19 @@ rec_get_offsets_func(
	>= PAGE_HEAP_NO_USER_LOW;
/* The infimum and supremum records carry 1 field. */
ut_ad(is_user_rec || n == 1);
- ut_ad(!is_user_rec || leaf || index->is_dummy
+ ut_ad(!is_user_rec || n_core || index->is_dummy
      || dict_index_is_ibuf(index)
      || n == n_fields /* dict_stats_analyze_index_level() */
      || n
      == dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
- ut_ad(!is_user_rec || !leaf || index->is_dummy
+ ut_ad(!is_user_rec || !n_core || index->is_dummy
      || dict_index_is_ibuf(index)
      || n == n_fields /* btr_pcur_restore_position() */
      || (n + (index->id == DICT_INDEXES_ID)
-	  >= index->n_core_fields && n <= index->n_fields
+	  >= n_core && n <= index->n_fields
	  + unsigned(rec_is_alter_metadata(rec, false))));
- if (is_user_rec && leaf && n < index->n_fields) {
+ if (is_user_rec && n_core && n < index->n_fields) {
ut_ad(!index->is_dummy);
ut_ad(!dict_index_is_ibuf(index));
n = index->n_fields;

@@ -908,17 +922,17 @@ rec_get_offsets_func(
memcpy(&offsets[RECORD_OFFSET], &rec, sizeof rec);
memcpy(&offsets[INDEX_OFFSET], &index, sizeof index);
#endif /* UNIV_DEBUG */
- ut_ad(leaf);
+ ut_ad(n_core);
ut_ad(index->is_dummy || index->table->instant);
ut_ad(index->is_dummy || index->is_instant());
ut_ad(rec_offs_n_fields(offsets)
      <= ulint(index->n_fields) + 1);
rec_init_offsets_comp_ordinary<true>(rec, index, offsets,
				     index->n_core_fields,
-				     NULL,
+				     nullptr,
				     REC_LEAF_INSTANT);
} else {
- rec_init_offsets(rec, index, leaf, offsets);
+ rec_init_offsets(rec, index, n_core, offsets);
}
return offsets;
}

@@ -1876,7 +1890,9 @@ template void rec_convert_dtuple_to_temp<true>(
The fields are copied into the memory heap.
@param[out] tuple data tuple
@param[in] rec index record, or a copy thereof
- @param[in] is_leaf whether rec is a leaf page record
@param[in] index index of rec
+ @param[in] n_core index->n_core_fields at the time rec was
+		copied, or 0 if non-leaf page record
@param[in] n_fields number of fields to copy
@param[in,out] heap memory heap */
void

@@ -1884,7 +1900,7 @@ rec_copy_prefix_to_dtuple(
dtuple_t* tuple,
const rec_t* rec,
const dict_index_t* index,
- bool is_leaf,
+ ulint n_core,
ulint n_fields,
mem_heap_t* heap)
{

@@ -1892,10 +1908,11 @@ rec_copy_prefix_to_dtuple(
rec_offs* offsets = offsets_;
rec_offs_init(offsets_);
- ut_ad(is_leaf || n_fields
+ ut_ad(n_core <= index->n_core_fields);
+ ut_ad(n_core || n_fields
      <= dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
- offsets = rec_get_offsets(rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
			  n_fields, &heap);
ut_ad(rec_validate(rec, offsets));

@@ -2535,7 +2552,8 @@ rec_print(
rec_print_new(file, rec,
	      rec_get_offsets(rec, index, offsets_,
-			      page_rec_is_leaf(rec),
+			      page_rec_is_leaf(rec)
+			      ? index->n_core_fields : 0,
			      ULINT_UNDEFINED, &heap));
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);

@@ -2611,7 +2629,8 @@ operator<<(std::ostream& o, const rec_index_print& r)
{
mem_heap_t* heap = NULL;
rec_offs* offsets = rec_get_offsets(
-	r.m_rec, r.m_index, NULL, page_rec_is_leaf(r.m_rec),
+	r.m_rec, r.m_index, NULL, page_rec_is_leaf(r.m_rec)
+	? r.m_index->n_core_fields : 0,
	ULINT_UNDEFINED, &heap);
rec_print(o, r.m_rec,
	  rec_get_info_bits(r.m_rec, rec_offs_comp(offsets)),

@@ -2650,7 +2669,7 @@ rec_get_trx_id(
rec_offs_init(offsets_);
rec_offs* offsets = offsets_;
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
			  index->db_trx_id() + 1, &heap);
trx_id = rec_get_nth_field(rec, offsets, index->db_trx_id(), &len);

@@ -2701,7 +2720,8 @@ wsrep_rec_get_foreign_key(
ut_ad(index_ref);
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index_for, offsets_, true,
+ offsets = rec_get_offsets(rec, index_for, offsets_,
+			  index_for->n_core_fields,
			  ULINT_UNDEFINED, &heap);
ut_ad(rec_offs_validate(rec, NULL, offsets));
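The hunks above change `rec_init_offsets()` and `rec_get_offsets()` to take a `ulint n_core` field count instead of a `bool leaf`: 0 still means a node-pointer (non-leaf) record, while a leaf record passes `index->n_core_fields` (or, in a few callers, `index->n_fields`). The sketch below illustrates only that calling convention; `mock_index` and `mock_get_offsets()` are invented stand-ins, not the real definitions from the diff.

// Minimal sketch of the new calling convention; mock names only.
#include <iostream>

typedef unsigned long ulint;

struct mock_index {			// stand-in for dict_index_t
	ulint n_core_fields;		// fields present in every leaf record
	ulint n_fields;			// total fields (>= n_core_fields)
};

// Before: void mock_get_offsets(const mock_index&, bool leaf);
// After:  the leaf flag becomes a field count, 0 meaning "non-leaf".
void mock_get_offsets(const mock_index& index, ulint n_core)
{
	if (n_core == 0) {
		std::cout << "node-pointer (non-leaf) record\n";
	} else {
		// n_core may be smaller than index.n_fields when columns
		// were added instantly after the record was written.
		std::cout << "leaf record with " << n_core
			  << " core fields of " << index.n_fields << "\n";
	}
}

int main()
{
	mock_index index{3, 5};
	const bool is_leaf = true;	// what callers used to pass directly

	// The substitution applied throughout the call sites in this diff:
	mock_get_offsets(index, is_leaf ? index.n_core_fields : 0);
	mock_get_offsets(index, 0);	// a non-leaf call site
}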
@@ -925,7 +925,7 @@ loop:
			<< " records, the sort queue has "
			<< UT_LIST_GET_LEN(psort_info->fts_doc_list)
			<< " records. But sort cannot get the next"
-			" records";
+			" records during alter table " << table->name;
goto exit;
}
} else if (psort_info->state == FTS_PARENT_EXITING) {

@@ -1221,7 +1221,9 @@ row_merge_write_fts_word(
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
ib::error() << "Failed to write word to FTS auxiliary"
-	" index table, error " << error;
+	" index table "
+	<< ins_ctx->btr_bulk->table_name()
+	<< ", error " << error;
ret = error;
}

@@ -1818,7 +1818,8 @@ PageConverter::update_records(
if (deleted || clust_index) {
m_offsets = rec_get_offsets(
-	rec, m_index->m_srv_index, m_offsets, true,
+	rec, m_index->m_srv_index, m_offsets,
+	m_index->m_srv_index->n_core_fields,
	ULINT_UNDEFINED, &m_heap);
}

@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
- Copyright (c) 2016, 2020, MariaDB Corporation.
+ Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -883,7 +883,7 @@ row_ins_foreign_fill_virtual(
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
const rec_offs* offsets =
-	rec_get_offsets(rec, index, offsets_, true,
+	rec_get_offsets(rec, index, offsets_, index->n_core_fields,
			ULINT_UNDEFINED, &cascade->heap);
TABLE* mysql_table= NULL;
upd_t* update = cascade->update;

@@ -1197,7 +1197,8 @@ row_ins_foreign_check_on_constraint(
if (table->fts) {
doc_id = fts_get_doc_id_from_rec(
	clust_rec, clust_index,
-	rec_get_offsets(clust_rec, clust_index, NULL, true,
+	rec_get_offsets(clust_rec, clust_index, NULL,
+			clust_index->n_core_fields,
			ULINT_UNDEFINED, &tmp_heap));
}

@@ -1639,7 +1640,8 @@ row_ins_check_foreign_constraint(
continue;
}
- offsets = rec_get_offsets(rec, check_index, offsets, true,
+ offsets = rec_get_offsets(rec, check_index, offsets,
+			  check_index->n_core_fields,
			  ULINT_UNDEFINED, &heap);
if (page_rec_is_supremum(rec)) {

@@ -2127,7 +2129,8 @@ row_ins_scan_sec_index_for_duplicate(
continue;
}
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets,
+			  index->n_core_fields,
			  ULINT_UNDEFINED, &offsets_heap);
if (flags & BTR_NO_LOCKING_FLAG) {

@@ -2264,7 +2267,8 @@ row_ins_duplicate_error_in_clust_online(
ut_ad(!cursor->index->is_instant());
if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) {
- *offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+			   cursor->index->n_fields,
			   ULINT_UNDEFINED, heap);
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
if (err != DB_SUCCESS) {

@@ -2275,7 +2279,8 @@ row_ins_duplicate_error_in_clust_online(
rec = page_rec_get_next_const(btr_cur_get_rec(cursor));
if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) {
- *offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+			   cursor->index->n_fields,
			   ULINT_UNDEFINED, heap);
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
}

@@ -2331,7 +2336,7 @@ row_ins_duplicate_error_in_clust(
if (!page_rec_is_infimum(rec)) {
offsets = rec_get_offsets(rec, cursor->index, offsets,
-			  true,
+			  cursor->index->n_core_fields,
			  ULINT_UNDEFINED, &heap);
/* We set a lock on the possible duplicate: this

@@ -2397,7 +2402,7 @@ duplicate:
if (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(rec, cursor->index, offsets,
-			  true,
+			  cursor->index->n_core_fields,
			  ULINT_UNDEFINED, &heap);
if (trx->duplicates) {

@@ -2514,7 +2519,7 @@ row_ins_index_entry_big_rec(
btr_pcur_open(index, entry, PAGE_CUR_LE, BTR_MODIFY_TREE,
	      &pcur, &mtr);
rec = btr_pcur_get_rec(&pcur);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
			  ULINT_UNDEFINED, heap);
DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern");

@@ -3070,7 +3075,8 @@ row_ins_sec_index_entry_low(
prefix, we must convert the insert into a modify of an
existing record */
offsets = rec_get_offsets(
-	btr_cur_get_rec(&cursor), index, offsets, true,
+	btr_cur_get_rec(&cursor), index, offsets,
+	index->n_core_fields,
	ULINT_UNDEFINED, &offsets_heap);
err = row_ins_sec_index_entry_by_modify(

@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved.
- Copyright (c) 2017, 2020, MariaDB Corporation.
+ Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software

@@ -1259,7 +1259,8 @@ row_log_table_get_pk(
if (!offsets) {
offsets = rec_get_offsets(
-	rec, index, NULL, true,
+	rec, index, nullptr,
+	index->n_core_fields,
	index->db_trx_id() + 1, heap);
}

@@ -1309,7 +1310,8 @@ row_log_table_get_pk(
}
if (!offsets) {
- offsets = rec_get_offsets(rec, index, NULL, true,
+ offsets = rec_get_offsets(rec, index, nullptr,
+			  index->n_core_fields,
			  ULINT_UNDEFINED, heap);
}

@@ -1986,7 +1988,8 @@ all_done:
return(DB_SUCCESS);
}
- offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, NULL, true,
+ offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, nullptr,
+			  index->n_core_fields,
			  ULINT_UNDEFINED, &offsets_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(btr_pcur_get_rec(&pcur), offsets));

@@ -2184,7 +2187,7 @@ func_exit_committed:
/* Prepare to update (or delete) the record. */
rec_offs* cur_offsets = rec_get_offsets(
-	btr_pcur_get_rec(&pcur), index, NULL, true,
+	btr_pcur_get_rec(&pcur), index, nullptr, index->n_core_fields,
	ULINT_UNDEFINED, &offsets_heap);
if (!log->same_pk) {
Some files were not shown because too many files have changed in this diff.