From dc42b3c4d9546153e2f0049393e3771e21551679 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Fri, 11 Jan 2019 01:44:07 +0100 Subject: [PATCH 01/37] Backport MDEV-17504 to 5.5 mysql_install_db.exe should not remove datadir, if it was not created by it. --- sql/CMakeLists.txt | 2 +- sql/mysql_install_db.cc | 72 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 7 deletions(-) diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 02196a7e366..6648b7a2612 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -351,7 +351,7 @@ IF(WIN32) ${CMAKE_CURRENT_BINARY_DIR}/mysql_bootstrap_sql.c COMPONENT Server ) - TARGET_LINK_LIBRARIES(mysql_install_db mysys) + TARGET_LINK_LIBRARIES(mysql_install_db mysys shlwapi) ADD_LIBRARY(winservice STATIC winservice.c) TARGET_LINK_LIBRARIES(winservice shell32) diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc index 9c1a234241f..9d2b261b46c 100644 --- a/sql/mysql_install_db.cc +++ b/sql/mysql_install_db.cc @@ -28,6 +28,8 @@ #include #include #include +struct IUnknown; +#include #define USAGETEXT \ "mysql_install_db.exe Ver 1.00 for Windows\n" \ @@ -532,20 +534,78 @@ static int create_db_instance() DWORD cwd_len= MAX_PATH; char cmdline[3*MAX_PATH]; FILE *in; + bool cleanup_datadir= true; + DWORD last_error; verbose("Running bootstrap"); GetCurrentDirectory(cwd_len, cwd); - CreateDirectory(opt_datadir, NULL); /*ignore error, it might already exist */ + + /* Create datadir and datadir/mysql, if they do not already exist. */ + + if (!CreateDirectory(opt_datadir, NULL) && (GetLastError() != ERROR_ALREADY_EXISTS)) + { + last_error = GetLastError(); + switch(last_error) + { + case ERROR_ACCESS_DENIED: + die("Can't create data directory '%s' (access denied)\n", + opt_datadir); + break; + case ERROR_PATH_NOT_FOUND: + die("Can't create data directory '%s' " + "(one or more intermediate directories do not exist)\n", + opt_datadir); + break; + default: + die("Can't create data directory '%s', last error %u\n", + opt_datadir, last_error); + break; + } + } if (!SetCurrentDirectory(opt_datadir)) { - die("Cannot set current directory to '%s'\n",opt_datadir); - return -1; + last_error = GetLastError(); + switch (last_error) + { + case ERROR_DIRECTORY: + die("Can't set current directory to '%s', the path is not a valid directory \n", + opt_datadir); + break; + default: + die("Can' set current directory to '%s', last error %u\n", + opt_datadir, last_error); + break; + } } - CreateDirectory("mysql",NULL); - CreateDirectory("test", NULL); + if (PathIsDirectoryEmpty(opt_datadir)) + { + cleanup_datadir= false; + } + + if (!CreateDirectory("mysql",NULL)) + { + last_error = GetLastError(); + DWORD attributes; + switch(last_error) + { + case ERROR_ACCESS_DENIED: + die("Can't create subdirectory 'mysql' in '%s' (access denied)\n",opt_datadir); + break; + case ERROR_ALREADY_EXISTS: + attributes = GetFileAttributes("mysql"); + + if (attributes == INVALID_FILE_ATTRIBUTES) + die("GetFileAttributes() failed for existing file '%s\\mysql', last error %u", + opt_datadir, GetLastError()); + else if (!(attributes & FILE_ATTRIBUTE_DIRECTORY)) + die("File '%s\\mysql' exists, but it is not a directory", opt_datadir); + + break; + } + } /* Set data directory permissions for both current user and @@ -642,7 +702,7 @@ static int create_db_instance() } end: - if (ret) + if (ret && cleanup_datadir) { SetCurrentDirectory(cwd); clean_directory(opt_datadir); From 235374aee3c4b08d34026c2bcd7d88db515966cb Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub 
Date: Tue, 15 Jan 2019 18:44:03 +0100 Subject: [PATCH 02/37] MDEV-18254 upgrade HeidiSQL to 9.5 --- win/packaging/heidisql.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/win/packaging/heidisql.cmake b/win/packaging/heidisql.cmake index 772834e7c7d..29ecdd8eb4f 100644 --- a/win/packaging/heidisql.cmake +++ b/win/packaging/heidisql.cmake @@ -1,4 +1,4 @@ -SET(HEIDISQL_BASE_NAME "HeidiSQL_9.4_Portable") +SET(HEIDISQL_BASE_NAME "HeidiSQL_9.5_Portable") SET(HEIDISQL_ZIP "${HEIDISQL_BASE_NAME}.zip") SET(HEIDISQL_URL "http://www.heidisql.com/downloads/releases/${HEIDISQL_ZIP}") SET(HEIDISQL_DOWNLOAD_DIR ${THIRD_PARTY_DOWNLOAD_LOCATION}/${HEIDISQL_BASE_NAME}) From 1ecccb509c9dfa57976a2e2c3af07753a5356188 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 16 Jan 2019 13:16:41 +0100 Subject: [PATCH 03/37] MDEV-17085: CHECKSUM TABLE EXTENDED does not work correctly The problem was in calculating of the mask to clear unused null bits in case of using full byte. --- mysql-test/r/row-checksum-old.result | 16 ++++++++++++++++ mysql-test/r/row-checksum.result | 16 ++++++++++++++++ mysql-test/t/row-checksum.test | 17 +++++++++++++++++ sql/sql_table.cc | 5 ++++- 4 files changed, 53 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/row-checksum-old.result b/mysql-test/r/row-checksum-old.result index ef523463860..920c5dbe838 100644 --- a/mysql-test/r/row-checksum-old.result +++ b/mysql-test/r/row-checksum-old.result @@ -85,3 +85,19 @@ checksum table t1 extended; Table Checksum test.t1 4108368782 drop table t1; +# +# MDEV-17085: CHECKSUM TABLE EXTENDED does not work correctly +# +CREATE TABLE t1 ( c1 int NOT NULL, c2 int NOT NULL, c4 varchar(20), c5 varchar(20), c6 varchar(20), c7 varchar(20), c8 varchar(20), c9 varchar(20), c10 varchar(20), c11 varchar(20), c12 varchar(20), c13 varchar(20), c14 varchar(20), c15 varchar(20), c16 varchar(20), c19 int NOT NULL, c20 int NOT NULL, c21 varchar(20), c22 VARCHAR(20), c23 varchar(20)); +insert into t1 values (5,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0,0,"dog",NULL,NULL); +# Important is that checksum is different from following +CHECKSUM TABLE t1 EXTENDED; +Table Checksum +test.t1 2514025256 +UPDATE t1 SET c21='cat' WHERE c1=5; +# Important is that checksum is different from above +CHECKSUM TABLE t1 EXTENDED; +Table Checksum +test.t1 2326430205 +drop table t1; +# End of 5.5 tests diff --git a/mysql-test/r/row-checksum.result b/mysql-test/r/row-checksum.result index fb8a1260a1d..0f8311b703a 100644 --- a/mysql-test/r/row-checksum.result +++ b/mysql-test/r/row-checksum.result @@ -85,3 +85,19 @@ checksum table t1 extended; Table Checksum test.t1 3885665021 drop table t1; +# +# MDEV-17085: CHECKSUM TABLE EXTENDED does not work correctly +# +CREATE TABLE t1 ( c1 int NOT NULL, c2 int NOT NULL, c4 varchar(20), c5 varchar(20), c6 varchar(20), c7 varchar(20), c8 varchar(20), c9 varchar(20), c10 varchar(20), c11 varchar(20), c12 varchar(20), c13 varchar(20), c14 varchar(20), c15 varchar(20), c16 varchar(20), c19 int NOT NULL, c20 int NOT NULL, c21 varchar(20), c22 VARCHAR(20), c23 varchar(20)); +insert into t1 values (5,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0,0,"dog",NULL,NULL); +# Important is that checksum is different from following +CHECKSUM TABLE t1 EXTENDED; +Table Checksum +test.t1 2514025256 +UPDATE t1 SET c21='cat' WHERE c1=5; +# Important is that checksum is different from above +CHECKSUM TABLE t1 EXTENDED; +Table Checksum +test.t1 2326430205 +drop table t1; +# End of 5.5 
tests diff --git a/mysql-test/t/row-checksum.test b/mysql-test/t/row-checksum.test index 920a2384aa8..6b79827d066 100644 --- a/mysql-test/t/row-checksum.test +++ b/mysql-test/t/row-checksum.test @@ -60,3 +60,20 @@ checksum table t1; checksum table t1 quick; checksum table t1 extended; drop table t1; + +--echo # +--echo # MDEV-17085: CHECKSUM TABLE EXTENDED does not work correctly +--echo # + +CREATE TABLE t1 ( c1 int NOT NULL, c2 int NOT NULL, c4 varchar(20), c5 varchar(20), c6 varchar(20), c7 varchar(20), c8 varchar(20), c9 varchar(20), c10 varchar(20), c11 varchar(20), c12 varchar(20), c13 varchar(20), c14 varchar(20), c15 varchar(20), c16 varchar(20), c19 int NOT NULL, c20 int NOT NULL, c21 varchar(20), c22 VARCHAR(20), c23 varchar(20)); + +insert into t1 values (5,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0,0,"dog",NULL,NULL); +--echo # Important is that checksum is different from following +CHECKSUM TABLE t1 EXTENDED; +UPDATE t1 SET c21='cat' WHERE c1=5; +--echo # Important is that checksum is different from above +CHECKSUM TABLE t1 EXTENDED; + +drop table t1; + +--echo # End of 5.5 tests diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 1b83b513c2d..d3448c167c4 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -7844,7 +7844,10 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, { /* calculating table's checksum */ ha_checksum crc= 0; - uchar null_mask=256 - (1 << t->s->last_null_bit_pos); + DBUG_ASSERT(t->s->last_null_bit_pos < 8); + uchar null_mask= (t->s->last_null_bit_pos ? + (256 - (1 << t->s->last_null_bit_pos)): + 0); t->use_all_columns(); From 459d6da86955c89e96f6e9a8d3bc2a9b1756629b Mon Sep 17 00:00:00 2001 From: Natanael Copa Date: Wed, 16 Jan 2019 14:28:37 +0000 Subject: [PATCH 04/37] MDEV-18269 - fix off-by-one bug in unittest Fix the off-by-one overflow which was introduced with commit b0fd06a6f2721 (MDEV-15670 - unit.my_atomic failed in buildbot with Signal 11 thrown) Closes #1098. --- unittest/mysys/thr_template.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unittest/mysys/thr_template.c b/unittest/mysys/thr_template.c index d1bc0868ca0..0e06bf6e731 100644 --- a/unittest/mysys/thr_template.c +++ b/unittest/mysys/thr_template.c @@ -34,7 +34,7 @@ void test_concurrently(const char *test, pthread_handler handler, int n, int m) bad= 0; diag("Testing %s with %d threads, %d iterations... ", test, n, m); - for (i= n; i; i--) + for (i= 0; i < n; i++) { if (pthread_create(&threads[i], 0, handler, &m) != 0) { @@ -43,7 +43,7 @@ void test_concurrently(const char *test, pthread_handler handler, int n, int m) } } - for (i= n; i; i--) + for (i= 0; i < n; i++) pthread_join(threads[i], 0); now= my_interval_timer() - now; From 78f62e9079b6ad3705bb2abb7b48b31143297e86 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Fri, 4 Jan 2019 13:32:51 +0600 Subject: [PATCH 05/37] remove duplicated paragraph from mysql_install_db.sh Signed-off-by: Alexander Kuleshov --- scripts/mysql_install_db.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index 0272a19931f..f56de12d931 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -207,9 +207,6 @@ cannot_find_file() done fi - echo - echo "If you compiled from source, you need to run 'make install' to" - echo "copy the software into the correct location ready for operation." 
echo echo "If you compiled from source, you need to either run 'make install' to" echo "copy the software into the correct location ready for operation." From e292d1a800312f4e0330a519e0d980e27a7172f3 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 19 Jan 2019 14:01:09 +0100 Subject: [PATCH 06/37] Avoid noisy Clang 7 warning about unused variable. Patch by Eugene Kosov. --- include/my_valgrind.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/my_valgrind.h b/include/my_valgrind.h index 5d08a271d4a..a85e610f049 100644 --- a/include/my_valgrind.h +++ b/include/my_valgrind.h @@ -42,7 +42,7 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */ # define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0) # define MEM_CHECK_DEFINED(a,len) ((void) 0) #else -# define MEM_UNDEFINED(a,len) ((void) 0) +# define MEM_UNDEFINED(a,len) ((void) (a), (void) (len)) # define MEM_NOACCESS(a,len) ((void) 0) # define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0) # define MEM_CHECK_DEFINED(a,len) ((void) 0) @@ -51,7 +51,7 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */ #ifndef DBUG_OFF #define TRASH_FILL(A,B,C) do { const size_t trash_tmp= (B); MEM_UNDEFINED(A, trash_tmp); memset(A, C, trash_tmp); } while (0) #else -#define TRASH_FILL(A,B,C) do { const size_t trash_tmp __attribute__((unused))= (B); MEM_UNDEFINED(A,trash_tmp); } while (0) +#define TRASH_FILL(A,B,C) do { MEM_UNDEFINED((A), (B)); } while (0) #endif #define TRASH_ALLOC(A,B) do { TRASH_FILL(A,B,0xA5); MEM_UNDEFINED(A,B); } while(0) #define TRASH_FREE(A,B) do { TRASH_FILL(A,B,0x8F); MEM_NOACCESS(A,B); } while(0) From 0d3c49ef5de3fa7356851dd7a05052f5360d0ae6 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 14 Jan 2019 12:33:52 +0100 Subject: [PATCH 07/37] MDEV-17615 cmake ssl error on musl/libressl don't shortcut trying to test for openssl version, test what is actually needed for a code to compile --- cmake/ssl.cmake | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cmake/ssl.cmake b/cmake/ssl.cmake index 0c6cde31299..60d2cb48387 100644 --- a/cmake/ssl.cmake +++ b/cmake/ssl.cmake @@ -71,15 +71,23 @@ MACRO (MYSQL_CHECK_SSL) FIND_LIBRARY(CRYPTO_LIBRARY crypto) MARK_AS_ADVANCED(CRYPTO_LIBRARY) INCLUDE(CheckSymbolExists) + INCLUDE(CheckCSourceCompiles) SET(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR}) SET(CMAKE_REQUIRED_LIBRARIES ${OPENSSL_LIBRARIES}) CHECK_SYMBOL_EXISTS(SHA512_DIGEST_LENGTH "openssl/sha.h" HAVE_SHA512_DIGEST_LENGTH) CHECK_SYMBOL_EXISTS(ERR_remove_thread_state "openssl/err.h" HAVE_ERR_remove_thread_state) + CHECK_C_SOURCE_COMPILES(" + #include + int main() + { + DH dh; + return sizeof(dh.version); + }" OLD_OPENSSL_API) SET(CMAKE_REQUIRED_INCLUDES) SET(CMAKE_REQUIRED_LIBRARIES) - IF(OPENSSL_FOUND AND OPENSSL_VERSION VERSION_LESS "1.1.0" AND + IF(OPENSSL_FOUND AND OLD_OPENSSL_API AND CRYPTO_LIBRARY AND HAVE_SHA512_DIGEST_LENGTH) SET(SSL_SOURCES "") SET(SSL_LIBRARIES ${OPENSSL_LIBRARIES} ${CRYPTO_LIBRARY}) From 50e593386fcbaa1ca7bd2ed9fdfc51fd5102cdab Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 11 Jan 2019 19:35:46 +1100 Subject: [PATCH 08/37] MDEV-14580: mysql_install_db elements based on dirname of mysql_install_db Closes #1086 --- scripts/mysql_install_db.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index f56de12d931..f1249e1d06b 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -253,6 +253,9 @@ then 
cannot_find_file my_print_defaults $basedir/bin $basedir/extra exit 1 fi +elif test -x "$(dirname $0)/../@bindir@/my_print_defaults" +then + print_defaults="$(dirname $0)/../@bindir@/my_print_defaults" else print_defaults="@bindir@/my_print_defaults" fi @@ -304,6 +307,14 @@ then cannot_find_file fill_help_tables.sql @pkgdata_locations@ exit 1 fi +# relative from where the script was run for a relocatable install +elif test -x "$(dirname $0)/../@INSTALL_SBINDIR@/mysqld" +then + basedir="$(dirname $0)/../" + bindir="$basedir/@INSTALL_SBINDIR@" + resolveip="$bindir/resolveip" + mysqld="$basedir/@INSTALL_SBINDIR@/mysqld" + pkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@" else basedir="@prefix@" bindir="@bindir@" From 9c5be7d131f7eb7f27df722463faa2cd8135fd1b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 14 Jan 2019 15:55:21 +0100 Subject: [PATCH 09/37] MDEV-14580: mysql_install_db elements based on dirname of mysql_install_db Avoid introducing new dependencies or new syntax. That is, don't use $(...) and don't assume dirname is present. And remove unsighty /foo/bar/../xyz from the path. Use dirname instead of ../ --- scripts/mysql_install_db.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index f1249e1d06b..9f00562f4bd 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -36,6 +36,9 @@ in_rpm=0 ip_only=0 cross_bootstrap=0 +dirname0=`dirname $0 2>/dev/null` +dirname0=`dirname $dirname0 2>/dev/null` + usage() { cat < Date: Wed, 23 Jan 2019 09:51:06 +0200 Subject: [PATCH 10/37] MDEV-18349 InnoDB file size changes are not safe when file system crashes fil_extend_space_to_desired_size(): Invoke fsync() after posix_fallocate() in order to durably extend the file in a crash-safe file system. --- storage/innobase/fil/fil0fil.c | 4 +++- storage/xtradb/fil/fil0fil.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/storage/innobase/fil/fil0fil.c b/storage/innobase/fil/fil0fil.c index 4006ce4acce..32bf0b8ccd8 100644 --- a/storage/innobase/fil/fil0fil.c +++ b/storage/innobase/fil/fil0fil.c @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2014, 2019, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -4155,6 +4155,8 @@ fil_extend_space_to_desired_size( " failed with error %d\n", node->name, start_offset, len + start_offset, err); + } else { + os_file_flush(node->handle); } mutex_enter(&fil_system->mutex); diff --git a/storage/xtradb/fil/fil0fil.c b/storage/xtradb/fil/fil0fil.c index 004a80e9b13..fc305c7e01f 100644 --- a/storage/xtradb/fil/fil0fil.c +++ b/storage/xtradb/fil/fil0fil.c @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2014, 2019, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -4990,6 +4990,8 @@ fil_extend_space_to_desired_size( " failed with error %d\n", node->name, start_offset, len + start_offset, err); + } else { + os_file_flush(node->handle, TRUE); } mutex_enter(&fil_system->mutex); From 6de2928d5bf912ace5fb5a1e2254025efe202b67 Mon Sep 17 00:00:00 2001 From: Aditya A Date: Mon, 10 Sep 2018 16:00:29 +0530 Subject: [PATCH 11/37] Bug #28178776 COMPARISON OF UNINITAILIZED MEMORY IN LOG_IN_USE PROBLEM ------- Memory sanitizer reports uninitialized comparisons in log_in_use(), because strings are compared with memcmp() instead of strncmp. FIX --- Use strncmp() to compare strings --- sql/log.cc | 6 +++--- sql/sql_repl.cc | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sql/log.cc b/sql/log.cc index 2504b5e2d06..7db4985ad48 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2016, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB +/* Copyright (c) 2000, 2018, Oracle and/or its affiliates. + Copyright (c) 2009, 2019, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -3413,7 +3413,7 @@ int MYSQL_BIN_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name, // if the log entry matches, null string matching anything if (!log_name || (log_name_len == fname_len-1 && full_fname[log_name_len] == '\n' && - !memcmp(full_fname, full_log_name, log_name_len))) + !strncmp(full_fname, full_log_name, log_name_len))) { DBUG_PRINT("info", ("Found log file entry")); full_fname[fname_len-1]= 0; // remove last \n diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index ca6e8d15e7a..cb4904bb5a6 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2017, Oracle and/or its affiliates. - Copyright (c) 2008, 2017, MariaDB Corporation +/* Copyright (c) 2000, 2018, Oracle and/or its affiliates. + Copyright (c) 2008, 2019, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -365,7 +365,7 @@ bool log_in_use(const char* log_name) if ((linfo = tmp->current_linfo)) { mysql_mutex_lock(&linfo->lock); - result = !memcmp(log_name, linfo->log_file_name, log_name_len); + result = !strncmp(log_name, linfo->log_file_name, log_name_len); mysql_mutex_unlock(&linfo->lock); if (result) break; From b20d94da356fa274a49ac38497e0cb20ce760d93 Mon Sep 17 00:00:00 2001 From: Sreeharsha Ramanavarapu Date: Tue, 9 Oct 2018 12:03:35 +0530 Subject: [PATCH 12/37] Bug #28499924: INCORRECT BEHAVIOR WITH UNION IN SUBQUERY Issue: ------ When a subquery contains UNION the count of the number of subquery columns is calculated incorrectly. Only the first query block in the subquery's UNION is considered and an array indexing goes out-of-bounds, and this is caught by an assert. Solution: --------- Sum up the columns from all query blocks of the query expression. Change specific to 5.6/5.5: --------------------------- The "child" points to the last query block of the UNION (as opposed to 5.7+ where it points to the first member of UNION). So "child->master_unit()->first_select()" is used to reach the first query block of UNION. 
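As a minimal sketch of the idea (illustrative only: "outer" here stands for
lex->current_select, and the authoritative change is the sql_yacc.yy hunk
below), the counters are now accumulated over every query block of the
subquery's UNION instead of being copied from the single block that "child"
points to:

    /* Before (5.5/5.6): only the one query block reachable via 'child'
       was counted. */
    outer->select_n_where_fields+= child->select_n_where_fields;
    outer->select_n_having_items+= child->select_n_having_items;

    /* After: walk the whole UNION of the subquery and sum the counters. */
    for (SELECT_LEX *blk= child->master_unit()->first_select();
         blk != NULL; blk= blk->next_select())
    {
      outer->select_n_where_fields+= blk->select_n_where_fields;
      outer->select_n_having_items+= blk->select_n_having_items;
    }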
--- sql/sql_yacc.yy | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 34f7c6e3481..ba0041cf477 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -14627,19 +14627,21 @@ subselect_end: lex->current_select = lex->current_select->return_after_parsing(); lex->nest_level--; lex->current_select->n_child_sum_items += child->n_sum_items; - /* - A subselect can add fields to an outer select. Reserve space for - them. - */ - lex->current_select->select_n_where_fields+= - child->select_n_where_fields; /* - Aggregate functions in having clause may add fields to an outer - select. Count them also. + A subquery (and all the subsequent query blocks in a UNION) can + add columns to an outer query block. Reserve space for them. + Aggregate functions in having clause can also add fields to an + outer select. */ - lex->current_select->select_n_having_items+= - child->select_n_having_items; + for (SELECT_LEX *temp= child->master_unit()->first_select(); + temp != NULL; temp= temp->next_select()) + { + lex->current_select->select_n_where_fields+= + temp->select_n_where_fields; + lex->current_select->select_n_having_items+= + temp->select_n_having_items; + } } ; From a8da66f8c56211417289b0e40a10faf49e225a54 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 22 Jan 2019 00:15:57 +0100 Subject: [PATCH 13/37] Bug #28499924: INCORRECT BEHAVIOR WITH UNION IN SUBQUERY test case --- mysql-test/r/subselect2.result | 22 ++++++++++++++++++++++ mysql-test/t/subselect2.test | 20 ++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/mysql-test/r/subselect2.result b/mysql-test/r/subselect2.result index 64bd86707cc..8d555aa798b 100644 --- a/mysql-test/r/subselect2.result +++ b/mysql-test/r/subselect2.result @@ -394,3 +394,25 @@ select null in (select a from t1 where a < out3.a union select a from t2 where (select a from t3) +1 < out3.a+1) from t3 out3; ERROR 21000: Subquery returns more than 1 row drop table t1, t2, t3; +CREATE TABLE t1( +q11 int, q12 int, q13 int, q14 int, q15 int, q16 int, q17 int, q18 int, q19 int, +q21 int, q22 int, q23 int, q24 int, q25 int, q26 int, q27 int, q28 int, q29 int, +f1 int +); +CREATE TABLE t2(f2 int, f21 int, f3 timestamp, f4 int, f5 int, f6 int); +INSERT INTO t1 (f1) VALUES (1),(1),(2),(2); +INSERT INTO t2 VALUES (1,1,"2004-02-29 11:11:11",0,0,0), (2,2,"2004-02-29 11:11:11",0,0,0); +SELECT f1, +(SELECT t.f21 from t2 t where max( +q11+q12+q13+q14+q15+q16+q17+q18+q19+ +q21+q22+q23+q24+q25+q26+q27+q28+q29) = t.f2 UNION +SELECT t.f3 FROM t2 AS t WHERE t1.f1=t.f2 AND t.f3=MAX(t1.f1) UNION +SELECT 1 LIMIT 1) AS test +FROM t1 GROUP BY f1; +f1 test +1 1 +2 1 +Warnings: +Warning 1292 Incorrect datetime value: '1' +Warning 1292 Incorrect datetime value: '2' +DROP TABLE t1,t2; diff --git a/mysql-test/t/subselect2.test b/mysql-test/t/subselect2.test index ae210b865a2..73b0e77ade6 100644 --- a/mysql-test/t/subselect2.test +++ b/mysql-test/t/subselect2.test @@ -411,3 +411,23 @@ insert into t3 select a from t1; select null in (select a from t1 where a < out3.a union select a from t2 where (select a from t3) +1 < out3.a+1) from t3 out3; drop table t1, t2, t3; + +# +# Bug #28499924: INCORRECT BEHAVIOR WITH UNION IN SUBQUERY +# +CREATE TABLE t1( + q11 int, q12 int, q13 int, q14 int, q15 int, q16 int, q17 int, q18 int, q19 int, + q21 int, q22 int, q23 int, q24 int, q25 int, q26 int, q27 int, q28 int, q29 int, + f1 int +); +CREATE TABLE t2(f2 int, f21 int, f3 timestamp, f4 int, f5 int, f6 int); +INSERT INTO 
t1 (f1) VALUES (1),(1),(2),(2); +INSERT INTO t2 VALUES (1,1,"2004-02-29 11:11:11",0,0,0), (2,2,"2004-02-29 11:11:11",0,0,0); +SELECT f1, + (SELECT t.f21 from t2 t where max( + q11+q12+q13+q14+q15+q16+q17+q18+q19+ + q21+q22+q23+q24+q25+q26+q27+q28+q29) = t.f2 UNION + SELECT t.f3 FROM t2 AS t WHERE t1.f1=t.f2 AND t.f3=MAX(t1.f1) UNION + SELECT 1 LIMIT 1) AS test + FROM t1 GROUP BY f1; +DROP TABLE t1,t2; From 949559285efff44ba49b478ee766e0fe0a5a9b79 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 23 Jan 2019 10:09:49 +0100 Subject: [PATCH 14/37] MDEV-18059 `support-files/mysql.server.sh stop` must run as root don't run `su - mysql` is $USER is already mysql --- support-files/mysql.server.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh index c1d85ba2664..f3990967b87 100644 --- a/support-files/mysql.server.sh +++ b/support-files/mysql.server.sh @@ -187,7 +187,11 @@ fi user='@MYSQLD_USER@' su_kill() { - su - $user -s /bin/sh -c "kill $*" >/dev/null 2>&1 + if test "$USER" = "$user"; then + kill $* >/dev/null 2>&1 + else + su - $user -s /bin/sh -c "kill $*" >/dev/null 2>&1 + fi } # From ad220b96fb01dbb6acf7e51bdd8d4d6362d96ea7 Mon Sep 17 00:00:00 2001 From: Eugene Kosov Date: Mon, 2 Jul 2018 12:26:22 +0300 Subject: [PATCH 15/37] MDEV-16658 Memory leak in mysqltest on connect failure Close connection handler on connection failure. This fixes 14 failing tests in main suite under clang+ASAN build. ASAN report for main.connect looks like this: ================================================================= ==25495==ERROR: LeakSanitizer: detected memory leaks Direct leak of 146280 byte(s) in 115 object(s) allocated from: #0 0x4fba47 in calloc /fun/cpp_projects/llvm_toolchain/llvm/projects/compiler-rt/lib/asan/asan_malloc_linux.cc:138 #1 0x5a7a02 in mysql_init /work/mariadb/libmariadb/libmariadb/mariadb_lib.c:977:26 #2 0x570a7a in do_connect(st_command*) /work/mariadb/client/mysqltest.cc:6096:26 #3 0x584c39 in main /work/mariadb/client/mysqltest.cc:9321:9 #4 0x7fd15514db96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310 Indirect leak of 7065600 byte(s) in 115 object(s) allocated from: #0 0x4fb80f in __interceptor_malloc /fun/cpp_projects/llvm_toolchain/llvm/projects/compiler-rt/lib/asan/asan_malloc_linux.cc:129 #1 0x637a83 in my_context_init /work/mariadb/libmariadb/libmariadb/ma_context.c:367:23 #2 0x59fd16 in mysql_optionsv /work/mariadb/libmariadb/libmariadb/mariadb_lib.c:2738:9 #3 0x5bc1d4 in mysql_options /work/mariadb/libmariadb/libmariadb/mariadb_lib.c:3242:10 #4 0x570b94 in do_connect(st_command*) /work/mariadb/client/mysqltest.cc:6103:7 #5 0x584c39 in main /work/mariadb/client/mysqltest.cc:9321:9 #6 0x7fd15514db96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310 Indirect leak of 940240 byte(s) in 115 object(s) allocated from: #0 0x4fb80f in __interceptor_malloc /fun/cpp_projects/llvm_toolchain/llvm/projects/compiler-rt/lib/asan/asan_malloc_linux.cc:129 #1 0x64386e in ma_init_dynamic_array /work/mariadb/libmariadb/libmariadb/ma_array.c:49:31 #2 0x649ead in _hash_init /work/mariadb/libmariadb/libmariadb/ma_hash.c:52:7 #3 0x5a3080 in mysql_optionsv /work/mariadb/libmariadb/libmariadb/mariadb_lib.c:2938:13 #4 0x5bc20c in mysql_options4 /work/mariadb/libmariadb/libmariadb/mariadb_lib.c:3248:10 #5 0x56f63b in connect_n_handle_errors(st_command*, st_mysql*, char const*, char const*, char const*, char const*, int, char const*) 
/work/mariadb/client/mysqltest.cc:5874:3 #6 0x57146b in do_connect(st_command*) /work/mariadb/client/mysqltest.cc:6193:7 #7 0x584c39 in main /work/mariadb/client/mysqltest.cc:9321:9 #8 0x7fd15514db96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310 ... Closes #809 --- client/mysqltest.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 088afed41b2..2b7401878ef 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -6129,6 +6129,11 @@ void do_connect(struct st_command *command) if (con_slot == next_con) next_con++; /* if we used the next_con slot, advance the pointer */ } + else // Failed to connect. Free the memory. + { + mysql_close(con_slot->mysql); + con_slot->mysql= NULL; + } dynstr_free(&ds_connection_name); dynstr_free(&ds_host); From 1abdc0e435e4e7e71257e246972a3b3015df287c Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 24 Jan 2019 15:47:27 +0100 Subject: [PATCH 16/37] 5.6.43 --- mysql-test/suite/perfschema/r/dml_setup_instruments.result | 4 +++- mysql-test/suite/perfschema/t/dml_setup_instruments.test | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/perfschema/r/dml_setup_instruments.result b/mysql-test/suite/perfschema/r/dml_setup_instruments.result index 310ff60aa5b..7bc7ca785d6 100644 --- a/mysql-test/suite/perfschema/r/dml_setup_instruments.result +++ b/mysql-test/suite/perfschema/r/dml_setup_instruments.result @@ -16,7 +16,9 @@ wait/synch/mutex/sql/LOCK_crypt YES YES wait/synch/mutex/sql/LOCK_delayed_create YES YES select * from performance_schema.setup_instruments where name like 'Wait/Synch/Rwlock/sql/%' - and name not in ('wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock') + and name not in ( +'wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock', +'wait/synch/rwlock/sql/LOCK_named_pipe_full_access_group') order by name limit 10; NAME ENABLED TIMED wait/synch/rwlock/sql/Binlog_relay_IO_delegate::lock YES YES diff --git a/mysql-test/suite/perfschema/t/dml_setup_instruments.test b/mysql-test/suite/perfschema/t/dml_setup_instruments.test index 8a4f11ba51f..18c260e1555 100644 --- a/mysql-test/suite/perfschema/t/dml_setup_instruments.test +++ b/mysql-test/suite/perfschema/t/dml_setup_instruments.test @@ -22,10 +22,13 @@ select * from performance_schema.setup_instruments order by name limit 10; # CRYPTO_dynlock_value::lock is dependent on the build (SSL) +# LOCK_named_pipe_full_access_group is dependent on the build (Windows) select * from performance_schema.setup_instruments where name like 'Wait/Synch/Rwlock/sql/%' - and name not in ('wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock') + and name not in ( + 'wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock', + 'wait/synch/rwlock/sql/LOCK_named_pipe_full_access_group') order by name limit 10; # COND_handler_count is dependent on the build (Windows only) From 036ca990abddbc9b93f45904ccabd5ec4bd5b396 Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Thu, 24 Jan 2019 20:47:46 +0530 Subject: [PATCH 17/37] MDEV-18255: Server crashes in Bitmap<64u>::intersect Calling st_select_lex::update_used_tables in JOIN::optimize_unflattened_subqueries only when we are sure that the join have not been cleaned up. 
This can happen for a case when we have a non-merged semi-join and an impossible where which would lead to the cleanup of the join which has the non-merged semi-join --- mysql-test/r/subselect_mat.result | 16 ++++++++++++++++ mysql-test/t/subselect_mat.test | 13 +++++++++++++ sql/sql_lex.cc | 3 ++- 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result index aa0ac73abd2..7907b86135e 100644 --- a/mysql-test/r/subselect_mat.result +++ b/mysql-test/r/subselect_mat.result @@ -2822,3 +2822,19 @@ id select_type table type possible_keys key key_len ref rows Extra SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 ); f DROP TABLE t1, t2; +# +# MDEV-18255: Server crashes in Bitmap<64u>::intersect +# +create table t1 (v1 varchar(1)) engine=myisam ; +create table t2 (v1 varchar(1)) engine=myisam ; +explain +select 1 from t1 where exists +(select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table +3 MATERIALIZED NULL NULL NULL NULL NULL NULL NULL no matching row in const table +select 1 from t1 where exists +(select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ; +1 +drop table t1,t2; diff --git a/mysql-test/t/subselect_mat.test b/mysql-test/t/subselect_mat.test index 5211f35b48b..66a6cc97acb 100644 --- a/mysql-test/t/subselect_mat.test +++ b/mysql-test/t/subselect_mat.test @@ -267,3 +267,16 @@ explain SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 ); SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 ); DROP TABLE t1, t2; + +--echo # +--echo # MDEV-18255: Server crashes in Bitmap<64u>::intersect +--echo # +create table t1 (v1 varchar(1)) engine=myisam ; +create table t2 (v1 varchar(1)) engine=myisam ; + +explain +select 1 from t1 where exists + (select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ; +select 1 from t1 where exists + (select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ; +drop table t1,t2; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 08c169c5999..2fb239ed498 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -3551,7 +3551,8 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only) inner_join->select_options|= SELECT_DESCRIBE; } res= inner_join->optimize(); - sl->update_used_tables(); + if (!inner_join->cleaned) + sl->update_used_tables(); sl->update_correlated_cache(); is_correlated_unit|= sl->is_correlated; inner_join->select_options= save_options; From 3262afc6c50bdee489dd35feb8c5254dbc93494b Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 24 Jan 2019 16:48:39 +0100 Subject: [PATCH 18/37] 5.6.42-84.2 --- storage/xtradb/handler/ha_innodb.cc | 5 +++ storage/xtradb/handler/handler0alter.cc | 35 ++++++++++++++++----- storage/xtradb/include/os0file.h | 6 +++- storage/xtradb/include/univ.i | 2 +- storage/xtradb/log/log0online.cc | 41 ++++++++++++++++++++++++- storage/xtradb/row/row0merge.cc | 17 ++++++++-- storage/xtradb/row/row0mysql.cc | 15 ++------- storage/xtradb/row/row0sel.cc | 4 +-- 8 files changed, 99 insertions(+), 26 deletions(-) diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 053a181e26c..1ff50136859 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ 
b/storage/xtradb/handler/ha_innodb.cc @@ -2322,6 +2322,11 @@ innobase_get_lower_case_table_names(void) { return(lower_case_table_names); } +/** return one of the tmpdir path +@return tmpdir path*/ +UNIV_INTERN +char* +innobase_mysql_tmpdir(void) { return (mysql_tmpdir); } /** Create a temporary file in the location specified by the parameter path. If the path is null, then it will be created in tmpdir. diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index 517dd30410b..856f8bac8de 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -4056,11 +4056,23 @@ oom: table. Either way, we should be seeing and reporting a bogus duplicate key error. */ dup_key = NULL; - } else { - DBUG_ASSERT(prebuilt->trx->error_key_num - < ha_alter_info->key_count); + } else if (prebuilt->trx->error_key_num == 0) { dup_key = &ha_alter_info->key_info_buffer[ prebuilt->trx->error_key_num]; + } else { + /* Check if there is generated cluster index column */ + if (ctx->num_to_add_index > ha_alter_info->key_count) { + DBUG_ASSERT(prebuilt->trx->error_key_num + <= ha_alter_info->key_count); + dup_key = &ha_alter_info->key_info_buffer[ + prebuilt->trx->error_key_num - 1]; + } + else { + DBUG_ASSERT(prebuilt->trx->error_key_num + < ha_alter_info->key_count); + dup_key = &ha_alter_info->key_info_buffer[ + prebuilt->trx->error_key_num]; + } } print_keydup_error(altered_table, dup_key, MYF(0)); break; @@ -4981,11 +4993,20 @@ commit_try_rebuild( FTS_DOC_ID. */ dup_key = NULL; } else { - DBUG_ASSERT(err_key < - ha_alter_info->key_count); - dup_key = &ha_alter_info - ->key_info_buffer[err_key]; + if (ctx->num_to_add_index > ha_alter_info->key_count) { + DBUG_ASSERT(err_key <= + ha_alter_info->key_count); + dup_key = &ha_alter_info + ->key_info_buffer[err_key - 1]; + } + else { + DBUG_ASSERT(err_key < + ha_alter_info->key_count); + dup_key = &ha_alter_info + ->key_info_buffer[err_key]; + } } + print_keydup_error(altered_table, dup_key, MYF(0)); DBUG_RETURN(true); case DB_ONLINE_LOG_TOO_BIG: diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h index f7531d99f38..bc7e468b776 100644 --- a/storage/xtradb/include/os0file.h +++ b/storage/xtradb/include/os0file.h @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted @@ -1531,6 +1531,10 @@ os_file_get_status( file can be opened in RW mode */ #if !defined(UNIV_HOTBACKUP) + +/** return one of the tmpdir path + @return tmpdir path*/ +char *innobase_mysql_tmpdir(void); /** Create a temporary file in the location specified by the parameter path. If the path is null, then it will be created in tmpdir. 
@param[in] path location for creating temporary file diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index f625ea46433..4904e174b87 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -47,7 +47,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_BUGFIX MYSQL_VERSION_PATCH #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 84.1 +#define PERCONA_INNODB_VERSION 84.2 #endif /* Enable UNIV_LOG_ARCHIVE in XtraDB */ diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc index e3cfbfc2088..a3d76e79b2a 100644 --- a/storage/xtradb/log/log0online.cc +++ b/storage/xtradb/log/log0online.cc @@ -1880,6 +1880,8 @@ log_online_purge_changed_page_bitmaps( for (i = 0; i < bitmap_files.count; i++) { + char full_bmp_file_name[2 * FN_REFLEN + 2]; + /* We consider the end LSN of the current bitmap, derived from the start LSN of the subsequent bitmap file, to determine whether to remove the current bitmap. Note that bitmap_files @@ -1895,8 +1897,45 @@ log_online_purge_changed_page_bitmaps( break; } + + /* In some non-trivial cases the sequence of .xdb files may + have gaps. For instance: + ib_modified_log_1_0.xdb + ib_modified_log_2_.xdb + ib_modified_log_4_.xdb + Adding this check as a safety precaution. */ + if (bitmap_files.files[i].name[0] == '\0') + continue; + + /* If redo log tracking is enabled, reuse 'bmp_file_home' + from 'log_bmp_sys'. Otherwise, compose the full '.xdb' file + path from 'srv_data_home', adding a path separator if + necessary. */ + if (log_bmp_sys != NULL) { + ut_snprintf(full_bmp_file_name, + sizeof(full_bmp_file_name), + "%s%s", log_bmp_sys->bmp_file_home, + bitmap_files.files[i].name); + } + else { + char separator[2] = {0, 0}; + const size_t srv_data_home_len = + strlen(srv_data_home); + + ut_a(srv_data_home_len < FN_REFLEN); + if (srv_data_home_len != 0 && + srv_data_home[srv_data_home_len - 1] != + SRV_PATH_SEPARATOR) { + separator[0] = SRV_PATH_SEPARATOR; + } + ut_snprintf(full_bmp_file_name, + sizeof(full_bmp_file_name), "%s%s%s", + srv_data_home, separator, + bitmap_files.files[i].name); + } + if (!os_file_delete_if_exists(innodb_file_bmp_key, - bitmap_files.files[i].name)) { + full_bmp_file_name)) { os_file_get_last_error(TRUE); result = TRUE; diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index b000f313d67..507709ce12b 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2005, 2018, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3129,15 +3129,27 @@ row_merge_file_create_low( const char* path) { int fd; + char filename[] = "Innodb Merge Temp File\0"; + char* filepath = NULL; + int path_len; + if (path == NULL) { + path = innobase_mysql_tmpdir(); + } #ifdef UNIV_PFS_IO /* This temp file open does not go through normal file APIs, add instrumentation to register with performance schema */ + path_len = strlen(path) + sizeof "/" + strlen(filename)+1; + filepath = static_cast(mem_alloc(path_len)); + memcpy(filepath,path,strlen(path)); + ut_snprintf(filepath + strlen(path),path_len - strlen(path), + "%c%s",'/',filename); struct PSI_file_locker* locker = NULL; + PSI_file_locker_state state; locker = PSI_FILE_CALL(get_thread_file_name_locker)( &state, innodb_file_temp_key, PSI_FILE_OPEN, - "Innodb Merge Temp File", &locker); + filepath, &locker); if (locker != NULL) { PSI_FILE_CALL(start_file_open_wait)(locker, __FILE__, @@ -3150,6 +3162,7 @@ row_merge_file_create_low( PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)( locker, fd); } + mem_free(filepath); #endif if (fd < 0) { diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index ddde8b8f730..9fc5c669ba0 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -5500,18 +5500,6 @@ row_rename_table_for_mysql( goto funct_exit; } - /* Wait for background fts sync to finish */ - for (retry = 1; dict_fts_index_syncing(table); ++retry) { - DICT_BG_YIELD(trx); - if (retry % 100 == 0) { - ib_logf(IB_LOG_LEVEL_INFO, - "Unable to rename table %s to new name" - " %s because FTS sync is running on table." - " Retrying\n", - old_name, new_name); - } - } - /* We use the private SQL parser of Innobase to generate the query graphs needed in updating the dictionary data from system tables. */ @@ -5669,6 +5657,9 @@ row_rename_table_for_mysql( " = TO_BINARY(:old_table_name);\n" "END;\n" , FALSE, trx); + if (err != DB_SUCCESS) { + goto end; + } } else if (n_constraints_to_drop > 0) { /* Drop some constraints of tmp tables. */ diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 5db61ecdd56..0edad710d79 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -4416,7 +4416,7 @@ rec_loop: passed to InnoDB when there is no ICP and number of loops in row_search_for_mysql for rows found but not reporting due to search views etc. 
*/ - if (prev_rec != NULL + if (prev_rec != NULL && !prebuilt->innodb_api && prebuilt->mysql_handler->end_range != NULL && prebuilt->idx_cond == NULL && end_loop >= 100) { From 13802fef831790c4b63a3ddbf96e516eff422464 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 24 Jan 2019 17:31:13 +0100 Subject: [PATCH 19/37] 5.6.42-84.2 --- storage/tokudb/PerconaFT/COPYING.APACHEv2 | 174 ++ storage/tokudb/PerconaFT/README.md | 5 +- storage/tokudb/PerconaFT/ft/txn/txn_manager.h | 4 +- .../PerconaFT/locktree/concurrent_tree.cc | 14 + .../PerconaFT/locktree/concurrent_tree.h | 14 + storage/tokudb/PerconaFT/locktree/keyrange.cc | 13 + storage/tokudb/PerconaFT/locktree/keyrange.h | 13 + .../tokudb/PerconaFT/locktree/lock_request.cc | 13 + .../tokudb/PerconaFT/locktree/lock_request.h | 13 + storage/tokudb/PerconaFT/locktree/locktree.cc | 13 + storage/tokudb/PerconaFT/locktree/locktree.h | 13 + storage/tokudb/PerconaFT/locktree/manager.cc | 13 + .../tokudb/PerconaFT/locktree/range_buffer.cc | 13 + .../tokudb/PerconaFT/locktree/range_buffer.h | 13 + storage/tokudb/PerconaFT/locktree/treenode.cc | 13 + storage/tokudb/PerconaFT/locktree/treenode.h | 13 + .../tokudb/PerconaFT/locktree/txnid_set.cc | 13 + storage/tokudb/PerconaFT/locktree/txnid_set.h | 13 + storage/tokudb/PerconaFT/locktree/wfg.cc | 13 + storage/tokudb/PerconaFT/locktree/wfg.h | 13 + .../PerconaFT/portability/toku_instr_mysql.cc | 12 +- .../PerconaFT/portability/toku_instr_mysql.h | 11 +- .../PerconaFT/portability/toku_pthread.h | 78 +- storage/tokudb/PerconaFT/tools/CMakeLists.txt | 8 +- .../tokudb/PerconaFT/util/growable_array.h | 13 + storage/tokudb/PerconaFT/util/omt.cc | 2373 +++++++++-------- storage/tokudb/PerconaFT/util/omt.h | 13 + storage/tokudb/ha_tokudb.cc | 10 + storage/tokudb/hatoku_hton.cc | 4 +- storage/tokudb/hatoku_hton.h | 1 - .../mysql-test/tokudb_bugs/r/PS-4979.result | 2 + .../mysql-test/tokudb_bugs/t/PS-4979.test | 12 + storage/tokudb/tokudb_background.cc | 4 +- storage/tokudb/tokudb_sysvars.cc | 14 +- storage/tokudb/tokudb_sysvars.h | 4 +- 35 files changed, 1834 insertions(+), 1131 deletions(-) create mode 100644 storage/tokudb/PerconaFT/COPYING.APACHEv2 create mode 100644 storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result create mode 100644 storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test diff --git a/storage/tokudb/PerconaFT/COPYING.APACHEv2 b/storage/tokudb/PerconaFT/COPYING.APACHEv2 new file mode 100644 index 00000000000..ecbfc770fa9 --- /dev/null +++ b/storage/tokudb/PerconaFT/COPYING.APACHEv2 @@ -0,0 +1,174 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/storage/tokudb/PerconaFT/README.md b/storage/tokudb/PerconaFT/README.md index ffb646b67af..26333df877e 100644 --- a/storage/tokudb/PerconaFT/README.md +++ b/storage/tokudb/PerconaFT/README.md @@ -104,11 +104,14 @@ All source code and test contributions must be provided under a [BSD 2-Clause][b License ------- +Portions of the PerconaFT library (the 'locktree' and 'omt') are available under the Apache version 2 license. PerconaFT is available under the GPL version 2, and AGPL version 3. -See [COPYING.AGPLv3][agpllicense], +See [COPYING.APACHEv2][apachelicense], +[COPYING.AGPLv3][agpllicense], [COPYING.GPLv2][gpllicense], and [PATENTS][patents]. +[apachelicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.APACHEv2 [agpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.AGPLv3 [gpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.GPLv2 [patents]: http://github.com/Percona/PerconaFT/blob/master/PATENTS diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h index 7cdc52c4f43..25fa6032112 100644 --- a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h +++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h @@ -46,11 +46,11 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. void set_test_txn_sync_callback(void (*) (pthread_t, void*), void*); #define toku_test_txn_sync_callback(a) ((test_txn_sync_callback)? 
test_txn_sync_callback( a,test_txn_sync_callback_extra) : (void) 0) -#if TOKU_DEBUG_TXN_SYNC +#if defined(TOKU_DEBUG_TXN_SYNC) #define toku_debug_txn_sync(a) toku_test_txn_sync_callback(a) #else #define toku_debug_txn_sync(a) ((void) 0) -#endif +#endif // defined(TOKU_DEBUG_TXN_SYNC) typedef struct txn_manager *TXN_MANAGER; diff --git a/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc b/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc index 9347267db49..e07f32c98fb 100644 --- a/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc +++ b/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc @@ -32,6 +32,20 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/concurrent_tree.h b/storage/tokudb/PerconaFT/locktree/concurrent_tree.h index 1eb339b7317..66a7ff176bb 100644 --- a/storage/tokudb/PerconaFT/locktree/concurrent_tree.h +++ b/storage/tokudb/PerconaFT/locktree/concurrent_tree.h @@ -32,6 +32,20 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/keyrange.cc b/storage/tokudb/PerconaFT/locktree/keyrange.cc index 8c2a69d4703..2b4b3bbd4fd 100644 --- a/storage/tokudb/PerconaFT/locktree/keyrange.cc +++ b/storage/tokudb/PerconaFT/locktree/keyrange.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/keyrange.h b/storage/tokudb/PerconaFT/locktree/keyrange.h index 079ac3d7a80..a454287cbc8 100644 --- a/storage/tokudb/PerconaFT/locktree/keyrange.h +++ b/storage/tokudb/PerconaFT/locktree/keyrange.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.cc b/storage/tokudb/PerconaFT/locktree/lock_request.cc index 8d49ccf8a1f..3d4d43b9e25 100644 --- a/storage/tokudb/PerconaFT/locktree/lock_request.cc +++ b/storage/tokudb/PerconaFT/locktree/lock_request.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.h b/storage/tokudb/PerconaFT/locktree/lock_request.h index a8d8cb7785b..91a6ff12b52 100644 --- a/storage/tokudb/PerconaFT/locktree/lock_request.h +++ b/storage/tokudb/PerconaFT/locktree/lock_request.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/locktree.cc b/storage/tokudb/PerconaFT/locktree/locktree.cc index 069aae26f66..8ba3f0f00ae 100644 --- a/storage/tokudb/PerconaFT/locktree/locktree.cc +++ b/storage/tokudb/PerconaFT/locktree/locktree.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/locktree.h b/storage/tokudb/PerconaFT/locktree/locktree.h index 1ba7a51b124..7006b6fb01d 100644 --- a/storage/tokudb/PerconaFT/locktree/locktree.h +++ b/storage/tokudb/PerconaFT/locktree/locktree.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/manager.cc b/storage/tokudb/PerconaFT/locktree/manager.cc index 6bb5c77bf32..21f8dc6cf01 100644 --- a/storage/tokudb/PerconaFT/locktree/manager.cc +++ b/storage/tokudb/PerconaFT/locktree/manager.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/range_buffer.cc b/storage/tokudb/PerconaFT/locktree/range_buffer.cc index 3ddfd0faf97..d1f14fc4a52 100644 --- a/storage/tokudb/PerconaFT/locktree/range_buffer.cc +++ b/storage/tokudb/PerconaFT/locktree/range_buffer.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/range_buffer.h b/storage/tokudb/PerconaFT/locktree/range_buffer.h index b0e36968e73..811b0f85e69 100644 --- a/storage/tokudb/PerconaFT/locktree/range_buffer.h +++ b/storage/tokudb/PerconaFT/locktree/range_buffer.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/treenode.cc b/storage/tokudb/PerconaFT/locktree/treenode.cc index cc3a4969643..0247242f975 100644 --- a/storage/tokudb/PerconaFT/locktree/treenode.cc +++ b/storage/tokudb/PerconaFT/locktree/treenode.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/treenode.h b/storage/tokudb/PerconaFT/locktree/treenode.h index 08aad2b6636..981e8b5a9cf 100644 --- a/storage/tokudb/PerconaFT/locktree/treenode.h +++ b/storage/tokudb/PerconaFT/locktree/treenode.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/txnid_set.cc b/storage/tokudb/PerconaFT/locktree/txnid_set.cc index 82b59453156..bd4e9723155 100644 --- a/storage/tokudb/PerconaFT/locktree/txnid_set.cc +++ b/storage/tokudb/PerconaFT/locktree/txnid_set.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/txnid_set.h b/storage/tokudb/PerconaFT/locktree/txnid_set.h index 109d7f798e4..81fd45b6dde 100644 --- a/storage/tokudb/PerconaFT/locktree/txnid_set.h +++ b/storage/tokudb/PerconaFT/locktree/txnid_set.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/wfg.cc b/storage/tokudb/PerconaFT/locktree/wfg.cc index 9a234f50060..26b7a3b5295 100644 --- a/storage/tokudb/PerconaFT/locktree/wfg.cc +++ b/storage/tokudb/PerconaFT/locktree/wfg.cc @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/locktree/wfg.h b/storage/tokudb/PerconaFT/locktree/wfg.h index c56886e1362..5c1599592e6 100644 --- a/storage/tokudb/PerconaFT/locktree/wfg.h +++ b/storage/tokudb/PerconaFT/locktree/wfg.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." 
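The portability hunks that follow replace bare preprocessor tests such as `#if TOKU_PTHREAD_DEBUG` with `#if defined(TOKU_PTHREAD_DEBUG)` and label each matching `#endif`. A minimal sketch of why the two forms are not interchangeable, using a hypothetical macro FEATURE_X that is not taken from the patch: an undefined identifier in `#if` silently evaluates to 0 (and warns under -Wundef), and the two guards disagree when the macro is defined with the value 0.

/* Sketch only; FEATURE_X is a made-up macro for illustration. */
#include <stdio.h>

#define FEATURE_X 0   /* defined, but with value 0 */

int main(void) {
#if FEATURE_X
    puts("#if FEATURE_X: taken");          /* not taken: the value is 0 */
#endif
#if defined(FEATURE_X)
    puts("#if defined(FEATURE_X): taken"); /* taken: the macro is defined */
#endif  /* defined(FEATURE_X) */
    return 0;
}

Commenting the closing `#endif` with the tested condition, as the hunks below do, keeps long conditional regions readable when guards are nested or far apart.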
diff --git a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc index 6f69c3c31b9..b5305ffaff4 100644 --- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc +++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc @@ -184,9 +184,9 @@ void toku_instr_file_io_end(toku_io_instrumentation &io_instr, ssize_t count) { void toku_instr_mutex_init(const toku_instr_key &key, toku_mutex_t &mutex) { mutex.psi_mutex = PSI_MUTEX_CALL(init_mutex)(key.id(), &mutex.pmutex); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) mutex.instr_key_id = key.id(); -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) } void toku_instr_mutex_destroy(PSI_mutex *&mutex_instr) { @@ -242,9 +242,9 @@ void toku_instr_mutex_unlock(PSI_mutex *mutex_instr) { void toku_instr_cond_init(const toku_instr_key &key, toku_cond_t &cond) { cond.psi_cond = PSI_COND_CALL(init_cond)(key.id(), &cond.pcond); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) cond.instr_key_id = key.id(); -#endif +#endif // // defined(TOKU_PTHREAD_DEBUG) } void toku_instr_cond_destroy(PSI_cond *&cond_instr) { @@ -295,9 +295,9 @@ void toku_instr_cond_broadcast(const toku_cond_t &cond) { void toku_instr_rwlock_init(const toku_instr_key &key, toku_pthread_rwlock_t &rwlock) { rwlock.psi_rwlock = PSI_RWLOCK_CALL(init_rwlock)(key.id(), &rwlock.rwlock); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) rwlock.instr_key_id = key.id(); -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) } void toku_instr_rwlock_destroy(PSI_rwlock *&rwlock_instr) { diff --git a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h index d6b0ed35ce9..695624acd6d 100644 --- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h +++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h @@ -12,8 +12,15 @@ // undefine them here to avoid compilation errors. 
#undef __STDC_FORMAT_MACROS #undef __STDC_LIMIT_MACROS -#include // PSI_file -#include // PSI_mutex +#include "mysql/psi/mysql_file.h" // PSI_file +#include "mysql/psi/mysql_thread.h" // PSI_mutex +#include "mysql/psi/mysql_stage.h" // PSI_stage + +#if (MYSQL_VERSION_ID >= 80000) +#include "mysql/psi/mysql_cond.h" +#include "mysql/psi/mysql_mutex.h" +#include "mysql/psi/mysql_rwlock.h" +#endif // (MYSQL_VERSION_ID >= nn) #ifndef HAVE_PSI_MUTEX_INTERFACE #error HAVE_PSI_MUTEX_INTERFACE required diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.h b/storage/tokudb/PerconaFT/portability/toku_pthread.h index e3bd3bce598..da956097d05 100644 --- a/storage/tokudb/PerconaFT/portability/toku_pthread.h +++ b/storage/tokudb/PerconaFT/portability/toku_pthread.h @@ -64,23 +64,23 @@ struct toku_mutex_t { pthread_mutex_t pmutex; struct PSI_mutex *psi_mutex; /* The performance schema instrumentation hook */ -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) pthread_t owner; // = pthread_self(); // for debugging bool locked; bool valid; pfs_key_t instr_key_id; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) }; struct toku_cond_t { pthread_cond_t pcond; struct PSI_cond *psi_cond; -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) pfs_key_t instr_key_id; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) }; -#ifdef TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) #define TOKU_COND_INITIALIZER \ { \ .pcond = PTHREAD_COND_INITIALIZER, .psi_cond = nullptr, \ @@ -89,14 +89,14 @@ struct toku_cond_t { #else #define TOKU_COND_INITIALIZER \ { .pcond = PTHREAD_COND_INITIALIZER, .psi_cond = nullptr } -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) struct toku_pthread_rwlock_t { pthread_rwlock_t rwlock; struct PSI_rwlock *psi_rwlock; -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) pfs_key_t instr_key_id; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) }; typedef struct toku_mutex_aligned { @@ -117,7 +117,7 @@ typedef struct toku_mutex_aligned { #define ZERO_MUTEX_INITIALIZER \ {} -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) #define TOKU_MUTEX_INITIALIZER \ { \ .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr, .owner = 0, \ @@ -126,12 +126,12 @@ typedef struct toku_mutex_aligned { #else #define TOKU_MUTEX_INITIALIZER \ { .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr } -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) // Darwin doesn't provide adaptive mutexes #if defined(__APPLE__) #define TOKU_MUTEX_ADAPTIVE PTHREAD_MUTEX_DEFAULT -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) #define TOKU_ADAPTIVE_MUTEX_INITIALIZER \ { \ .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr, .owner = 0, \ @@ -140,10 +140,10 @@ typedef struct toku_mutex_aligned { #else #define TOKU_ADAPTIVE_MUTEX_INITIALIZER \ { .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr } -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) #else // __FreeBSD__, __linux__, at least #define TOKU_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) #define TOKU_ADAPTIVE_MUTEX_INITIALIZER \ { \ .pmutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, .psi_mutex = nullptr, \ @@ -152,8 +152,8 @@ typedef struct toku_mutex_aligned { #else #define TOKU_ADAPTIVE_MUTEX_INITIALIZER \ { .pmutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, .psi_mutex = nullptr } -#endif -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) +#endif // defined(__APPLE__) // Different OSes implement mutexes as different amounts of nested structs. 
// C++ will fill out all missing values with zeroes if you provide at least one @@ -188,7 +188,7 @@ toku_mutexattr_destroy(toku_pthread_mutexattr_t *attr) { assert_zero(r); } -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) static inline void toku_mutex_assert_locked(const toku_mutex_t *mutex) { invariant(mutex->locked); invariant(mutex->owner == pthread_self()); @@ -197,7 +197,7 @@ static inline void toku_mutex_assert_locked(const toku_mutex_t *mutex) { static inline void toku_mutex_assert_locked(const toku_mutex_t *mutex __attribute__((unused))) { } -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) // asserting that a mutex is unlocked only makes sense // if the calling thread can guaruntee that no other threads @@ -207,7 +207,7 @@ toku_mutex_assert_locked(const toku_mutex_t *mutex __attribute__((unused))) { // when a node is locked the caller knows that no other threads // can be trying to lock its childrens' mutexes. the children // are in one of two fixed states: locked or unlocked. -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) static inline void toku_mutex_assert_unlocked(toku_mutex_t *mutex) { invariant(mutex->owner == 0); @@ -216,7 +216,7 @@ toku_mutex_assert_unlocked(toku_mutex_t *mutex) { #else static inline void toku_mutex_assert_unlocked(toku_mutex_t *mutex __attribute__((unused))) {} -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) #define toku_mutex_lock(M) \ toku_mutex_lock_with_source_location(M, __FILE__, __LINE__) @@ -231,13 +231,13 @@ static inline void toku_cond_init(toku_cond_t *cond, toku_mutex_trylock_with_source_location(M, __FILE__, __LINE__) inline void toku_mutex_unlock(toku_mutex_t *mutex) { -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) invariant(mutex->owner == pthread_self()); invariant(mutex->valid); invariant(mutex->locked); mutex->locked = false; mutex->owner = 0; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) toku_instr_mutex_unlock(mutex->psi_mutex); int r = pthread_mutex_unlock(&mutex->pmutex); assert_zero(r); @@ -254,13 +254,13 @@ inline void toku_mutex_lock_with_source_location(toku_mutex_t *mutex, toku_instr_mutex_lock_end(mutex_instr, r); assert_zero(r); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) invariant(mutex->valid); invariant(!mutex->locked); invariant(mutex->owner == 0); mutex->locked = true; mutex->owner = pthread_self(); -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) } inline int toku_mutex_trylock_with_source_location(toku_mutex_t *mutex, @@ -273,7 +273,7 @@ inline int toku_mutex_trylock_with_source_location(toku_mutex_t *mutex, const int r = pthread_mutex_lock(&mutex->pmutex); toku_instr_mutex_lock_end(mutex_instr, r); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) if (r == 0) { invariant(mutex->valid); invariant(!mutex->locked); @@ -281,7 +281,7 @@ inline int toku_mutex_trylock_with_source_location(toku_mutex_t *mutex, mutex->locked = true; mutex->owner = pthread_self(); } -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) return r; } @@ -310,11 +310,11 @@ inline void toku_cond_wait_with_source_location(toku_cond_t *cond, const char *src_file, uint src_line) { -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) invariant(mutex->locked); mutex->locked = false; mutex->owner = 0; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) /* Instrumentation start */ toku_cond_instrumentation cond_instr; @@ -332,11 +332,11 @@ inline void toku_cond_wait_with_source_location(toku_cond_t *cond, toku_instr_cond_wait_end(cond_instr, r); assert_zero(r); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) 
invariant(!mutex->locked); mutex->locked = true; mutex->owner = pthread_self(); -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) } inline int toku_cond_timedwait_with_source_location(toku_cond_t *cond, @@ -344,11 +344,11 @@ inline int toku_cond_timedwait_with_source_location(toku_cond_t *cond, toku_timespec_t *wakeup_at, const char *src_file, uint src_line) { -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) invariant(mutex->locked); mutex->locked = false; mutex->owner = 0; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) /* Instrumentation start */ toku_cond_instrumentation cond_instr; @@ -366,11 +366,11 @@ inline int toku_cond_timedwait_with_source_location(toku_cond_t *cond, /* Instrumentation end */ toku_instr_cond_wait_end(cond_instr, r); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) invariant(!mutex->locked); mutex->locked = true; mutex->owner = pthread_self(); -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) return r; } @@ -389,26 +389,26 @@ inline void toku_cond_broadcast(toku_cond_t *cond) { inline void toku_mutex_init(const toku_instr_key &key, toku_mutex_t *mutex, const toku_pthread_mutexattr_t *attr) { -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) mutex->valid = true; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) toku_instr_mutex_init(key, *mutex); const int r = pthread_mutex_init(&mutex->pmutex, attr); assert_zero(r); -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) mutex->locked = false; invariant(mutex->valid); mutex->valid = true; mutex->owner = 0; -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) } inline void toku_mutex_destroy(toku_mutex_t *mutex) { -#if TOKU_PTHREAD_DEBUG +#if defined(TOKU_PTHREAD_DEBUG) invariant(mutex->valid); mutex->valid = false; invariant(!mutex->locked); -#endif +#endif // defined(TOKU_PTHREAD_DEBUG) toku_instr_mutex_destroy(mutex->psi_mutex); int r = pthread_mutex_destroy(&mutex->pmutex); assert_zero(r); diff --git a/storage/tokudb/PerconaFT/tools/CMakeLists.txt b/storage/tokudb/PerconaFT/tools/CMakeLists.txt index d54c2c21827..710a55a5957 100644 --- a/storage/tokudb/PerconaFT/tools/CMakeLists.txt +++ b/storage/tokudb/PerconaFT/tools/CMakeLists.txt @@ -15,16 +15,12 @@ foreach(tool ${tools}) if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND (CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC")) if (MYSQL_BASE_VERSION VERSION_EQUAL "8.0") - target_link_libraries(${tool} sql_main sql_gis sql_main binlog rpl master slave ${ICU_LIBRARIES}) + target_link_libraries(${tool} sql_main sql_gis sql_main sql_dd sql_gis binlog rpl master slave ${ICU_LIBRARIES}) else () target_link_libraries(${tool} sql binlog rpl master slave) endif () else () - if (MYSQL_BASE_VERSION VERSION_EQUAL "8.0") - target_link_libraries(${tool} mysqlclient) - else () - target_link_libraries(${tool} perconaserverclient) - endif () + target_link_libraries(${tool} perconaserverclient) endif () endif () diff --git a/storage/tokudb/PerconaFT/util/growable_array.h b/storage/tokudb/PerconaFT/util/growable_array.h index e8873ae4abd..ad60ea6395b 100644 --- a/storage/tokudb/PerconaFT/util/growable_array.h +++ b/storage/tokudb/PerconaFT/util/growable_array.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/PerconaFT/util/omt.cc b/storage/tokudb/PerconaFT/util/omt.cc index 1fae0712c77..846c4df7f54 100644 --- a/storage/tokudb/PerconaFT/util/omt.cc +++ b/storage/tokudb/PerconaFT/util/omt.cc @@ -32,1105 +32,1356 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ -#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." +#ident \ + "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." -#include #include +#include #include namespace toku { -template -void omt::create(void) { - this->create_internal(2); - if (supports_marks) { - this->convert_to_tree(); - } -} - -template -void omt::create_no_array(void) { - if (!supports_marks) { - this->create_internal_no_array(0); - } else { - this->is_array = false; - this->capacity = 0; - this->d.t.nodes = nullptr; - this->d.t.root.set_to_null(); - this->d.t.free_idx = 0; - } -} - -template -void omt::create_from_sorted_array(const omtdata_t *const values, const uint32_t numvalues) { - this->create_internal(numvalues); - memcpy(this->d.a.values, values, numvalues * (sizeof values[0])); - this->d.a.num_values = numvalues; - if (supports_marks) { - this->convert_to_tree(); - } -} - -template -void omt::create_steal_sorted_array(omtdata_t **const values, const uint32_t numvalues, const uint32_t new_capacity) { - paranoid_invariant_notnull(values); - this->create_internal_no_array(new_capacity); - this->d.a.num_values = numvalues; - this->d.a.values = *values; - *values = nullptr; - if (supports_marks) { - this->convert_to_tree(); - } -} - -template -int omt::split_at(omt *const newomt, const uint32_t idx) { - barf_if_marked(*this); - paranoid_invariant_notnull(newomt); - if (idx > this->size()) { return EINVAL; } - this->convert_to_array(); - const uint32_t newsize = this->size() - idx; - newomt->create_from_sorted_array(&this->d.a.values[this->d.a.start_idx + idx], newsize); - this->d.a.num_values = idx; - this->maybe_resize_array(idx); - if (supports_marks) { - this->convert_to_tree(); - } - return 0; -} - -template -void omt::merge(omt *const leftomt, omt *const rightomt) { - barf_if_marked(*this); - paranoid_invariant_notnull(leftomt); - paranoid_invariant_notnull(rightomt); - const uint32_t leftsize = leftomt->size(); - const uint32_t rightsize = rightomt->size(); - const uint32_t newsize = leftsize + rightsize; - - if 
(leftomt->is_array) { - if (leftomt->capacity - (leftomt->d.a.start_idx + leftomt->d.a.num_values) >= rightsize) { - this->create_steal_sorted_array(&leftomt->d.a.values, leftomt->d.a.num_values, leftomt->capacity); - this->d.a.start_idx = leftomt->d.a.start_idx; - } else { - this->create_internal(newsize); - memcpy(&this->d.a.values[0], - &leftomt->d.a.values[leftomt->d.a.start_idx], - leftomt->d.a.num_values * (sizeof this->d.a.values[0])); - } - } else { - this->create_internal(newsize); - leftomt->fill_array_with_subtree_values(&this->d.a.values[0], leftomt->d.t.root); - } - leftomt->destroy(); - this->d.a.num_values = leftsize; - - if (rightomt->is_array) { - memcpy(&this->d.a.values[this->d.a.start_idx + this->d.a.num_values], - &rightomt->d.a.values[rightomt->d.a.start_idx], - rightomt->d.a.num_values * (sizeof this->d.a.values[0])); - } else { - rightomt->fill_array_with_subtree_values(&this->d.a.values[this->d.a.start_idx + this->d.a.num_values], - rightomt->d.t.root); - } - rightomt->destroy(); - this->d.a.num_values += rightsize; - paranoid_invariant(this->size() == newsize); - if (supports_marks) { - this->convert_to_tree(); - } -} - -template -void omt::clone(const omt &src) { - barf_if_marked(*this); - this->create_internal(src.size()); - if (src.is_array) { - memcpy(&this->d.a.values[0], &src.d.a.values[src.d.a.start_idx], src.d.a.num_values * (sizeof this->d.a.values[0])); - } else { - src.fill_array_with_subtree_values(&this->d.a.values[0], src.d.t.root); - } - this->d.a.num_values = src.size(); - if (supports_marks) { - this->convert_to_tree(); - } -} - -template -void omt::clear(void) { - if (this->is_array) { - this->d.a.start_idx = 0; - this->d.a.num_values = 0; - } else { - this->d.t.root.set_to_null(); - this->d.t.free_idx = 0; - } -} - -template -void omt::destroy(void) { - this->clear(); - this->capacity = 0; - if (this->is_array) { - if (this->d.a.values != nullptr) { - toku_free(this->d.a.values); - } - this->d.a.values = nullptr; - } else { - if (this->d.t.nodes != nullptr) { - toku_free(this->d.t.nodes); - } - this->d.t.nodes = nullptr; - } -} - -template -uint32_t omt::size(void) const { - if (this->is_array) { - return this->d.a.num_values; - } else { - return this->nweight(this->d.t.root); - } -} - - -template -template -int omt::insert(const omtdata_t &value, const omtcmp_t &v, uint32_t *const idx) { - int r; - uint32_t insert_idx; - - r = this->find_zero(v, nullptr, &insert_idx); - if (r==0) { - if (idx) *idx = insert_idx; - return DB_KEYEXIST; - } - if (r != DB_NOTFOUND) return r; - - if ((r = this->insert_at(value, insert_idx))) return r; - if (idx) *idx = insert_idx; - - return 0; -} - -// The following 3 functions implement a static if for us. 
-template -static void barf_if_marked(const omt &UU(omt)) { -} - -template -static void barf_if_marked(const omt &omt) { - invariant(!omt.has_marks()); -} - -template -bool omt::has_marks(void) const { - static_assert(supports_marks, "Does not support marks"); - if (this->d.t.root.is_null()) { - return false; - } - const omt_node &node = this->d.t.nodes[this->d.t.root.get_index()]; - return node.get_marks_below() || node.get_marked(); -} - -template -int omt::insert_at(const omtdata_t &value, const uint32_t idx) { - barf_if_marked(*this); - if (idx > this->size()) { return EINVAL; } - - this->maybe_resize_or_convert(this->size() + 1); - if (this->is_array && idx != this->d.a.num_values && - (idx != 0 || this->d.a.start_idx == 0)) { - this->convert_to_tree(); - } - if (this->is_array) { - if (idx == this->d.a.num_values) { - this->d.a.values[this->d.a.start_idx + this->d.a.num_values] = value; - } - else { - this->d.a.values[--this->d.a.start_idx] = value; - } - this->d.a.num_values++; - } - else { - subtree *rebalance_subtree = nullptr; - this->insert_internal(&this->d.t.root, value, idx, &rebalance_subtree); - if (rebalance_subtree != nullptr) { - this->rebalance(rebalance_subtree); - } - } - return 0; -} - -template -int omt::set_at(const omtdata_t &value, const uint32_t idx) { - barf_if_marked(*this); - if (idx >= this->size()) { return EINVAL; } - - if (this->is_array) { - this->set_at_internal_array(value, idx); - } else { - this->set_at_internal(this->d.t.root, value, idx); - } - return 0; -} - -template -int omt::delete_at(const uint32_t idx) { - barf_if_marked(*this); - if (idx >= this->size()) { return EINVAL; } - - this->maybe_resize_or_convert(this->size() - 1); - if (this->is_array && idx != 0 && idx != this->d.a.num_values - 1) { - this->convert_to_tree(); - } - if (this->is_array) { - //Testing for 0 does not rule out it being the last entry. - //Test explicitly for num_values-1 - if (idx != this->d.a.num_values - 1) { - this->d.a.start_idx++; - } - this->d.a.num_values--; - } else { - subtree *rebalance_subtree = nullptr; - this->delete_internal(&this->d.t.root, idx, nullptr, &rebalance_subtree); - if (rebalance_subtree != nullptr) { - this->rebalance(rebalance_subtree); - } - } - return 0; -} - -template -template -int omt::iterate(iterate_extra_t *const iterate_extra) const { - return this->iterate_on_range(0, this->size(), iterate_extra); -} - -template -template -int omt::iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const { - if (right > this->size()) { return EINVAL; } - if (left == right) { return 0; } - if (this->is_array) { - return this->iterate_internal_array(left, right, iterate_extra); - } - return this->iterate_internal(left, right, this->d.t.root, 0, iterate_extra); -} - -template -template -int omt::iterate_and_mark_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) { - static_assert(supports_marks, "does not support marks"); - if (right > this->size()) { return EINVAL; } - if (left == right) { return 0; } - paranoid_invariant(!this->is_array); - return this->iterate_and_mark_range_internal(left, right, this->d.t.root, 0, iterate_extra); -} - -//TODO: We can optimize this if we steal 3 bits. 1 bit: this node is marked. 1 bit: left subtree has marks. 1 bit: right subtree has marks. 
-template -template -int omt::iterate_over_marked(iterate_extra_t *const iterate_extra) const { - static_assert(supports_marks, "does not support marks"); - paranoid_invariant(!this->is_array); - return this->iterate_over_marked_internal(this->d.t.root, 0, iterate_extra); -} - -template -void omt::unmark(const subtree &subtree, const uint32_t index, GrowableArray *const indexes) { - if (subtree.is_null()) { return; } - omt_node &n = this->d.t.nodes[subtree.get_index()]; - const uint32_t index_root = index + this->nweight(n.left); - - const bool below = n.get_marks_below(); - if (below) { - this->unmark(n.left, index, indexes); - } - if (n.get_marked()) { - indexes->push(index_root); - } - n.clear_stolen_bits(); - if (below) { - this->unmark(n.right, index_root + 1, indexes); - } -} - -template -void omt::delete_all_marked(void) { - static_assert(supports_marks, "does not support marks"); - if (!this->has_marks()) { - return; - } - paranoid_invariant(!this->is_array); - GrowableArray marked_indexes; - marked_indexes.init(); - - // Remove all marks. - // We need to delete all the stolen bits before calling delete_at to prevent barfing. - this->unmark(this->d.t.root, 0, &marked_indexes); - - for (uint32_t i = 0; i < marked_indexes.get_size(); i++) { - // Delete from left to right, shift by number already deleted. - // Alternative is delete from right to left. - int r = this->delete_at(marked_indexes.fetch_unchecked(i) - i); - lazy_assert_zero(r); - } - marked_indexes.deinit(); - barf_if_marked(*this); -} - -template -uint32_t omt::verify_marks_consistent_internal(const subtree &subtree, const bool UU(allow_marks)) const { - if (subtree.is_null()) { - return 0; - } - const omt_node &node = this->d.t.nodes[subtree.get_index()]; - uint32_t num_marks = verify_marks_consistent_internal(node.left, node.get_marks_below()); - num_marks += verify_marks_consistent_internal(node.right, node.get_marks_below()); - if (node.get_marks_below()) { - paranoid_invariant(allow_marks); - paranoid_invariant(num_marks > 0); - } else { - // redundant with invariant below, but nice to have explicitly - paranoid_invariant(num_marks == 0); - } - if (node.get_marked()) { - paranoid_invariant(allow_marks); - ++num_marks; - } - return num_marks; -} - -template -void omt::verify_marks_consistent(void) const { - static_assert(supports_marks, "does not support marks"); - paranoid_invariant(!this->is_array); - this->verify_marks_consistent_internal(this->d.t.root, true); -} - -template -template -void omt::iterate_ptr(iterate_extra_t *const iterate_extra) { - if (this->is_array) { - this->iterate_ptr_internal_array(0, this->size(), iterate_extra); - } else { - this->iterate_ptr_internal(0, this->size(), this->d.t.root, 0, iterate_extra); - } -} - -template -int omt::fetch(const uint32_t idx, omtdataout_t *const value) const { - if (idx >= this->size()) { return EINVAL; } - if (this->is_array) { - this->fetch_internal_array(idx, value); - } else { - this->fetch_internal(this->d.t.root, idx, value); - } - return 0; -} - -template -template -int omt::find_zero(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const { - uint32_t tmp_index; - uint32_t *const child_idxp = (idxp != nullptr) ? 
idxp : &tmp_index; - int r; - if (this->is_array) { - r = this->find_internal_zero_array(extra, value, child_idxp); - } - else { - r = this->find_internal_zero(this->d.t.root, extra, value, child_idxp); - } - return r; -} - -template -template -int omt::find(const omtcmp_t &extra, int direction, omtdataout_t *const value, uint32_t *const idxp) const { - uint32_t tmp_index; - uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index; - paranoid_invariant(direction != 0); - if (direction < 0) { - if (this->is_array) { - return this->find_internal_minus_array(extra, value, child_idxp); - } else { - return this->find_internal_minus(this->d.t.root, extra, value, child_idxp); - } - } else { - if (this->is_array) { - return this->find_internal_plus_array(extra, value, child_idxp); - } else { - return this->find_internal_plus(this->d.t.root, extra, value, child_idxp); - } - } -} - -template -size_t omt::memory_size(void) { - if (this->is_array) { - return (sizeof *this) + this->capacity * (sizeof this->d.a.values[0]); - } - return (sizeof *this) + this->capacity * (sizeof this->d.t.nodes[0]); -} - - -template -void omt::create_internal_no_array(const uint32_t new_capacity) { - this->is_array = true; - this->d.a.start_idx = 0; - this->d.a.num_values = 0; - this->d.a.values = nullptr; - this->capacity = new_capacity; -} - -template -void omt::create_internal(const uint32_t new_capacity) { - this->create_internal_no_array(new_capacity); - XMALLOC_N(this->capacity, this->d.a.values); -} - -template -uint32_t omt::nweight(const subtree &subtree) const { - if (subtree.is_null()) { - return 0; - } else { - return this->d.t.nodes[subtree.get_index()].weight; - } -} - -template -typename omt::node_idx omt::node_malloc(void) { - paranoid_invariant(this->d.t.free_idx < this->capacity); - omt_node &n = this->d.t.nodes[this->d.t.free_idx]; - n.clear_stolen_bits(); - return this->d.t.free_idx++; -} - -template -void omt::node_free(const node_idx UU(idx)) { - paranoid_invariant(idx < this->capacity); -} - -template -void omt::maybe_resize_array(const uint32_t n) { - const uint32_t new_size = n<=2 ? 4 : 2*n; - const uint32_t room = this->capacity - this->d.a.start_idx; - - if (room < n || this->capacity / 2 >= new_size) { - omtdata_t *XMALLOC_N(new_size, tmp_values); - memcpy(tmp_values, &this->d.a.values[this->d.a.start_idx], - this->d.a.num_values * (sizeof tmp_values[0])); - this->d.a.start_idx = 0; - this->capacity = new_size; - toku_free(this->d.a.values); - this->d.a.values = tmp_values; - } -} - -template -void omt::fill_array_with_subtree_values(omtdata_t *const array, const subtree &subtree) const { - if (subtree.is_null()) return; - const omt_node &tree = this->d.t.nodes[subtree.get_index()]; - this->fill_array_with_subtree_values(&array[0], tree.left); - array[this->nweight(tree.left)] = tree.value; - this->fill_array_with_subtree_values(&array[this->nweight(tree.left) + 1], tree.right); -} - -template -void omt::convert_to_array(void) { - if (!this->is_array) { - const uint32_t num_values = this->size(); - uint32_t new_size = 2*num_values; - new_size = new_size < 4 ? 
4 : new_size; - - omtdata_t *XMALLOC_N(new_size, tmp_values); - this->fill_array_with_subtree_values(tmp_values, this->d.t.root); - toku_free(this->d.t.nodes); - this->is_array = true; - this->capacity = new_size; - this->d.a.num_values = num_values; - this->d.a.values = tmp_values; - this->d.a.start_idx = 0; - } -} - -template -void omt::rebuild_from_sorted_array(subtree *const subtree, const omtdata_t *const values, const uint32_t numvalues) { - if (numvalues==0) { - subtree->set_to_null(); - } else { - const uint32_t halfway = numvalues/2; - const node_idx newidx = this->node_malloc(); - omt_node *const newnode = &this->d.t.nodes[newidx]; - newnode->weight = numvalues; - newnode->value = values[halfway]; - subtree->set_index(newidx); - // update everything before the recursive calls so the second call can be a tail call. - this->rebuild_from_sorted_array(&newnode->left, &values[0], halfway); - this->rebuild_from_sorted_array(&newnode->right, &values[halfway+1], numvalues - (halfway+1)); - } -} - -template -void omt::convert_to_tree(void) { - if (this->is_array) { - const uint32_t num_nodes = this->size(); - uint32_t new_size = num_nodes*2; - new_size = new_size < 4 ? 4 : new_size; - - omt_node *XMALLOC_N(new_size, new_nodes); - omtdata_t *const values = this->d.a.values; - omtdata_t *const tmp_values = &values[this->d.a.start_idx]; - this->is_array = false; - this->d.t.nodes = new_nodes; - this->capacity = new_size; - this->d.t.free_idx = 0; - this->d.t.root.set_to_null(); - this->rebuild_from_sorted_array(&this->d.t.root, tmp_values, num_nodes); - toku_free(values); - } -} - -template -void omt::maybe_resize_or_convert(const uint32_t n) { - if (this->is_array) { - this->maybe_resize_array(n); - } else { - const uint32_t new_size = n<=2 ? 4 : 2*n; - const uint32_t num_nodes = this->nweight(this->d.t.root); - if ((this->capacity/2 >= new_size) || - (this->d.t.free_idx >= this->capacity && num_nodes < n) || - (this->capacityconvert_to_array(); - // if we had a free list, the "supports_marks" version could - // just resize, as it is now, we have to convert to and back - // from an array. - if (supports_marks) { - this->convert_to_tree(); - } - } - } -} - -template -bool omt::will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const { - if (subtree.is_null()) { return false; } - const omt_node &n = this->d.t.nodes[subtree.get_index()]; - // one of the 1's is for the root. 
- // the other is to take ceil(n/2) - const uint32_t weight_left = this->nweight(n.left) + leftmod; - const uint32_t weight_right = this->nweight(n.right) + rightmod; - return ((1+weight_left < (1+1+weight_right)/2) - || - (1+weight_right < (1+1+weight_left)/2)); -} - -template -void omt::insert_internal(subtree *const subtreep, const omtdata_t &value, const uint32_t idx, subtree **const rebalance_subtree) { - if (subtreep->is_null()) { - paranoid_invariant_zero(idx); - const node_idx newidx = this->node_malloc(); - omt_node *const newnode = &this->d.t.nodes[newidx]; - newnode->weight = 1; - newnode->left.set_to_null(); - newnode->right.set_to_null(); - newnode->value = value; - subtreep->set_index(newidx); - } else { - omt_node &n = this->d.t.nodes[subtreep->get_index()]; - n.weight++; - if (idx <= this->nweight(n.left)) { - if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 1, 0)) { - *rebalance_subtree = subtreep; - } - this->insert_internal(&n.left, value, idx, rebalance_subtree); - } else { - if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, 1)) { - *rebalance_subtree = subtreep; - } - const uint32_t sub_index = idx - this->nweight(n.left) - 1; - this->insert_internal(&n.right, value, sub_index, rebalance_subtree); - } - } -} - -template -void omt::set_at_internal_array(const omtdata_t &value, const uint32_t idx) { - this->d.a.values[this->d.a.start_idx + idx] = value; -} - -template -void omt::set_at_internal(const subtree &subtree, const omtdata_t &value, const uint32_t idx) { - paranoid_invariant(!subtree.is_null()); - omt_node &n = this->d.t.nodes[subtree.get_index()]; - const uint32_t leftweight = this->nweight(n.left); - if (idx < leftweight) { - this->set_at_internal(n.left, value, idx); - } else if (idx == leftweight) { - n.value = value; - } else { - this->set_at_internal(n.right, value, idx - leftweight - 1); - } -} - -template -void omt::delete_internal(subtree *const subtreep, const uint32_t idx, omt_node *const copyn, subtree **const rebalance_subtree) { - paranoid_invariant_notnull(subtreep); - paranoid_invariant_notnull(rebalance_subtree); - paranoid_invariant(!subtreep->is_null()); - omt_node &n = this->d.t.nodes[subtreep->get_index()]; - const uint32_t leftweight = this->nweight(n.left); - if (idx < leftweight) { - n.weight--; - if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, -1, 0)) { - *rebalance_subtree = subtreep; - } - this->delete_internal(&n.left, idx, copyn, rebalance_subtree); - } else if (idx == leftweight) { - if (n.left.is_null()) { - const uint32_t oldidx = subtreep->get_index(); - *subtreep = n.right; - if (copyn != nullptr) { - copyn->value = n.value; - } - this->node_free(oldidx); - } else if (n.right.is_null()) { - const uint32_t oldidx = subtreep->get_index(); - *subtreep = n.left; - if (copyn != nullptr) { - copyn->value = n.value; - } - this->node_free(oldidx); - } else { - if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, -1)) { - *rebalance_subtree = subtreep; - } - // don't need to copy up value, it's only used by this - // next call, and when that gets to the bottom there - // won't be any more recursion - n.weight--; - this->delete_internal(&n.right, 0, &n, rebalance_subtree); - } - } else { - n.weight--; - if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, -1)) { - *rebalance_subtree = subtreep; - } - this->delete_internal(&n.right, idx - leftweight - 1, copyn, rebalance_subtree); - } -} - -template -template -int 
omt::iterate_internal_array(const uint32_t left, const uint32_t right, - iterate_extra_t *const iterate_extra) const { - int r; - for (uint32_t i = left; i < right; ++i) { - r = f(this->d.a.values[this->d.a.start_idx + i], i, iterate_extra); - if (r != 0) { - return r; - } - } - return 0; -} - -template -template -void omt::iterate_ptr_internal(const uint32_t left, const uint32_t right, - const subtree &subtree, const uint32_t idx, - iterate_extra_t *const iterate_extra) { - if (!subtree.is_null()) { - omt_node &n = this->d.t.nodes[subtree.get_index()]; - const uint32_t idx_root = idx + this->nweight(n.left); - if (left < idx_root) { - this->iterate_ptr_internal(left, right, n.left, idx, iterate_extra); - } - if (left <= idx_root && idx_root < right) { - int r = f(&n.value, idx_root, iterate_extra); - lazy_assert_zero(r); - } - if (idx_root + 1 < right) { - this->iterate_ptr_internal(left, right, n.right, idx_root + 1, iterate_extra); - } - } -} - -template -template -void omt::iterate_ptr_internal_array(const uint32_t left, const uint32_t right, - iterate_extra_t *const iterate_extra) { - for (uint32_t i = left; i < right; ++i) { - int r = f(&this->d.a.values[this->d.a.start_idx + i], i, iterate_extra); - lazy_assert_zero(r); - } -} - -template -template -int omt::iterate_internal(const uint32_t left, const uint32_t right, - const subtree &subtree, const uint32_t idx, - iterate_extra_t *const iterate_extra) const { - if (subtree.is_null()) { return 0; } - int r; - const omt_node &n = this->d.t.nodes[subtree.get_index()]; - const uint32_t idx_root = idx + this->nweight(n.left); - if (left < idx_root) { - r = this->iterate_internal(left, right, n.left, idx, iterate_extra); - if (r != 0) { return r; } - } - if (left <= idx_root && idx_root < right) { - r = f(n.value, idx_root, iterate_extra); - if (r != 0) { return r; } - } - if (idx_root + 1 < right) { - return this->iterate_internal(left, right, n.right, idx_root + 1, iterate_extra); - } - return 0; -} - -template -template -int omt::iterate_and_mark_range_internal(const uint32_t left, const uint32_t right, - const subtree &subtree, const uint32_t idx, - iterate_extra_t *const iterate_extra) { - paranoid_invariant(!subtree.is_null()); - int r; - omt_node &n = this->d.t.nodes[subtree.get_index()]; - const uint32_t idx_root = idx + this->nweight(n.left); - if (left < idx_root && !n.left.is_null()) { - n.set_marks_below_bit(); - r = this->iterate_and_mark_range_internal(left, right, n.left, idx, iterate_extra); - if (r != 0) { return r; } - } - if (left <= idx_root && idx_root < right) { - n.set_marked_bit(); - r = f(n.value, idx_root, iterate_extra); - if (r != 0) { return r; } - } - if (idx_root + 1 < right && !n.right.is_null()) { - n.set_marks_below_bit(); - return this->iterate_and_mark_range_internal(left, right, n.right, idx_root + 1, iterate_extra); - } - return 0; -} - -template -template -int omt::iterate_over_marked_internal(const subtree &subtree, const uint32_t idx, - iterate_extra_t *const iterate_extra) const { - if (subtree.is_null()) { return 0; } - int r; - const omt_node &n = this->d.t.nodes[subtree.get_index()]; - const uint32_t idx_root = idx + this->nweight(n.left); - if (n.get_marks_below()) { - r = this->iterate_over_marked_internal(n.left, idx, iterate_extra); - if (r != 0) { return r; } - } - if (n.get_marked()) { - r = f(n.value, idx_root, iterate_extra); - if (r != 0) { return r; } - } - if (n.get_marks_below()) { - return this->iterate_over_marked_internal(n.right, idx_root + 1, iterate_extra); - } - return 0; -} 
- -template -void omt::fetch_internal_array(const uint32_t i, omtdataout_t *const value) const { - if (value != nullptr) { - copyout(value, &this->d.a.values[this->d.a.start_idx + i]); - } -} - -template -void omt::fetch_internal(const subtree &subtree, const uint32_t i, omtdataout_t *const value) const { - omt_node &n = this->d.t.nodes[subtree.get_index()]; - const uint32_t leftweight = this->nweight(n.left); - if (i < leftweight) { - this->fetch_internal(n.left, i, value); - } else if (i == leftweight) { - if (value != nullptr) { - copyout(value, &n); - } - } else { - this->fetch_internal(n.right, i - leftweight - 1, value); - } -} - -template -void omt::fill_array_with_subtree_idxs(node_idx *const array, const subtree &subtree) const { - if (!subtree.is_null()) { - const omt_node &tree = this->d.t.nodes[subtree.get_index()]; - this->fill_array_with_subtree_idxs(&array[0], tree.left); - array[this->nweight(tree.left)] = subtree.get_index(); - this->fill_array_with_subtree_idxs(&array[this->nweight(tree.left) + 1], tree.right); - } -} - -template -void omt::rebuild_subtree_from_idxs(subtree *const subtree, const node_idx *const idxs, const uint32_t numvalues) { - if (numvalues==0) { - subtree->set_to_null(); - } else { - uint32_t halfway = numvalues/2; - subtree->set_index(idxs[halfway]); - //node_idx newidx = idxs[halfway]; - omt_node &newnode = this->d.t.nodes[subtree->get_index()]; - newnode.weight = numvalues; - // value is already in there. - this->rebuild_subtree_from_idxs(&newnode.left, &idxs[0], halfway); - this->rebuild_subtree_from_idxs(&newnode.right, &idxs[halfway+1], numvalues-(halfway+1)); - //n_idx = newidx; - } -} - -template -void omt::rebalance(subtree *const subtree) { - node_idx idx = subtree->get_index(); - if (idx==this->d.t.root.get_index()) { - //Try to convert to an array. - //If this fails, (malloc) nothing will have changed. - //In the failure case we continue on to the standard rebalance - //algorithm. - this->convert_to_array(); + template + void omt::create(void) { + this->create_internal(2); if (supports_marks) { this->convert_to_tree(); } - } else { - const omt_node &n = this->d.t.nodes[idx]; - node_idx *tmp_array; - size_t mem_needed = n.weight * (sizeof tmp_array[0]); - size_t mem_free = (this->capacity - this->d.t.free_idx) * (sizeof this->d.t.nodes[0]); - bool malloced; - if (mem_needed<=mem_free) { - //There is sufficient free space at the end of the nodes array - //to hold enough node indexes to rebalance. 
- malloced = false; - tmp_array = reinterpret_cast(&this->d.t.nodes[this->d.t.free_idx]); - } - else { - malloced = true; - XMALLOC_N(n.weight, tmp_array); - } - this->fill_array_with_subtree_idxs(tmp_array, *subtree); - this->rebuild_subtree_from_idxs(subtree, tmp_array, n.weight); - if (malloced) toku_free(tmp_array); } -} -template -void omt::copyout(omtdata_t *const out, const omt_node *const n) { - *out = n->value; -} - -template -void omt::copyout(omtdata_t **const out, omt_node *const n) { - *out = &n->value; -} - -template -void omt::copyout(omtdata_t *const out, const omtdata_t *const stored_value_ptr) { - *out = *stored_value_ptr; -} - -template -void omt::copyout(omtdata_t **const out, omtdata_t *const stored_value_ptr) { - *out = stored_value_ptr; -} - -template -template -int omt::find_internal_zero_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const { - paranoid_invariant_notnull(idxp); - uint32_t min = this->d.a.start_idx; - uint32_t limit = this->d.a.start_idx + this->d.a.num_values; - uint32_t best_pos = subtree::NODE_NULL; - uint32_t best_zero = subtree::NODE_NULL; - - while (min!=limit) { - uint32_t mid = (min + limit) / 2; - int hv = h(this->d.a.values[mid], extra); - if (hv<0) { - min = mid+1; - } - else if (hv>0) { - best_pos = mid; - limit = mid; - } - else { - best_zero = mid; - limit = mid; + template + void omt::create_no_array(void) { + if (!supports_marks) { + this->create_internal_no_array(0); + } else { + this->is_array = false; + this->capacity = 0; + this->d.t.nodes = nullptr; + this->d.t.root.set_to_null(); + this->d.t.free_idx = 0; } } - if (best_zero!=subtree::NODE_NULL) { - //Found a zero - if (value != nullptr) { - copyout(value, &this->d.a.values[best_zero]); + + template + void omt::create_from_sorted_array( + const omtdata_t *const values, + const uint32_t numvalues) { + this->create_internal(numvalues); + memcpy(this->d.a.values, values, numvalues * (sizeof values[0])); + this->d.a.num_values = numvalues; + if (supports_marks) { + this->convert_to_tree(); + } + } + + template + void + omt::create_steal_sorted_array( + omtdata_t **const values, + const uint32_t numvalues, + const uint32_t new_capacity) { + paranoid_invariant_notnull(values); + this->create_internal_no_array(new_capacity); + this->d.a.num_values = numvalues; + this->d.a.values = *values; + *values = nullptr; + if (supports_marks) { + this->convert_to_tree(); + } + } + + template + int omt::split_at( + omt *const newomt, + const uint32_t idx) { + barf_if_marked(*this); + paranoid_invariant_notnull(newomt); + if (idx > this->size()) { + return EINVAL; + } + this->convert_to_array(); + const uint32_t newsize = this->size() - idx; + newomt->create_from_sorted_array( + &this->d.a.values[this->d.a.start_idx + idx], newsize); + this->d.a.num_values = idx; + this->maybe_resize_array(idx); + if (supports_marks) { + this->convert_to_tree(); } - *idxp = best_zero - this->d.a.start_idx; return 0; } - if (best_pos!=subtree::NODE_NULL) *idxp = best_pos - this->d.a.start_idx; - else *idxp = this->d.a.num_values; - return DB_NOTFOUND; -} -template -template -int omt::find_internal_zero(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const { - paranoid_invariant_notnull(idxp); - if (subtree.is_null()) { - *idxp = 0; - return DB_NOTFOUND; + template + void omt::merge( + omt *const leftomt, + omt *const rightomt) { + barf_if_marked(*this); + paranoid_invariant_notnull(leftomt); + paranoid_invariant_notnull(rightomt); + const 
uint32_t leftsize = leftomt->size(); + const uint32_t rightsize = rightomt->size(); + const uint32_t newsize = leftsize + rightsize; + + if (leftomt->is_array) { + if (leftomt->capacity - + (leftomt->d.a.start_idx + leftomt->d.a.num_values) >= + rightsize) { + this->create_steal_sorted_array(&leftomt->d.a.values, + leftomt->d.a.num_values, + leftomt->capacity); + this->d.a.start_idx = leftomt->d.a.start_idx; + } else { + this->create_internal(newsize); + memcpy(&this->d.a.values[0], + &leftomt->d.a.values[leftomt->d.a.start_idx], + leftomt->d.a.num_values * (sizeof this->d.a.values[0])); + } + } else { + this->create_internal(newsize); + leftomt->fill_array_with_subtree_values(&this->d.a.values[0], + leftomt->d.t.root); + } + leftomt->destroy(); + this->d.a.num_values = leftsize; + + if (rightomt->is_array) { + memcpy( + &this->d.a.values[this->d.a.start_idx + this->d.a.num_values], + &rightomt->d.a.values[rightomt->d.a.start_idx], + rightomt->d.a.num_values * (sizeof this->d.a.values[0])); + } else { + rightomt->fill_array_with_subtree_values( + &this->d.a.values[this->d.a.start_idx + this->d.a.num_values], + rightomt->d.t.root); + } + rightomt->destroy(); + this->d.a.num_values += rightsize; + paranoid_invariant(this->size() == newsize); + if (supports_marks) { + this->convert_to_tree(); + } } - omt_node &n = this->d.t.nodes[subtree.get_index()]; - int hv = h(n.value, extra); - if (hv<0) { - int r = this->find_internal_zero(n.right, extra, value, idxp); - *idxp += this->nweight(n.left)+1; + + template + void omt::clone(const omt &src) { + barf_if_marked(*this); + this->create_internal(src.size()); + if (src.is_array) { + memcpy(&this->d.a.values[0], + &src.d.a.values[src.d.a.start_idx], + src.d.a.num_values * (sizeof this->d.a.values[0])); + } else { + src.fill_array_with_subtree_values(&this->d.a.values[0], + src.d.t.root); + } + this->d.a.num_values = src.size(); + if (supports_marks) { + this->convert_to_tree(); + } + } + + template + void omt::clear(void) { + if (this->is_array) { + this->d.a.start_idx = 0; + this->d.a.num_values = 0; + } else { + this->d.t.root.set_to_null(); + this->d.t.free_idx = 0; + } + } + + template + void omt::destroy(void) { + this->clear(); + this->capacity = 0; + if (this->is_array) { + if (this->d.a.values != nullptr) { + toku_free(this->d.a.values); + } + this->d.a.values = nullptr; + } else { + if (this->d.t.nodes != nullptr) { + toku_free(this->d.t.nodes); + } + this->d.t.nodes = nullptr; + } + } + + template + uint32_t omt::size(void) const { + if (this->is_array) { + return this->d.a.num_values; + } else { + return this->nweight(this->d.t.root); + } + } + + template + template + int omt::insert( + const omtdata_t &value, + const omtcmp_t &v, + uint32_t *const idx) { + int r; + uint32_t insert_idx; + + r = this->find_zero(v, nullptr, &insert_idx); + if (r == 0) { + if (idx) + *idx = insert_idx; + return DB_KEYEXIST; + } + if (r != DB_NOTFOUND) + return r; + + if ((r = this->insert_at(value, insert_idx))) + return r; + if (idx) + *idx = insert_idx; + + return 0; + } + + // The following 3 functions implement a static if for us. 
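+    // Overload resolution on the bool supports_marks template parameter acts as
+    // a compile-time "static if": one overload matches omts that do not support
+    // marks and does nothing, the other matches marks-capable omts and asserts
+    // that no marks are present.  Mutating operations simply call
+    // barf_if_marked(*this) and the correct check is selected per instantiation.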
+ template + static void barf_if_marked( + const omt &UU(omt)) {} + + template + static void barf_if_marked(const omt &omt) { + invariant(!omt.has_marks()); + } + + template + bool omt::has_marks(void) const { + static_assert(supports_marks, "Does not support marks"); + if (this->d.t.root.is_null()) { + return false; + } + const omt_node &node = this->d.t.nodes[this->d.t.root.get_index()]; + return node.get_marks_below() || node.get_marked(); + } + + template + int omt::insert_at( + const omtdata_t &value, + const uint32_t idx) { + barf_if_marked(*this); + if (idx > this->size()) { + return EINVAL; + } + + this->maybe_resize_or_convert(this->size() + 1); + if (this->is_array && idx != this->d.a.num_values && + (idx != 0 || this->d.a.start_idx == 0)) { + this->convert_to_tree(); + } + if (this->is_array) { + if (idx == this->d.a.num_values) { + this->d.a.values[this->d.a.start_idx + this->d.a.num_values] = + value; + } else { + this->d.a.values[--this->d.a.start_idx] = value; + } + this->d.a.num_values++; + } else { + subtree *rebalance_subtree = nullptr; + this->insert_internal( + &this->d.t.root, value, idx, &rebalance_subtree); + if (rebalance_subtree != nullptr) { + this->rebalance(rebalance_subtree); + } + } + return 0; + } + + template + int omt::set_at( + const omtdata_t &value, + const uint32_t idx) { + barf_if_marked(*this); + if (idx >= this->size()) { + return EINVAL; + } + + if (this->is_array) { + this->set_at_internal_array(value, idx); + } else { + this->set_at_internal(this->d.t.root, value, idx); + } + return 0; + } + + template + int omt::delete_at( + const uint32_t idx) { + barf_if_marked(*this); + if (idx >= this->size()) { + return EINVAL; + } + + this->maybe_resize_or_convert(this->size() - 1); + if (this->is_array && idx != 0 && idx != this->d.a.num_values - 1) { + this->convert_to_tree(); + } + if (this->is_array) { + // Testing for 0 does not rule out it being the last entry. 
+ // Test explicitly for num_values-1 + if (idx != this->d.a.num_values - 1) { + this->d.a.start_idx++; + } + this->d.a.num_values--; + } else { + subtree *rebalance_subtree = nullptr; + this->delete_internal( + &this->d.t.root, idx, nullptr, &rebalance_subtree); + if (rebalance_subtree != nullptr) { + this->rebalance(rebalance_subtree); + } + } + return 0; + } + + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int omt::iterate( + iterate_extra_t *const iterate_extra) const { + return this->iterate_on_range( + 0, this->size(), iterate_extra); + } + + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int omt::iterate_on_range( + const uint32_t left, + const uint32_t right, + iterate_extra_t *const iterate_extra) const { + if (right > this->size()) { + return EINVAL; + } + if (left == right) { + return 0; + } + if (this->is_array) { + return this->iterate_internal_array( + left, right, iterate_extra); + } + return this->iterate_internal( + left, right, this->d.t.root, 0, iterate_extra); + } + + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int omt::iterate_and_mark_range( + const uint32_t left, + const uint32_t right, + iterate_extra_t *const iterate_extra) { + static_assert(supports_marks, "does not support marks"); + if (right > this->size()) { + return EINVAL; + } + if (left == right) { + return 0; + } + paranoid_invariant(!this->is_array); + return this->iterate_and_mark_range_internal( + left, right, this->d.t.root, 0, iterate_extra); + } + + // TODO: We can optimize this if we steal 3 bits. 1 bit: this node is + // marked. 1 bit: left subtree has marks. 1 bit: right subtree has marks. + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int omt::iterate_over_marked( + iterate_extra_t *const iterate_extra) const { + static_assert(supports_marks, "does not support marks"); + paranoid_invariant(!this->is_array); + return this->iterate_over_marked_internal( + this->d.t.root, 0, iterate_extra); + } + + template + void omt::unmark( + const subtree &st, + const uint32_t index, + GrowableArray *const indexes) { + if (st.is_null()) { + return; + } + omt_node &n = this->d.t.nodes[st.get_index()]; + const uint32_t index_root = index + this->nweight(n.left); + + const bool below = n.get_marks_below(); + if (below) { + this->unmark(n.left, index, indexes); + } + if (n.get_marked()) { + indexes->push(index_root); + } + n.clear_stolen_bits(); + if (below) { + this->unmark(n.right, index_root + 1, indexes); + } + } + + template + void omt::delete_all_marked(void) { + static_assert(supports_marks, "does not support marks"); + if (!this->has_marks()) { + return; + } + paranoid_invariant(!this->is_array); + GrowableArray marked_indexes; + marked_indexes.init(); + + // Remove all marks. + // We need to delete all the stolen bits before calling delete_at to + // prevent barfing. + this->unmark(this->d.t.root, 0, &marked_indexes); + + for (uint32_t i = 0; i < marked_indexes.get_size(); i++) { + // Delete from left to right, shift by number already deleted. + // Alternative is delete from right to left. 
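+            // Worked example: if the marked indexes are {2, 5, 9}, deleting
+            // index 2 shifts the elements originally at 5 and 9 down to 4 and 8,
+            // so the i-th deletion must target marked_indexes[i] - i.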
+ int r = this->delete_at(marked_indexes.fetch_unchecked(i) - i); + lazy_assert_zero(r); + } + marked_indexes.deinit(); + barf_if_marked(*this); + } + + template + uint32_t omt:: + verify_marks_consistent_internal(const subtree &st, + const bool UU(allow_marks)) const { + if (st.is_null()) { + return 0; + } + const omt_node &node = this->d.t.nodes[st.get_index()]; + uint32_t num_marks = + verify_marks_consistent_internal(node.left, node.get_marks_below()); + num_marks += verify_marks_consistent_internal(node.right, + node.get_marks_below()); + if (node.get_marks_below()) { + paranoid_invariant(allow_marks); + paranoid_invariant(num_marks > 0); + } else { + // redundant with invariant below, but nice to have explicitly + paranoid_invariant(num_marks == 0); + } + if (node.get_marked()) { + paranoid_invariant(allow_marks); + ++num_marks; + } + return num_marks; + } + + template + void omt::verify_marks_consistent( + void) const { + static_assert(supports_marks, "does not support marks"); + paranoid_invariant(!this->is_array); + this->verify_marks_consistent_internal(this->d.t.root, true); + } + + template + template + void omt::iterate_ptr( + iterate_extra_t *const iterate_extra) { + if (this->is_array) { + this->iterate_ptr_internal_array( + 0, this->size(), iterate_extra); + } else { + this->iterate_ptr_internal( + 0, this->size(), this->d.t.root, 0, iterate_extra); + } + } + + template + int omt::fetch( + const uint32_t idx, + omtdataout_t *const value) const { + if (idx >= this->size()) { + return EINVAL; + } + if (this->is_array) { + this->fetch_internal_array(idx, value); + } else { + this->fetch_internal(this->d.t.root, idx, value); + } + return 0; + } + + template + template + int omt::find_zero( + const omtcmp_t &extra, + omtdataout_t *const value, + uint32_t *const idxp) const { + uint32_t tmp_index; + uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index; + int r; + if (this->is_array) { + r = this->find_internal_zero_array( + extra, value, child_idxp); + } else { + r = this->find_internal_zero( + this->d.t.root, extra, value, child_idxp); + } return r; - } else if (hv>0) { - return this->find_internal_zero(n.left, extra, value, idxp); - } else { - int r = this->find_internal_zero(n.left, extra, value, idxp); - if (r==DB_NOTFOUND) { - *idxp = this->nweight(n.left); + } + + template + template + int omt::find( + const omtcmp_t &extra, + int direction, + omtdataout_t *const value, + uint32_t *const idxp) const { + uint32_t tmp_index; + uint32_t *const child_idxp = (idxp != nullptr) ? 
idxp : &tmp_index; + paranoid_invariant(direction != 0); + if (direction < 0) { + if (this->is_array) { + return this->find_internal_minus_array( + extra, value, child_idxp); + } else { + return this->find_internal_minus( + this->d.t.root, extra, value, child_idxp); + } + } else { + if (this->is_array) { + return this->find_internal_plus_array( + extra, value, child_idxp); + } else { + return this->find_internal_plus( + this->d.t.root, extra, value, child_idxp); + } + } + } + + template + size_t omt::memory_size(void) { + if (this->is_array) { + return (sizeof *this) + + this->capacity * (sizeof this->d.a.values[0]); + } + return (sizeof *this) + this->capacity * (sizeof this->d.t.nodes[0]); + } + + template + void omt::create_internal_no_array( + const uint32_t new_capacity) { + this->is_array = true; + this->d.a.start_idx = 0; + this->d.a.num_values = 0; + this->d.a.values = nullptr; + this->capacity = new_capacity; + } + + template + void omt::create_internal( + const uint32_t new_capacity) { + this->create_internal_no_array(new_capacity); + XMALLOC_N(this->capacity, this->d.a.values); + } + + template + uint32_t omt::nweight( + const subtree &st) const { + if (st.is_null()) { + return 0; + } else { + return this->d.t.nodes[st.get_index()].weight; + } + } + + template + typename omt::node_idx + omt::node_malloc(void) { + paranoid_invariant(this->d.t.free_idx < this->capacity); + omt_node &n = this->d.t.nodes[this->d.t.free_idx]; + n.clear_stolen_bits(); + return this->d.t.free_idx++; + } + + template + void omt::node_free( + const node_idx UU(idx)) { + paranoid_invariant(idx < this->capacity); + } + + template + void omt::maybe_resize_array( + const uint32_t n) { + const uint32_t new_size = n <= 2 ? 4 : 2 * n; + const uint32_t room = this->capacity - this->d.a.start_idx; + + if (room < n || this->capacity / 2 >= new_size) { + omtdata_t *XMALLOC_N(new_size, tmp_values); + memcpy(tmp_values, + &this->d.a.values[this->d.a.start_idx], + this->d.a.num_values * (sizeof tmp_values[0])); + this->d.a.start_idx = 0; + this->capacity = new_size; + toku_free(this->d.a.values); + this->d.a.values = tmp_values; + } + } + + template + void omt:: + fill_array_with_subtree_values(omtdata_t *const array, + const subtree &st) const { + if (st.is_null()) + return; + const omt_node &tree = this->d.t.nodes[st.get_index()]; + this->fill_array_with_subtree_values(&array[0], tree.left); + array[this->nweight(tree.left)] = tree.value; + this->fill_array_with_subtree_values( + &array[this->nweight(tree.left) + 1], tree.right); + } + + template + void omt::convert_to_array(void) { + if (!this->is_array) { + const uint32_t num_values = this->size(); + uint32_t new_size = 2 * num_values; + new_size = new_size < 4 ? 
4 : new_size; + + omtdata_t *XMALLOC_N(new_size, tmp_values); + this->fill_array_with_subtree_values(tmp_values, this->d.t.root); + toku_free(this->d.t.nodes); + this->is_array = true; + this->capacity = new_size; + this->d.a.num_values = num_values; + this->d.a.values = tmp_values; + this->d.a.start_idx = 0; + } + } + + template + void + omt::rebuild_from_sorted_array( + subtree *const st, + const omtdata_t *const values, + const uint32_t numvalues) { + if (numvalues == 0) { + st->set_to_null(); + } else { + const uint32_t halfway = numvalues / 2; + const node_idx newidx = this->node_malloc(); + omt_node *const newnode = &this->d.t.nodes[newidx]; + newnode->weight = numvalues; + newnode->value = values[halfway]; + st->set_index(newidx); + // update everything before the recursive calls so the second call + // can be a tail call. + this->rebuild_from_sorted_array( + &newnode->left, &values[0], halfway); + this->rebuild_from_sorted_array(&newnode->right, + &values[halfway + 1], + numvalues - (halfway + 1)); + } + } + + template + void omt::convert_to_tree(void) { + if (this->is_array) { + const uint32_t num_nodes = this->size(); + uint32_t new_size = num_nodes * 2; + new_size = new_size < 4 ? 4 : new_size; + + omt_node *XMALLOC_N(new_size, new_nodes); + omtdata_t *const values = this->d.a.values; + omtdata_t *const tmp_values = &values[this->d.a.start_idx]; + this->is_array = false; + this->d.t.nodes = new_nodes; + this->capacity = new_size; + this->d.t.free_idx = 0; + this->d.t.root.set_to_null(); + this->rebuild_from_sorted_array( + &this->d.t.root, tmp_values, num_nodes); + toku_free(values); + } + } + + template + void omt::maybe_resize_or_convert( + const uint32_t n) { + if (this->is_array) { + this->maybe_resize_array(n); + } else { + const uint32_t new_size = n <= 2 ? 4 : 2 * n; + const uint32_t num_nodes = this->nweight(this->d.t.root); + if ((this->capacity / 2 >= new_size) || + (this->d.t.free_idx >= this->capacity && num_nodes < n) || + (this->capacity < n)) { + this->convert_to_array(); + // if we had a free list, the "supports_marks" version could + // just resize, as it is now, we have to convert to and back + // from an array. + if (supports_marks) { + this->convert_to_tree(); + } + } + } + } + + template + bool omt::will_need_rebalance( + const subtree &st, + const int leftmod, + const int rightmod) const { + if (st.is_null()) { + return false; + } + const omt_node &n = this->d.t.nodes[st.get_index()]; + // one of the 1's is for the root. 
+ // the other is to take ceil(n/2) + const uint32_t weight_left = this->nweight(n.left) + leftmod; + const uint32_t weight_right = this->nweight(n.right) + rightmod; + return ((1 + weight_left < (1 + 1 + weight_right) / 2) || + (1 + weight_right < (1 + 1 + weight_left) / 2)); + } + + template + void omt::insert_internal( + subtree *const subtreep, + const omtdata_t &value, + const uint32_t idx, + subtree **const rebalance_subtree) { + if (subtreep->is_null()) { + paranoid_invariant_zero(idx); + const node_idx newidx = this->node_malloc(); + omt_node *const newnode = &this->d.t.nodes[newidx]; + newnode->weight = 1; + newnode->left.set_to_null(); + newnode->right.set_to_null(); + newnode->value = value; + subtreep->set_index(newidx); + } else { + omt_node &n = this->d.t.nodes[subtreep->get_index()]; + n.weight++; + if (idx <= this->nweight(n.left)) { + if (*rebalance_subtree == nullptr && + this->will_need_rebalance(*subtreep, 1, 0)) { + *rebalance_subtree = subtreep; + } + this->insert_internal(&n.left, value, idx, rebalance_subtree); + } else { + if (*rebalance_subtree == nullptr && + this->will_need_rebalance(*subtreep, 0, 1)) { + *rebalance_subtree = subtreep; + } + const uint32_t sub_index = idx - this->nweight(n.left) - 1; + this->insert_internal( + &n.right, value, sub_index, rebalance_subtree); + } + } + } + + template + void omt::set_at_internal_array( + const omtdata_t &value, + const uint32_t idx) { + this->d.a.values[this->d.a.start_idx + idx] = value; + } + + template + void omt::set_at_internal( + const subtree &st, + const omtdata_t &value, + const uint32_t idx) { + paranoid_invariant(!st.is_null()); + omt_node &n = this->d.t.nodes[st.get_index()]; + const uint32_t leftweight = this->nweight(n.left); + if (idx < leftweight) { + this->set_at_internal(n.left, value, idx); + } else if (idx == leftweight) { + n.value = value; + } else { + this->set_at_internal(n.right, value, idx - leftweight - 1); + } + } + + template + void omt::delete_internal( + subtree *const subtreep, + const uint32_t idx, + omt_node *const copyn, + subtree **const rebalance_subtree) { + paranoid_invariant_notnull(subtreep); + paranoid_invariant_notnull(rebalance_subtree); + paranoid_invariant(!subtreep->is_null()); + omt_node &n = this->d.t.nodes[subtreep->get_index()]; + const uint32_t leftweight = this->nweight(n.left); + if (idx < leftweight) { + n.weight--; + if (*rebalance_subtree == nullptr && + this->will_need_rebalance(*subtreep, -1, 0)) { + *rebalance_subtree = subtreep; + } + this->delete_internal(&n.left, idx, copyn, rebalance_subtree); + } else if (idx == leftweight) { + if (n.left.is_null()) { + const uint32_t oldidx = subtreep->get_index(); + *subtreep = n.right; + if (copyn != nullptr) { + copyn->value = n.value; + } + this->node_free(oldidx); + } else if (n.right.is_null()) { + const uint32_t oldidx = subtreep->get_index(); + *subtreep = n.left; + if (copyn != nullptr) { + copyn->value = n.value; + } + this->node_free(oldidx); + } else { + if (*rebalance_subtree == nullptr && + this->will_need_rebalance(*subtreep, 0, -1)) { + *rebalance_subtree = subtreep; + } + // don't need to copy up value, it's only used by this + // next call, and when that gets to the bottom there + // won't be any more recursion + n.weight--; + this->delete_internal(&n.right, 0, &n, rebalance_subtree); + } + } else { + n.weight--; + if (*rebalance_subtree == nullptr && + this->will_need_rebalance(*subtreep, 0, -1)) { + *rebalance_subtree = subtreep; + } + this->delete_internal( + &n.right, idx - leftweight - 1, 
copyn, rebalance_subtree); + } + } + + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int omt::iterate_internal_array( + const uint32_t left, + const uint32_t right, + iterate_extra_t *const iterate_extra) const { + int r; + for (uint32_t i = left; i < right; ++i) { + r = f(this->d.a.values[this->d.a.start_idx + i], i, iterate_extra); + if (r != 0) { + return r; + } + } + return 0; + } + + template + template + void omt::iterate_ptr_internal( + const uint32_t left, + const uint32_t right, + const subtree &st, + const uint32_t idx, + iterate_extra_t *const iterate_extra) { + if (!st.is_null()) { + omt_node &n = this->d.t.nodes[st.get_index()]; + const uint32_t idx_root = idx + this->nweight(n.left); + if (left < idx_root) { + this->iterate_ptr_internal( + left, right, n.left, idx, iterate_extra); + } + if (left <= idx_root && idx_root < right) { + int r = f(&n.value, idx_root, iterate_extra); + lazy_assert_zero(r); + } + if (idx_root + 1 < right) { + this->iterate_ptr_internal( + left, right, n.right, idx_root + 1, iterate_extra); + } + } + } + + template + template + void + omt::iterate_ptr_internal_array( + const uint32_t left, + const uint32_t right, + iterate_extra_t *const iterate_extra) { + for (uint32_t i = left; i < right; ++i) { + int r = + f(&this->d.a.values[this->d.a.start_idx + i], i, iterate_extra); + lazy_assert_zero(r); + } + } + + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int omt::iterate_internal( + const uint32_t left, + const uint32_t right, + const subtree &st, + const uint32_t idx, + iterate_extra_t *const iterate_extra) const { + if (st.is_null()) { + return 0; + } + int r; + const omt_node &n = this->d.t.nodes[st.get_index()]; + const uint32_t idx_root = idx + this->nweight(n.left); + if (left < idx_root) { + r = this->iterate_internal( + left, right, n.left, idx, iterate_extra); + if (r != 0) { + return r; + } + } + if (left <= idx_root && idx_root < right) { + r = f(n.value, idx_root, iterate_extra); + if (r != 0) { + return r; + } + } + if (idx_root + 1 < right) { + return this->iterate_internal( + left, right, n.right, idx_root + 1, iterate_extra); + } + return 0; + } + + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int omt:: + iterate_and_mark_range_internal(const uint32_t left, + const uint32_t right, + const subtree &st, + const uint32_t idx, + iterate_extra_t *const iterate_extra) { + paranoid_invariant(!st.is_null()); + int r; + omt_node &n = this->d.t.nodes[st.get_index()]; + const uint32_t idx_root = idx + this->nweight(n.left); + if (left < idx_root && !n.left.is_null()) { + n.set_marks_below_bit(); + r = this->iterate_and_mark_range_internal( + left, right, n.left, idx, iterate_extra); + if (r != 0) { + return r; + } + } + if (left <= idx_root && idx_root < right) { + n.set_marked_bit(); + r = f(n.value, idx_root, iterate_extra); + if (r != 0) { + return r; + } + } + if (idx_root + 1 < right && !n.right.is_null()) { + n.set_marks_below_bit(); + return this->iterate_and_mark_range_internal( + left, right, n.right, idx_root + 1, iterate_extra); + } + return 0; + } + + template + template < + typename iterate_extra_t, + int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)> + int + omt::iterate_over_marked_internal( + const subtree &st, + const uint32_t idx, + iterate_extra_t *const iterate_extra) const { + if 
(st.is_null()) { + return 0; + } + int r; + const omt_node &n = this->d.t.nodes[st.get_index()]; + const uint32_t idx_root = idx + this->nweight(n.left); + if (n.get_marks_below()) { + r = this->iterate_over_marked_internal( + n.left, idx, iterate_extra); + if (r != 0) { + return r; + } + } + if (n.get_marked()) { + r = f(n.value, idx_root, iterate_extra); + if (r != 0) { + return r; + } + } + if (n.get_marks_below()) { + return this->iterate_over_marked_internal( + n.right, idx_root + 1, iterate_extra); + } + return 0; + } + + template + void omt::fetch_internal_array( + const uint32_t i, + omtdataout_t *const value) const { + if (value != nullptr) { + copyout(value, &this->d.a.values[this->d.a.start_idx + i]); + } + } + + template + void omt::fetch_internal( + const subtree &st, + const uint32_t i, + omtdataout_t *const value) const { + omt_node &n = this->d.t.nodes[st.get_index()]; + const uint32_t leftweight = this->nweight(n.left); + if (i < leftweight) { + this->fetch_internal(n.left, i, value); + } else if (i == leftweight) { if (value != nullptr) { copyout(value, &n); } - r = 0; - } - return r; - } -} - -template -template -int omt::find_internal_plus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const { - paranoid_invariant_notnull(idxp); - uint32_t min = this->d.a.start_idx; - uint32_t limit = this->d.a.start_idx + this->d.a.num_values; - uint32_t best = subtree::NODE_NULL; - - while (min != limit) { - const uint32_t mid = (min + limit) / 2; - const int hv = h(this->d.a.values[mid], extra); - if (hv > 0) { - best = mid; - limit = mid; } else { - min = mid + 1; + this->fetch_internal(n.right, i - leftweight - 1, value); } } - if (best == subtree::NODE_NULL) { return DB_NOTFOUND; } - if (value != nullptr) { - copyout(value, &this->d.a.values[best]); - } - *idxp = best - this->d.a.start_idx; - return 0; -} -template -template -int omt::find_internal_plus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const { - paranoid_invariant_notnull(idxp); - if (subtree.is_null()) { + template + void + omt::fill_array_with_subtree_idxs( + node_idx *const array, + const subtree &st) const { + if (!st.is_null()) { + const omt_node &tree = this->d.t.nodes[st.get_index()]; + this->fill_array_with_subtree_idxs(&array[0], tree.left); + array[this->nweight(tree.left)] = st.get_index(); + this->fill_array_with_subtree_idxs( + &array[this->nweight(tree.left) + 1], tree.right); + } + } + + template + void + omt::rebuild_subtree_from_idxs( + subtree *const st, + const node_idx *const idxs, + const uint32_t numvalues) { + if (numvalues == 0) { + st->set_to_null(); + } else { + uint32_t halfway = numvalues / 2; + st->set_index(idxs[halfway]); + // node_idx newidx = idxs[halfway]; + omt_node &newnode = this->d.t.nodes[st->get_index()]; + newnode.weight = numvalues; + // value is already in there. + this->rebuild_subtree_from_idxs(&newnode.left, &idxs[0], halfway); + this->rebuild_subtree_from_idxs( + &newnode.right, &idxs[halfway + 1], numvalues - (halfway + 1)); + // n_idx = newidx; + } + } + + template + void omt::rebalance( + subtree *const st) { + node_idx idx = st->get_index(); + if (idx == this->d.t.root.get_index()) { + // Try to convert to an array. + // If this fails, (malloc) nothing will have changed. + // In the failure case we continue on to the standard rebalance + // algorithm. 
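+            // Converting to a sorted array and (for a marks-capable omt) back to
+            // a tree rebuilds the whole structure from its in-order contents;
+            // rebuild_from_sorted_array() splits at the midpoint recursively, so
+            // the resulting tree is balanced.  A non-marks omt can simply stay in
+            // array form, which needs no balancing at all.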
+ this->convert_to_array(); + if (supports_marks) { + this->convert_to_tree(); + } + } else { + const omt_node &n = this->d.t.nodes[idx]; + node_idx *tmp_array; + size_t mem_needed = n.weight * (sizeof tmp_array[0]); + size_t mem_free = (this->capacity - this->d.t.free_idx) * + (sizeof this->d.t.nodes[0]); + bool malloced; + if (mem_needed <= mem_free) { + // There is sufficient free space at the end of the nodes array + // to hold enough node indexes to rebalance. + malloced = false; + tmp_array = reinterpret_cast( + &this->d.t.nodes[this->d.t.free_idx]); + } else { + malloced = true; + XMALLOC_N(n.weight, tmp_array); + } + this->fill_array_with_subtree_idxs(tmp_array, *st); + this->rebuild_subtree_from_idxs(st, tmp_array, n.weight); + if (malloced) + toku_free(tmp_array); + } + } + + template + void omt::copyout( + omtdata_t *const out, + const omt_node *const n) { + *out = n->value; + } + + template + void omt::copyout( + omtdata_t **const out, + omt_node *const n) { + *out = &n->value; + } + + template + void omt::copyout( + omtdata_t *const out, + const omtdata_t *const stored_value_ptr) { + *out = *stored_value_ptr; + } + + template + void omt::copyout( + omtdata_t **const out, + omtdata_t *const stored_value_ptr) { + *out = stored_value_ptr; + } + + template + template + int omt::find_internal_zero_array( + const omtcmp_t &extra, + omtdataout_t *const value, + uint32_t *const idxp) const { + paranoid_invariant_notnull(idxp); + uint32_t min = this->d.a.start_idx; + uint32_t limit = this->d.a.start_idx + this->d.a.num_values; + uint32_t best_pos = subtree::NODE_NULL; + uint32_t best_zero = subtree::NODE_NULL; + + while (min != limit) { + uint32_t mid = (min + limit) / 2; + int hv = h(this->d.a.values[mid], extra); + if (hv < 0) { + min = mid + 1; + } else if (hv > 0) { + best_pos = mid; + limit = mid; + } else { + best_zero = mid; + limit = mid; + } + } + if (best_zero != subtree::NODE_NULL) { + // Found a zero + if (value != nullptr) { + copyout(value, &this->d.a.values[best_zero]); + } + *idxp = best_zero - this->d.a.start_idx; + return 0; + } + if (best_pos != subtree::NODE_NULL) + *idxp = best_pos - this->d.a.start_idx; + else + *idxp = this->d.a.num_values; return DB_NOTFOUND; } - omt_node *const n = &this->d.t.nodes[subtree.get_index()]; - int hv = h(n->value, extra); - int r; - if (hv > 0) { - r = this->find_internal_plus(n->left, extra, value, idxp); - if (r == DB_NOTFOUND) { - *idxp = this->nweight(n->left); - if (value != nullptr) { - copyout(value, n); - } - r = 0; - } - } else { - r = this->find_internal_plus(n->right, extra, value, idxp); - if (r == 0) { - *idxp += this->nweight(n->left) + 1; - } - } - return r; -} -template -template -int omt::find_internal_minus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const { - paranoid_invariant_notnull(idxp); - uint32_t min = this->d.a.start_idx; - uint32_t limit = this->d.a.start_idx + this->d.a.num_values; - uint32_t best = subtree::NODE_NULL; - - while (min != limit) { - const uint32_t mid = (min + limit) / 2; - const int hv = h(this->d.a.values[mid], extra); + template + template + int omt::find_internal_zero( + const subtree &st, + const omtcmp_t &extra, + omtdataout_t *const value, + uint32_t *const idxp) const { + paranoid_invariant_notnull(idxp); + if (st.is_null()) { + *idxp = 0; + return DB_NOTFOUND; + } + omt_node &n = this->d.t.nodes[st.get_index()]; + int hv = h(n.value, extra); if (hv < 0) { - best = mid; - min = mid + 1; + int r = this->find_internal_zero( + n.right, extra, value, 
idxp); + *idxp += this->nweight(n.left) + 1; + return r; + } else if (hv > 0) { + return this->find_internal_zero( + n.left, extra, value, idxp); } else { - limit = mid; + int r = this->find_internal_zero( + n.left, extra, value, idxp); + if (r == DB_NOTFOUND) { + *idxp = this->nweight(n.left); + if (value != nullptr) { + copyout(value, &n); + } + r = 0; + } + return r; } } - if (best == subtree::NODE_NULL) { return DB_NOTFOUND; } - if (value != nullptr) { - copyout(value, &this->d.a.values[best]); - } - *idxp = best - this->d.a.start_idx; - return 0; -} -template -template -int omt::find_internal_minus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const { - paranoid_invariant_notnull(idxp); - if (subtree.is_null()) { - return DB_NOTFOUND; - } - omt_node *const n = &this->d.t.nodes[subtree.get_index()]; - int hv = h(n->value, extra); - if (hv < 0) { - int r = this->find_internal_minus(n->right, extra, value, idxp); - if (r == 0) { - *idxp += this->nweight(n->left) + 1; - } else if (r == DB_NOTFOUND) { - *idxp = this->nweight(n->left); - if (value != nullptr) { - copyout(value, n); + template + template + int omt::find_internal_plus_array( + const omtcmp_t &extra, + omtdataout_t *const value, + uint32_t *const idxp) const { + paranoid_invariant_notnull(idxp); + uint32_t min = this->d.a.start_idx; + uint32_t limit = this->d.a.start_idx + this->d.a.num_values; + uint32_t best = subtree::NODE_NULL; + + while (min != limit) { + const uint32_t mid = (min + limit) / 2; + const int hv = h(this->d.a.values[mid], extra); + if (hv > 0) { + best = mid; + limit = mid; + } else { + min = mid + 1; + } + } + if (best == subtree::NODE_NULL) { + return DB_NOTFOUND; + } + if (value != nullptr) { + copyout(value, &this->d.a.values[best]); + } + *idxp = best - this->d.a.start_idx; + return 0; + } + + template + template + int omt::find_internal_plus( + const subtree &st, + const omtcmp_t &extra, + omtdataout_t *const value, + uint32_t *const idxp) const { + paranoid_invariant_notnull(idxp); + if (st.is_null()) { + return DB_NOTFOUND; + } + omt_node *const n = &this->d.t.nodes[st.get_index()]; + int hv = h(n->value, extra); + int r; + if (hv > 0) { + r = this->find_internal_plus( + n->left, extra, value, idxp); + if (r == DB_NOTFOUND) { + *idxp = this->nweight(n->left); + if (value != nullptr) { + copyout(value, n); + } + r = 0; + } + } else { + r = this->find_internal_plus( + n->right, extra, value, idxp); + if (r == 0) { + *idxp += this->nweight(n->left) + 1; } - r = 0; } return r; - } else { - return this->find_internal_minus(n->left, extra, value, idxp); } -} -} // namespace toku + + template + template + int omt::find_internal_minus_array( + const omtcmp_t &extra, + omtdataout_t *const value, + uint32_t *const idxp) const { + paranoid_invariant_notnull(idxp); + uint32_t min = this->d.a.start_idx; + uint32_t limit = this->d.a.start_idx + this->d.a.num_values; + uint32_t best = subtree::NODE_NULL; + + while (min != limit) { + const uint32_t mid = (min + limit) / 2; + const int hv = h(this->d.a.values[mid], extra); + if (hv < 0) { + best = mid; + min = mid + 1; + } else { + limit = mid; + } + } + if (best == subtree::NODE_NULL) { + return DB_NOTFOUND; + } + if (value != nullptr) { + copyout(value, &this->d.a.values[best]); + } + *idxp = best - this->d.a.start_idx; + return 0; + } + + template + template + int omt::find_internal_minus( + const subtree &st, + const omtcmp_t &extra, + omtdataout_t *const value, + uint32_t *const idxp) const { + 
paranoid_invariant_notnull(idxp); + if (st.is_null()) { + return DB_NOTFOUND; + } + omt_node *const n = &this->d.t.nodes[st.get_index()]; + int hv = h(n->value, extra); + if (hv < 0) { + int r = this->find_internal_minus( + n->right, extra, value, idxp); + if (r == 0) { + *idxp += this->nweight(n->left) + 1; + } else if (r == DB_NOTFOUND) { + *idxp = this->nweight(n->left); + if (value != nullptr) { + copyout(value, n); + } + r = 0; + } + return r; + } else { + return this->find_internal_minus( + n->left, extra, value, idxp); + } + } +} // namespace toku diff --git a/storage/tokudb/PerconaFT/util/omt.h b/storage/tokudb/PerconaFT/util/omt.h index c7ed2ca546f..dc2fd9b7162 100644 --- a/storage/tokudb/PerconaFT/util/omt.h +++ b/storage/tokudb/PerconaFT/util/omt.h @@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. You should have received a copy of the GNU Affero General Public License along with PerconaFT. If not, see . + +---------------------------------------- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and ======= */ #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 548ac5c7b09..babbe825f2e 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -7252,6 +7252,16 @@ int ha_tokudb::create( tokudb_trx_data *trx = NULL; THD* thd = ha_thd(); + String database_name, table_name, dictionary_name; + tokudb_split_dname(name, database_name, table_name, dictionary_name); + if (database_name.is_empty() || table_name.is_empty()) { + push_warning_printf(thd, + Sql_condition::WARN_LEVEL_WARN, + ER_TABLE_NAME, + "TokuDB: Table Name or Database Name is empty"); + DBUG_RETURN(ER_TABLE_NAME); + } + memset(&kc_info, 0, sizeof(kc_info)); #if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100999 diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index 610c9e07be0..a1d6597e33a 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -575,10 +575,10 @@ static int tokudb_init_func(void *p) { db_env->set_update(db_env, tokudb_update_fun); - db_env_set_direct_io(tokudb::sysvars::directio == TRUE); + db_env_set_direct_io(tokudb::sysvars::directio); db_env_set_compress_buffers_before_eviction( - tokudb::sysvars::compress_buffers_before_eviction == TRUE); + tokudb::sysvars::compress_buffers_before_eviction); db_env->change_fsync_log_period(db_env, tokudb::sysvars::fsync_log_period); diff --git a/storage/tokudb/hatoku_hton.h b/storage/tokudb/hatoku_hton.h index c5b6aab1769..e90af067b00 100644 --- a/storage/tokudb/hatoku_hton.h +++ b/storage/tokudb/hatoku_hton.h @@ -190,7 +190,6 @@ inline bool tokudb_killed_thd_callback(void* extra, return thd_killed(thd) != 0; } -extern HASH tokudb_open_tables; extern const char* tokudb_hton_name; extern int tokudb_hton_initialized; extern tokudb::thread::rwlock_t tokudb_hton_initialized_lock; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result 
b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result new file mode 100644 index 00000000000..f0d2f93f630 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result @@ -0,0 +1,2 @@ +CREATE TABLE `#mysql50#q.q`(f1 INT KEY) ENGINE=TOKUDB; +ERROR HY000: Got error 1632 from storage engine diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test new file mode 100644 index 00000000000..1e4b5d11922 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test @@ -0,0 +1,12 @@ +--source include/have_tokudb.inc +# PS-4979 : Dropping TokuDB table with non-alphanumeric characters could lead +# to a crash +# +# `#mysql50#q.q` is an invalid table name, but the server side doesn't detect it +# and complain. Instead it passes in an empty table name to the engine. The +# engine expects a table name in the form of a relative path like +# "./databasename/tablename". InnoDB detects this in parsing the table name +# during the creation and returns an error. + +--error ER_GET_ERRNO +CREATE TABLE `#mysql50#q.q`(f1 INT KEY) ENGINE=TOKUDB; diff --git a/storage/tokudb/tokudb_background.cc b/storage/tokudb/tokudb_background.cc index 13e0e9321cc..19f03dbca65 100644 --- a/storage/tokudb/tokudb_background.cc +++ b/storage/tokudb/tokudb_background.cc @@ -182,14 +182,14 @@ void* job_manager_t::real_thread_func() { if (res == tokudb::thread::semaphore_t::E_INTERRUPTED || _shutdown) { break; } else if (res == tokudb::thread::semaphore_t::E_SIGNALLED) { -#if TOKUDB_DEBUG +#if defined(TOKUDB_DEBUG) if (TOKUDB_UNLIKELY( tokudb::sysvars::debug_pause_background_job_manager)) { _sem.signal(); tokudb::time::sleep_microsec(250000); continue; } -#endif // TOKUDB_DEBUG +#endif // defined(TOKUDB_DEBUG) mutex_t_lock(_mutex); assert_debug(_background_jobs.size() > 0); diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc index e8e9f908275..d1f58d012ec 100644 --- a/storage/tokudb/tokudb_sysvars.cc +++ b/storage/tokudb/tokudb_sysvars.cc @@ -662,13 +662,13 @@ static MYSQL_THDVAR_ULONGLONG( ~0ULL, 1); -static MYSQL_THDVAR_STR( - last_lock_timeout, - PLUGIN_VAR_MEMALLOC, - "last lock timeout", - NULL, - NULL, - NULL); +static MYSQL_THDVAR_STR(last_lock_timeout, + PLUGIN_VAR_MEMALLOC | PLUGIN_VAR_NOCMDOPT | + PLUGIN_VAR_READONLY, + "last lock timeout", + NULL, + NULL, + NULL); static MYSQL_THDVAR_BOOL( load_save_space, diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h index d81d5fd7999..6f09f296a80 100644 --- a/storage/tokudb/tokudb_sysvars.h +++ b/storage/tokudb/tokudb_sysvars.h @@ -93,10 +93,10 @@ extern my_bool gdb_on_fatal; extern my_bool check_jemalloc; -#if TOKUDB_DEBUG +#if defined(TOKUDB_DEBUG) // used to control background job manager extern my_bool debug_pause_background_job_manager; -#endif // TOKUDB_DEBUG +#endif // defined(TOKUDB_DEBUG) // session/thread my_bool alter_print_error(THD* thd); From 013186eb968b4d8d0db661a6821c74193d40f43a Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 24 Jan 2019 10:51:40 +0100 Subject: [PATCH 20/37] compiler warning --- sql/log_event.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/log_event.cc b/sql/log_event.cc index 22638a1a44c..4a49a1ef740 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -11102,7 +11102,7 @@ int Table_map_log_event::do_apply_event(rpl_group_info *rgi) table_list->updating= 1; table_list->required_type= FRMTYPE_TABLE; - DBUG_PRINT("debug", ("table: %s is mapped to %u", 
table_list->table_name, + DBUG_PRINT("debug", ("table: %s is mapped to %llu", table_list->table_name, table_list->table_id)); #ifdef RBR_TRIGGERS table_list->master_had_triggers= ((m_flags & TM_BIT_HAS_TRIGGERS_F) ? 1 : 0); From 38ad46e0050939b0c4882eb0a339c84c4db8beb0 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 24 Jan 2019 13:31:05 +0100 Subject: [PATCH 21/37] cleanup: fill_alter_inplace_info remove attempts to track "candidate keys", use what was already decided in create_table_impl(). --- sql/sql_table.cc | 64 ++++++++++-------------------------------------- 1 file changed, 13 insertions(+), 51 deletions(-) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index df1ff8eaf5d..1b684cbd5db 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -5691,7 +5691,8 @@ static bool is_candidate_key(KEY *key) KEY_PART_INFO *key_part; KEY_PART_INFO *key_part_end= key->key_part + key->user_defined_key_parts; - if (!(key->flags & HA_NOSAME) || (key->flags & HA_NULL_PART_KEY)) + if (!(key->flags & HA_NOSAME) || (key->flags & HA_NULL_PART_KEY) || + (key->flags & HA_KEY_HAS_PART_KEY_SEG)) return false; for (key_part= key->key_part; key_part < key_part_end; key_part++) @@ -6157,9 +6158,7 @@ static int compare_uint(const uint *s, const uint *t) @retval false success */ -static bool fill_alter_inplace_info(THD *thd, - TABLE *table, - bool varchar, +static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar, Alter_inplace_info *ha_alter_info) { Field **f_ptr, *field; @@ -6167,7 +6166,6 @@ static bool fill_alter_inplace_info(THD *thd, Create_field *new_field; KEY_PART_INFO *key_part, *new_part; KEY_PART_INFO *end; - uint candidate_key_count= 0; Alter_info *alter_info= ha_alter_info->alter_info; DBUG_ENTER("fill_alter_inplace_info"); @@ -6443,8 +6441,13 @@ static bool fill_alter_inplace_info(THD *thd, Primary key index for the new table */ const KEY* const new_pk= (ha_alter_info->key_count > 0 && - is_candidate_key(ha_alter_info->key_info_buffer)) ? + (!my_strcasecmp(system_charset_info, + ha_alter_info->key_info_buffer->name, + primary_key_name) || + is_candidate_key(ha_alter_info->key_info_buffer))) ? ha_alter_info->key_info_buffer : NULL; + const KEY *const old_pk= table->s->primary_key == MAX_KEY ? NULL : + table->key_info + table->s->primary_key; DBUG_PRINT("info", ("index count old: %d new: %d", table->s->keys, ha_alter_info->key_count)); @@ -6526,8 +6529,7 @@ static bool fill_alter_inplace_info(THD *thd, (i) Old table doesn't have primary key, new table has it and vice-versa (ii) Primary key changed to another existing index */ - if ((new_key == new_pk) != - ((uint) (table_key - table->key_info) == table->s->primary_key)) + if ((new_key == new_pk) != (table_key == old_pk)) goto index_changed; continue; @@ -6581,22 +6583,6 @@ static bool fill_alter_inplace_info(THD *thd, /* Now let us calculate flags for storage engine API. */ - /* Count all existing candidate keys. */ - for (table_key= table->key_info; table_key < table_key_end; table_key++) - { - /* - Check if key is a candidate key, This key is either already primary key - or could be promoted to primary key if the original primary key is - dropped. - In MySQL one is allowed to create primary key with partial fields (i.e. - primary key which is not considered candidate). For simplicity we count - such key as a candidate key here. - */ - if (((uint) (table_key - table->key_info) == table->s->primary_key) || - is_candidate_key(table_key)) - candidate_key_count++; - } - /* Figure out what kind of indexes we are dropping. 
*/ KEY **dropped_key; KEY **dropped_key_end= ha_alter_info->index_drop_buffer + @@ -6609,21 +6595,10 @@ static bool fill_alter_inplace_info(THD *thd, if (table_key->flags & HA_NOSAME) { - /* - Unique key. Check for PRIMARY KEY. Also see comment about primary - and candidate keys above. - */ - if ((uint) (table_key - table->key_info) == table->s->primary_key) - { + if (table_key == old_pk) ha_alter_info->handler_flags|= Alter_inplace_info::DROP_PK_INDEX; - candidate_key_count--; - } else - { ha_alter_info->handler_flags|= Alter_inplace_info::DROP_UNIQUE_INDEX; - if (is_candidate_key(table_key)) - candidate_key_count--; - } } else ha_alter_info->handler_flags|= Alter_inplace_info::DROP_INDEX; @@ -6636,23 +6611,10 @@ static bool fill_alter_inplace_info(THD *thd, if (new_key->flags & HA_NOSAME) { - bool is_pk= !my_strcasecmp(system_charset_info, new_key->name, primary_key_name); - - if ((!(new_key->flags & HA_KEY_HAS_PART_KEY_SEG) && - !(new_key->flags & HA_NULL_PART_KEY)) || - is_pk) - { - /* Candidate key or primary key! */ - if (candidate_key_count == 0 || is_pk) - ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PK_INDEX; - else - ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX; - candidate_key_count++; - } + if (new_key == new_pk) + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PK_INDEX; else - { ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX; - } } else ha_alter_info->handler_flags|= Alter_inplace_info::ADD_INDEX; From e6fcd7230954c6111bba63e7f7201fc81e50178e Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 2 Dec 2018 00:25:05 +0100 Subject: [PATCH 22/37] Squashed commit of connect/10.0: commit 6a6a1f37798 Author: Olivier Bertrand Date: Fri Jan 4 12:31:52 2019 +0100 - Fix a few bug mainly concerning discovery and call from OEM (and prepare new table types) modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h modified: storage/connect/tabxml.cpp modified: storage/connect/tabxml.h - Fix wrong line estimate modified: storage/connect/mysql-test/connect/r/part_table.result modified: storage/connect/mysql-test/connect/t/part_table.test commit bd7d2e912d9 Author: Olivier Bertrand Date: Tue Dec 4 23:35:09 2018 +0100 Fix wrong version number commit 4933680e7ab Author: Olivier Bertrand Date: Sun Dec 2 00:25:05 2018 +0100 - Make PlugSubAlloc to be exportable Suppress unused parameter from PlugSubSet modified: storage/connect/global.h modified: storage/connect/plugutil.cpp modified: storage/connect/jsonudf.cpp modified: storage/connect/tabjson.cpp modified: storage/connect/user_connect.cc - Fix a bug making column catalog XML tables fail modified: storage/connect/tabxml.cpp - Comment out wrong message modified: storage/connect/ha_connect.cc - Update error message when sorting an ODBC table fails modified: storage/connect/tabodbc.cpp - Add error message when gettting an address from an OEM fails. 
modified: storage/connect/reldef.cpp - Make some modifications useful for OEM module writting Export discovery functions for CSV, JDBC and XML Remove unuseful include from tabjson.h Move TDBXML::data_charset function from header file to source modified: storage/connect/tabfmt.h modified: storage/connect/tabjson.h modified: storage/connect/tabxml.cpp modified: storage/connect/tabxml.h - Update test result modified: storage/connect/mysql-test/connect/r/jdbc_oracle.result --- storage/connect/global.h | 6 +- storage/connect/ha_connect.cc | 4 +- storage/connect/jsonudf.cpp | 12 +- .../mysql-test/connect/r/jdbc_oracle.result | 18 +- .../connect/r/jdbc_postgresql.result | 10 +- .../mysql-test/connect/r/part_table.result | 4 +- .../mysql-test/connect/t/part_table.test | 2 +- storage/connect/plugutil.cpp | 28 +- storage/connect/reldef.cpp | 11 +- storage/connect/tabfmt.h | 2 +- storage/connect/tabjson.cpp | 52 +-- storage/connect/tabjson.h | 8 +- storage/connect/tabodbc.cpp | 305 +++++++++--------- storage/connect/tabxml.cpp | 248 ++++++++------ storage/connect/tabxml.h | 6 +- storage/connect/user_connect.cc | 4 +- 16 files changed, 398 insertions(+), 322 deletions(-) diff --git a/storage/connect/global.h b/storage/connect/global.h index 36e8a311124..dc1e149745f 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -219,11 +219,11 @@ DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR prefix, LPCSTR name, LPCSTR dir); DllExport BOOL PlugIsAbsolutePath(LPCSTR path); DllExport bool AllocSarea(PGLOBAL, uint); DllExport void FreeSarea(PGLOBAL); -DllExport BOOL PlugSubSet(PGLOBAL, void *, uint); +DllExport BOOL PlugSubSet(void *, uint); +DllExport void *PlugSubAlloc(PGLOBAL, void *, size_t); DllExport char *PlugDup(PGLOBAL g, const char *str); DllExport void *MakePtr(void *, OFFSET); DllExport void htrc(char const *fmt, ...); -//DllExport int GetTraceValue(void); DllExport uint GetTraceValue(void); #if defined(__cplusplus) @@ -233,6 +233,6 @@ DllExport uint GetTraceValue(void); /***********************************************************************/ /* Non exported routine declarations. */ /***********************************************************************/ -void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw +//void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw /*-------------------------- End of Global.H --------------------------*/ diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index bf890724d5e..1e826f67573 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -4191,7 +4191,7 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos) rc= rnd_next(buf); } else { PGLOBAL g = GetPlug((table) ? 
table->in_use : NULL, xp); - strcpy(g->Message, "Not supported by this table type"); +// strcpy(g->Message, "Not supported by this table type"); my_message(ER_ILLEGAL_HA, g->Message, MYF(0)); rc= HA_ERR_INTERNAL_ERROR; } // endif SetRecpos @@ -7307,7 +7307,7 @@ maria_declare_plugin(connect) PLUGIN_LICENSE_GPL, connect_init_func, /* Plugin Init */ connect_done_func, /* Plugin Deinit */ - 0x0107, /* version number (1.05) */ + 0x0106, /* version number (1.06) */ NULL, /* status variables */ connect_system_variables, /* system variables */ "1.06.0008", /* string version */ diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 26455d572b6..d5a3a840173 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -3055,7 +3055,7 @@ my_bool json_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) PGLOBAL g = (PGLOBAL)initid->ptr; - PlugSubSet(g, g->Sarea, g->Sarea_Size); + PlugSubSet(g->Sarea, g->Sarea_Size); g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JAR); g->N = (int)n; return false; @@ -3098,7 +3098,7 @@ void json_array_grp_clear(UDF_INIT *initid, char*, char*) { PGLOBAL g = (PGLOBAL)initid->ptr; - PlugSubSet(g, g->Sarea, g->Sarea_Size); + PlugSubSet(g->Sarea, g->Sarea_Size); g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JAR); g->N = GetJsonGroupSize(); } // end of json_array_grp_clear @@ -3132,7 +3132,7 @@ my_bool json_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) PGLOBAL g = (PGLOBAL)initid->ptr; - PlugSubSet(g, g->Sarea, g->Sarea_Size); + PlugSubSet(g->Sarea, g->Sarea_Size); g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JOB); g->N = (int)n; return false; @@ -3169,7 +3169,7 @@ void json_object_grp_clear(UDF_INIT *initid, char*, char*) { PGLOBAL g = (PGLOBAL)initid->ptr; - PlugSubSet(g, g->Sarea, g->Sarea_Size); + PlugSubSet(g->Sarea, g->Sarea_Size); g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JOB); g->N = GetJsonGroupSize(); } // end of json_object_grp_clear @@ -4418,7 +4418,7 @@ char *json_file(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; - PlugSubSet(g, g->Sarea, g->Sarea_Size); + PlugSubSet(g->Sarea, g->Sarea_Size); fn = MakePSZ(g, args, 0); if (args->arg_count > 1) { @@ -5662,7 +5662,7 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result, if (bsp && !bsp->Changed) goto fin; - PlugSubSet(g, g->Sarea, g->Sarea_Size); + PlugSubSet(g->Sarea, g->Sarea_Size); g->Xchk = NULL; fn = MakePSZ(g, args, 0); pretty = (args->arg_count > 2 && args->args[2]) ? 
(int)*(longlong*)args->args[2] : 3; diff --git a/storage/connect/mysql-test/connect/r/jdbc_oracle.result b/storage/connect/mysql-test/connect/r/jdbc_oracle.result index 2e36891a037..ec314c5f072 100644 --- a/storage/connect/mysql-test/connect/r/jdbc_oracle.result +++ b/storage/connect/mysql-test/connect/r/jdbc_oracle.result @@ -8,12 +8,19 @@ SELECT * FROM t2 WHERE command = 'drop table employee'; command number message drop table employee 0 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist +Warnings: +Warning 1105 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist + SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))'; command number message create table employee (id int not null, name varchar(32), title char(16), salary number(8,2)) 0 Affected rows +Warnings: +Warning 1105 Affected rows SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)"; command number message insert into employee values(4567,'Johnson', 'Engineer', 12560.50) 1 Affected rows +Warnings: +Warning 1105 Affected rows CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' OPTION_LIST='User=system,Password=manager'; @@ -27,8 +34,8 @@ OPTION_LIST='User=system,Password=manager'; SELECT * FROM t1; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks NULL SYSTEM EMPLOYEE ID 3 NUMBER 38 0 0 10 0 NULL -NULL SYSTEM EMPLOYEE NAME 12 VARCHAR2 32 0 0 10 1 NULL -NULL SYSTEM EMPLOYEE TITLE 1 CHAR 16 0 0 10 1 NULL +NULL SYSTEM EMPLOYEE NAME 12 VARCHAR2 32 0 NULL 10 1 NULL +NULL SYSTEM EMPLOYEE TITLE 1 CHAR 16 0 NULL 10 1 NULL NULL SYSTEM EMPLOYEE SALARY 3 NUMBER 8 0 2 10 1 NULL DROP TABLE t1; CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OPTIONS ( @@ -52,7 +59,7 @@ Note 1105 EMPLOYEE: 1 affected rows SELECT * FROM t1; ID NAME TITLE SALARY 4567 Trump Engineer 12560.50 -6214 Clinton Retired 0.00 +6214 Clinton Retired NULL DELETE FROM t1 WHERE id = 6214; Warnings: Note 1105 EMPLOYEE: 1 affected rows @@ -63,8 +70,7 @@ DROP TABLE t1; SELECT * FROM t2 WHERE command = 'drop table employee'; command number message drop table employee 0 Affected rows +Warnings: +Warning 1105 Affected rows DROP TABLE t2; DROP SERVER 'oracle'; -SET GLOBAL connect_jvm_path=NULL; -SET GLOBAL connect_class_path=NULL; -SET GLOBAL time_zone = SYSTEM; diff --git a/storage/connect/mysql-test/connect/r/jdbc_postgresql.result b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result index 7969672dd66..bec1dc8725b 100644 --- a/storage/connect/mysql-test/connect/r/jdbc_postgresql.result +++ b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result @@ -1,4 +1,4 @@ -SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/JavaWrappers.jar;C:/Jconnectors/postgresql-42.2.1.jar'; +SET GLOBAL connect_class_path='C:/MariaDB-10.0/MariaDB/storage/connect/mysql-test/connect/std_data/JavaWrappers.jar;C:/Jconnectors/postgresql-42.2.1.jar'; CREATE TABLE t2 ( command varchar(128) not null, number int(5) not null flag=1, @@ -9,12 +9,18 @@ OPTION_LIST='Execsrc=1'; SELECT * FROM t2 WHERE command='drop table employee'; command number message drop table employee 0 Execute: org.postgresql.util.PSQLException: ERREUR: la table « employee » n'existe pas +Warnings: +Warning 1105 Execute: 
org.postgresql.util.PSQLException: ERREUR: la table « employee » n'existe pas SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2))'; command number message create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2)) 0 Affected rows +Warnings: +Warning 1105 Affected rows SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)"; command number message insert into employee values(4567,'Johnson', 'Engineer', 12560.50) 1 Affected rows +Warnings: +Warning 1105 Affected rows CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono' OPTION_LIST='Tabtype=TABLE,Maxres=10'; @@ -63,4 +69,6 @@ DROP SERVER 'postgresql'; SELECT * FROM t2 WHERE command='drop table employee'; command number message drop table employee 0 Affected rows +Warnings: +Warning 1105 Affected rows DROP TABLE t2; diff --git a/storage/connect/mysql-test/connect/r/part_table.result b/storage/connect/mysql-test/connect/r/part_table.result index f3a556ae784..ee17a1d32b9 100644 --- a/storage/connect/mysql-test/connect/r/part_table.result +++ b/storage/connect/mysql-test/connect/r/part_table.result @@ -23,7 +23,7 @@ id msg CREATE TABLE xt3 ( id INT KEY NOT NULL, msg VARCHAR(32)) -ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10; +ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=6; Warnings: Warning 1105 No file name. Table will use xt3.csv INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two'); @@ -92,7 +92,7 @@ id msg EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 81; id select_type table partitions type possible_keys key key_len ref rows Extra -1 SIMPLE t1 3 ALL NULL NULL NULL NULL 4 Using where +1 SIMPLE t1 3 ALL NULL NULL NULL NULL 6 Using where DELETE FROM t1; Warnings: Note 1105 xt1: 4 affected rows diff --git a/storage/connect/mysql-test/connect/t/part_table.test b/storage/connect/mysql-test/connect/t/part_table.test index 5edd5766bd6..0fb2a11f0f9 100644 --- a/storage/connect/mysql-test/connect/t/part_table.test +++ b/storage/connect/mysql-test/connect/t/part_table.test @@ -22,7 +22,7 @@ SELECT * FROM xt2; CREATE TABLE xt3 ( id INT KEY NOT NULL, msg VARCHAR(32)) -ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10; +ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=6; INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two'); SELECT * FROM xt3; diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index 887527e38ab..048f00be75f 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -514,27 +514,31 @@ void FreeSarea(PGLOBAL g) /* Here there should be some verification done such as validity of */ /* the address and size not larger than memory size. */ /***********************************************************************/ -BOOL PlugSubSet(PGLOBAL g __attribute__((unused)), void *memp, uint size) +BOOL PlugSubSet(void *memp, uint size) { PPOOLHEADER pph = (PPOOLHEADER)memp; pph->To_Free = (OFFSET)sizeof(POOLHEADER); pph->FreeBlk = size - pph->To_Free; - return FALSE; } /* end of PlugSubSet */ +/***********************************************************************/ +/* Use it to export a function that do throwing. 
*/ +/***********************************************************************/ +void *DoThrow(int n) +{ + throw n; +} /* end of DoThrow */ + /***********************************************************************/ /* Program for sub-allocating one item in a storage area. */ -/* Note: SubAlloc routines of OS/2 are no more used to increase the */ -/* code portability and avoid problems when a grammar compiled under */ -/* one version of OS/2 is used under another version. */ -/* The simple way things are done here is also based on the fact */ -/* that no freeing of suballocated blocks is permitted in Plug. */ +/* The simple way things are done here is based on the fact */ +/* that no freeing of suballocated blocks is permitted in CONNECT. */ /***********************************************************************/ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) - { - PPOOLHEADER pph; /* Points on area header. */ +{ + PPOOLHEADER pph; /* Points on area header. */ if (!memp) /*******************************************************************/ @@ -559,8 +563,8 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) if (trace(1)) htrc("PlugSubAlloc: %s\n", g->Message); - throw 1234; - } /* endif size OS32 code */ + DoThrow(1234); + } /* endif size OS32 code */ /*********************************************************************/ /* Do the suballocation the simplest way. */ @@ -574,7 +578,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) memp, pph->To_Free, pph->FreeBlk); return (memp); - } /* end of PlugSubAlloc */ +} /* end of PlugSubAlloc */ /***********************************************************************/ /* Program for sub-allocating and copying a string in a storage area. */ diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index e4f169575f8..30d8063d1a6 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -522,8 +522,15 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g) // Get the function returning an instance of the external DEF class if (!(getdef = (XGETDEF)GetProcAddress((HINSTANCE)Hdll, getname))) { - sprintf(g->Message, MSG(PROCADD_ERROR), GetLastError(), getname); - FreeLibrary((HMODULE)Hdll); + char buf[256]; + DWORD rc = GetLastError(); + + sprintf(g->Message, MSG(PROCADD_ERROR), rc, getname); + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, + (LPTSTR)buf, sizeof(buf), NULL); + strcat(strcat(g->Message, ": "), buf); + FreeLibrary((HMODULE)Hdll); return NULL; } // endif getdef #else // !__WIN__ diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h index 396bba568ff..10f0757c60b 100644 --- a/storage/connect/tabfmt.h +++ b/storage/connect/tabfmt.h @@ -13,7 +13,7 @@ typedef class TDBFMT *PTDBFMT; /***********************************************************************/ /* Functions used externally. */ /***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info); +DllExport PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info); /***********************************************************************/ /* CSV table. 
*/ diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 9e4f5ab987d..c0d36efcf42 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1,6 +1,6 @@ /************* tabjson C++ Program Source Code File (.CPP) *************/ -/* PROGRAM NAME: tabjson Version 1.5 */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ +/* PROGRAM NAME: tabjson Version 1.6 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2018 */ /* This program are the JSON class DB execution routines. */ /***********************************************************************/ @@ -173,6 +173,7 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg) int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) { + char filename[_MAX_PATH]; bool mgo = (GetTypeID(topt->type) == TAB_MONGO); PCSZ level = GetStringTableOption(g, topt, "Level", NULL); @@ -209,6 +210,12 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) return 0; } // endif Fn + if (tdp->Fn) { + // We used the file name relative to recorded datapath + PlugSetPath(filename, tdp->Fn, tdp->GetPath()); + tdp->Fn = PlugDup(g, filename); + } // endif Fn + if (trace(1)) htrc("File %s objname=%s pretty=%d lvl=%d\n", tdp->Fn, tdp->Objname, tdp->Pretty, lvl); @@ -299,7 +306,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) memset(G, 0, sizeof(GLOBAL)); G->Sarea_Size = tdp->Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); - PlugSubSet(G, G->Sarea, G->Sarea_Size); + PlugSubSet(G->Sarea, G->Sarea_Size); G->jump_level = 0; tjnp->SetG(G); @@ -342,7 +349,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) strncpy(colname, jpp->GetKey(), 64); fmt[bf] = 0; - if (Find(g, jpp->GetVal(), MY_MIN(lvl, 0))) + if (Find(g, jpp->GetVal(), colname, MY_MIN(lvl, 0))) goto err; } // endfor jpp @@ -385,7 +392,7 @@ err: return 0; } // end of GetColumns -bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j) +bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) { char *p, *pc = colname + strlen(colname); int ars; @@ -413,12 +420,14 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j) job = (PJOB)jsp; for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->GetNext()) { - if (*jrp->GetKey() != '$') { - strncat(strncat(fmt, sep, 128), jrp->GetKey(), 128); - strncat(strncat(colname, "_", 64), jrp->GetKey(), 64); + PCSZ k = jrp->GetKey(); + + if (*k != '$') { + strncat(strncat(fmt, sep, 128), k, 128); + strncat(strncat(colname, "_", 64), k, 64); } // endif Key - if (Find(g, jrp->GetVal(), j + 1)) + if (Find(g, jrp->GetVal(), k, j + 1)) return true; *p = *pc = 0; @@ -428,13 +437,13 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j) case TYPE_JAR: jar = (PJAR)jsp; - if (all || (tdp->Xcol && !stricmp(tdp->Xcol, colname))) + if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key))) ars = jar->GetSize(false); else ars = MY_MIN(jar->GetSize(false), 1); for (int k = 0; k < ars; k++) { - if (!tdp->Xcol || stricmp(tdp->Xcol, colname)) { + if (!tdp->Xcol || stricmp(tdp->Xcol, key)) { sprintf(buf, "%d", k); if (tdp->Uri) @@ -448,7 +457,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j) } else strncat(fmt, (tdp->Uri ? 
sep : "[*]"), 128); - if (Find(g, jar->GetValue(k), j)) + if (Find(g, jar->GetValue(k), "", j)) return true; *p = *pc = 0; @@ -522,7 +531,9 @@ void JSONDISC::AddColumn(PGLOBAL g) n++; } // endif jcp - pjcp = jcp; + if (jcp) + pjcp = jcp; + } // end of AddColumn @@ -549,7 +560,7 @@ JSONDEF::JSONDEF(void) /***********************************************************************/ /* DefineAM: define specific AM block values. */ /***********************************************************************/ -bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff) +bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { Schema = GetStringCatInfo(g, "DBname", Schema); Jmode = (JMODE)GetIntCatInfo("Jmode", MODE_OBJECT); @@ -561,7 +572,8 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff) Sep = *GetStringCatInfo(g, "Separator", "."); Accept = GetBoolCatInfo("Accept", false); - if (Uri = GetStringCatInfo(g, "Connect", NULL)) { + // Don't use url as uri when called from REST OEM module + if (stricmp(am, "REST") && (Uri = GetStringCatInfo(g, "Connect", NULL))) { #if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) Collname = GetStringCatInfo(g, "Name", (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name); @@ -670,7 +682,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) memset(G, 0, sizeof(GLOBAL)); G->Sarea_Size = Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); - PlugSubSet(G, G->Sarea, G->Sarea_Size); + PlugSubSet(G->Sarea, G->Sarea_Size); G->jump_level = 0; ((TDBJSN*)tdbp)->G = G; } else { @@ -963,7 +975,7 @@ int TDBJSN::ReadDB(PGLOBAL g) return rc; // Recover the memory used for parsing - PlugSubSet(G, G->Sarea, G->Sarea_Size); + PlugSubSet(G->Sarea, G->Sarea_Size); if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) { Row = FindRow(g); @@ -1079,13 +1091,13 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) } // end of PrepareWriting /***********************************************************************/ -/* WriteDB: Data Base write routine for DOS access method. */ +/* WriteDB: Data Base write routine for JSON access method. */ /***********************************************************************/ int TDBJSN::WriteDB(PGLOBAL g) { int rc = TDBDOS::WriteDB(g); - PlugSubSet(G, G->Sarea, G->Sarea_Size); + PlugSubSet(G->Sarea, G->Sarea_Size); Row->Clear(); return rc; } // end of WriteDB @@ -2340,7 +2352,7 @@ void TDBJSON::CloseDB(PGLOBAL g) TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp) { Topt = tdp->GetTopt(); - Db = tdp->Schema; + Db = tdp->Schema; Dsn = tdp->Uri; } // end of TDBJCL constructor diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 2ff72905e86..8721a2a5ab7 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -1,11 +1,11 @@ /*************** tabjson H Declares Source Code File (.H) **************/ /* Name: tabjson.h Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2018 */ /* */ /* This file contains the JSON classes declares. 
*/ /***********************************************************************/ -#include "osutil.h" +//#include "osutil.h" // Unuseful and bad for OEM #include "block.h" #include "colblk.h" #include "json.h" @@ -16,7 +16,7 @@ typedef class JSONDEF *PJDEF; typedef class TDBJSON *PJTDB; typedef class JSONCOL *PJCOL; class TDBJSN; -PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info); +DllExport PQRYRES JSONColumns(PGLOBAL, PCSZ, PCSZ, PTOS, bool); /***********************************************************************/ /* The JSON tree node. Can be an Object or an Array. */ @@ -52,7 +52,7 @@ public: // Functions int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt); - bool Find(PGLOBAL g, PJVAL jvp, int j); + bool Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j); void AddColumn(PGLOBAL g); // Members diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index fddfb0c0420..0fa117c3d2f 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -5,7 +5,7 @@ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2000-2018 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -95,23 +95,23 @@ bool ExactInfo(void); /* Constructor. */ /***********************************************************************/ ODBCDEF::ODBCDEF(void) - { +{ Connect = NULL; Catver = 0; UseCnc = false; - } // end of ODBCDEF constructor +} // end of ODBCDEF constructor /***********************************************************************/ /* DefineAM: define specific AM block values from XDB file. */ /***********************************************************************/ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) - { +{ Desc = Connect = GetStringCatInfo(g, "Connect", NULL); if (!Connect && !Catfunc) { sprintf(g->Message, "Missing connection for ODBC table %s", Name); return true; - } // endif Connect + } // endif Connect if (EXTDEF::DefineAM(g, am, poff)) return true; @@ -123,13 +123,13 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT); UseCnc = GetBoolCatInfo("UseDSN", false); return false; - } // end of DefineAM +} // end of DefineAM /***********************************************************************/ /* GetTable: makes a new Table Description Block. */ /***********************************************************************/ PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m) - { +{ PTDB tdbp = NULL; /*********************************************************************/ @@ -158,10 +158,10 @@ PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m) tdbp = new(g) TDBMUL(tdbp); else if (Multiple == 2) strcpy(g->Message, MSG(NO_ODBC_MUL)); - } // endswitch Catfunc + } // endswitch Catfunc return tdbp; - } // end of GetTable +} // end of GetTable /* -------------------------- Class TDBODBC -------------------------- */ @@ -169,7 +169,7 @@ PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m) /* Implementation of the TDBODBC class. */ /***********************************************************************/ TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp) - { +{ Ocp = NULL; Cnp = NULL; @@ -191,19 +191,19 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp) Ops.UseCnc = false; } // endif tdp - } // end of TDBODBC standard constructor +} // end of TDBODBC standard constructor TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp) - { +{ Ocp = tdbp->Ocp; // is that right ? 
Cnp = tdbp->Cnp; Connect = tdbp->Connect; Ops = tdbp->Ops; - } // end of TDBODBC copy constructor +} // end of TDBODBC copy constructor // Method PTDB TDBODBC::Clone(PTABS t) - { +{ PTDB tp; PODBCCOL cp1, cp2; PGLOBAL g = t->G; // Is this really useful ??? @@ -213,18 +213,18 @@ PTDB TDBODBC::Clone(PTABS t) for (cp1 = (PODBCCOL)Columns; cp1; cp1 = (PODBCCOL)cp1->GetNext()) { cp2 = new(g) ODBCCOL(cp1, tp); // Make a copy NewPointer(t, cp1, cp2); - } // endfor cp1 + } // endfor cp1 return tp; - } // end of CopyOne +} // end of CopyOne /***********************************************************************/ /* Allocate ODBC column description block. */ /***********************************************************************/ PCOL TDBODBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) - { +{ return new(g) ODBCCOL(cdp, this, cprec, n); - } // end of MakeCol +} // end of MakeCol /***********************************************************************/ /* Extract the filename from connect string and return it. */ @@ -232,7 +232,7 @@ PCOL TDBODBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /* with a place holder to be used by SetFile. */ /***********************************************************************/ PCSZ TDBODBC::GetFile(PGLOBAL g) - { +{ if (Connect) { char *p1, *p2; int i; @@ -263,18 +263,18 @@ PCSZ TDBODBC::GetFile(PGLOBAL g) memcpy(MulConn, Connect, p1 - Connect); MulConn[p1 - Connect] = '\0'; strcat(strcat(MulConn, "%s"), (p2) ? p2 : ";"); - } // endif p1 + } // endif p1 - } // endif Connect + } // endif Connect return (DBQ) ? DBQ : (PSZ)"???"; - } // end of GetFile +} // end of GetFile /***********************************************************************/ /* Set DBQ and get the new file name into the connect string. */ /***********************************************************************/ void TDBODBC::SetFile(PGLOBAL g, PCSZ fn) - { +{ if (MulConn) { int n = strlen(MulConn) + strlen(fn) - 1; @@ -283,20 +283,20 @@ void TDBODBC::SetFile(PGLOBAL g, PCSZ fn) // of having to reallocate it is reduced. BufSize = n + 6; Connect = (char*)PlugSubAlloc(g, NULL, BufSize); - } // endif n + } // endif n // Make the complete connect string sprintf(Connect, MulConn, fn); - } // endif MultConn + } // endif MultConn DBQ = PlugDup(g, fn); - } // end of SetFile +} // end of SetFile /***********************************************************************/ /* MakeInsert: make the Insert statement used with ODBC connection. */ /***********************************************************************/ bool TDBODBC::MakeInsert(PGLOBAL g) - { +{ PCSZ schmp = NULL; char *catp = NULL, buf[NAM_LEN * 3]; int len = 0; @@ -377,7 +377,7 @@ bool TDBODBC::MakeInsert(PGLOBAL g) } else Query->Append(buf); - } // endfor colp + } // endfor colp Query->Append(") VALUES ("); @@ -390,32 +390,32 @@ bool TDBODBC::MakeInsert(PGLOBAL g) Query->RepLast(')'); return oom; - } // end of MakeInsert +} // end of MakeInsert /***********************************************************************/ /* ODBC Bind Parameter function. 
*/ /***********************************************************************/ bool TDBODBC::BindParameters(PGLOBAL g) - { - PODBCCOL colp; +{ + PODBCCOL colp; - for (colp = (PODBCCOL)Columns; colp; colp = (PODBCCOL)colp->Next) { - colp->AllocateBuffers(g, 0); + for (colp = (PODBCCOL)Columns; colp; colp = (PODBCCOL)colp->Next) { + colp->AllocateBuffers(g, 0); - if (Ocp->BindParam(colp)) - return true; + if (Ocp->BindParam(colp)) + return true; - } // endfor colp + } // endfor colp - return false; - } // end of BindParameters + return false; +} // end of BindParameters #if 0 /***********************************************************************/ /* MakeUpdate: make the SQL statement to send to ODBC connection. */ /***********************************************************************/ char *TDBODBC::MakeUpdate(PGLOBAL g) - { +{ char *qc, *stmt = NULL, cmd[8], tab[96], end[1024]; stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64); @@ -440,60 +440,60 @@ char *TDBODBC::MakeUpdate(PGLOBAL g) strcat(stmt, end); return stmt; - } // end of MakeUpdate +} // end of MakeUpdate /***********************************************************************/ /* MakeDelete: make the SQL statement to send to ODBC connection. */ /***********************************************************************/ char *TDBODBC::MakeDelete(PGLOBAL g) - { - char *qc, *stmt = NULL, cmd[8], from[8], tab[96], end[512]; +{ + char *qc, *stmt = NULL, cmd[8], from[8], tab[96], end[512]; - stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64); - memset(end, 0, sizeof(end)); + stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64); + memset(end, 0, sizeof(end)); - if (sscanf(Qrystr, "%s %s `%[^`]`%511c", cmd, from, tab, end) > 2 || - sscanf(Qrystr, "%s %s \"%[^\"]\"%511c", cmd, from, tab, end) > 2) - qc = Ocp->GetQuoteChar(); - else if (sscanf(Qrystr, "%s %s %s%511c", cmd, from, tab, end) > 2) - qc = (Quoted) ? Quote : ""; - else { - strcpy(g->Message, "Cannot use this DELETE command"); - return NULL; - } // endif sscanf + if (sscanf(Qrystr, "%s %s `%[^`]`%511c", cmd, from, tab, end) > 2 || + sscanf(Qrystr, "%s %s \"%[^\"]\"%511c", cmd, from, tab, end) > 2) + qc = Ocp->GetQuoteChar(); + else if (sscanf(Qrystr, "%s %s %s%511c", cmd, from, tab, end) > 2) + qc = (Quoted) ? Quote : ""; + else { + strcpy(g->Message, "Cannot use this DELETE command"); + return NULL; + } // endif sscanf - assert(!stricmp(cmd, "delete") && !stricmp(from, "from")); - strcat(strcat(strcat(strcpy(stmt, "DELETE FROM "), qc), TableName), qc); + assert(!stricmp(cmd, "delete") && !stricmp(from, "from")); + strcat(strcat(strcat(strcpy(stmt, "DELETE FROM "), qc), TableName), qc); - if (*end) { - for (int i = 0; end[i]; i++) - if (end[i] == '`') - end[i] = *qc; + if (*end) { + for (int i = 0; end[i]; i++) + if (end[i] == '`') + end[i] = *qc; - strcat(stmt, end); - } // endif end + strcat(stmt, end); + } // endif end - return stmt; - } // end of MakeDelete + return stmt; +} // end of MakeDelete #endif // 0 /***********************************************************************/ /* ResetSize: call by TDBMUL when calculating size estimate. */ /***********************************************************************/ void TDBODBC::ResetSize(void) - { +{ MaxSize = -1; if (Ocp && Ocp->IsOpen()) Ocp->Close(); - } // end of ResetSize +} // end of ResetSize /***********************************************************************/ /* ODBC Cardinality: returns table size in number of rows. 
*/ /***********************************************************************/ int TDBODBC::Cardinality(PGLOBAL g) - { +{ if (!g) return (Mode == MODE_ANY && !Srcdef) ? 1 : 0; @@ -526,7 +526,7 @@ int TDBODBC::Cardinality(PGLOBAL g) Cardinal = 10; // To make MySQL happy return Cardinal; - } // end of Cardinality +} // end of Cardinality /***********************************************************************/ /* ODBC Access Method opening routine. */ @@ -535,7 +535,7 @@ int TDBODBC::Cardinality(PGLOBAL g) /* join block of next table if it exists or else are discarted. */ /***********************************************************************/ bool TDBODBC::OpenDB(PGLOBAL g) - { +{ bool rc = true; if (trace(1)) @@ -571,7 +571,7 @@ bool TDBODBC::OpenDB(PGLOBAL g) Fpos = 0; Curpos = 1; return false; - } // endif use + } // endif use /*********************************************************************/ /* Open an ODBC connection for this table. */ @@ -593,7 +593,7 @@ bool TDBODBC::OpenDB(PGLOBAL g) Use = USE_OPEN; // Do it now in case we are recursively called /*********************************************************************/ - /* Make the command and allocate whatever is used for getting results. */ + /* Make the command and allocate whatever is used for getting results*/ /*********************************************************************/ if (Mode == MODE_READ || Mode == MODE_READX) { if (Memory > 1 && !Srcdef) { @@ -624,7 +624,7 @@ bool TDBODBC::OpenDB(PGLOBAL g) } else return true; - } // endif Memory + } // endif Memory if (!(rc = MakeSQL(g, false))) { for (PODBCCOL colp = (PODBCCOL)Columns; colp; @@ -635,7 +635,7 @@ bool TDBODBC::OpenDB(PGLOBAL g) rc = (Mode == MODE_READ) ? ((Rows = Ocp->ExecDirectSQL(Query->GetStr(), (PODBCCOL)Columns)) < 0) : false; - } // endif rc + } // endif rc } else if (Mode == MODE_INSERT) { if (!(rc = MakeInsert(g))) { @@ -645,7 +645,7 @@ bool TDBODBC::OpenDB(PGLOBAL g) } else rc = BindParameters(g); - } // endif rc + } // endif rc } else if (Mode == MODE_UPDATE || Mode == MODE_DELETE) { rc = false; // wait for CheckCond before calling MakeCommand(g); @@ -655,30 +655,30 @@ bool TDBODBC::OpenDB(PGLOBAL g) if (rc) { Ocp->Close(); return true; - } // endif rc + } // endif rc /*********************************************************************/ /* Reset statistics values. */ /*********************************************************************/ num_read = num_there = num_eq[0] = num_eq[1] = 0; return false; - } // end of OpenDB +} // end of OpenDB #if 0 /***********************************************************************/ /* GetRecpos: return the position of last read record. */ /***********************************************************************/ int TDBODBC::GetRecpos(void) - { +{ return Fpos; - } // end of GetRecpos +} // end of GetRecpos #endif // 0 /***********************************************************************/ /* SetRecpos: set the position of next read record. 
*/ /***********************************************************************/ bool TDBODBC::SetRecpos(PGLOBAL g, int recpos) - { +{ if (Ocp->m_Full) { Fpos = 0; CurNum = recpos - 1; @@ -696,14 +696,15 @@ bool TDBODBC::SetRecpos(PGLOBAL g, int recpos) } // endif recpos } else { - strcpy(g->Message, "This action requires a scrollable cursor"); + strcpy(g->Message, + "This action requires Memory setting or a scrollable cursor"); return true; } // endif's // Indicate the table position was externally set Placed = true; return false; - } // end of SetRecpos +} // end of SetRecpos /***********************************************************************/ /* Data Base indexed read routine for ODBC access method. */ @@ -721,7 +722,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) Rows = Ocp->ExecDirectSQL((char*)Query->GetStr(), (PODBCCOL)Columns); Mode = MODE_READ; return (Rows < 0); - } // endif key + } // endif key return false; } else { @@ -737,7 +738,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond))) PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1); - } // endif active_index + } // endif active_index if (To_CondFil) if (Query->Append(" AND ") || Query->Append(To_CondFil->Body)) { @@ -762,7 +763,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) /* VRDNDOS: Data Base read routine for odbc access method. */ /***********************************************************************/ int TDBODBC::ReadDB(PGLOBAL g) - { +{ int rc; if (trace(2)) @@ -784,7 +785,7 @@ int TDBODBC::ReadDB(PGLOBAL g) } else return RC_FX; // Error - } // endif Mode + } // endif Mode /*********************************************************************/ /* Now start the reading process. */ @@ -813,7 +814,7 @@ int TDBODBC::ReadDB(PGLOBAL g) Qrp->Nblin++; Fpos++; // Used for memory and pos - } // endif rc + } // endif rc } // endif Placed @@ -821,13 +822,13 @@ int TDBODBC::ReadDB(PGLOBAL g) htrc(" Read: Rbuf=%d rc=%d\n", Rbuf, rc); return rc; - } // end of ReadDB +} // end of ReadDB /***********************************************************************/ /* Data Base Insert write routine for ODBC access method. */ /***********************************************************************/ int TDBODBC::WriteDB(PGLOBAL g) - { +{ int n = Ocp->ExecuteSQL(); if (n < 0) { @@ -837,13 +838,13 @@ int TDBODBC::WriteDB(PGLOBAL g) AftRows += n; return RC_OK; - } // end of WriteDB +} // end of WriteDB /***********************************************************************/ /* Data Base delete line routine for ODBC access method. */ /***********************************************************************/ int TDBODBC::DeleteDB(PGLOBAL g, int irc) - { +{ if (irc == RC_FX) { if (!Query && MakeCommand(g)) return RC_FX; @@ -863,13 +864,13 @@ int TDBODBC::DeleteDB(PGLOBAL g, int irc) } else return RC_OK; // Ignore - } // end of DeleteDB +} // end of DeleteDB /***********************************************************************/ /* Data Base close routine for ODBC access method. 
*/ /***********************************************************************/ void TDBODBC::CloseDB(PGLOBAL g) - { +{ if (Ocp) Ocp->Close(); @@ -877,7 +878,7 @@ void TDBODBC::CloseDB(PGLOBAL g) if (trace(1)) htrc("ODBC CloseDB: closing %s\n", Name); - } // end of CloseDB +} // end of CloseDB /* --------------------------- ODBCCOL ------------------------------- */ @@ -886,33 +887,33 @@ void TDBODBC::CloseDB(PGLOBAL g) /***********************************************************************/ ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : EXTCOL(cdp, tdbp, cprec, i, am) - { +{ // Set additional ODBC access method information for column. Slen = 0; StrLen = &Slen; Sqlbuf = NULL; - } // end of ODBCCOL constructor +} // end of ODBCCOL constructor /***********************************************************************/ /* ODBCCOL private constructor. */ /***********************************************************************/ ODBCCOL::ODBCCOL(void) : EXTCOL() - { +{ Slen = 0; StrLen = &Slen; Sqlbuf = NULL; - } // end of ODBCCOL constructor +} // end of ODBCCOL constructor /***********************************************************************/ /* ODBCCOL constructor used for copying columns. */ /* tdbp is the pointer to the new table descriptor. */ /***********************************************************************/ ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp) - { +{ Slen = col1->Slen; StrLen = col1->StrLen; Sqlbuf = col1->Sqlbuf; - } // end of ODBCCOL copy constructor +} // end of ODBCCOL copy constructor /***********************************************************************/ /* ReadColumn: when SQLFetch is used there is nothing to do as the */ @@ -920,7 +921,7 @@ ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp) /* when calculating MaxSize (Bufp is NULL even when Rows is not). */ /***********************************************************************/ void ODBCCOL::ReadColumn(PGLOBAL g) - { +{ PTDBODBC tdbp = (PTDBODBC)To_Tdb; int i = tdbp->Fpos - 1, n = tdbp->CurNum; @@ -953,7 +954,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g) else Value->SetValue_pvblk(Blkp, n); - } // endif Bufp + } // endif Bufp if (Buf_Type == TYPE_DATE) { struct tm dbtime; @@ -980,7 +981,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g) htrc("ODBC Column %s: rows=%d buf=%p type=%d value=%s\n", Name, tdbp->Rows, Bufp, Buf_Type, Value->GetCharString(buf)); - } // endif trace + } // endif trace put: if (tdbp->Memory != 2) @@ -997,7 +998,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g) } else Crp->Kdata->SetValue(Value, i); - } // end of ReadColumn +} // end of ReadColumn /***********************************************************************/ /* AllocateBuffers: allocate the extended buffer for SQLExtendedFetch */ @@ -1005,7 +1006,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g) /* for the ending null character. 
*/ /***********************************************************************/ void ODBCCOL::AllocateBuffers(PGLOBAL g, int rows) - { +{ if (Buf_Type == TYPE_DATE) Sqlbuf = (TIMESTAMP_STRUCT*)PlugSubAlloc(g, NULL, sizeof(TIMESTAMP_STRUCT)); @@ -1019,31 +1020,31 @@ void ODBCCOL::AllocateBuffers(PGLOBAL g, int rows) Blkp = AllocValBlock(g, NULL, Buf_Type, rows, GetBuflen(), GetScale(), true, false, false); Bufp = Blkp->GetValPointer(); - } // endelse + } // endelse if (rows > 1) StrLen = (SQLLEN *)PlugSubAlloc(g, NULL, rows * sizeof(SQLLEN)); - } // end of AllocateBuffers +} // end of AllocateBuffers /***********************************************************************/ /* Returns the buffer to use for Fetch or Extended Fetch. */ /***********************************************************************/ void *ODBCCOL::GetBuffer(DWORD rows) - { +{ if (rows && To_Tdb) { assert(rows == (DWORD)((TDBODBC*)To_Tdb)->Rows); return Bufp; } else return (Buf_Type == TYPE_DATE) ? Sqlbuf : Value->GetTo_Val(); - } // end of GetBuffer +} // end of GetBuffer /***********************************************************************/ /* Returns the buffer length to use for Fetch or Extended Fetch. */ /***********************************************************************/ SWORD ODBCCOL::GetBuflen(void) - { +{ SWORD flen; switch (Buf_Type) { @@ -1059,13 +1060,13 @@ SWORD ODBCCOL::GetBuflen(void) } // endswitch Buf_Type return flen; - } // end of GetBuflen +} // end of GetBuflen /***********************************************************************/ /* WriteColumn: make sure the bind buffer is updated. */ /***********************************************************************/ void ODBCCOL::WriteColumn(PGLOBAL g) - { +{ /*********************************************************************/ /* Do convert the column value if necessary. */ /*********************************************************************/ @@ -1095,7 +1096,7 @@ void ODBCCOL::WriteColumn(PGLOBAL g) *StrLen = (Value->IsNull()) ? SQL_NULL_DATA : (IsTypeChar(Buf_Type)) ? SQL_NTS : 0; - } // end of WriteColumn +} // end of WriteColumn /* -------------------------- Class TDBXDBC -------------------------- */ @@ -1119,7 +1120,7 @@ TDBXDBC::TDBXDBC(PTDBXDBC tdbp) : TDBODBC(tdbp) } // end of TDBXDBC copy constructor PTDB TDBXDBC::Clone(PTABS t) - { +{ PTDB tp; PXSRCCOL cp1, cp2; PGLOBAL g = t->G; // Is this really useful ??? @@ -1129,29 +1130,29 @@ PTDB TDBXDBC::Clone(PTABS t) for (cp1 = (PXSRCCOL)Columns; cp1; cp1 = (PXSRCCOL)cp1->GetNext()) { cp2 = new(g) XSRCCOL(cp1, tp); // Make a copy NewPointer(t, cp1, cp2); - } // endfor cp1 + } // endfor cp1 return tp; - } // end of CopyOne +} // end of CopyOne /***********************************************************************/ /* Allocate XSRC column description block. */ /***********************************************************************/ PCOL TDBXDBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) - { +{ PXSRCCOL colp = new(g) XSRCCOL(cdp, this, cprec, n); if (!colp->Flag) Cmdcol = colp->GetName(); return colp; - } // end of MakeCol +} // end of MakeCol /***********************************************************************/ /* MakeCMD: make the SQL statement to send to ODBC connection. 
*/ /***********************************************************************/ PCMD TDBXDBC::MakeCMD(PGLOBAL g) - { +{ PCMD xcmd = NULL; if (To_CondFil) { @@ -1171,14 +1172,14 @@ PCMD TDBXDBC::MakeCMD(PGLOBAL g) xcmd = new(g) CMD(g, Srcdef); return xcmd; - } // end of MakeCMD +} // end of MakeCMD #if 0 /***********************************************************************/ /* ODBC Bind Parameter function. */ /***********************************************************************/ bool TDBXDBC::BindParameters(PGLOBAL g) - { +{ PODBCCOL colp; for (colp = (PODBCCOL)Columns; colp; colp = (PODBCCOL)colp->Next) { @@ -1190,19 +1191,19 @@ bool TDBXDBC::BindParameters(PGLOBAL g) } // endfor colp return false; - } // end of BindParameters +} // end of BindParameters #endif // 0 /***********************************************************************/ /* XDBC GetMaxSize: returns table size (not always one row). */ /***********************************************************************/ int TDBXDBC::GetMaxSize(PGLOBAL g) - { +{ if (MaxSize < 0) MaxSize = 10; // Just a guess return MaxSize; - } // end of GetMaxSize +} // end of GetMaxSize /***********************************************************************/ /* ODBC Access Method opening routine. */ @@ -1211,7 +1212,7 @@ int TDBXDBC::GetMaxSize(PGLOBAL g) /* join block of next table if it exists or else are discarted. */ /***********************************************************************/ bool TDBXDBC::OpenDB(PGLOBAL g) - { +{ bool rc = false; if (trace(1)) @@ -1221,7 +1222,7 @@ bool TDBXDBC::OpenDB(PGLOBAL g) if (Use == USE_OPEN) { strcpy(g->Message, "Multiple execution is not allowed"); return true; - } // endif use + } // endif use /*********************************************************************/ /* Open an ODBC connection for this table. */ @@ -1243,7 +1244,7 @@ bool TDBXDBC::OpenDB(PGLOBAL g) if (Mode != MODE_READ && Mode != MODE_READX) { strcpy(g->Message, "No INSERT/DELETE/UPDATE of XDBC tables"); return true; - } // endif Mode + } // endif Mode /*********************************************************************/ /* Get the command to execute. */ @@ -1256,13 +1257,13 @@ bool TDBXDBC::OpenDB(PGLOBAL g) Rows = 1; return false; - } // end of OpenDB +} // end of OpenDB /***********************************************************************/ /* ReadDB: Data Base read routine for xdbc access method. */ /***********************************************************************/ int TDBXDBC::ReadDB(PGLOBAL g) - { +{ if (Cmdlist) { if (!Query) Query = new(g)STRING(g, 0, Cmdlist->Cmd); @@ -1280,25 +1281,25 @@ int TDBXDBC::ReadDB(PGLOBAL g) return RC_EF; } // endif Cmdlist - } // end of ReadDB +} // end of ReadDB /***********************************************************************/ -/* Data Base delete line routine for ODBC access method. */ +/* Data Base write line routine for XDBC access method. */ /***********************************************************************/ int TDBXDBC::WriteDB(PGLOBAL g) - { +{ strcpy(g->Message, "Execsrc tables are read only"); return RC_FX; - } // end of DeleteDB +} // end of DeleteDB /***********************************************************************/ -/* Data Base delete line routine for ODBC access method. */ +/* Data Base delete line routine for XDBC access method. 
*/ /***********************************************************************/ int TDBXDBC::DeleteDB(PGLOBAL g, int irc) - { +{ strcpy(g->Message, MSG(NO_ODBC_DELETE)); return RC_FX; - } // end of DeleteDB +} // end of DeleteDB /* --------------------------- XSRCCOL ------------------------------- */ @@ -1307,25 +1308,25 @@ int TDBXDBC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : ODBCCOL(cdp, tdbp, cprec, i, am) - { +{ // Set additional ODBC access method information for column. Flag = cdp->GetOffset(); - } // end of XSRCCOL constructor +} // end of XSRCCOL constructor /***********************************************************************/ /* XSRCCOL constructor used for copying columns. */ /* tdbp is the pointer to the new table descriptor. */ /***********************************************************************/ XSRCCOL::XSRCCOL(XSRCCOL *col1, PTDB tdbp) : ODBCCOL(col1, tdbp) - { +{ Flag = col1->Flag; - } // end of XSRCCOL copy constructor +} // end of XSRCCOL copy constructor /***********************************************************************/ /* ReadColumn: set column value according to Flag. */ /***********************************************************************/ void XSRCCOL::ReadColumn(PGLOBAL g) - { +{ PTDBXDBC tdbp = (PTDBXDBC)To_Tdb; switch (Flag) { @@ -1335,15 +1336,15 @@ void XSRCCOL::ReadColumn(PGLOBAL g) default: Value->SetValue_psz("Invalid Flag"); break; } // endswitch Flag - } // end of ReadColumn +} // end of ReadColumn /***********************************************************************/ /* WriteColumn: Should never be called. */ /***********************************************************************/ void XSRCCOL::WriteColumn(PGLOBAL g) - { +{ // Should never be called - } // end of WriteColumn +} // end of WriteColumn /* ---------------------------TDBDRV class --------------------------- */ @@ -1351,9 +1352,9 @@ void XSRCCOL::WriteColumn(PGLOBAL g) /* GetResult: Get the list of ODBC drivers. */ /***********************************************************************/ PQRYRES TDBDRV::GetResult(PGLOBAL g) - { +{ return ODBCDrivers(g, Maxres, false); - } // end of GetResult +} // end of GetResult /* ---------------------------TDBSRC class --------------------------- */ @@ -1361,9 +1362,9 @@ PQRYRES TDBDRV::GetResult(PGLOBAL g) /* GetResult: Get the list of ODBC data sources. */ /***********************************************************************/ PQRYRES TDBSRC::GetResult(PGLOBAL g) - { +{ return ODBCDataSources(g, Maxres, false); - } // end of GetResult +} // end of GetResult /* ---------------------------TDBOTB class --------------------------- */ @@ -1371,7 +1372,7 @@ PQRYRES TDBSRC::GetResult(PGLOBAL g) /* TDBOTB class constructor. */ /***********************************************************************/ TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp) - { +{ Dsn = tdp->GetConnect(); Schema = tdp->GetTabschema(); Tab = tdp->GetTabname(); @@ -1381,15 +1382,15 @@ TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp) Ops.Cto = tdp->Cto; Ops.Qto = tdp->Qto; Ops.UseCnc = tdp->UseCnc; - } // end of TDBOTB constructor +} // end of TDBOTB constructor /***********************************************************************/ /* GetResult: Get the list of ODBC tables. 
*/ /***********************************************************************/ PQRYRES TDBOTB::GetResult(PGLOBAL g) - { +{ return ODBCTables(g, Dsn, Schema, Tab, Tabtyp, Maxres, false, &Ops); - } // end of GetResult +} // end of GetResult /* ---------------------------TDBOCL class --------------------------- */ @@ -1405,8 +1406,8 @@ TDBOCL::TDBOCL(PODEF tdp) : TDBOTB(tdp) /* GetResult: Get the list of ODBC table columns. */ /***********************************************************************/ PQRYRES TDBOCL::GetResult(PGLOBAL g) - { +{ return ODBCColumns(g, Dsn, Schema, Tab, Colpat, Maxres, false, &Ops); - } // end of GetResult +} // end of GetResult /* ------------------------ End of Tabodbc --------------------------- */ diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index c96e0844497..d808bd5ecd4 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -163,8 +163,11 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) return NULL; tdp->Tabname = tab; + tdp->Tabname = (char*)GetStringTableOption(g, topt, "Tabname", tab); + tdp->Rowname = (char*)GetStringTableOption(g, topt, "Rownode", NULL); tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); + tdp->Skip = GetBooleanTableOption(g, topt, "Skipnull", false); if (!(op = GetStringTableOption(g, topt, "Xmlsup", NULL))) #if defined(__WIN__) @@ -280,7 +283,9 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) if (!vp->atp) node = vp->nl->GetItem(g, vp->k++, tdp->Usedom ? node : NULL); - strncat(fmt, colname, XLEN(fmt)); + if (!j) + strncat(fmt, colname, XLEN(fmt)); + strncat(fmt, "/", XLEN(fmt)); strncat(xcol->Name, "_", XLEN(xcol->Name)); j++; @@ -302,6 +307,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) case RC_INFO: PushWarning(g, txmp); case RC_OK: + xcol->Cbn = !strlen(buf); break; default: goto err; @@ -327,9 +333,9 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) xcp->Len = MY_MAX(xcp->Len, xcol->Len); xcp->Scale = MY_MAX(xcp->Scale, xcol->Scale); - xcp->Cbn |= xcol->Cbn; + xcp->Cbn |= (xcol->Cbn || !xcol->Len); xcp->Found = true; - } else { + } else if(xcol->Len || !tdp->Skip) { // New column xcp = new(g) XMCOL(g, xcol, fmt, i); length[0] = MY_MAX(length[0], strlen(xcol->Name)); @@ -344,7 +350,8 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) n++; } // endif xcp - pxcp = xcp; + if (xcp) + pxcp = xcp; if (vp->atp) vp->atp = vp->atp->GetNext(g); @@ -445,6 +452,7 @@ XMLDEF::XMLDEF(void) Usedom = false; Zipped = false; Mulentries = false; + Skip = false; } // end of XMLDEF constructor /***********************************************************************/ @@ -681,6 +689,14 @@ PTDB TDBXML::Clone(PTABS t) return tp; } // end of Clone +/***********************************************************************/ +/* Must not be in tabxml.h because of OEM tables */ +/***********************************************************************/ +const CHARSET_INFO *TDBXML::data_charset() +{ + return &my_charset_utf8_general_ci; +} // end of data_charset + /***********************************************************************/ /* Allocate XML column description block. 
*/ /***********************************************************************/ @@ -806,127 +822,141 @@ bool TDBXML::Initialize(PGLOBAL g) } // endif Bufdone #if !defined(UNIX) - if (!Root) try { + if (!Root) try { #else - if (!Root) { + if (!Root) { #endif - char tabpath[64], filename[_MAX_PATH]; + char tabpath[64], filename[_MAX_PATH]; - // We used the file name relative to recorded datapath - PlugSetPath(filename, Xfile, GetPath()); + // We used the file name relative to recorded datapath + PlugSetPath(filename, Xfile, GetPath()); - // Load or re-use the table file - rc = LoadTableFile(g, filename); + // Load or re-use the table file + rc = LoadTableFile(g, filename); - if (rc == RC_OK) { - // Get root node - if (!(Root = Docp->GetRoot(g))) { - // This should never happen as load should have failed - strcpy(g->Message, MSG(EMPTY_DOC)); - goto error; - } // endif Root + if (rc == RC_OK) { + // Get root node + if (!(Root = Docp->GetRoot(g))) { + // This should never happen as load should have failed + strcpy(g->Message, MSG(EMPTY_DOC)); + goto error; + } // endif Root - // If tabname is not an Xpath, - // construct one that will find it anywhere - if (!strchr(Tabname, '/')) - strcat(strcpy(tabpath, "//"), Tabname); - else - strcpy(tabpath, Tabname); + // If tabname is not an Xpath, + // construct one that will find it anywhere + if (!strchr(Tabname, '/')) + strcat(strcpy(tabpath, "//"), Tabname); + else + strcpy(tabpath, Tabname); - // Evaluate table xpath - if ((TabNode = Root->SelectSingleNode(g, tabpath))) { - if (TabNode->GetType() != XML_ELEMENT_NODE) { - sprintf(g->Message, MSG(BAD_NODE_TYPE), TabNode->GetType()); - goto error; - } // endif Type + // Evaluate table xpath + if ((TabNode = Root->SelectSingleNode(g, tabpath))) { + if (TabNode->GetType() != XML_ELEMENT_NODE) { + sprintf(g->Message, MSG(BAD_NODE_TYPE), TabNode->GetType()); + goto error; + } // endif Type - } else if (Mode == MODE_INSERT && XmlDB) { - // We are adding a new table to a multi-table file + } else if (Mode == MODE_INSERT && XmlDB) { + // We are adding a new table to a multi-table file - // If XmlDB is not an Xpath, - // construct one that will find it anywhere - if (!strchr(XmlDB, '/')) - strcat(strcpy(tabpath, "//"), XmlDB); - else - strcpy(tabpath, XmlDB); + // If XmlDB is not an Xpath, + // construct one that will find it anywhere + if (!strchr(XmlDB, '/')) + strcat(strcpy(tabpath, "//"), XmlDB); + else + strcpy(tabpath, XmlDB); - if (!(DBnode = Root->SelectSingleNode(g, tabpath))) { - // DB node does not exist yet; we cannot create it - // because we don't know where it should be placed - sprintf(g->Message, MSG(MISSING_NODE), XmlDB, Xfile); - goto error; - } // endif DBnode + if (!(DBnode = Root->SelectSingleNode(g, tabpath))) { + // DB node does not exist yet; we cannot create it + // because we don't know where it should be placed + sprintf(g->Message, MSG(MISSING_NODE), XmlDB, Xfile); + goto error; + } // endif DBnode - if (!(TabNode = DBnode->AddChildNode(g, Tabname))) { - sprintf(g->Message, MSG(FAIL_ADD_NODE), Tabname); - goto error; - } // endif TabNode + if (!(TabNode = DBnode->AddChildNode(g, Tabname))) { + sprintf(g->Message, MSG(FAIL_ADD_NODE), Tabname); + goto error; + } // endif TabNode - DBnode->AddText(g, "\n"); - } else - TabNode = Root; // Try this ? + DBnode->AddText(g, "\n"); + } else { + TabNode = Root; // Try this ? 
+ Tabname = TabNode->GetName(g); + } // endif's - } else if (rc == RC_NF || rc == RC_EF) { - // The XML file does not exist or is void - if (Mode == MODE_INSERT) { - // New Document - char buf[64]; + } else if (rc == RC_NF || rc == RC_EF) { + // The XML file does not exist or is void + if (Mode == MODE_INSERT) { + // New Document + char buf[64]; - // Create the XML node - if (Docp->NewDoc(g, "1.0")) { - strcpy(g->Message, MSG(NEW_DOC_FAILED)); - goto error; - } // endif NewDoc + // Create the XML node + if (Docp->NewDoc(g, "1.0")) { + strcpy(g->Message, MSG(NEW_DOC_FAILED)); + goto error; + } // endif NewDoc - // Now we can link the Xblock - To_Xb = Docp->LinkXblock(g, Mode, rc, filename); + // Now we can link the Xblock + To_Xb = Docp->LinkXblock(g, Mode, rc, filename); - // Add a CONNECT comment node - strcpy(buf, " Created by the MariaDB CONNECT Storage Engine"); - Docp->AddComment(g, buf); + // Add a CONNECT comment node + strcpy(buf, " Created by the MariaDB CONNECT Storage Engine"); + Docp->AddComment(g, buf); - if (XmlDB) { - // This is a multi-table file - DBnode = Root = Docp->NewRoot(g, XmlDB); - DBnode->AddText(g, "\n"); - TabNode = DBnode->AddChildNode(g, Tabname); - DBnode->AddText(g, "\n"); - } else - TabNode = Root = Docp->NewRoot(g, Tabname); + if (XmlDB) { + // This is a multi-table file + DBnode = Root = Docp->NewRoot(g, XmlDB); + DBnode->AddText(g, "\n"); + TabNode = DBnode->AddChildNode(g, Tabname); + DBnode->AddText(g, "\n"); + } else + TabNode = Root = Docp->NewRoot(g, Tabname); - if (TabNode == NULL || Root == NULL) { - strcpy(g->Message, MSG(XML_INIT_ERROR)); - goto error; - } else if (SetTabNode(g)) - goto error; + if (TabNode == NULL || Root == NULL) { + strcpy(g->Message, MSG(XML_INIT_ERROR)); + goto error; + } else if (SetTabNode(g)) + goto error; - } else { - sprintf(g->Message, MSG(FILE_UNFOUND), Xfile); + } else { + sprintf(g->Message, MSG(FILE_UNFOUND), Xfile); - if (Mode == MODE_READ) { - PushWarning(g, this); - Void = true; - } // endif Mode + if (Mode == MODE_READ) { + PushWarning(g, this); + Void = true; + } // endif Mode - goto error; - } // endif Mode + goto error; + } // endif Mode - } else if (rc == RC_INFO) { - // Loading failed - sprintf(g->Message, MSG(LOADING_FAILED), Xfile); - goto error; - } else // (rc == RC_FX) - goto error; + } else if (rc == RC_INFO) { + // Loading failed + sprintf(g->Message, MSG(LOADING_FAILED), Xfile); + goto error; + } else // (rc == RC_FX) + goto error; - // Get row node list - if (Rowname) - Nlist = TabNode->SelectNodes(g, Rowname); - else - Nlist = TabNode->GetChildElements(g); + if (!Rowname) { + for (PXNODE n = TabNode->GetChild(g); n; n = n->GetNext(g)) + if (n->GetType() == XML_ELEMENT_NODE) { + Rowname = n->GetName(g); + break; + } // endif Type - Docp->SetNofree(true); // For libxml2 + if (!Rowname) + Rowname = TabNode->GetName(g); + } // endif Rowname + + // Get row node list + if (strcmp(Rowname, Tabname)) + Nlist = TabNode->SelectNodes(g, Rowname); + else + Nrow = 1; + + + Docp->SetNofree(true); // For libxml2 #if defined(__WIN__) - } catch(_com_error e) { + } catch (_com_error e) { // We come here if a DOM command threw an error char buf[128]; @@ -1213,10 +1243,14 @@ int TDBXML::ReadDB(PGLOBAL g) htrc("TDBXML ReadDB: Irow=%d RowNode=%p\n", Irow, RowNode); // Get the new row node - if ((RowNode = Nlist->GetItem(g, Irow, RowNode)) == NULL) { - sprintf(g->Message, MSG(MISSING_ROWNODE), Irow); - return RC_FX; - } // endif RowNode + if (Nlist) { + if ((RowNode = Nlist->GetItem(g, Irow, RowNode)) == NULL) { + 
sprintf(g->Message, MSG(MISSING_ROWNODE), Irow); + return RC_FX; + } // endif RowNode + + } else + RowNode = TabNode; if (Colname && Coltype == 2) Clist = RowNode->SelectNodes(g, Colname, Clist); @@ -1271,6 +1305,7 @@ int TDBXML::WriteDB(PGLOBAL g) /***********************************************************************/ int TDBXML::DeleteDB(PGLOBAL g, int irc) { + // TODO: Handle null Nlist if (irc == RC_FX) { // Delete all rows for (Irow = 0; Irow < Nrow; Irow++) @@ -2209,8 +2244,9 @@ void XPOSCOL::WriteColumn(PGLOBAL g) TDBXCT::TDBXCT(PXMLDEF tdp) : TDBCAT(tdp) { Topt = tdp->GetTopt(); - Db = (char*)tdp->GetDB(); - Tabn = tdp->Tabname; + //Db = (char*)tdp->GetDB(); + Db = (char*)tdp->Schema; + Tabn = tdp->Tabname; } // end of TDBXCT constructor /***********************************************************************/ diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h index f55b7d98de7..fb3913f08ea 100644 --- a/storage/connect/tabxml.h +++ b/storage/connect/tabxml.h @@ -9,6 +9,8 @@ typedef class XMLDEF *PXMLDEF; typedef class TDBXML *PTDBXML; typedef class XMLCOL *PXMLCOL; +DllExport PQRYRES XMLColumns(PGLOBAL, char *, char *, PTOS, bool); + /* --------------------------- XML classes --------------------------- */ /***********************************************************************/ @@ -50,6 +52,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */ bool Usedom; /* True: DOM, False: libxml2 */ bool Zipped; /* True: Zipped XML file(s) */ bool Mulentries; /* True: multiple entries in zip file*/ + bool Skip; /* Skip null columns */ }; // end of XMLDEF #if defined(INCLUDE_TDBXML) @@ -100,8 +103,7 @@ class DllExport TDBXML : public TDBASE { virtual int DeleteDB(PGLOBAL g, int irc); virtual void CloseDB(PGLOBAL g); virtual int CheckWrite(PGLOBAL g) {Checked = true; return 0;} - virtual const CHARSET_INFO *data_charset() - {return &my_charset_utf8_general_ci;} + virtual const CHARSET_INFO *data_charset(); protected: // Members diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index e2d3b664aeb..a2a8faf9b38 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -107,7 +107,7 @@ bool user_connect::user_init() g= PlugInit(NULL, worksize); // Check whether the initialization is complete - if (!g || !g->Sarea || PlugSubSet(g, g->Sarea, g->Sarea_Size) + if (!g || !g->Sarea || PlugSubSet(g->Sarea, g->Sarea_Size) || !(dup= PlgMakeUser(g))) { if (g) printf("%s\n", g->Message); @@ -172,7 +172,7 @@ bool user_connect::CheckCleanup(bool force) } // endif worksize - PlugSubSet(g, g->Sarea, g->Sarea_Size); + PlugSubSet(g->Sarea, g->Sarea_Size); g->Xchk = NULL; g->Createas = 0; g->Alchecked = 0; From 21f9037186f8a4bfb45486b9c28dd146e9df0e00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Thu, 24 Jan 2019 00:58:20 +0200 Subject: [PATCH 23/37] MDEV-18360 Prevent set_max_open_files from allocating too many files If the rlimit.rlim_cur value returned by getrlimit is not the RLIM_INFINITY magic constant, but a *very* large number, we can allocate too many open files. Restrict set_max_open_files to only return at most max_file_limit, as passed via its parameter. 
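A minimal standalone sketch of the clamping rule described above, for illustration only: the helper name clamp_open_files and the main() driver are hypothetical, while the actual change is to the static set_max_open_files() in mysys/my_file.c shown in the hunk below.

#include <stdio.h>
#include <sys/resource.h>

/* Return at most requested_max open files, even when the soft limit
   reported by getrlimit() is a very large but finite number. */
static unsigned clamp_open_files(unsigned requested_max)
{
  struct rlimit rl;
  unsigned old_cur;

  if (getrlimit(RLIMIT_NOFILE, &rl))
    return requested_max;                     /* cannot query: trust the caller */
  old_cur = (unsigned) rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY ||         /* unlimited soft limit, or ...   */
      rl.rlim_cur >= requested_max)           /* ... already large enough:      */
    return requested_max;                     /* never report more than asked   */
  rl.rlim_cur = rl.rlim_max = requested_max;  /* otherwise try to raise it      */
  if (setrlimit(RLIMIT_NOFILE, &rl))
    return old_cur;                           /* raise failed: keep old value   */
  return requested_max;
}

int main(void)
{
  printf("max open files: %u\n", clamp_open_files(1024));
  return 0;
}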
--- mysys/my_file.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mysys/my_file.c b/mysys/my_file.c index 8d01285a94b..b3aef8494cb 100644 --- a/mysys/my_file.c +++ b/mysys/my_file.c @@ -52,10 +52,9 @@ static uint set_max_open_files(uint max_file_limit) DBUG_PRINT("info", ("rlim_cur: %u rlim_max: %u", (uint) rlimit.rlim_cur, (uint) rlimit.rlim_max)); - if ((ulonglong) rlimit.rlim_cur == (ulonglong) RLIM_INFINITY) - rlimit.rlim_cur = max_file_limit; - if (rlimit.rlim_cur >= max_file_limit) - DBUG_RETURN(rlimit.rlim_cur); /* purecov: inspected */ + if ((ulonglong) rlimit.rlim_cur == (ulonglong) RLIM_INFINITY || + rlimit.rlim_cur >= max_file_limit) + DBUG_RETURN(max_file_limit); rlimit.rlim_cur= rlimit.rlim_max= max_file_limit; if (setrlimit(RLIMIT_NOFILE, &rlimit)) max_file_limit= old_cur; /* Use original value */ From 2175bfce3e9da8332f10ab0e0286dc93915533a2 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 25 Jan 2019 23:12:35 +0100 Subject: [PATCH 24/37] Crude "auto-load-data-local-infile" mode Disable LOAD DATA LOCAL INFILE suport by default and auto-enable it for the duration of one query, if the query string starts with the word "load". In all other cases the application should enable LOAD DATA LOCAL INFILE support explicitly. --- CMakeLists.txt | 10 +++++-- client/mysqltest.cc | 2 -- .../build_configurations/mysql_release.cmake | 1 - config.h.cmake | 6 +++- include/mysql.h | 2 +- include/mysql.h.pp | 2 +- mysql-test/r/mysql.result | 26 ++++++++++++++++ mysql-test/t/mysql.test | 22 ++++++++++++++ sql-common/client.c | 30 +++++++++++++++++-- 9 files changed, 90 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b15512683fa..bc4c4e4f103 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -256,9 +256,15 @@ IF(HAVE_GGDB3) SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -ggdb3") ENDIF() -OPTION(ENABLED_LOCAL_INFILE - "If we should should enable LOAD DATA LOCAL by default" ${IF_WIN}) +SET(ENABLED_LOCAL_INFILE "AUTO" CACHE STRING "If we should should enable LOAD DATA LOCAL by default (OFF/ON/AUTO)") MARK_AS_ADVANCED(ENABLED_LOCAL_INFILE) +IF (ENABLED_LOCAL_INFILE MATCHES "^(0|FALSE)$") + SET(ENABLED_LOCAL_INFILE OFF) +ELSEIF(ENABLED_LOCAL_INFILE MATCHES "^(1|TRUE)$") + SET(ENABLED_LOCAL_INFILE ON) +ELSEIF (NOT ENABLED_LOCAL_INFILE MATCHES "^(ON|OFF|AUTO)$") + MESSAGE(FATAL_ERROR "ENABLED_LOCAL_INFILE must be one of OFF, ON, AUTO") +ENDIF() OPTION(WITH_FAST_MUTEXES "Compile with fast mutexes" OFF) MARK_AS_ADVANCED(WITH_FAST_MUTEXES) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 2b7401878ef..842bde3b99e 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -6045,7 +6045,6 @@ void do_connect(struct st_command *command) #endif if (opt_compress || con_compress) mysql_options(con_slot->mysql, MYSQL_OPT_COMPRESS, NullS); - mysql_options(con_slot->mysql, MYSQL_OPT_LOCAL_INFILE, 0); mysql_options(con_slot->mysql, MYSQL_SET_CHARSET_NAME, charset_info->csname); if (opt_charsets_dir) @@ -9110,7 +9109,6 @@ int main(int argc, char **argv) (void *) &opt_connect_timeout); if (opt_compress) mysql_options(con->mysql,MYSQL_OPT_COMPRESS,NullS); - mysql_options(con->mysql, MYSQL_OPT_LOCAL_INFILE, 0); mysql_options(con->mysql, MYSQL_SET_CHARSET_NAME, charset_info->csname); if (opt_charsets_dir) diff --git a/cmake/build_configurations/mysql_release.cmake b/cmake/build_configurations/mysql_release.cmake index 39ad0e68d35..a464631e9a2 100644 --- a/cmake/build_configurations/mysql_release.cmake +++ 
b/cmake/build_configurations/mysql_release.cmake @@ -94,7 +94,6 @@ IF(FEATURE_SET) ENDFOREACH() ENDIF() -OPTION(ENABLED_LOCAL_INFILE "" ON) IF(RPM) SET(WITH_SSL system CACHE STRING "") SET(WITH_ZLIB system CACHE STRING "") diff --git a/config.h.cmake b/config.h.cmake index 99a2ebdd093..271d77f2e5a 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -530,7 +530,11 @@ /* MySQL features */ -#cmakedefine ENABLED_LOCAL_INFILE 1 +#define LOCAL_INFILE_MODE_OFF 0 +#define LOCAL_INFILE_MODE_ON 1 +#define LOCAL_INFILE_MODE_AUTO 2 +#define ENABLED_LOCAL_INFILE LOCAL_INFILE_MODE_@ENABLED_LOCAL_INFILE@ + #cmakedefine ENABLED_PROFILING 1 #cmakedefine EXTRA_DEBUG 1 #cmakedefine BACKUP_TEST 1 diff --git a/include/mysql.h b/include/mysql.h index 2f205ec6463..1ed6ffe67c8 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -274,7 +274,7 @@ typedef struct st_mysql /* session-wide random string */ char scramble[SCRAMBLE_LENGTH+1]; - my_bool unused1; + my_bool auto_local_infile; void *unused2, *unused3, *unused4, *unused5; LIST *stmts; /* list of all statements */ diff --git a/include/mysql.h.pp b/include/mysql.h.pp index 4f7407095c9..8bff18d7bb0 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -341,7 +341,7 @@ typedef struct st_mysql my_bool free_me; my_bool reconnect; char scramble[20 +1]; - my_bool unused1; + my_bool auto_local_infile; void *unused2, *unused3, *unused4, *unused5; LIST *stmts; const struct st_mysql_methods *methods; diff --git a/mysql-test/r/mysql.result b/mysql-test/r/mysql.result index 8a24128daa2..ffa5d020153 100644 --- a/mysql-test/r/mysql.result +++ b/mysql-test/r/mysql.result @@ -587,3 +587,29 @@ a 2 drop table "a1\""b1"; set sql_mode=default; +create table t1 (a text); +select count(*) from t1; +count(*) +41 +truncate table t1; +select count(*) from t1; +count(*) +41 +truncate table t1; +select count(*) from t1; +count(*) +0 +truncate table t1; +select count(*) from t1; +count(*) +0 +truncate table t1; +select count(*) from t1; +count(*) +41 +truncate table t1; +select count(*) from t1; +count(*) +0 +truncate table t1; +drop table t1; diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test index 87756768c7f..f526a3fea91 100644 --- a/mysql-test/t/mysql.test +++ b/mysql-test/t/mysql.test @@ -656,3 +656,25 @@ show create table "a1\""b1"; select * from "a1\""b1"; drop table "a1\""b1"; set sql_mode=default; + +# +# mysql --local-infile +# +--let $ldli = load data local infile '$MYSQLTEST_VARDIR/tmp/bug.sql' into table test.t1; +create table t1 (a text); +--exec $MYSQL -e "$ldli" +select count(*) from t1; truncate table t1; +--exec $MYSQL --enable-local-infile -e "$ldli" +select count(*) from t1; truncate table t1; +--error 1 +--exec $MYSQL --disable-local-infile -e "$ldli" +select count(*) from t1; truncate table t1; +--error 1 +--exec $MYSQL -e "/*q*/$ldli" +select count(*) from t1; truncate table t1; +--exec $MYSQL --enable-local-infile -e "/*q*/$ldli" +select count(*) from t1; truncate table t1; +--error 1 +--exec $MYSQL --disable-local-infile -e "/*q*/$ldli" +select count(*) from t1; truncate table t1; +drop table t1; diff --git a/sql-common/client.c b/sql-common/client.c index 952b6a199ee..bec778e7d51 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -115,6 +115,12 @@ my_bool net_flush(NET *net); #include #include +typedef enum { + ALWAYS_ACCEPT, /* heuristics is disabled, use CLIENT_LOCAL_FILES */ + WAIT_FOR_QUERY, /* heuristics is enabled, not sending files */ + ACCEPT_FILE_REQUEST /* heuristics is enabled, ready to send a file */ +} 
auto_local_infile_state; + #define native_password_plugin_name "mysql_native_password" #define old_password_plugin_name "mysql_old_password" @@ -1765,8 +1771,10 @@ mysql_init(MYSQL *mysql) --enable-local-infile */ -#if defined(ENABLED_LOCAL_INFILE) && !defined(MYSQL_SERVER) +#if ENABLED_LOCAL_INFILE && !defined(MYSQL_SERVER) mysql->options.client_flag|= CLIENT_LOCAL_FILES; + mysql->auto_local_infile= ENABLED_LOCAL_INFILE == LOCAL_INFILE_MODE_AUTO + ? WAIT_FOR_QUERY : ALWAYS_ACCEPT; #endif #ifdef HAVE_SMEM @@ -3951,8 +3959,14 @@ static my_bool cli_read_query_result(MYSQL *mysql) ulong field_count; MYSQL_DATA *fields; ulong length; +#ifdef MYSQL_CLIENT + my_bool can_local_infile= mysql->auto_local_infile != WAIT_FOR_QUERY; +#endif DBUG_ENTER("cli_read_query_result"); + if (mysql->auto_local_infile == ACCEPT_FILE_REQUEST) + mysql->auto_local_infile= WAIT_FOR_QUERY; + if ((length = cli_safe_read(mysql)) == packet_error) DBUG_RETURN(1); free_old_query(mysql); /* Free old result */ @@ -3989,7 +4003,8 @@ get_info: { int error; - if (!(mysql->options.client_flag & CLIENT_LOCAL_FILES)) + if (!(mysql->options.client_flag & CLIENT_LOCAL_FILES) || + !can_local_infile) { set_mysql_error(mysql, CR_MALFORMED_PACKET, unknown_sqlstate); DBUG_RETURN(1); @@ -4027,6 +4042,13 @@ int STDCALL mysql_send_query(MYSQL* mysql, const char* query, ulong length) { DBUG_ENTER("mysql_send_query"); + if (mysql->options.client_flag & CLIENT_LOCAL_FILES && + mysql->auto_local_infile == WAIT_FOR_QUERY && + (*query == 'l' || *query == 'L')) + { + if (strncasecmp(query, STRING_WITH_LEN("load")) == 0) + mysql->auto_local_infile= ACCEPT_FILE_REQUEST; + } DBUG_RETURN(simple_command(mysql, COM_QUERY, (uchar*) query, length, 1)); } @@ -4241,10 +4263,12 @@ mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg) mysql->options.protocol=MYSQL_PROTOCOL_PIPE; /* Force named pipe */ break; case MYSQL_OPT_LOCAL_INFILE: /* Allow LOAD DATA LOCAL ?*/ - if (!arg || test(*(uint*) arg)) + if (!arg || *(uint*) arg) mysql->options.client_flag|= CLIENT_LOCAL_FILES; else mysql->options.client_flag&= ~CLIENT_LOCAL_FILES; + mysql->auto_local_infile= arg && *(uint*)arg == LOCAL_INFILE_MODE_AUTO + ? WAIT_FOR_QUERY : ALWAYS_ACCEPT; break; case MYSQL_INIT_COMMAND: add_init_command(&mysql->options,arg); From 94b68b35f46918fb1e145cad6387c9fcd045c392 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Mon, 28 Jan 2019 15:39:27 +0100 Subject: [PATCH 25/37] Reverting part of da34c7de5dacac85c4dc1f714bcd7edf3b7fe5f9 that was already fixed by MDEV-17531 by Marko --- storage/innobase/row/row0mysql.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 68329658618..f623845f289 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -5129,9 +5129,6 @@ row_rename_table_for_mysql( " = TO_BINARY(:old_table_name);\n" "END;\n" , FALSE, trx); - if (err != DB_SUCCESS) { - goto end; - } } else if (n_constraints_to_drop > 0) { /* Drop some constraints of tmp tables. 
*/ From 724b09d5e726be612be6154a900bd907897d4124 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Mon, 28 Jan 2019 15:42:16 +0100 Subject: [PATCH 26/37] Version fix after merge --- storage/innobase/include/univ.i | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 5027b9cab5e..f1bf416b370 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -45,7 +45,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 42 +#define INNODB_VERSION_BUGFIX 43 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; From 8c2f3e0c16a4b9c2961a474f399b88be5ec330d1 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Mon, 28 Jan 2019 20:17:54 +0100 Subject: [PATCH 27/37] Fix detection of version in tokudb --- storage/tokudb/PerconaFT/portability/toku_instr_mysql.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h index 695624acd6d..beb833a163c 100644 --- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h +++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h @@ -16,7 +16,7 @@ #include "mysql/psi/mysql_thread.h" // PSI_mutex #include "mysql/psi/mysql_stage.h" // PSI_stage -#if (MYSQL_VERSION_ID >= 80000) +#if (MYSQL_VERSION_ID >= 80000) && ( MYSQL_VERSION_ID <= 100000) #include "mysql/psi/mysql_cond.h" #include "mysql/psi/mysql_mutex.h" #include "mysql/psi/mysql_rwlock.h" From eff71f39ddc117d09da5465f7ea9fe007ed89009 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 28 Jan 2019 11:51:12 +0100 Subject: [PATCH 28/37] disable an old test @@open_files_limit now behaves differenly and cannot be used to skip the test anymore. 
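For reference, the mysys/my_file.c hunk at the start of this section (which appears to be the MDEV-18360 change that the new disabled.def entry below refers to) is what altered the reported limit: set_max_open_files() now behaves roughly like the simplified sketch here. This is an illustration only, with DBUG instrumentation and the callers' plumbing omitted.

  #include <sys/resource.h>

  /* Sketch of set_max_open_files() after the change: an unlimited
     (RLIM_INFINITY) or already-sufficient soft limit now makes the
     function report the requested value instead of the raw rlim_cur,
     which is why @@open_files_limit can no longer be used to skip
     the partition_open_files_limit test. */
  static unsigned int set_max_open_files_sketch(unsigned int max_file_limit)
  {
    struct rlimit rlimit;

    if (getrlimit(RLIMIT_NOFILE, &rlimit) != 0)
      return max_file_limit;                      /* cannot query the limit */

    unsigned int old_cur = (unsigned int) rlimit.rlim_cur;

    if ((unsigned long long) rlimit.rlim_cur == (unsigned long long) RLIM_INFINITY ||
        rlimit.rlim_cur >= max_file_limit)
      return max_file_limit;

    rlimit.rlim_cur = rlimit.rlim_max = max_file_limit;
    if (setrlimit(RLIMIT_NOFILE, &rlimit) != 0)
      max_file_limit = old_cur;                   /* raising failed: keep original */

    return max_file_limit;
  }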
--- mysql-test/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/disabled.def b/mysql-test/disabled.def index c7fa62ff7dd..d4856f0b7aa 100644 --- a/mysql-test/disabled.def +++ b/mysql-test/disabled.def @@ -15,3 +15,4 @@ read_many_rows_innodb : Bug#11748886 2010-11-15 mattiasj report already exist mysql_embedded : Bug#12561297 2011-05-14 Anitha Dependent on PB2 changes - eventum#41836 file_contents : MDEV-6526 these files are not installed anymore lowercase_fs_on : lower_case_table_names=0 is not an error until 10.1 +partition_open_files_limit : open_files_limit check broken by MDEV-18360 From c991939bab7677b0af2ac3fd8c504d858e44e8dd Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Tue, 29 Jan 2019 09:34:08 +0100 Subject: [PATCH 29/37] MariaDB detect incorrect table name --- storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result | 2 +- storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result index f0d2f93f630..5bf7a270fe5 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result @@ -1,2 +1,2 @@ CREATE TABLE `#mysql50#q.q`(f1 INT KEY) ENGINE=TOKUDB; -ERROR HY000: Got error 1632 from storage engine +ERROR 42000: Incorrect table name '#mysql50#q.q' diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test index 1e4b5d11922..cb902f6e52a 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test @@ -7,6 +7,7 @@ # engine expects a table name in the form of a relative path like # "./databasename/tablename". InnoDB detects this in parsing the table name # during the creation and returns an error. +# MariaDB server detect above error. ---error ER_GET_ERRNO +--error ER_WRONG_TABLE_NAME CREATE TABLE `#mysql50#q.q`(f1 INT KEY) ENGINE=TOKUDB; From f877f6b49d97c02b307f83770c47c613c4bd669f Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Tue, 29 Jan 2019 11:50:07 +0100 Subject: [PATCH 30/37] Fix xtradb version after merge --- storage/xtradb/include/univ.i | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index 976bed244a0..6522a19c128 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -45,7 +45,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 39 +#define INNODB_VERSION_BUGFIX 42 #ifndef PERCONA_INNODB_VERSION #define PERCONA_INNODB_VERSION 84.2 From 5e06ee41a46dd9f336e73c0f9b6622c5ea5d548f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 29 Jan 2019 14:07:59 +0200 Subject: [PATCH 31/37] MDEV-18222: Duplicated call to dict_foreign_remove_from_cache() innobase_rename_column_try(): Declare fk_evict as std::set instead of std::list, in order to filter out duplicates. 
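The container swap is the whole fix: unlike std::list, std::set ignores repeated insertions of the same pointer, so each foreign key object ends up in fk_evict, and therefore in dict_foreign_remove_from_cache(), at most once. A minimal stand-alone illustration of that property (int* standing in for dict_foreign_t*; not part of the patch):

  #include <cassert>
  #include <list>
  #include <set>

  int main()
  {
    int foreign_a, foreign_b;           // stand-ins for two dict_foreign_t objects

    std::list<int*> evict_list;
    evict_list.push_back(&foreign_a);
    evict_list.push_back(&foreign_a);   // duplicate kept: the object would be evicted twice
    assert(evict_list.size() == 2);

    std::set<int*> evict_set;
    evict_set.insert(&foreign_a);
    evict_set.insert(&foreign_a);       // duplicate silently dropped
    evict_set.insert(&foreign_b);
    assert(evict_set.size() == 2);      // one entry per distinct object
    return 0;
  }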
--- mysql-test/suite/innodb/r/foreign_key.result | 12 ++++++++++++ mysql-test/suite/innodb/t/foreign_key.test | 13 +++++++++++++ storage/innobase/handler/handler0alter.cc | 8 ++++---- storage/xtradb/handler/handler0alter.cc | 8 ++++---- 4 files changed, 33 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result index b6462000b46..f62a251161a 100644 --- a/mysql-test/suite/innodb/r/foreign_key.result +++ b/mysql-test/suite/innodb/r/foreign_key.result @@ -49,3 +49,15 @@ INSERT INTO t3 SET a=1; kill query @id; ERROR 70100: Query execution was interrupted DROP TABLE t3,t1; +# +# MDEV-18222 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N +# or ASAN heap-use-after-free in dict_foreign_remove_from_cache upon CHANGE COLUMN +# +CREATE TABLE t1 (a INT, UNIQUE(a), KEY(a)) ENGINE=InnoDB; +ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (a); +SET SESSION FOREIGN_KEY_CHECKS = OFF; +ALTER TABLE t1 CHANGE COLUMN a a TIME NOT NULL; +ALTER TABLE t1 ADD pk INT NOT NULL AUTO_INCREMENT PRIMARY KEY; +ALTER TABLE t1 CHANGE COLUMN a b TIME; +SET SESSION FOREIGN_KEY_CHECKS = ON; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test index c1a92697dab..dc55a5c3a96 100644 --- a/mysql-test/suite/innodb/t/foreign_key.test +++ b/mysql-test/suite/innodb/t/foreign_key.test @@ -73,3 +73,16 @@ reap; disconnect fk; DROP TABLE t3,t1; + +--echo # +--echo # MDEV-18222 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N +--echo # or ASAN heap-use-after-free in dict_foreign_remove_from_cache upon CHANGE COLUMN +--echo # +CREATE TABLE t1 (a INT, UNIQUE(a), KEY(a)) ENGINE=InnoDB; +ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (a); +SET SESSION FOREIGN_KEY_CHECKS = OFF; +ALTER TABLE t1 CHANGE COLUMN a a TIME NOT NULL; +ALTER TABLE t1 ADD pk INT NOT NULL AUTO_INCREMENT PRIMARY KEY; +ALTER TABLE t1 CHANGE COLUMN a b TIME; +SET SESSION FOREIGN_KEY_CHECKS = ON; +DROP TABLE t1; diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 25019b8f964..d67defa56e8 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2005, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2018, MariaDB Corporation. +Copyright (c) 2017, 2019, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -4579,7 +4579,7 @@ err_exit: rename_foreign: trx->op_info = "renaming column in SYS_FOREIGN_COLS"; - std::list fk_evict; + std::set fk_evict; bool foreign_modified; for (dict_foreign_set::const_iterator it = user_table->foreign_set.begin(); @@ -4619,7 +4619,7 @@ rename_foreign: } if (foreign_modified) { - fk_evict.push_back(foreign); + fk_evict.insert(foreign); } } @@ -4661,7 +4661,7 @@ rename_foreign: } if (foreign_modified) { - fk_evict.push_back(foreign); + fk_evict.insert(foreign); } } diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index e932e2cbbdb..b7705691949 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2005, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2018, MariaDB Corporation. +Copyright (c) 2017, 2019, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -4593,7 +4593,7 @@ err_exit: rename_foreign: trx->op_info = "renaming column in SYS_FOREIGN_COLS"; - std::list fk_evict; + std::set fk_evict; bool foreign_modified; for (dict_foreign_set::const_iterator it = user_table->foreign_set.begin(); @@ -4633,7 +4633,7 @@ rename_foreign: } if (foreign_modified) { - fk_evict.push_back(foreign); + fk_evict.insert(foreign); } } @@ -4675,7 +4675,7 @@ rename_foreign: } if (foreign_modified) { - fk_evict.push_back(foreign); + fk_evict.insert(foreign); } } From 6699cac0bf10464feab631ff3909ca8c66405628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 29 Jan 2019 14:14:57 +0200 Subject: [PATCH 32/37] MDEV-18256 Duplicated call to dict_foreign_remove_from_cache() ha_innobase::prepare_inplace_alter_table(): Filter out duplicates from ha_alter_info->alter_info->drop_list.elements. 
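The essential change below is a backward scan over the foreign keys already collected for dropping, so that several DROP FOREIGN KEY clauses naming the same constraint produce only one drop_fk[] entry; the related assertions are relaxed from == to <= accordingly. Schematically, with simplified types (a sketch, not the handler code):

  #include <cstddef>

  // Mirrors the new found_fk/dup_fk logic: true when 'foreign' was already
  // collected for an earlier DROP FOREIGN KEY clause in the same ALTER.
  static bool already_collected(void* const* drop_fk, std::size_t n_drop_fk,
                                const void* foreign)
  {
    for (std::size_t i = n_drop_fk; i--; )
      if (drop_fk[i] == foreign)
        return true;
    return false;
  }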
--- mysql-test/suite/innodb/r/foreign_key.result | 9 +++++++++ mysql-test/suite/innodb/t/foreign_key.test | 10 ++++++++++ storage/innobase/handler/handler0alter.cc | 18 +++++++++++++----- storage/xtradb/handler/handler0alter.cc | 18 +++++++++++++----- 4 files changed, 45 insertions(+), 10 deletions(-) diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result index f62a251161a..6573d744714 100644 --- a/mysql-test/suite/innodb/r/foreign_key.result +++ b/mysql-test/suite/innodb/r/foreign_key.result @@ -61,3 +61,12 @@ ALTER TABLE t1 ADD pk INT NOT NULL AUTO_INCREMENT PRIMARY KEY; ALTER TABLE t1 CHANGE COLUMN a b TIME; SET SESSION FOREIGN_KEY_CHECKS = ON; DROP TABLE t1; +# +# MDEV-18256 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N +# upon DROP FOREIGN KEY +# +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (b INT PRIMARY KEY, FOREIGN KEY fk1 (b) REFERENCES t1 (a)) +ENGINE=InnoDB; +ALTER TABLE t2 DROP FOREIGN KEY fk1, DROP FOREIGN KEY fk1; +DROP TABLE t2, t1; diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test index dc55a5c3a96..aa35e3abf00 100644 --- a/mysql-test/suite/innodb/t/foreign_key.test +++ b/mysql-test/suite/innodb/t/foreign_key.test @@ -86,3 +86,13 @@ ALTER TABLE t1 ADD pk INT NOT NULL AUTO_INCREMENT PRIMARY KEY; ALTER TABLE t1 CHANGE COLUMN a b TIME; SET SESSION FOREIGN_KEY_CHECKS = ON; DROP TABLE t1; + +--echo # +--echo # MDEV-18256 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N +--echo # upon DROP FOREIGN KEY +--echo # +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (b INT PRIMARY KEY, FOREIGN KEY fk1 (b) REFERENCES t1 (a)) +ENGINE=InnoDB; +ALTER TABLE t2 DROP FOREIGN KEY fk1, DROP FOREIGN KEY fk1; +DROP TABLE t2, t1; diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index d67defa56e8..17e2810b649 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -3690,12 +3690,14 @@ check_if_ok_to_rename: continue; } + dict_foreign_t* foreign; + for (dict_foreign_set::iterator it = prebuilt->table->foreign_set.begin(); it != prebuilt->table->foreign_set.end(); ++it) { - dict_foreign_t* foreign = *it; + foreign = *it; const char* fid = strchr(foreign->id, '/'); DBUG_ASSERT(fid); @@ -3706,7 +3708,6 @@ check_if_ok_to_rename: if (!my_strcasecmp(system_charset_info, fid, drop->name)) { - drop_fk[n_drop_fk++] = foreign; goto found_fk; } } @@ -3715,12 +3716,19 @@ check_if_ok_to_rename: drop->name); goto err_exit; found_fk: + for (ulint i = n_drop_fk; i--; ) { + if (drop_fk[i] == foreign) { + goto dup_fk; + } + } + drop_fk[n_drop_fk++] = foreign; +dup_fk: continue; } DBUG_ASSERT(n_drop_fk > 0); DBUG_ASSERT(n_drop_fk - == ha_alter_info->alter_info->drop_list.elements); + <= ha_alter_info->alter_info->drop_list.elements); } else { drop_fk = NULL; } @@ -5057,7 +5065,7 @@ commit_try_rebuild( & Alter_inplace_info::DROP_FOREIGN_KEY) || ctx->num_to_drop_fk > 0); DBUG_ASSERT(ctx->num_to_drop_fk - == ha_alter_info->alter_info->drop_list.elements); + <= ha_alter_info->alter_info->drop_list.elements); for (dict_index_t* index = dict_table_get_first_index(rebuilt_table); index; @@ -5309,7 +5317,7 @@ commit_try_norebuild( & Alter_inplace_info::DROP_FOREIGN_KEY) || ctx->num_to_drop_fk > 0); DBUG_ASSERT(ctx->num_to_drop_fk - == ha_alter_info->alter_info->drop_list.elements); + <= ha_alter_info->alter_info->drop_list.elements); for (ulint i = 0; i < 
ctx->num_to_add_index; i++) { dict_index_t* index = ctx->add_index[i]; diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index b7705691949..c27cd7f1b40 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -3704,12 +3704,14 @@ check_if_ok_to_rename: continue; } + dict_foreign_t* foreign; + for (dict_foreign_set::iterator it = prebuilt->table->foreign_set.begin(); it != prebuilt->table->foreign_set.end(); ++it) { - dict_foreign_t* foreign = *it; + foreign = *it; const char* fid = strchr(foreign->id, '/'); DBUG_ASSERT(fid); @@ -3720,7 +3722,6 @@ check_if_ok_to_rename: if (!my_strcasecmp(system_charset_info, fid, drop->name)) { - drop_fk[n_drop_fk++] = foreign; goto found_fk; } } @@ -3729,12 +3730,19 @@ check_if_ok_to_rename: drop->name); goto err_exit; found_fk: + for (ulint i = n_drop_fk; i--; ) { + if (drop_fk[i] == foreign) { + goto dup_fk; + } + } + drop_fk[n_drop_fk++] = foreign; +dup_fk: continue; } DBUG_ASSERT(n_drop_fk > 0); DBUG_ASSERT(n_drop_fk - == ha_alter_info->alter_info->drop_list.elements); + <= ha_alter_info->alter_info->drop_list.elements); } else { drop_fk = NULL; } @@ -5071,7 +5079,7 @@ commit_try_rebuild( & Alter_inplace_info::DROP_FOREIGN_KEY) || ctx->num_to_drop_fk > 0); DBUG_ASSERT(ctx->num_to_drop_fk - == ha_alter_info->alter_info->drop_list.elements); + <= ha_alter_info->alter_info->drop_list.elements); for (dict_index_t* index = dict_table_get_first_index(rebuilt_table); index; @@ -5325,7 +5333,7 @@ commit_try_norebuild( & Alter_inplace_info::DROP_FOREIGN_KEY) || ctx->num_to_drop_fk > 0); DBUG_ASSERT(ctx->num_to_drop_fk - == ha_alter_info->alter_info->drop_list.elements); + <= ha_alter_info->alter_info->drop_list.elements); for (ulint i = 0; i < ctx->num_to_add_index; i++) { dict_index_t* index = ctx->add_index[i]; From 1522ee2949ae304ad9092894896a6272dc08bb39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 29 Jan 2019 15:00:41 +0200 Subject: [PATCH 33/37] MDEV-18016: Assertion failure on ALTER TABLE after foreign_key_checks=0 ha_innobase::commit_inplace_alter_table(): Do not crash if innobase_update_foreign_cache() returns an error. It can return an error on ALTER TABLE if an inconsistent FOREIGN KEY constraint was created earlier when SET foreign_key_checks=0 was in effect. Instead, report a warning to the client that constraints cannot be loaded. 
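Seen as control flow, the change means a failed reload of the foreign key cache after ALTER TABLE no longer raises ER_CANNOT_ADD_FOREIGN and hits a debug assertion; it only pushes a warning, and only when foreign key checks are enabled for the transaction. A rough stand-alone sketch of that flow (simplified types; the real code calls push_warning_printf() with ER_ALTER_INFO, as in the hunks below):

  #include <cstdio>

  // Sketch of the new error handling in commit_inplace_alter_table():
  // the commit path keeps going and the failure is surfaced as a warning.
  static void after_fk_cache_reload(bool reload_ok, bool check_foreigns)
  {
    if (!reload_ok && check_foreigns)
    {
      // Server code: push_warning_printf(thd, WARN_LEVEL_WARN, ER_ALTER_INFO,
      //                                  "failed to load FOREIGN KEY constraints");
      std::fprintf(stderr, "Warning: failed to load FOREIGN KEY constraints\n");
    }
    // Previously: my_error(ER_CANNOT_ADD_FOREIGN) followed by ut_ad(0).
  }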
--- mysql-test/suite/innodb/r/foreign_key.result | 12 ++++ mysql-test/suite/innodb/t/foreign_key.test | 8 +++ storage/innobase/handler/handler0alter.cc | 58 ++++++++------------ storage/xtradb/handler/handler0alter.cc | 58 ++++++++------------ 4 files changed, 68 insertions(+), 68 deletions(-) diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result index 6573d744714..4e253261f2e 100644 --- a/mysql-test/suite/innodb/r/foreign_key.result +++ b/mysql-test/suite/innodb/r/foreign_key.result @@ -70,3 +70,15 @@ CREATE TABLE t2 (b INT PRIMARY KEY, FOREIGN KEY fk1 (b) REFERENCES t1 (a)) ENGINE=InnoDB; ALTER TABLE t2 DROP FOREIGN KEY fk1, DROP FOREIGN KEY fk1; DROP TABLE t2, t1; +CREATE TABLE t1 (f VARCHAR(256)) ENGINE=InnoDB; +SET SESSION FOREIGN_KEY_CHECKS = OFF; +ALTER TABLE t1 ADD FOREIGN KEY (f) REFERENCES non_existing_table (x); +SET SESSION FOREIGN_KEY_CHECKS = ON; +ALTER TABLE t1 ADD FULLTEXT INDEX ft1 (f); +Warnings: +Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID +Warning 1088 failed to load FOREIGN KEY constraints +ALTER TABLE t1 ADD FULLTEXT INDEX ft2 (f); +Warnings: +Warning 1088 failed to load FOREIGN KEY constraints +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test index aa35e3abf00..b4e2ee1bbe7 100644 --- a/mysql-test/suite/innodb/t/foreign_key.test +++ b/mysql-test/suite/innodb/t/foreign_key.test @@ -96,3 +96,11 @@ CREATE TABLE t2 (b INT PRIMARY KEY, FOREIGN KEY fk1 (b) REFERENCES t1 (a)) ENGINE=InnoDB; ALTER TABLE t2 DROP FOREIGN KEY fk1, DROP FOREIGN KEY fk1; DROP TABLE t2, t1; + +CREATE TABLE t1 (f VARCHAR(256)) ENGINE=InnoDB; +SET SESSION FOREIGN_KEY_CHECKS = OFF; +ALTER TABLE t1 ADD FOREIGN KEY (f) REFERENCES non_existing_table (x); +SET SESSION FOREIGN_KEY_CHECKS = ON; +ALTER TABLE t1 ADD FULLTEXT INDEX ft1 (f); +ALTER TABLE t1 ADD FULLTEXT INDEX ft2 (f); +DROP TABLE t1; diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 17e2810b649..40a04c8848f 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -5638,7 +5638,6 @@ ha_innobase::commit_inplace_alter_table( Alter_inplace_info* ha_alter_info, bool commit) { - dberr_t error; ha_innobase_inplace_ctx* ctx0 = static_cast (ha_alter_info->handler_ctx); @@ -5705,7 +5704,7 @@ ha_innobase::commit_inplace_alter_table( transactions collected during crash recovery could be holding InnoDB locks only, not MySQL locks. */ - error = row_merge_lock_table( + dberr_t error = row_merge_lock_table( prebuilt->trx, ctx->old_table, LOCK_X); if (error != DB_SUCCESS) { @@ -5890,9 +5889,9 @@ rollback_trx: file operations that will be performed in commit_cache_rebuild(), and if none, generate the redo log for these operations. */ - error = fil_mtr_rename_log(ctx->old_table, - ctx->new_table, - ctx->tmp_name, &mtr); + dberr_t error = fil_mtr_rename_log( + ctx->old_table, ctx->new_table, ctx->tmp_name, + &mtr); if (error != DB_SUCCESS) { /* Out of memory or a problem will occur when renaming files. */ @@ -6017,39 +6016,30 @@ rollback_trx: /* Rename the tablespace files. 
*/ commit_cache_rebuild(ctx); - error = innobase_update_foreign_cache(ctx, user_thd); - if (error != DB_SUCCESS) { - goto foreign_fail; + if (innobase_update_foreign_cache(ctx, user_thd) + != DB_SUCCESS + && prebuilt->trx->check_foreigns) { +foreign_fail: + push_warning_printf( + user_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ALTER_INFO, + "failed to load FOREIGN KEY" + " constraints"); } } else { - error = innobase_update_foreign_cache(ctx, user_thd); + bool fk_fail = innobase_update_foreign_cache( + ctx, user_thd) != DB_SUCCESS; - if (error != DB_SUCCESS) { -foreign_fail: - /* The data dictionary cache - should be corrupted now. The - best solution should be to - kill and restart the server, - but the *.frm file has not - been replaced yet. */ - my_error(ER_CANNOT_ADD_FOREIGN, - MYF(0)); - sql_print_error( - "InnoDB: dict_load_foreigns()" - " returned %u for %s", - (unsigned) error, - thd_query_string(user_thd) - ->str); - ut_ad(0); - } else { - if (!commit_cache_norebuild( - ctx, table, trx)) { - ut_a(!prebuilt->trx->check_foreigns); - } + if (!commit_cache_norebuild(ctx, table, trx)) { + fk_fail = true; + ut_ad(!prebuilt->trx->check_foreigns); + } - innobase_rename_columns_cache( - ha_alter_info, table, - ctx->new_table); + innobase_rename_columns_cache(ha_alter_info, table, + ctx->new_table); + if (fk_fail && prebuilt->trx->check_foreigns) { + goto foreign_fail; } } DBUG_INJECT_CRASH("ib_commit_inplace_crash", diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index c27cd7f1b40..cd8fc8ad589 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -5654,7 +5654,6 @@ ha_innobase::commit_inplace_alter_table( Alter_inplace_info* ha_alter_info, bool commit) { - dberr_t error; ha_innobase_inplace_ctx* ctx0 = static_cast (ha_alter_info->handler_ctx); @@ -5721,7 +5720,7 @@ ha_innobase::commit_inplace_alter_table( transactions collected during crash recovery could be holding InnoDB locks only, not MySQL locks. */ - error = row_merge_lock_table( + dberr_t error = row_merge_lock_table( prebuilt->trx, ctx->old_table, LOCK_X); if (error != DB_SUCCESS) { @@ -5906,9 +5905,9 @@ rollback_trx: file operations that will be performed in commit_cache_rebuild(), and if none, generate the redo log for these operations. */ - error = fil_mtr_rename_log(ctx->old_table, - ctx->new_table, - ctx->tmp_name, &mtr); + dberr_t error = fil_mtr_rename_log( + ctx->old_table, ctx->new_table, ctx->tmp_name, + &mtr); if (error != DB_SUCCESS) { /* Out of memory or a problem will occur when renaming files. */ @@ -6033,39 +6032,30 @@ rollback_trx: /* Rename the tablespace files. */ commit_cache_rebuild(ctx); - error = innobase_update_foreign_cache(ctx, user_thd); - if (error != DB_SUCCESS) { - goto foreign_fail; + if (innobase_update_foreign_cache(ctx, user_thd) + != DB_SUCCESS + && prebuilt->trx->check_foreigns) { +foreign_fail: + push_warning_printf( + user_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ALTER_INFO, + "failed to load FOREIGN KEY" + " constraints"); } } else { - error = innobase_update_foreign_cache(ctx, user_thd); + bool fk_fail = innobase_update_foreign_cache( + ctx, user_thd) != DB_SUCCESS; - if (error != DB_SUCCESS) { -foreign_fail: - /* The data dictionary cache - should be corrupted now. The - best solution should be to - kill and restart the server, - but the *.frm file has not - been replaced yet. 
*/ - my_error(ER_CANNOT_ADD_FOREIGN, - MYF(0)); - sql_print_error( - "InnoDB: dict_load_foreigns()" - " returned %u for %s", - (unsigned) error, - thd_query_string(user_thd) - ->str); - ut_ad(0); - } else { - if (!commit_cache_norebuild( - ctx, table, trx)) { - ut_a(!prebuilt->trx->check_foreigns); - } + if (!commit_cache_norebuild(ctx, table, trx)) { + fk_fail = true; + ut_ad(!prebuilt->trx->check_foreigns); + } - innobase_rename_columns_cache( - ha_alter_info, table, - ctx->new_table); + innobase_rename_columns_cache(ha_alter_info, table, + ctx->new_table); + if (fk_fail && prebuilt->trx->check_foreigns) { + goto foreign_fail; } } DBUG_INJECT_CRASH("ib_commit_inplace_crash", From 368eda060f5922929eb4741e97b37a205591bdf3 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Tue, 29 Jan 2019 20:33:43 +0200 Subject: [PATCH 34/37] List of unstable tests for 10.0.38 release --- mysql-test/unstable-tests | 119 ++++++++++++++------------------------ 1 file changed, 45 insertions(+), 74 deletions(-) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 6725e2ae9ff..10f15f3ddc4 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,93 +23,72 @@ # ############################################################################## -# Based on 10.0 6ced789186fabd7dce97b3d6d171ff9e5ddc5f48 +# Based on bb-10.0-release 1522ee2949ae304ad9092894896a6272dc08bb39 main.alter_table : Modified in 10.0.37 -main.assign_key_cache : Added in 10.0.36 -main.assign_key_cache_debug : Added in 10.0.36 -main.auto_increment : Modified in 10.0.36 -main.bootstrap : Modified in 10.0.36 -main.connect_debug : Added in 10.0.36 +main.auto_increment_ranges_innodb : Modified in 10.0.38 +main.bigint : Modified in 10.0.38 main.count_distinct2 : MDEV-11768 - timeout main.create_delayed : MDEV-10605 - failed with timeout main.create_or_replace : Modified in 10.0.37 -main.ctype_binary : Modified in 10.0.36 -main.ctype_eucjpms : Modified in 10.0.36 -main.ctype_euckr : Modified in 10.0.36 -main.ctype_gbk : Modified in 10.0.36 -main.ctype_latin1 : Modified in 10.0.36 +main.ctype_latin1 : Modified in 10.0.38 main.ctype_uca : Modified in 10.0.37 -main.ctype_ucs : Modified in 10.0.36 -main.ctype_ujis : Modified in 10.0.36 -main.ctype_utf16le : Modified in 10.0.36 -main.ctype_utf16 : Modified in 10.0.36 -main.ctype_utf32 : Modified in 10.0.36 -main.ctype_utf8mb4 : Modified in 10.0.36 -main.ctype_utf8 : Modified in 10.0.36 main.debug_sync : MDEV-10607 - internal error -main.derived : Modified in 10.0.36 main.derived_opt : MDEV-11768 - timeout; modified in 10.0.37 +main.events_bugs : MDEV-12892 - Server crash main.events_slowlog : MDEV-12821 - wrong result main.func_concat : Modified in 10.0.37 +main.func_group_innodb : Modified in 10.0.38 main.func_isnull : Modified in 10.0.37 main.func_time : Modified in 10.0.37 main.gis : MDEV-13411 - wrong result on P8; modified in 10.0.37 main.grant : Modified in 10.0.37 -main.grant2 : Modified in 10.0.36 -main.grant_not_windows : Added in 10.0.36 main.group_min_max : Modified in 10.0.37 -main.having : Modified in 10.0.36 main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown +main.huge_frm-6224 : Modified in 10.0.38 main.index_intersect_innodb : MDEV-10643 - failed with timeout main.index_merge_innodb : MDEV-7142 - wrong result +main.index_merge_myisam : Modified in 10.0.38 +main.innodb_ext_key : Modified in 10.0.38 main.innodb_mysql_lock : MDEV-7861 - sporadic lock detection failure -main.insert_select : Modified in 10.0.36 main.join : 
Modified in 10.0.37 -main.join_cache : Modified in 10.0.36 -main.join_outer : Modified in 10.0.36 main.kill_processlist-6619 : MDEV-10793 - wrong result -main.limit : Modified in 10.0.36 main.log_tables-big : MDEV-13408 - wrong result main.lowercase_fs_off : Modified in 10.0.37 main.mdev-504 : MDEV-10607 - sporadic "can't connect" main.mdev375 : MDEV-10607 - sporadic "can't connect" main.merge : MDEV-10607 - sporadic "can't connect" -main.myisam : Modified in 10.0.36 -main.mysql : Modified in 10.0.36 -main.mysql_cp932 : Modified in 10.0.36 -main.mysqldump : Modified in 10.0.36 +main.mysql : Modified in 10.0.38 +main.mysqldump : Modified in 10.0.38 main.mysqlhotcopy_myisam : MDEV-10995 - test hangs on debug build -main.mysqlslap : Modified in 10.0.36 main.mysqltest : MDEV-9269 - fails on Alpha main.mysql_client_test_nonblock : MDEV-15096 - exec failed main.order_by_zerolength-4285 : Modified in 10.0.37 +main.partition : Modified in 10.0.38 main.partition_explicit_prune : Modified in 10.0.37 +main.partition_innodb : Modified in 10.0.38 main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count -main.rename : Modified in 10.0.36 main.query_cache_debug : MDEV-15281 - resize or similar command in progress +main.range_innodb : Modified in 10.0.38 +main.read_only : Modified in 10.0.38 +main.row-checksum : Modified in 10.0.38 main.selectivity : Modified in 10.0.37 main.show_explain : MDEV-10674 - wrong result main.sp : Modified in 10.0.37 -main.sp-innodb : Modified in 10.0.36 main.sp_notembedded : MDEV-10607 - internal error main.sp-security : MDEV-10607 - sporadic "can't connect"; modified in 10.0.37 -main.statistics : Modified in 10.0.36 -main.statistics_close : Added in 10.0.36 -main.stat_tables : Modified in 10.0.37 +main.stat_tables : Modified in 10.0.38 main.stat_tables_par_innodb : MDEV-14155 - wrong rounding -main.subselect : Modified in 10.0.36 +main.subselect2 : Modified in 10.0.38 main.subselect_extra_no_semijoin : Modified in 10.0.37 main.subselect_innodb : MDEV-10614 - sporadic wrong results -main.subselect_sj : Modified in 10.0.36 -main.subselect_sj_mat : Modified in 10.0.36 -main.subselect_sj2_mat : Modified in 10.0.36 -main.subselect4 : Modified in 10.0.36 +main.subselect_mat : Modified in 10.0.38 main.tc_heuristic_recover : MDEV-15200 - wrong error on mysqld_stub_cmd main.type_datetime : Modified in 10.0.37 main.type_float : Modified in 10.0.37 +main.type_newdecimal : Modified in 10.0.38 main.type_year : Modified in 10.0.37 -main.union : Modified in 10.0.36 +main.union : Modified in 10.0.38 main.xa : MDEV-11769 - lock wait timeout #---------------------------------------------------------------- @@ -124,11 +103,11 @@ archive-test_sql_discovery.discover : MDEV-16817 - Table marked as crashed #---------------------------------------------------------------- binlog.binlog_commit_wait : MDEV-10150 - Error: too much time elapsed -binlog.binlog_tmp_table_row : Added in 10.0.36 binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint #---------------------------------------------------------------- +connect.part_table : Modified in 10.0.38 connect.zip : MDEV-13884 - wrong result #---------------------------------------------------------------- @@ -137,16 +116,14 @@ engines/rr_trx.* : MDEV-10998 - tests not maintained #---------------------------------------------------------------- -federated.assisted_discovery : Include file modified in 10.0.36 -federated.federatedx : MDEV-10617 - Wrong checksum, timeouts; include file modified in 10.0.36 +federated.federatedx : MDEV-10617 - Wrong 
checksum, timeouts federated.federated_bug_35333 : MDEV-13410 - Wrong result federated.federated_innodb : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips -federated.federated_partition : MDEV-10417 - Fails on Mips; include file modified in 10.0.36 -federated.federated_transactions : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips; include file modified in 10.0.36 +federated.federated_partition : MDEV-10417 - Fails on Mips +federated.federated_transactions : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips #---------------------------------------------------------------- -funcs_1.is_engines_federated : Include file modified in 10.0.36 funcs_1.memory_views : MDEV-11773 - timeout funcs_1.processlist_val_ps : MDEV-12175 - Wrong result funcs_1.processlist_val_no_prot : MDEV-11223 - Wrong result @@ -155,50 +132,43 @@ funcs_2/charset.* : MDEV-10999 - test not maintained #---------------------------------------------------------------- -handler.ps : Added in 10.0.36 - -#---------------------------------------------------------------- - -heap.heap_auto_increment : Modified in 10.0.36 heap.heap_btree : Modified in 10.0.37 #---------------------------------------------------------------- +innodb.alter_candidate_key : Added in 10.0.38 innodb.alter_inplace_perfschema : Added in 10.0.37 -innodb.alter_partitioned_xa : Added in 10.0.36 innodb.binlog_consistent : MDEV-10618 - Server fails to start innodb.foreign-keys : Modified in 10.0.37 -innodb.foreign_key : Added in 10.0.37 +innodb.foreign_key : Modified in 10.0.38 innodb.group_commit_crash : MDEV-11770 - checksum mismatch innodb.group_commit_crash_no_optimize_thread : MDEV-11770 - checksum mismatch -innodb.innodb-alter : Modified in 10.0.36 +innodb.innodb_28867993 : Added in 10.0.38 +innodb.innodb-alter : Modified in 10.0.38 innodb.innodb-alter-debug : Modified in 10.0.37 innodb.innodb-alter-table : MDEV-10619 - Testcase timeout innodb.innodb_bug30423 : MDEV-7311 - Wrong number of rows in the plan innodb.innodb_bug48024 : MDEV-14352 - Assertion failure -innodb.innodb_bug54044 : Modified in 10.0.36 -innodb.innodb-mdev7046 : Modified in 10.0.36 +innodb.innodb-index : Modified in 10.0.38 innodb.innodb_monitor : MDEV-10939 - Testcase timeout -innodb.innodb-wl5522 : Modified in 10.0.36 +innodb.innodb_simulate_comp_failures : MDEV-18417 - ASAN failures +innodb.innodb-table-online : Modified in 10.0.38 +innodb.innodb-virtual-columns : Modified in 10.0.38 innodb.log_file_size : MDEV-15668 - Not found pattern innodb.recovery_shutdown : MDEV-15671 - Warning: database page corruption -innodb.rename_table : Added in 10.0.36 innodb.table_definition_cache_debug : MDEV-14206 - Unexpected warning innodb.table_flags : Modified in 10.0.37 innodb.xa_recovery : MDEV-15279 - mysqld got exception -innodb_fts.basic : Added in 10.0.36 innodb_fts.fts_kill_query : Modified in 10.0.37 innodb_fts.innodb-fts-fic : MDEV-14154 - Assertion failure innodb_fts.innodb_fts_misc_debug : MDEV-14156 - Unexpected warning -innodb_fts.sync_ddl : Added in 10.0.36 #---------------------------------------------------------------- -maria.alter : Modified in 10.0.36 maria.create : Added in 10.0.37 maria.fulltext2 : Added in 10.0.37 -maria.lock : Modified in 10.0.36 +maria.insert_select : MDEV-12757 - Timeout maria.maria : MDEV-14430 - Wrong result; modified in 10.0.37 #---------------------------------------------------------------- @@ -221,16 +191,15 @@ multi_source.status_vars : MDEV-4632 - failed while waiting for Slave_received_h 
#---------------------------------------------------------------- -parts.alter_data_directory_innodb : Added in 10.0.36 parts.partition_auto_increment_archive : MDEV-16491 - Table marked as crashed parts.partition_auto_increment_maria : MDEV-14430 - wrong result parts.partition_exch_qa_10 : MDEV-11765 - wrong result -parts.truncate_locked : Added in 10.0.36 parts.update_and_cache : Added in 10.0.37 #---------------------------------------------------------------- perfschema.connect_attrs : MDEV-17283 - Wrong result +perfschema.dml_setup_instruments : Modified in 10.0.38 perfschema.func_file_io : MDEV-5708 - fails for s390x perfschema.func_mutex : MDEV-5708 - fails for s390x perfschema.hostcache_ipv6_ssl : MDEV-10696 - crash on shutdown @@ -242,17 +211,17 @@ perfschema_stress.* : MDEV-10996 - tests not maintained #---------------------------------------------------------------- plugins.feedback_plugin_send : MDEV-7932 - ssl failed for url, MDEV-11118 - wrong result -plugins.server_audit : MDEV-9562 - crashes on sol10-sparc; modified in 10.0.36 +plugins.server_audit : MDEV-9562 - crashes on sol10-sparc plugins.thread_pool_server_audit : MDEV-9562 - crashes on sol10-sparc, MDEV-14295 - wrong result #---------------------------------------------------------------- roles.create_and_grant_role : MDEV-11772 - wrong result +roles.flush_roles-17898 : Added in 10.0.38 #---------------------------------------------------------------- rpl.last_insert_id : MDEV-10625 - warnings in error log -rpl.rename : Added in 10.0.36 rpl.rpl_15919 : Added in 10.0.37 rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips @@ -263,6 +232,7 @@ rpl.rpl_foreign_key_innodb : Modified in 10.0.37 rpl.rpl_gtid_crash : MDEV-9501 - Warning: failed registering on master rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown rpl.rpl_gtid_until : MDEV-10625 - warnings in error log +rpl.rpl_idempotency : Modified in 10.0.38 rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails on Mips rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x @@ -270,14 +240,14 @@ rpl.rpl_insert_id_pk : MDEV-16567 - Assertion failure rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips rpl.rpl_lcase_tblnames_rewrite_db : Added in 10.0.37 rpl.rpl_mdev6020 : MDEV-10417 - Timeouts, fails on Mips -rpl.rpl_mixed_implicit_commit_binlog : Included file modified in 10.0.36 rpl.rpl_parallel : MDEV-10653 - Timeouts rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout rpl.rpl_parallel_temptable : MDEV-10356 - Crash in close_thread_tables rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips rpl.rpl_row_basic_11bugs : MDEV-12171 - Server failed to start -rpl.rpl_row_implicit_commit_binlog : Included file modified in 10.0.36 +rpl.rpl_row_big_table_id_32bit : Added in 10.0.38 +rpl.rpl_row_big_table_id_64bit : Added in 10.0.38 rpl.rpl_row_index_choice : MDEV-13409 - Server crash rpl.rpl_row_lcase_tblnames : Added in 10.0.37 rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x @@ -287,7 +257,7 @@ rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Wrong plugin status rpl.rpl_show_slave_hosts : MDEV-12171 - Server failed to start rpl.rpl_skip_replication : MDEV-9268 - Fails with timeout in sync_slave_with_master on Alpha rpl.rpl_slave_grp_exec : MDEV-10514 - Unexpected deadlock -rpl.rpl_stm_implicit_commit_binlog : Included file modified in 10.0.36 +rpl.rpl_start_stop_slave : MDEV-13567 - Timeout in sync 
rpl.rpl_stm_lcase_tblnames : Added in 10.0.37 rpl.rpl_sync : MDEV-10633 - Database page corruption rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries @@ -324,6 +294,7 @@ stress.ddl_innodb : MDEV-10635 - Testcase timeout sys_vars.autocommit_func2 : MDEV-9329 - Fails on Ubuntu/s390x sys_vars.innodb_ft_result_cache_limit_32 : Added in 10.0.37 sys_vars.innodb_ft_result_cache_limit_64 : Added in 10.0.37 +sys_vars.table_definition_cache_basic : Modified in 10.0.38 sys_vars.thread_cache_size_func : MDEV-11775 - wrong result #---------------------------------------------------------------- @@ -345,6 +316,7 @@ tokudb.savepoint-5 : MDEV-15280 - wrong result tokudb_backup.* : MDEV-11001 - tests don't work tokudb_bugs.PS-3773 : Added in 10.0.37 +tokudb_bugs.PS-4979 : Added in 10.0.38 tokudb_bugs.alter_table_comment_rebuild_data : Added in 10.0.37 tokudb_bugs.checkpoint_lock : MDEV-10637 - Wrong processlist output tokudb_bugs.checkpoint_lock_3 : MDEV-10637 - Wrong processlist output @@ -360,14 +332,13 @@ rpl-tokudb.* : MDEV-14354 - Tests fail with tcmalloc #---------------------------------------------------------------- -unit.lf : MDEV-12897 - Unexpected return code +unit.lf : MDEV-18416 - Object was probably modified after being freed unit.ma_test_loghandler : MDEV-10638 - record read not ok -unit.my_atomic : MDEV-15670 - Signal 11 thrown #---------------------------------------------------------------- vcol.not_supported : MDEV-10639 - Testcase timeout vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout -vcol.vcol_misc : MDEV-16651 - Wrong error message; modified in 10.0.36 +vcol.vcol_misc : MDEV-16651 - Wrong error message #---------------------------------------------------------------- From d9d83f1d92b696ef56f4944df036b8a78364ebb4 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Thu, 31 Jan 2019 09:09:50 -0500 Subject: [PATCH 35/37] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 163137684a2..0924461861a 100644 --- a/VERSION +++ b/VERSION @@ -1,3 +1,3 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=0 -MYSQL_VERSION_PATCH=38 +MYSQL_VERSION_PATCH=39 From c1e1764fc4b913ee688b383aac2698b83661d64c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Sat, 2 Feb 2019 12:49:04 +0200 Subject: [PATCH 36/37] Fix embedded innodb_plugin after 560799ebd8efe11f4c4ae1bb9ed4d39185e03800 wsrep_certification_rules: Define as a weak global symbol. While there are separate _embedded.a for statically linked storage engine plugins, there is only one ha_innodb.so which is supposed to work with both values of WITH_WSREP. The merge from 10.0-galera introduced a reference to a global variable that is only defined when the server is built WITH_WSREP. We must define that symbol as weak global, so that when a dynamically linked InnoDB or XtraDB is used with the embedded server (which never includes write-set replication patches), the variable will be read as 0, instead of causing a failure to load the InnoDB or XtraDB plugin. 
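The declaration added here is the GCC/Clang weak-symbol idiom: a dynamically loaded plugin may reference a variable that only some host builds define, and an unresolved weak symbol is given address 0 instead of making the module fail to load. A generic illustration of the idiom follows (hypothetical host_only_setting variable, not the server code; note that the patch also adds a wsrep_on() short-circuit in row0ins.cc so the variable is never actually read in the embedded build):

  // Weak reference: the object links and loads even when no strong
  // definition of host_only_setting exists in the executable.
  extern unsigned long host_only_setting __attribute__((weak));

  static unsigned long read_host_setting()
  {
    // With no definition linked in, &host_only_setting is a null address,
    // so guard the access before dereferencing (the guard is part of the
    // idiom, not of the MariaDB patch).
    if (&host_only_setting != nullptr)
      return host_only_setting;
    return 0;
  }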
--- sql/wsrep_mysqld_c.h | 6 +++++- storage/innobase/row/row0ins.cc | 6 +++++- storage/xtradb/row/row0ins.cc | 6 +++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/sql/wsrep_mysqld_c.h b/sql/wsrep_mysqld_c.h index 15ca0ae2a6d..235a871c113 100644 --- a/sql/wsrep_mysqld_c.h +++ b/sql/wsrep_mysqld_c.h @@ -21,6 +21,10 @@ enum enum_wsrep_certification_rules { WSREP_CERTIFICATION_RULES_OPTIMIZED }; -extern ulong wsrep_certification_rules; +/* This is intentionally declared as a weak global symbol, so that +the same ha_innodb.so can be used with the embedded server +(which does not link to the definition of this variable) +and with the regular server built WITH_WSREP. */ +extern ulong wsrep_certification_rules __attribute__((weak)); #endif /* WSREP_MYSQLD_C_H */ diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 22588d33418..65a27ebeb37 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -56,6 +56,7 @@ Created 4/20/1996 Heikki Tuuri #include "m_string.h" #ifdef WITH_WSREP +#include #include "../../../wsrep/wsrep_api.h" #include "wsrep_mysqld_c.h" #endif /* WITH_WSREP */ @@ -1645,6 +1646,9 @@ run_again: if (check_ref) { err = DB_SUCCESS; #ifdef WITH_WSREP + if (!wsrep_on(trx->mysql_thd)) { + goto end_scan; + } enum wsrep_key_type key_type; if (upd_node != NULL) { key_type = WSREP_KEY_SHARED; @@ -1661,7 +1665,7 @@ run_again: } err = wsrep_append_foreign_key( - thr_get_trx(thr), + trx, foreign, rec, check_index, diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc index b14d43e4f42..476c18680f3 100644 --- a/storage/xtradb/row/row0ins.cc +++ b/storage/xtradb/row/row0ins.cc @@ -56,6 +56,7 @@ Created 4/20/1996 Heikki Tuuri #include "m_string.h" #ifdef WITH_WSREP +#include #include "../../../wsrep/wsrep_api.h" #include "wsrep_mysqld_c.h" #endif /* WITH_WSREP */ @@ -1657,6 +1658,9 @@ run_again: if (check_ref) { err = DB_SUCCESS; #ifdef WITH_WSREP + if (!wsrep_on(trx->mysql_thd)) { + goto end_scan; + } enum wsrep_key_type key_type; if (upd_node != NULL) { key_type = WSREP_KEY_SHARED; @@ -1673,7 +1677,7 @@ run_again: } err = wsrep_append_foreign_key( - thr_get_trx(thr), + trx, foreign, rec, check_index, From 955c7b32226c816b24a2ed1750e12bc0256565ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Sun, 3 Feb 2019 17:00:39 +0200 Subject: [PATCH 37/37] MDEV-16896 encryption.innodb-checksum-algorithm crashes buf_page_is_corrupted(): Read the global variable srv_checksum_algorithm only once in order to avoid a race condition when SET GLOBAL innodb_checksum_algorithm=...; is being executed concurrently with this function. --- storage/innobase/buf/buf0buf.cc | 21 +++++++++------------ storage/xtradb/buf/buf0buf.cc | 21 +++++++++------------ 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index af68f894aca..621433c1c5c 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -2,7 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2013, 2018, MariaDB Corporation. +Copyright (c) 2013, 2019, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -925,7 +925,10 @@ buf_page_is_corrupted( /* Check whether the checksum fields have correct values */ - if (srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_NONE) { + const srv_checksum_algorithm_t curr_algo = + static_cast(srv_checksum_algorithm); + + if (curr_algo == SRV_CHECKSUM_ALGORITHM_NONE) { return(false); } @@ -957,9 +960,6 @@ buf_page_is_corrupted( return(false); } - const srv_checksum_algorithm_t curr_algo = - static_cast(srv_checksum_algorithm); - switch (curr_algo) { case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: return !buf_page_is_checksum_valid_crc32( @@ -988,9 +988,7 @@ buf_page_is_corrupted( for writing checksums because we assume that the chance of it matching is higher. */ - if (srv_checksum_algorithm - == SRV_CHECKSUM_ALGORITHM_CRC32) { - + if (curr_algo == SRV_CHECKSUM_ALGORITHM_CRC32) { crc32 = buf_calc_page_crc32(read_buf); crc32_inited = true; @@ -1000,7 +998,7 @@ buf_page_is_corrupted( return true; } } else { - ut_ad(srv_checksum_algorithm + ut_ad(curr_algo == SRV_CHECKSUM_ALGORITHM_INNODB); if (checksum_field2 @@ -1029,8 +1027,7 @@ buf_page_is_corrupted( for writing checksums because we assume that the chance of it matching is higher. */ - if (srv_checksum_algorithm - == SRV_CHECKSUM_ALGORITHM_CRC32) { + if (curr_algo == SRV_CHECKSUM_ALGORITHM_CRC32) { if (!crc32_inited) { crc32 = buf_calc_page_crc32(read_buf); @@ -1043,7 +1040,7 @@ buf_page_is_corrupted( return true; } } else { - ut_ad(srv_checksum_algorithm + ut_ad(curr_algo == SRV_CHECKSUM_ALGORITHM_INNODB); if (checksum_field1 diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 85b337f641a..f4ef3ca7015 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -2,7 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2013, 2018, MariaDB Corporation. +Copyright (c) 2013, 2019, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -924,7 +924,10 @@ buf_page_is_corrupted( /* Check whether the checksum fields have correct values */ - if (srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_NONE) { + const srv_checksum_algorithm_t curr_algo = + static_cast(srv_checksum_algorithm); + + if (curr_algo == SRV_CHECKSUM_ALGORITHM_NONE) { return(false); } @@ -956,9 +959,6 @@ buf_page_is_corrupted( return(false); } - const srv_checksum_algorithm_t curr_algo = - static_cast(srv_checksum_algorithm); - switch (curr_algo) { case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: return !buf_page_is_checksum_valid_crc32( @@ -987,9 +987,7 @@ buf_page_is_corrupted( for writing checksums because we assume that the chance of it matching is higher. */ - if (srv_checksum_algorithm - == SRV_CHECKSUM_ALGORITHM_CRC32) { - + if (curr_algo == SRV_CHECKSUM_ALGORITHM_CRC32) { crc32 = buf_calc_page_crc32(read_buf); crc32_inited = true; @@ -999,7 +997,7 @@ buf_page_is_corrupted( return true; } } else { - ut_ad(srv_checksum_algorithm + ut_ad(curr_algo == SRV_CHECKSUM_ALGORITHM_INNODB); if (checksum_field2 @@ -1028,8 +1026,7 @@ buf_page_is_corrupted( for writing checksums because we assume that the chance of it matching is higher. 
*/ - if (srv_checksum_algorithm - == SRV_CHECKSUM_ALGORITHM_CRC32) { + if (curr_algo == SRV_CHECKSUM_ALGORITHM_CRC32) { if (!crc32_inited) { crc32 = buf_calc_page_crc32(read_buf); @@ -1042,7 +1039,7 @@ buf_page_is_corrupted( return true; } } else { - ut_ad(srv_checksum_algorithm + ut_ad(curr_algo == SRV_CHECKSUM_ALGORITHM_INNODB); if (checksum_field1