diff --git a/BUILD/check-cpu b/BUILD/check-cpu index 2852aa98ef3..0720a53c54d 100755 --- a/BUILD/check-cpu +++ b/BUILD/check-cpu @@ -105,6 +105,12 @@ check_cpu () { *Athlon*64*) cpu_arg="athlon64"; ;; + *Turion*) + cpu_arg="athlon64"; + ;; + *Opteron*) + cpu_arg="athlon64"; + ;; *Athlon*) cpu_arg="athlon"; ;; diff --git a/Makefile.am b/Makefile.am index b92ee7ae0bc..14f2886c472 100644 --- a/Makefile.am +++ b/Makefile.am @@ -136,8 +136,10 @@ test-bt: @PERL@ ./mysql-test-run.pl --force --comment=rpl --suite=rpl -cd mysql-test ; MTR_BUILD_THREAD=auto \ @PERL@ ./mysql-test-run.pl --force --comment=partitions --suite=parts - -cd mysql-test ; MTR_BUILD_THREAD=auto \ - @PERL@ ./mysql-test-run.pl --force --comment=rowlock --suite=row_lock + +# Re-enable the "rowlock" suite when bug#28685 is fixed +# -cd mysql-test ; MTR_BUILD_THREAD=auto \ +# @PERL@ ./mysql-test-run.pl --force --comment=rowlock --suite=row_lock # Re-enable the "jp" suite when bug#28563 is fixed # -cd mysql-test ; MTR_BUILD_THREAD=auto \ diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 86f024a9b6f..546b9dee3f5 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -35,7 +35,8 @@ Supply your own create and query SQL statements, with 50 clients querying (200 selects for each): - mysqlslap --create="CREATE TABLE A (a int);INSERT INTO A (23)" \ + mysqlslap --delimiter=";" \ + --create="CREATE TABLE A (a int);INSERT INTO A VALUES (23)" \ --query="SELECT * FROM A" --concurrency=50 --iterations=200 Let the program build the query SQL statement with a table of two int @@ -554,7 +555,7 @@ static struct my_option my_long_options[] = GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", (uchar**) &host, (uchar**) &host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"iterations", 'i', "Number of times too run the tests.", (uchar**) &iterations, + {"iterations", 'i', "Number of times to run the tests.", (uchar**) &iterations, (uchar**) &iterations, 0, GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0}, {"number-char-cols", 'x', "Number of VARCHAR columns to create table with if specifying --auto-generate-sql ", diff --git a/config/ac-macros/compiler_flag.m4 b/config/ac-macros/compiler_flag.m4 index 88097c7a62e..ce2ce6cbdfa 100644 --- a/config/ac-macros/compiler_flag.m4 +++ b/config/ac-macros/compiler_flag.m4 @@ -7,7 +7,7 @@ AC_DEFUN([AC_SYS_COMPILER_FLAG], AC_CACHE_VAL(mysql_cv_option_$2, [ CFLAGS="[$]OLD_CFLAGS $1" - AC_TRY_RUN([int main(){exit(0);}],mysql_cv_option_$2=yes,mysql_cv_option_$2=no,mysql_cv_option_$2=no) + AC_TRY_LINK([int main(){exit(0);}],mysql_cv_option_$2=yes,mysql_cv_option_$2=no,mysql_cv_option_$2=no) ]) CFLAGS="[$]OLD_CFLAGS" diff --git a/config/ac-macros/misc.m4 b/config/ac-macros/misc.m4 index a20db96a950..0619a52fbbf 100644 --- a/config/ac-macros/misc.m4 +++ b/config/ac-macros/misc.m4 @@ -450,29 +450,6 @@ AC_DEFINE([HAVE_BOOL], [1], [bool is not defined by all C++ compilators]) fi ])dnl -AC_DEFUN([MYSQL_STACK_DIRECTION], - [AC_CACHE_CHECK(stack direction for C alloca, ac_cv_c_stack_direction, - [AC_TRY_RUN([#include - int find_stack_direction () - { - static char *addr = 0; - auto char dummy; - if (addr == 0) - { - addr = &dummy; - return find_stack_direction (); - } - else - return (&dummy > addr) ? 
1 : -1; - } - int main () - { - exit (find_stack_direction() < 0); - }], ac_cv_c_stack_direction=1, ac_cv_c_stack_direction=-1, - ac_cv_c_stack_direction=0)]) - AC_DEFINE_UNQUOTED(STACK_DIRECTION, $ac_cv_c_stack_direction) -])dnl - AC_DEFUN([MYSQL_CHECK_LONGLONG_TO_FLOAT], [ AC_MSG_CHECKING(if conversion of longlong to float works) @@ -488,7 +465,9 @@ int main() fprintf(file,"%g\n",f); fclose(file); return (0); -}], ac_cv_conv_longlong_to_float=`cat conftestval`, ac_cv_conv_longlong_to_float=0, ifelse([$2], , , ac_cv_conv_longlong_to_float=$2))])dnl +}], ac_cv_conv_longlong_to_float=`cat conftestval`, + ac_cv_conv_longlong_to_float=0, + ac_cv_conv_longlong_to_float="yes")])dnl # Cross compiling, assume can convert if test "$ac_cv_conv_longlong_to_float" = "1" -o "$ac_cv_conv_longlong_to_float" = "yes" then ac_cv_conv_longlong_to_float=yes diff --git a/configure.in b/configure.in index 3ba8f64cad1..cb71a67126b 100644 --- a/configure.in +++ b/configure.in @@ -230,14 +230,8 @@ AC_CHECK_PROGS(YACC, ['bison -y -p MYSQL']) AC_CHECK_PROG(PDFMANUAL, pdftex, manual.pdf) AC_CHECK_PROG(DVIS, tex, manual.dvi) -AC_MSG_CHECKING("return type of sprintf") - #check the return type of sprintf -case $SYSTEM_TYPE in - *netware*) - AC_DEFINE(SPRINTF_RETURNS_INT, [1]) AC_MSG_RESULT("int") - ;; - *) +AC_MSG_CHECKING("return type of sprintf") AC_TRY_RUN([ int main() { @@ -263,10 +257,12 @@ AC_TRY_RUN([ [AC_DEFINE(SPRINTF_RETURNS_PTR, [1], [Broken sprintf]) AC_MSG_RESULT("ptr")], [AC_DEFINE(SPRINTF_RETURNS_GARBAGE, [1], [Broken sprintf]) - AC_MSG_RESULT("garbage")]) - ]) - ;; -esac + AC_MSG_RESULT("garbage")] + )], + # Cross compile, assume POSIX + [AC_DEFINE(SPRINTF_RETURNS_INT, [1], [POSIX sprintf]) + AC_MSG_RESULT("int (we assume)")] +) AC_PATH_PROG(uname_prog, uname, no) @@ -1667,6 +1663,12 @@ AC_ARG_WITH(client-ldflags, [CLIENT_EXTRA_LDFLAGS=]) AC_SUBST(CLIENT_EXTRA_LDFLAGS) +AC_ARG_WITH(mysqld-libs, + [ --with-mysqld-libs Extra libraries to link with for mysqld], + [MYSQLD_EXTRA_LIBS=$withval], + [MYSQLD_EXTRA_LIBS=]) +AC_SUBST(MYSQLD_EXTRA_LIBS) + AC_ARG_WITH(lib-ccflags, [ --with-lib-ccflags Extra CC options for libraries], [LIB_EXTRA_CCFLAGS=$withval], @@ -1784,8 +1786,6 @@ MYSQL_TYPE_ACCEPT #---END: # Figure out what type of struct rlimit to use with setrlimit MYSQL_TYPE_STRUCT_RLIMIT -# Find where the stack goes -MYSQL_STACK_DIRECTION # We want to skip alloca on irix unconditionally. It may work on some version.. 
MYSQL_FUNC_ALLOCA # Do struct timespec have members tv_sec or ts_sec diff --git a/include/config-netware.h b/include/config-netware.h index f287699249b..f7f494b519c 100644 --- a/include/config-netware.h +++ b/include/config-netware.h @@ -112,9 +112,6 @@ extern "C" { /* signal by closing the sockets */ #define SIGNAL_WITH_VIO_CLOSE 1 -/* On NetWare, stack grows towards lower address*/ -#define STACK_DIRECTION -1 - /* On NetWare, we need to set stack size for threads, otherwise default 16K is used */ #define NW_THD_STACKSIZE 65536 diff --git a/include/config-win.h b/include/config-win.h index bc2ae60f137..fa5c15b0668 100644 --- a/include/config-win.h +++ b/include/config-win.h @@ -250,8 +250,6 @@ inline double ulonglong2double(ulonglong value) #endif -#define STACK_DIRECTION -1 - /* Optimized store functions for Intel x86 */ #ifndef _WIN64 diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am index 1e9371e5bf2..774008bc106 100644 --- a/mysql-test/Makefile.am +++ b/mysql-test/Makefile.am @@ -71,7 +71,7 @@ dist-hook: $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(distdir)/std_data/ndb_backup50 $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(distdir)/std_data/ndb_backup51 $(INSTALL_DATA) $(srcdir)/lib/*.pl $(distdir)/lib - -rm -rf `find $(distdir)/suite -type d -name SCCS` + -rm -rf `find $(distdir)/suite -type d -name SCCS` $(distdir)/suite/row_lock install-data-local: $(mkinstalldirs) \ @@ -113,7 +113,7 @@ install-data-local: $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup50 $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup51 $(INSTALL_DATA) $(srcdir)/lib/*.pl $(DESTDIR)$(testdir)/lib - for f in `(cd $(srcdir); find suite -type f | grep -v SCCS)`; \ + for f in `(cd $(srcdir); find suite -type f | egrep -v 'SCCS|row_lock')`; \ do \ d=$(DESTDIR)$(testdir)/`dirname $$f`; \ mkdir -p $$d ; \ diff --git a/mysql-test/include/federated.inc b/mysql-test/include/federated.inc index dde24cd8198..925ecdd9682 100644 --- a/mysql-test/include/federated.inc +++ b/mysql-test/include/federated.inc @@ -1,3 +1,4 @@ +--source include/have_log_bin.inc --source include/not_embedded.inc --source ./include/have_federated_db.inc diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index cd423f93652..dc272d79cdb 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -3815,8 +3815,7 @@ sub mysqld_arguments ($$$$) { "%s--log-slow-queries=%s-slow.log", $prefix, $log_base_path); # Check if "extra_opt" contains --skip-log-bin - my $skip_binlog= grep(/^--skip-log-bin/, @$extra_opt); - + my $skip_binlog= grep(/^--skip-log-bin/, @$extra_opt, @opt_extra_mysqld_opt); if ( $mysqld->{'type'} eq 'master' ) { if (! 
($opt_skip_master_binlog || $skip_binlog) ) diff --git a/mysql-test/ndb/ndb_config_1_node.ini b/mysql-test/ndb/ndb_config_1_node.ini index 787bf82d391..4e0be7796dc 100644 --- a/mysql-test/ndb/ndb_config_1_node.ini +++ b/mysql-test/ndb/ndb_config_1_node.ini @@ -10,7 +10,8 @@ DataDir= CHOOSE_FILESYSTEM MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 3 +NoOfFragmentLogFiles= 8 +FragmentLogFileSize= 6M DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory # diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini index 42769d3f4a4..6bcb148d471 100644 --- a/mysql-test/ndb/ndb_config_2_node.ini +++ b/mysql-test/ndb/ndb_config_2_node.ini @@ -10,8 +10,10 @@ DataDir= CHOOSE_FILESYSTEM MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 3 +NoOfFragmentLogFiles= 4 +FragmentLogFileSize=12M DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory +ODirect= 1 # the following parametes just function as a small regression # test that the parameter exists InitialNoOfOpenFiles= 27 diff --git a/mysql-test/r/date_formats.result b/mysql-test/r/date_formats.result index 7375260d863..62eeb4b2fbf 100644 --- a/mysql-test/r/date_formats.result +++ b/mysql-test/r/date_formats.result @@ -1,14 +1,12 @@ drop table if exists t1; -SHOW GLOBAL VARIABLES LIKE "%_format%"; +SHOW GLOBAL VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format"; Variable_name Value -binlog_format date_format %d.%m.%Y datetime_format %Y-%m-%d %H:%i:%s default_week_format 0 time_format %H.%i.%s -SHOW SESSION VARIABLES LIKE "%_format%"; +SHOW SESSION VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format"; Variable_name Value -binlog_format date_format %d.%m.%Y datetime_format %Y-%m-%d %H:%i:%s default_week_format 0 @@ -30,9 +28,8 @@ set datetime_format= '%H:%i:%s %Y-%m-%d'; set datetime_format= '%H:%i:%s.%f %m-%d-%Y'; set datetime_format= '%h:%i:%s %p %Y-%m-%d'; set datetime_format= '%h:%i:%s.%f %p %Y-%m-%d'; -SHOW SESSION VARIABLES LIKE "%format"; +SHOW SESSION VARIABLES WHERE Variable_name LIKE "%format" AND Variable_name != "binlog_format"; Variable_name Value -binlog_format date_format %m-%d-%Y datetime_format %h:%i:%s.%f %p %Y-%m-%d default_week_format 0 diff --git a/mysql-test/r/have_log_bin.require b/mysql-test/r/have_log_bin.require index cacdf8df0ce..d4fd77e4f8d 100644 --- a/mysql-test/r/have_log_bin.require +++ b/mysql-test/r/have_log_bin.require @@ -1,2 +1,2 @@ Variable_name Value -have_log_bin ON +log_bin ON diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index 16eb36bac10..c84c7fffd66 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -770,35 +770,6 @@ c abc ab d ab ab e abc abc DROP TABLE t1; -End of 5.0 tests -CREATE TABLE t1 (a VARCHAR(255) NOT NULL, -CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb; -CREATE TABLE t2(a VARCHAR(255) NOT NULL, -b VARCHAR(255) NOT NULL, -c VARCHAR(255) NOT NULL, -CONSTRAINT pk_b_c_id PRIMARY KEY (b,c), -CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb; -drop table t1, t2; -create table t1 (a int not null primary key, b int) engine=ndb; -insert into t1 values(1,1),(2,2),(3,3); -create table t2 like t1; -insert into t2 select * from t1; -select * from t1 order by a; -a b -1 1 -2 2 -3 3 -select * from t2 order by a; -a b -1 1 -2 2 -3 3 -drop table t1, t2; -create 
table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; -create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; -create table t2 like t1; -rename table t1 to t10, t2 to t20; -drop table t10,t20; create table t1 (a int not null primary key, b int not null) engine=ndb; create table t2 (a int not null primary key, b int not null) engine=ndb; insert into t1 values (1,10), (2,20), (3,30); @@ -867,7 +838,36 @@ select * from t1 order by a; a b 1 10 2 10 -3 1 +3 30 4 1 drop table t1,t2; +End of 5.0 tests +CREATE TABLE t1 (a VARCHAR(255) NOT NULL, +CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb; +CREATE TABLE t2(a VARCHAR(255) NOT NULL, +b VARCHAR(255) NOT NULL, +c VARCHAR(255) NOT NULL, +CONSTRAINT pk_b_c_id PRIMARY KEY (b,c), +CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb; +drop table t1, t2; +create table t1 (a int not null primary key, b int) engine=ndb; +insert into t1 values(1,1),(2,2),(3,3); +create table t2 like t1; +insert into t2 select * from t1; +select * from t1 order by a; +a b +1 1 +2 2 +3 3 +select * from t2 order by a; +a b +1 1 +2 2 +3 3 +drop table t1, t2; +create table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; +create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; +create table t2 like t1; +rename table t1 to t10, t2 to t20; +drop table t10,t20; End of 5.1 tests diff --git a/mysql-test/t/date_formats.test b/mysql-test/t/date_formats.test index fe39cd95753..a121439c12e 100644 --- a/mysql-test/t/date_formats.test +++ b/mysql-test/t/date_formats.test @@ -6,10 +6,8 @@ drop table if exists t1; --enable_warnings ---replace_result ROW STATEMENT MIXED -SHOW GLOBAL VARIABLES LIKE "%_format%"; ---replace_result ROW STATEMENT MIXED -SHOW SESSION VARIABLES LIKE "%_format%"; +SHOW GLOBAL VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format"; +SHOW SESSION VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format"; # # Test setting a lot of different formats to see which formats are accepted and @@ -36,8 +34,7 @@ set datetime_format= '%H:%i:%s.%f %m-%d-%Y'; set datetime_format= '%h:%i:%s %p %Y-%m-%d'; set datetime_format= '%h:%i:%s.%f %p %Y-%m-%d'; ---replace_result ROW STATEMENT MIXED -SHOW SESSION VARIABLES LIKE "%format"; +SHOW SESSION VARIABLES WHERE Variable_name LIKE "%format" AND Variable_name != "binlog_format"; --error 1231 SET time_format='%h:%i:%s'; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 1b8d68b26ef..b72d248a774 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -43,7 +43,6 @@ rpl_ndb_ddl : BUG#28798 2007-05-31 lars Valgrind failure in NDB #rpl_ndb_dd_advance : Bug#25913 rpl_ndb_dd_advance fails randomly -rpl_ndb_stm_innodb : Bug#26783 ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms im_options_set : Bug#20294: Instance manager tests fail randomly diff --git a/mysql-test/t/flush_block_commit_notembedded.test b/mysql-test/t/flush_block_commit_notembedded.test index 4650a5a15a8..25eb093226e 100644 --- a/mysql-test/t/flush_block_commit_notembedded.test +++ b/mysql-test/t/flush_block_commit_notembedded.test @@ -3,6 +3,8 @@ # We verify that we did not introduce a deadlock. # This is intended to mimick how mysqldump and innobackup work. 
+-- source include/have_log_bin.inc + # And it requires InnoDB -- source include/not_embedded.inc -- source include/have_innodb.inc diff --git a/mysql-test/t/mysqlbinlog.test b/mysql-test/t/mysqlbinlog.test index e4cbd2938fb..f4785baa9db 100644 --- a/mysql-test/t/mysqlbinlog.test +++ b/mysql-test/t/mysqlbinlog.test @@ -2,6 +2,8 @@ # TODO: Need to look at making a row based version once the new row based client is completed. [jbm] -- source include/have_binlog_format_mixed_or_statement.inc +-- source include/have_log_bin.inc + # Embedded server doesn't support binlogging -- source include/not_embedded.inc diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 487e16bbe46..ca8d6a47fa2 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -1,6 +1,9 @@ # Embedded server doesn't support external clients --source include/not_embedded.inc +# Binlog is required +--source include/have_log_bin.inc + --disable_warnings DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3; drop database if exists mysqldump_test_db; diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test index d7c9945fe18..c4a8847d19b 100644 --- a/mysql-test/t/mysqltest.test +++ b/mysql-test/t/mysqltest.test @@ -1,3 +1,5 @@ +-- source include/have_log_bin.inc + # This test should work in embedded server after mysqltest is fixed -- source include/not_embedded.inc diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 792006af621..870c7435d3e 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -752,46 +752,6 @@ INSERT INTO t1 VALUES SELECT * FROM t1 ORDER BY a; DROP TABLE t1; -# End of 5.0 tests ---echo End of 5.0 tests - - -# -# Bug #18483 Cannot create table with FK constraint -# ndb does not support foreign key constraint, it is silently ignored -# in line with other storage engines -# -CREATE TABLE t1 (a VARCHAR(255) NOT NULL, - CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb; -CREATE TABLE t2(a VARCHAR(255) NOT NULL, - b VARCHAR(255) NOT NULL, - c VARCHAR(255) NOT NULL, - CONSTRAINT pk_b_c_id PRIMARY KEY (b,c), - CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb; -drop table t1, t2; - -# bug#24301 -create table t1 (a int not null primary key, b int) engine=ndb; -insert into t1 values(1,1),(2,2),(3,3); -create table t2 like t1; -insert into t2 select * from t1; -select * from t1 order by a; -select * from t2 order by a; -drop table t1, t2; - -# create table if not exists ---disable_warnings -create table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; -create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; ---enable_warnings - -# create like -create table t2 like t1; - -# multi rename -rename table t1 to t10, t2 to t20; -drop table t10,t20; - # delete create table t1 (a int not null primary key, b int not null) engine=ndb; create table t2 (a int not null primary key, b int not null) engine=ndb; @@ -832,4 +792,44 @@ update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3; select * from t1 order by a; drop table t1,t2; +# End of 5.0 tests +--echo End of 5.0 tests + + +# +# Bug #18483 Cannot create table with FK constraint +# ndb does not support foreign key constraint, it is silently ignored +# in line with other storage engines +# +CREATE TABLE t1 (a VARCHAR(255) NOT NULL, + CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb; +CREATE TABLE t2(a VARCHAR(255) NOT NULL, + b VARCHAR(255) NOT NULL, + c VARCHAR(255) NOT NULL, + CONSTRAINT 
pk_b_c_id PRIMARY KEY (b,c), + CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb; +drop table t1, t2; + +# bug#24301 +create table t1 (a int not null primary key, b int) engine=ndb; +insert into t1 values(1,1),(2,2),(3,3); +create table t2 like t1; +insert into t2 select * from t1; +select * from t1 order by a; +select * from t2 order by a; +drop table t1, t2; + +# create table if not exists +--disable_warnings +create table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; +create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb; +--enable_warnings + +# create like +create table t2 like t1; + +# multi rename +rename table t1 to t10, t2 to t20; +drop table t10,t20; + --echo End of 5.1 tests diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index 3239109b1be..394f44bbc83 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -1,4 +1,5 @@ -- source include/not_embedded.inc +-- source include/have_log_bin.inc # # SQL Syntax for Prepared Statements test # diff --git a/mysql-test/t/sp_trans.test b/mysql-test/t/sp_trans.test index a79f6c7e7e0..bd5e6e33cf7 100644 --- a/mysql-test/t/sp_trans.test +++ b/mysql-test/t/sp_trans.test @@ -2,6 +2,7 @@ # tests that require InnoDB... # +-- source include/have_log_bin.inc -- source include/have_innodb.inc --disable_warnings diff --git a/mysql-test/t/user_var-binlog.test b/mysql-test/t/user_var-binlog.test index 3ce01a0927b..fc534cc357a 100644 --- a/mysql-test/t/user_var-binlog.test +++ b/mysql-test/t/user_var-binlog.test @@ -3,6 +3,7 @@ # TODO: Create row based version once $MYSQL_BINLOG has new RB version # Embedded server does not support binlogging --source include/not_embedded.inc +--source include/have_log_bin.inc # Check that user variables are binlogged correctly (BUG#3875) create table t1 (a varchar(50)); diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index 4c98882ac7e..473aabe0c01 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -325,6 +325,9 @@ static int keycache_pthread_cond_signal(pthread_cond_t *cond); #endif /* defined(KEYCACHE_DEBUG) */ #if !defined(DBUG_OFF) +#if defined(inline) +#undef inline +#endif #define inline /* disabled inline for easier debugging */ static int fail_block(BLOCK_LINK *block); static int fail_hlink(HASH_LINK *hlink); diff --git a/plugin/daemon_example/Makefile.am b/plugin/daemon_example/Makefile.am index 21a86f8973e..92b1ab040fb 100644 --- a/plugin/daemon_example/Makefile.am +++ b/plugin/daemon_example/Makefile.am @@ -36,3 +36,6 @@ noinst_LIBRARIES = @plugin_daemon_example_static_target@ libdaemon_example_a_CXXFLAGS = $(AM_CFLAGS) libdaemon_example_a_CFLAGS = $(AM_CFLAGS) libdaemon_example_a_SOURCES= daemon_example.cc + +# Don't update the files from bitkeeper +%::SCCS/s.% diff --git a/plugin/fulltext/Makefile.am b/plugin/fulltext/Makefile.am index d4ec097efbd..ec033018a00 100644 --- a/plugin/fulltext/Makefile.am +++ b/plugin/fulltext/Makefile.am @@ -22,3 +22,6 @@ pkglib_LTLIBRARIES= mypluglib.la mypluglib_la_SOURCES= plugin_example.c mypluglib_la_LDFLAGS= -module -rpath $(pkglibdir) mypluglib_la_CFLAGS= -DMYSQL_DYNAMIC_PLUGIN + +# Don't update the files from bitkeeper +%::SCCS/s.% diff --git a/sql/Makefile.am b/sql/Makefile.am index d280b22f493..fdf3f88c1f8 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -38,7 +38,8 @@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \ @pstack_libs@ \ @mysql_plugin_libs@ \ $(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \ - $(yassl_libs) $(openssl_libs) + 
$(yassl_libs) $(openssl_libs) \ + @MYSQLD_EXTRA_LIBS@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ item_strfunc.h item_timefunc.h \ diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index aeaea90feb6..2eb3826ebee 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -278,11 +278,6 @@ inline int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans, bool force_release) { -#ifdef NOT_USED - int m_batch_execute= 0; - if (m_batch_execute) - return 0; -#endif h->release_completed_operations(trans, force_release); return h->m_ignore_no_key ? execute_no_commit_ignore_no_key(h,trans) : @@ -294,11 +289,6 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans, inline int execute_commit(ha_ndbcluster *h, NdbTransaction *trans) { -#ifdef NOT_USED - int m_batch_execute= 0; - if (m_batch_execute) - return 0; -#endif return trans->execute(NdbTransaction::Commit, NdbOperation::AbortOnError, h->m_force_send); @@ -307,11 +297,6 @@ int execute_commit(ha_ndbcluster *h, NdbTransaction *trans) inline int execute_commit(THD *thd, NdbTransaction *trans) { -#ifdef NOT_USED - int m_batch_execute= 0; - if (m_batch_execute) - return 0; -#endif return trans->execute(NdbTransaction::Commit, NdbOperation::AbortOnError, thd->variables.ndb_force_send); @@ -321,11 +306,6 @@ inline int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans, bool force_release) { -#ifdef NOT_USED - int m_batch_execute= 0; - if (m_batch_execute) - return 0; -#endif h->release_completed_operations(trans, force_release); return trans->execute(NdbTransaction::NoCommit, NdbOperation::AO_IgnoreError, @@ -2925,7 +2905,8 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data) * If IGNORE the ignore constraint violations on primary and unique keys, * but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE */ - if (m_ignore_dup_key && thd->lex->sql_command == SQLCOM_UPDATE) + if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE || + thd->lex->sql_command == SQLCOM_UPDATE_MULTI)) { int peek_res= peek_indexed_rows(new_data, pk_update); @@ -4267,8 +4248,6 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, extern MASTER_INFO *active_mi; static int ndbcluster_update_apply_status(THD *thd, int do_update) { - return 0; - Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; NDBDICT *dict= ndb->getDictionary(); diff --git a/sql/my_decimal.h b/sql/my_decimal.h index f9ba99a4509..800ae23425b 100644 --- a/sql/my_decimal.h +++ b/sql/my_decimal.h @@ -40,6 +40,7 @@ C_MODE_END /* the number of digits that my_decimal can possibly contain */ #define DECIMAL_MAX_POSSIBLE_PRECISION (DECIMAL_BUFF_LENGTH * 9) + /* maximum guaranteed precision of number in decimal digits (number of our digits * number of decimal digits in one our big digit - number of decimal diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 31e3196ded1..f5b0220a46c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5022,17 +5022,14 @@ bool check_merge_table_access(THD *thd, char *db, Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ -#if STACK_DIRECTION < 0 -#define used_stack(A,B) (long) (A - B) -#else -#define used_stack(A,B) (long) (B - A) -#endif +#ifndef EMBEDDED_LIBRARY + +#define used_stack(A,B) (long)(A > B ? A - B : B - A) #ifndef DBUG_OFF long max_stack_used; #endif -#ifndef EMBEDDED_LIBRARY /* Note: The 'buf' parameter is necessary, even if it is unused here. 
- fix_fields functions has a "dummy" buffer large enough for the diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h index 119958d0ce0..d0bd8be16a3 100644 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -64,6 +64,7 @@ #define CFG_DB_FILESYSTEM_PATH 125 #define CFG_DB_NO_REDOLOG_FILES 126 +#define CFG_DB_REDOLOG_FILE_SIZE 140 #define CFG_DB_LCP_DISC_PAGES_TUP 127 #define CFG_DB_LCP_DISC_PAGES_TUP_SR 128 @@ -81,6 +82,8 @@ #define CFG_DB_BACKUP_WRITE_SIZE 136 #define CFG_DB_BACKUP_MAX_WRITE_SIZE 139 +#define CFG_DB_WATCHDOG_INTERVAL_INITIAL 141 + #define CFG_LOG_DESTINATION 147 #define CFG_DB_DISCLESS 148 @@ -113,6 +116,8 @@ #define CFG_DB_MEMREPORT_FREQUENCY 166 +#define CFG_DB_O_DIRECT 168 + #define CFG_DB_SGA 198 /* super pool mem */ #define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */ diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in index dd4303f949c..2fc594b3f5a 100644 --- a/storage/ndb/include/ndb_global.h.in +++ b/storage/ndb/include/ndb_global.h.in @@ -144,4 +144,6 @@ extern "C" { #define MAX(x,y) (((x)>(y))?(x):(y)) #endif +#define NDB_O_DIRECT_WRITE_ALIGNMENT 512 + #endif diff --git a/storage/ndb/include/portlib/NdbTick.h b/storage/ndb/include/portlib/NdbTick.h index 59f580de38e..70c36fdfd1e 100644 --- a/storage/ndb/include/portlib/NdbTick.h +++ b/storage/ndb/include/portlib/NdbTick.h @@ -37,9 +37,6 @@ NDB_TICKS NdbTick_CurrentMillisecond(void); */ int NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros); - /*#define TIME_MEASUREMENT*/ -#ifdef TIME_MEASUREMENT - struct MicroSecondTimer { NDB_TICKS seconds; NDB_TICKS micro_seconds; @@ -54,7 +51,6 @@ struct MicroSecondTimer { NDB_TICKS NdbTick_getMicrosPassed(struct MicroSecondTimer start, struct MicroSecondTimer stop); int NdbTick_getMicroTimer(struct MicroSecondTimer* time_now); -#endif #ifdef __cplusplus } diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile index c9b4507c4a7..b67150b71fa 100644 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile +++ b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile @@ -1,6 +1,6 @@ TARGET = mgmapi_logevent -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 +17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp rename to storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile index 95b43b11f6b..fd9499c7a68 100644 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile +++ b/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile @@ -1,6 +1,6 @@ TARGET = mgmapi_logevent2 -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 
+17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp rename to storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile index 7f0ca52fcc3..9757df3ceab 100644 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile +++ b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile @@ -1,6 +1,6 @@ TARGET = ndbapi_simple_dual -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 +17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp rename to storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile index c38975381f5..975563b9508 100644 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile +++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile @@ -1,6 +1,6 @@ TARGET = ndbapi_simple_index -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 +17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp rename to storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp diff --git a/storage/ndb/src/common/portlib/NdbTick.c b/storage/ndb/src/common/portlib/NdbTick.c index 238e9b1956d..7e54984794f 100644 --- a/storage/ndb/src/common/portlib/NdbTick.c +++ b/storage/ndb/src/common/portlib/NdbTick.c @@ -15,7 +15,7 @@ #include -#include "NdbTick.h" +#include #define NANOSEC_PER_SEC 1000000000 #define MICROSEC_PER_SEC 1000000 @@ -71,7 +71,6 @@ NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){ } #endif -#ifdef TIME_MEASUREMENT int NdbTick_getMicroTimer(struct MicroSecondTimer* input_timer) { @@ -102,4 +101,3 @@ NdbTick_getMicrosPassed(struct MicroSecondTimer start, } return ret_value; } -#endif diff --git 
a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp index 3e7589a54fe..5f5f3c17b2d 100644 --- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp @@ -818,6 +818,7 @@ TransporterRegistry::performReceive() { Uint32 * ptr; Uint32 sz = t->getReceiveData(&ptr); + transporter_recv_from(callbackObj, nodeId); Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]); t->updateReceiveDataPtr(szUsed); } diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt index b3405679978..67eb89f850f 100644 --- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt @@ -1,5 +1,5 @@ Next QMGR 1 -Next NDBCNTR 1001 +Next NDBCNTR 1002 Next NDBFS 2000 Next DBACC 3002 Next DBTUP 4029 @@ -523,3 +523,4 @@ Dbtup: NDBCNTR: 1000: Crash insertion on SystemError::CopyFragRef +1001: Delay sending NODE_FAILREP (to own node), until error is cleared diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index d86c22024cd..439a2322bd3 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -2771,6 +2771,8 @@ Backup::openFiles(Signal* signal, BackupRecordPtr ptr) c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); filePtr.p->m_flags |= BackupFile::BF_OPENING; + if (c_defaults.m_o_direct) + req->fileFlags |= FsOpenReq::OM_DIRECT; req->userPointer = filePtr.i; FsOpenReq::setVersion(req->fileNumber, 2); FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); @@ -3745,12 +3747,31 @@ Backup::OperationRecord::newFragment(Uint32 tableId, Uint32 fragNo) } bool -Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo) +Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record) { Uint32 * tmp; const Uint32 footSz = sizeof(BackupFormat::DataFile::FragmentFooter) >> 2; + Uint32 sz = footSz + 1; - if(dataBuffer.getWritePtr(&tmp, footSz + 1)) { + if (fill_record) + { + Uint32 * new_tmp; + if (!dataBuffer.getWritePtr(&tmp, sz)) + return false; + new_tmp = tmp + sz; + + if ((UintPtr)new_tmp & (sizeof(Page32)-1)) + { + /* padding is needed to get full write */ + new_tmp += 2 /* to fit empty header minimum 2 words*/; + new_tmp = (Uint32 *)(((UintPtr)new_tmp + sizeof(Page32)-1) & + ~(UintPtr)(sizeof(Page32)-1)); + /* new write sz */ + sz = new_tmp - tmp; + } + } + + if(dataBuffer.getWritePtr(&tmp, sz)) { jam(); * tmp = 0; // Finish record stream tmp++; @@ -3762,7 +3783,17 @@ Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo) foot->FragmentNo = htonl(fragNo); foot->NoOfRecords = htonl(noOfRecords); foot->Checksum = htonl(0); - dataBuffer.updateWritePtr(footSz + 1); + + if (sz != footSz + 1) + { + tmp += footSz; + memset(tmp, 0, (sz - footSz - 1) * 4); + *tmp = htonl(BackupFormat::EMPTY_ENTRY); + tmp++; + *tmp = htonl(sz - footSz - 1); + } + + dataBuffer.updateWritePtr(sz); return true; }//if return false; @@ -3864,8 +3895,13 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) return; }//if + BackupRecordPtr ptr LINT_SET_PTR; + c_backupPool.getPtr(ptr, filePtr.p->backupPtr); + OperationRecord & op = filePtr.p->operation; - if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo)) { + if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo, + c_defaults.m_o_direct)) + { jam(); signal->theData[0] = 
BackupContinueB::BUFFER_FULL_FRAG_COMPLETE; signal->theData[1] = filePtr.i; @@ -3875,9 +3911,6 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD; - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - if (ptr.p->is_lcp()) { ptr.p->slaveState.setState(STOPPING); @@ -4914,6 +4947,8 @@ Backup::lcp_open_file(Signal* signal, BackupRecordPtr ptr) FsOpenReq::OM_CREATE | FsOpenReq::OM_APPEND | FsOpenReq::OM_AUTOSYNC; + if (c_defaults.m_o_direct) + req->fileFlags |= FsOpenReq::OM_DIRECT; FsOpenReq::v2_setCount(req->fileNumber, 0xFFFFFFFF); req->auto_sync_size = c_defaults.m_disk_synch_size; diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp index 32f2e14ac92..3fd9b2967fd 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp @@ -240,7 +240,7 @@ public: * Once per fragment */ bool newFragment(Uint32 tableId, Uint32 fragNo); - bool fragComplete(Uint32 tableId, Uint32 fragNo); + bool fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record); /** * Once per scan frag (next) req/conf @@ -534,6 +534,7 @@ public: Uint32 m_disk_write_speed; Uint32 m_disk_synch_size; Uint32 m_diskless; + Uint32 m_o_direct; }; /** diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp index ace9dfe5c79..20f8f6650be 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp @@ -32,7 +32,8 @@ struct BackupFormat { TABLE_LIST = 4, TABLE_DESCRIPTION = 5, GCP_ENTRY = 6, - FRAGMENT_INFO = 7 + FRAGMENT_INFO = 7, + EMPTY_ENTRY = 8 }; struct FileHeader { @@ -93,6 +94,13 @@ struct BackupFormat { Uint32 NoOfRecords; Uint32 Checksum; }; + + /* optional padding for O_DIRECT */ + struct EmptyEntry { + Uint32 SectionType; + Uint32 SectionLength; + /* not used data */ + }; }; /** diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp index 4faa02e494f..2cd2a8a2bee 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -148,10 +148,13 @@ Backup::execREAD_CONFIG_REQ(Signal* signal) c_defaults.m_disk_write_speed = 10 * (1024 * 1024); c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024); c_defaults.m_disk_synch_size = 4 * (1024 * 1024); - + c_defaults.m_o_direct = true; + Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0; ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_defaults.m_diskless)); + ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, + &c_defaults.m_o_direct); ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR, &c_defaults.m_disk_write_speed_sr); ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED, @@ -204,7 +207,7 @@ Backup::execREAD_CONFIG_REQ(Signal* signal) / sizeof(Page32); // We need to allocate an additional of 2 pages. 1 page because of a bug in // ArrayPool and another one for DICTTAINFO. 
- c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2); + c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true); { // Init all tables SLList tables(c_tablePool); diff --git a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp index d26f36ccf40..bb0bbd6d770 100644 --- a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp +++ b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp @@ -270,8 +270,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){ * ptr = &Tp[Tr]; - DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d", - Tr, Tw, Ts, Tm, sz1, * sz)); + DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d", + Tr, Tmw, Ts, Tm, sz1, * sz)); return true; } @@ -279,8 +279,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){ if(!m_eof){ * _eof = false; - DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> false", - Tr, Tw, Ts, Tm, sz1)); + DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> false", + Tr, Tmw, Ts, Tm, sz1)); return false; } @@ -289,8 +289,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){ * _eof = true; * ptr = &Tp[Tr]; - DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d eof", - Tr, Tw, Ts, Tm, sz1, * sz)); + DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d eof", + Tr, Tmw, Ts, Tm, sz1, * sz)); return false; } @@ -316,13 +316,13 @@ FsBuffer::getWritePtr(Uint32 ** ptr, Uint32 sz){ if(sz1 > sz){ // Note at least 1 word of slack * ptr = &Tp[Tw]; - DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> true", - sz, Tr, Tw, Ts, sz1)); + DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> true", + sz, Tw, sz1)); return true; } - DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> false", - sz, Tr, Tw, Ts, sz1)); + DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> false", + sz, Tw, sz1)); return false; } @@ -339,11 +339,15 @@ FsBuffer::updateWritePtr(Uint32 sz){ m_free -= sz; if(Tnew < Ts){ m_writeIndex = Tnew; + DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d", + sz, m_writeIndex)); return; } memcpy(Tp, &Tp[Ts], (Tnew - Ts) << 2); m_writeIndex = Tnew - Ts; + DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d", + sz, m_writeIndex)); } inline diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index ac3acdc6778..edc8c0131db 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -698,6 +698,9 @@ void Dbdict::execFSCLOSECONF(Signal* signal) case FsConnectRecord::OPEN_READ_SCHEMA2: openSchemaFile(signal, 1, fsPtr.i, false, false); break; + case FsConnectRecord::OPEN_READ_TAB_FILE2: + openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false); + break; default: jamLine((fsPtr.p->fsState & 0xFFF)); ndbrequire(false); @@ -1073,8 +1076,11 @@ void Dbdict::readTableConf(Signal* signal, void Dbdict::readTableRef(Signal* signal, FsConnectRecordPtr fsPtr) { + /** + * First close corrupt file + */ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2; - openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false); + closeFile(signal, fsPtr.p->filePtr, fsPtr.i); return; }//Dbdict::readTableRef() diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 1fe932aaae8..bc14eec1f98 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ 
b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -4741,12 +4741,18 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr) jam(); const Uint32 nodeId = failedNodePtr.i; - if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){ + if (isMaster() && c_lcpState.m_participatingLQH.get(failedNodePtr.i)) + { /*----------------------------------------------------*/ /* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE */ /* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */ /* THE NODE HAVE MISSED A LOCAL CHECKPOINT. */ /*----------------------------------------------------*/ + + /** + * Bug#28717, Only master should do this, as this status is copied + * to other nodes + */ switch (failedNodePtr.p->activeStatus) { case Sysfile::NS_Active: jam(); diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index ba146fce005..64d214d472b 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -71,7 +71,6 @@ class Dbtup; /* CONSTANTS OF THE LOG PAGES */ /* ------------------------------------------------------------------------- */ #define ZPAGE_HEADER_SIZE 32 -#define ZNO_MBYTES_IN_FILE 16 #define ZPAGE_SIZE 8192 #define ZPAGES_IN_MBYTE 32 #define ZTWOLOG_NO_PAGES_IN_MBYTE 5 @@ -115,9 +114,6 @@ class Dbtup; /* ------------------------------------------------------------------------- */ /* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. */ /* ------------------------------------------------------------------------- */ -#define ZOPEN_READ 0 -#define ZOPEN_WRITE 1 -#define ZOPEN_READ_WRITE 2 #define ZVAR_NO_LOG_PAGE_WORD 1 #define ZLIST_OF_PAIRS 0 #define ZLIST_OF_PAIRS_SYNCH 16 @@ -142,7 +138,7 @@ class Dbtup; /* IN THE MBYTE. */ /* ------------------------------------------------------------------------- */ #define ZFD_HEADER_SIZE 3 -#define ZFD_PART_SIZE 48 +#define ZFD_MBYTE_SIZE 3 #define ZLOG_HEAD_SIZE 8 #define ZNEXT_LOG_SIZE 2 #define ZABORT_LOG_SIZE 3 @@ -169,7 +165,6 @@ class Dbtup; #define ZPOS_LOG_TYPE 0 #define ZPOS_NO_FD 1 #define ZPOS_FILE_NO 2 -#define ZMAX_LOG_FILES_IN_PAGE_ZERO 40 /* ------------------------------------------------------------------------- */ /* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */ /* LOG RECORD. */ @@ -1437,17 +1432,17 @@ public: * header of each log file. That information is used during * system restart to find the tail of the log. */ - UintR logLastPrepRef[16]; + UintR *logLastPrepRef; /** * The max global checkpoint completed before the mbyte in the * log file was started. One variable per mbyte. */ - UintR logMaxGciCompleted[16]; + UintR *logMaxGciCompleted; /** * The max global checkpoint started before the mbyte in the log * file was started. One variable per mbyte. */ - UintR logMaxGciStarted[16]; + UintR *logMaxGciStarted; /** * This variable contains the file name as needed by the file * system when opening the file. 
@@ -2163,6 +2158,7 @@ private: void execSTART_RECREF(Signal* signal); void execGCP_SAVEREQ(Signal* signal); + void execFSOPENREF(Signal* signal); void execFSOPENCONF(Signal* signal); void execFSCLOSECONF(Signal* signal); void execFSWRITECONF(Signal* signal); @@ -2671,6 +2667,8 @@ private: LogPartRecord *logPartRecord; LogPartRecordPtr logPartPtr; UintR clogPartFileSize; + Uint32 clogFileSize; // In MBYTE + Uint32 cmaxLogFilesInPageZero; // // Configurable LogFileRecord *logFileRecord; @@ -2678,13 +2676,15 @@ private: UintR cfirstfreeLogFile; UintR clogFileFileSize; -#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */ +#define ZLFO_MIN_FILE_SIZE 256 +// RedoBuffer/32K minimum ZLFO_MIN_FILE_SIZE LogFileOperationRecord *logFileOperationRecord; LogFileOperationRecordPtr lfoPtr; UintR cfirstfreeLfo; UintR clfoFileSize; LogPageRecord *logPageRecord; + void *logPageRecordUnaligned; LogPageRecordPtr logPagePtr; UintR cfirstfreeLogPage; UintR clogPageFileSize; @@ -2695,7 +2695,7 @@ private: UintR cfirstfreePageRef; UintR cpageRefFileSize; -#define ZSCANREC_FILE_SIZE 100 +// Configurable ArrayPool c_scanRecordPool; ScanRecordPtr scanptr; UintR cscanNoFreeRec; @@ -2888,6 +2888,7 @@ private: UintR ctransidHash[1024]; Uint32 c_diskless; + Uint32 c_o_direct; Uint32 c_error_insert_table_id; public: diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp index c054c227c8e..d6411ee1cb9 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp @@ -30,11 +30,11 @@ void Dblqh::initData() cgcprecFileSize = ZGCPREC_FILE_SIZE; chostFileSize = MAX_NDB_NODES; clcpFileSize = ZNO_CONCURRENT_LCP; - clfoFileSize = ZLFO_FILE_SIZE; + clfoFileSize = 0; clogFileFileSize = 0; clogPartFileSize = ZLOG_PART_FILE_SIZE; cpageRefFileSize = ZPAGE_REF_FILE_SIZE; - cscanrecFileSize = ZSCANREC_FILE_SIZE; + cscanrecFileSize = 0; ctabrecFileSize = 0; ctcConnectrecFileSize = 0; ctcNodeFailrecFileSize = MAX_NDB_NODES; @@ -49,6 +49,7 @@ void Dblqh::initData() logFileRecord = 0; logFileOperationRecord = 0; logPageRecord = 0; + logPageRecordUnaligned= 0; pageRefRecord = 0; tablerec = 0; tcConnectionrec = 0; @@ -60,6 +61,8 @@ void Dblqh::initData() cLqhTimeOutCheckCount = 0; cbookedAccOps = 0; m_backup_ptr = RNIL; + clogFileSize = 16; + cmaxLogFilesInPageZero = 40; }//Dblqh::initData() void Dblqh::initRecords() @@ -105,10 +108,13 @@ void Dblqh::initRecords() sizeof(LogFileOperationRecord), clfoFileSize); - logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord", - sizeof(LogPageRecord), - clogPageFileSize, - false); + logPageRecord = + (LogPageRecord*)allocRecordAligned("LogPageRecord", + sizeof(LogPageRecord), + clogPageFileSize, + &logPageRecordUnaligned, + NDB_O_DIRECT_WRITE_ALIGNMENT, + false); pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord", sizeof(PageRefRecord), @@ -260,6 +266,7 @@ Dblqh::Dblqh(Block_context& ctx): addRecSignal(GSN_START_FRAGREQ, &Dblqh::execSTART_FRAGREQ); addRecSignal(GSN_START_RECREF, &Dblqh::execSTART_RECREF); addRecSignal(GSN_GCP_SAVEREQ, &Dblqh::execGCP_SAVEREQ); + addRecSignal(GSN_FSOPENREF, &Dblqh::execFSOPENREF, true); addRecSignal(GSN_FSOPENCONF, &Dblqh::execFSOPENCONF); addRecSignal(GSN_FSCLOSECONF, &Dblqh::execFSCLOSECONF); addRecSignal(GSN_FSWRITECONF, &Dblqh::execFSWRITECONF); @@ -377,7 +384,7 @@ Dblqh::~Dblqh() sizeof(LogFileOperationRecord), clfoFileSize); - deallocRecord((void**)&logPageRecord, + deallocRecord((void**)&logPageRecordUnaligned, 
"LogPageRecord", sizeof(LogPageRecord), clogPageFileSize); diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 2ffed9749b8..8f42a8039d8 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -1023,6 +1023,11 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal) clogPageFileSize+= (16 - mega_byte_part); } + /* maximum number of log file operations */ + clfoFileSize = clogPageFileSize; + if (clfoFileSize < ZLFO_MIN_FILE_SIZE) + clfoFileSize = ZLFO_MIN_FILE_SIZE; + ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize)); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT, &ctcConnectrecFileSize)); @@ -1031,14 +1036,44 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal) cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN; ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless)); + c_o_direct = true; + ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, &c_o_direct); Uint32 tmp= 0; ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &tmp)); c_fragment_pool.setSize(tmp); + if (!ndb_mgm_get_int_parameter(p, CFG_DB_REDOLOG_FILE_SIZE, + &clogFileSize)) + { + // convert to mbyte + clogFileSize = (clogFileSize + 1024*1024 - 1) / (1024 * 1024); + ndbrequire(clogFileSize >= 4 && clogFileSize <= 1024); + } + + cmaxLogFilesInPageZero = (ZPAGE_SIZE - ZPAGE_HEADER_SIZE - 128) / + (ZFD_MBYTE_SIZE * clogFileSize); + + /** + * "Old" cmaxLogFilesInPageZero was 40 + * Each FD need 3 words per mb, require that they can fit into 1 page + * (atleast 1 FD) + * Is also checked in ConfigInfo.cpp (max FragmentLogFileSize = 1Gb) + * 1Gb = 1024Mb => 3(ZFD_MBYTE_SIZE) * 1024 < 8192 (ZPAGE_SIZE) + */ + if (cmaxLogFilesInPageZero > 40) + { + jam(); + cmaxLogFilesInPageZero = 40; + } + else + { + ndbrequire(cmaxLogFilesInPageZero); + } + initRecords(); initialiseRecordsLab(signal, 0, ref, senderData); - + return; }//Dblqh::execSIZEALT_REP() @@ -11788,9 +11823,9 @@ void Dblqh::sendStartLcp(Signal* signal) Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr, const LogPartRecordPtr &sltLogPartPtr) { - Uint32 hf = sltCurrLogFilePtr.p->fileNo*ZNO_MBYTES_IN_FILE+sltCurrLogFilePtr.p->currentMbyte; - Uint32 tf = sltLogPartPtr.p->logTailFileNo*ZNO_MBYTES_IN_FILE+sltLogPartPtr.p->logTailMbyte; - Uint32 sz = sltLogPartPtr.p->noLogFiles*ZNO_MBYTES_IN_FILE; + Uint32 hf = sltCurrLogFilePtr.p->fileNo*clogFileSize+sltCurrLogFilePtr.p->currentMbyte; + Uint32 tf = sltLogPartPtr.p->logTailFileNo*clogFileSize+sltLogPartPtr.p->logTailMbyte; + Uint32 sz = sltLogPartPtr.p->noLogFiles*clogFileSize; if (tf > hf) hf += sz; return sz-(hf-tf); } @@ -11848,7 +11883,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci) /* ------------------------------------------------------------------------- */ SLT_LOOP: for (tsltIndex = tsltStartMbyte; - tsltIndex <= ZNO_MBYTES_IN_FILE - 1; + tsltIndex <= clogFileSize - 1; tsltIndex++) { if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) { /* ------------------------------------------------------------------------- */ @@ -11864,7 +11899,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci) /* ------------------------------------------------------------------------- */ /*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. 
*/ /* ------------------------------------------------------------------------- */ - tsltMbyte = ZNO_MBYTES_IN_FILE - 1; + tsltMbyte = clogFileSize - 1; sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile; ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord); }//if @@ -11902,7 +11937,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci) UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo; UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte; - arrGuard(tsltMbyte, 16); + arrGuard(tsltMbyte, clogFileSize); sltLogPartPtr.p->logTailFileNo = sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16; /* ------------------------------------------------------------------------- */ @@ -12402,6 +12437,26 @@ void Dblqh::execFSOPENCONF(Signal* signal) }//switch }//Dblqh::execFSOPENCONF() +void +Dblqh::execFSOPENREF(Signal* signal) +{ + jamEntry(); + FsRef* ref = (FsRef*)signal->getDataPtr(); + Uint32 err = ref->errorCode; + if (err == FsRef::fsErrInvalidFileSize) + { + char buf[256]; + BaseString::snprintf(buf, sizeof(buf), + "Invalid file size for redo logfile, " + " size only changable with --initial"); + progError(__LINE__, + NDBD_EXIT_INVALID_CONFIG, + buf); + return; + } + + SimulatedBlock::execFSOPENREF(signal); +} /* ************>> */ /* FSREADCONF > */ @@ -13047,7 +13102,7 @@ void Dblqh::openFileInitLab(Signal* signal) { logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT; seizeLogpage(signal); - writeSinglePage(signal, (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE) - 1, + writeSinglePage(signal, (clogFileSize * ZPAGES_IN_MBYTE) - 1, ZPAGE_SIZE - 1, __LINE__); lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END; return; @@ -13110,7 +13165,7 @@ void Dblqh::writeInitMbyteLab(Signal* signal) { releaseLfo(signal); logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1; - if (logFilePtr.p->currentMbyte == ZNO_MBYTES_IN_FILE) { + if (logFilePtr.p->currentMbyte == clogFileSize) { jam(); releaseLogpage(signal); logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT; @@ -13230,7 +13285,7 @@ void Dblqh::initLogfile(Signal* signal, Uint32 fileNo) logFilePtr.p->lastPageWritten = 0; logFilePtr.p->logPageZero = RNIL; logFilePtr.p->currentMbyte = 0; - for (tilIndex = 0; tilIndex <= 15; tilIndex++) { + for (tilIndex = 0; tilIndex < clogFileSize; tilIndex++) { logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1; logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1; logFilePtr.p->logLastPrepRef[tilIndex] = 0; @@ -13281,8 +13336,14 @@ void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr) signal->theData[3] = olfLogFilePtr.p->fileName[1]; signal->theData[4] = olfLogFilePtr.p->fileName[2]; signal->theData[5] = olfLogFilePtr.p->fileName[3]; - signal->theData[6] = ZOPEN_READ_WRITE | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE; + if (c_o_direct) + signal->theData[6] |= FsOpenReq::OM_DIRECT; req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); + Uint64 sz = clogFileSize; + sz *= 1024; sz *= 1024; + req->file_size_hi = sz >> 32; + req->file_size_lo = sz & 0xFFFFFFFF; sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//Dblqh::openFileRw() @@ -13301,7 +13362,9 @@ void Dblqh::openLogfileInit(Signal* signal) signal->theData[3] = logFilePtr.p->fileName[1]; signal->theData[4] = logFilePtr.p->fileName[2]; signal->theData[5] = logFilePtr.p->fileName[3]; - signal->theData[6] = 0x302 | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = FsOpenReq::OM_READWRITE | 
FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE | FsOpenReq::OM_AUTOSYNC; + if (c_o_direct) + signal->theData[6] |= FsOpenReq::OM_DIRECT; req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//Dblqh::openLogfileInit() @@ -13337,8 +13400,14 @@ void Dblqh::openNextLogfile(Signal* signal) signal->theData[3] = onlLogFilePtr.p->fileName[1]; signal->theData[4] = onlLogFilePtr.p->fileName[2]; signal->theData[5] = onlLogFilePtr.p->fileName[3]; - signal->theData[6] = 2 | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE; + if (c_o_direct) + signal->theData[6] |= FsOpenReq::OM_DIRECT; req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); + Uint64 sz = clogFileSize; + sz *= 1024; sz *= 1024; + req->file_size_hi = sz >> 32; + req->file_size_lo = sz & 0xFFFFFFFF; sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//if }//Dblqh::openNextLogfile() @@ -13469,7 +13538,7 @@ void Dblqh::writeFileDescriptor(Signal* signal) /* -------------------------------------------------- */ /* START BY WRITING TO LOG FILE RECORD */ /* -------------------------------------------------- */ - arrGuard(logFilePtr.p->currentMbyte, 16); + arrGuard(logFilePtr.p->currentMbyte, clogFileSize); logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] = logPartPtr.p->logPartNewestCompletedGCI; logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci; @@ -13495,10 +13564,7 @@ void Dblqh::writeFileDescriptor(Signal* signal) /* ------------------------------------------------------------------------- */ void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType) { - LogFileRecordPtr wmoLogFilePtr; UintR twmoNoLogDescriptors; - UintR twmoLoop; - UintR twmoIndex; /* -------------------------------------------------- */ /* WRITE HEADER INFORMATION IN THE NEW FILE. 
*/ @@ -13506,52 +13572,44 @@ void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType) logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE; logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = logFilePtr.p->fileNo; - if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { jam(); - twmoNoLogDescriptors = ZMAX_LOG_FILES_IN_PAGE_ZERO; + twmoNoLogDescriptors = cmaxLogFilesInPageZero; } else { jam(); twmoNoLogDescriptors = logPartPtr.p->noLogFiles; }//if logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] = twmoNoLogDescriptors; - wmoLogFilePtr.i = logFilePtr.i; - twmoLoop = 0; -WMO_LOOP: - jam(); - if (twmoLoop < twmoNoLogDescriptors) { - jam(); - ptrCheckGuard(wmoLogFilePtr, clogFileFileSize, logFileRecord); - for (twmoIndex = 0; twmoIndex <= ZNO_MBYTES_IN_FILE - 1; twmoIndex++) { + + { + Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE; + LogFileRecordPtr filePtr = logFilePtr; + for (Uint32 fd = 0; fd < twmoNoLogDescriptors; fd++) + { jam(); - arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + twmoIndex, ZPAGE_SIZE); - logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + twmoIndex] = - wmoLogFilePtr.p->logMaxGciCompleted[twmoIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + - twmoIndex, ZPAGE_SIZE); - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + twmoIndex] = - wmoLogFilePtr.p->logMaxGciStarted[twmoIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + - twmoIndex, ZPAGE_SIZE); - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + twmoIndex] = - wmoLogFilePtr.p->logLastPrepRef[twmoIndex]; - }//for - wmoLogFilePtr.i = wmoLogFilePtr.p->prevLogFile; - twmoLoop = twmoLoop + 1; - goto WMO_LOOP; - }//if - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = - (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (ZFD_PART_SIZE * twmoNoLogDescriptors); - arrGuard(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX], ZPAGE_SIZE); - logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] = - ZNEXT_LOG_RECORD_TYPE; + ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord); + for (Uint32 mb = 0; mb < clogFileSize; mb ++) + { + jam(); + Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb; + Uint32 pos1 = pos0 + clogFileSize; + Uint32 pos2 = pos1 + clogFileSize; + arrGuard(pos0, ZPAGE_SIZE); + arrGuard(pos1, ZPAGE_SIZE); + arrGuard(pos2, ZPAGE_SIZE); + logPagePtr.p->logPageWord[pos0] = filePtr.p->logMaxGciCompleted[mb]; + logPagePtr.p->logPageWord[pos1] = filePtr.p->logMaxGciStarted[mb]; + logPagePtr.p->logPageWord[pos2] = filePtr.p->logLastPrepRef[mb]; + } + filePtr.i = filePtr.p->prevLogFile; + } + pos += (twmoNoLogDescriptors * ZFD_MBYTE_SIZE * clogFileSize); + arrGuard(pos, ZPAGE_SIZE); + logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = pos; + logPagePtr.p->logPageWord[pos] = ZNEXT_LOG_RECORD_TYPE; + } + /* ------------------------------------------------------- */ /* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */ /* LOG FILE. 
THIS HAS SPECIAL SIGNIFANCE TO FIND */ @@ -13696,9 +13754,9 @@ void Dblqh::openSrLastFileLab(Signal* signal) void Dblqh::readSrLastFileLab(Signal* signal) { logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP]; - if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { jam(); - initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO); + initGciInLogFileRec(signal, cmaxLogFilesInPageZero); } else { jam(); initGciInLogFileRec(signal, logPartPtr.p->noLogFiles); @@ -13723,7 +13781,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1; }//if }//if - arrGuard(logFilePtr.p->currentMbyte, 16); + arrGuard(logFilePtr.p->currentMbyte, clogFileSize); logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] = logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED]; logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = @@ -13731,7 +13789,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] = logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF]; releaseLogpage(signal); - if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) { jam(); logFilePtr.p->currentMbyte++; readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte); @@ -13745,21 +13803,21 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) * ---------------------------------------------------------------------- */ if (logPartPtr.p->lastMbyte == ZNIL) { jam(); - logPartPtr.p->lastMbyte = ZNO_MBYTES_IN_FILE - 1; + logPartPtr.p->lastMbyte = clogFileSize - 1; }//if }//if logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR; closeFile(signal, logFilePtr, __LINE__); - if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { Uint32 fileNo; - if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) { jam(); - fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO; + fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero; } else { jam(); fileNo = (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) - - ZMAX_LOG_FILES_IN_PAGE_ZERO; + cmaxLogFilesInPageZero; }//if if (fileNo == 0) { jam(); @@ -13769,11 +13827,11 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) * -------------------------------------------------------------------- */ fileNo = 1; logPartPtr.p->srRemainingFiles = - logPartPtr.p->noLogFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1); + logPartPtr.p->noLogFiles - (cmaxLogFilesInPageZero - 1); } else { jam(); logPartPtr.p->srRemainingFiles = - logPartPtr.p->noLogFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO; + logPartPtr.p->noLogFiles - cmaxLogFilesInPageZero; }//if LogFileRecordPtr locLogFilePtr; findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr); @@ -13798,9 +13856,9 @@ void Dblqh::openSrNextFileLab(Signal* signal) void Dblqh::readSrNextFileLab(Signal* signal) { - if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) { jam(); - initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO); + initGciInLogFileRec(signal, cmaxLogFilesInPageZero); } else { jam(); initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles); @@ -13808,16 +13866,16 @@ void Dblqh::readSrNextFileLab(Signal* signal) releaseLogpage(signal); logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR; closeFile(signal, logFilePtr, __LINE__); - if 
(logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) { Uint32 fileNo; - if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) { jam(); - fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO; + fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero; } else { jam(); fileNo = (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) - - ZMAX_LOG_FILES_IN_PAGE_ZERO; + cmaxLogFilesInPageZero; }//if if (fileNo == 0) { jam(); @@ -13826,11 +13884,11 @@ void Dblqh::readSrNextFileLab(Signal* signal) * -------------------------------------------------------------------- */ fileNo = 1; logPartPtr.p->srRemainingFiles = - logPartPtr.p->srRemainingFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1); + logPartPtr.p->srRemainingFiles - (cmaxLogFilesInPageZero - 1); } else { jam(); logPartPtr.p->srRemainingFiles = - logPartPtr.p->srRemainingFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO; + logPartPtr.p->srRemainingFiles - cmaxLogFilesInPageZero; }//if LogFileRecordPtr locLogFilePtr; findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr); @@ -14701,7 +14759,7 @@ void Dblqh::srLogLimits(Signal* signal) * EXECUTED. * ----------------------------------------------------------------------- */ while(true) { - ndbrequire(tmbyte < 16); + ndbrequire(tmbyte < clogFileSize); if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) { if (logFilePtr.p->logMaxGciCompleted[tmbyte] < logPartPtr.p->logLastGci) { jam(); @@ -14742,7 +14800,7 @@ void Dblqh::srLogLimits(Signal* signal) if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) { if (tmbyte == 0) { jam(); - tmbyte = ZNO_MBYTES_IN_FILE - 1; + tmbyte = clogFileSize - 1; logFilePtr.i = logFilePtr.p->prevLogFile; ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); } else { @@ -15136,7 +15194,7 @@ void Dblqh::execSr(Signal* signal) logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD]; logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (noFdDescriptors * ZFD_PART_SIZE); + (noFdDescriptors * ZFD_MBYTE_SIZE * clogFileSize); } break; /* ========================================================================= */ @@ -15176,11 +15234,11 @@ void Dblqh::execSr(Signal* signal) /*---------------------------------------------------------------------------*/ /* START EXECUTION OF A NEW MBYTE IN THE LOG. */ /*---------------------------------------------------------------------------*/ - if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) { jam(); logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE; } else { - ndbrequire(logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)); + ndbrequire(logFilePtr.p->currentMbyte == (clogFileSize - 1)); jam(); /*---------------------------------------------------------------------------*/ /* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */ @@ -15375,7 +15433,7 @@ void Dblqh::invalidateLogAfterLastGCI(Signal* signal) { jam(); releaseLfo(signal); releaseLogpage(signal); - if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) { + if (logPartPtr.p->invalidatePageNo < (clogFileSize * ZPAGES_IN_MBYTE - 1)) { // We continue in this file. 
logPartPtr.p->invalidatePageNo++; } else { @@ -16716,6 +16774,22 @@ void Dblqh::initialiseLogFile(Signal* signal) ptrAss(logFilePtr, logFileRecord); logFilePtr.p->nextLogFile = logFilePtr.i + 1; logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE; + + logFilePtr.p->logLastPrepRef = new Uint32[clogFileSize]; + logFilePtr.p->logMaxGciCompleted = new Uint32[clogFileSize]; + logFilePtr.p->logMaxGciStarted = new Uint32[clogFileSize]; + + if (logFilePtr.p->logLastPrepRef == 0 || + logFilePtr.p->logMaxGciCompleted == 0 || + logFilePtr.p->logMaxGciStarted == 0) + { + char buf[256]; + BaseString::snprintf(buf, sizeof(buf), + "Failed to alloc mbyte(%u) arrays for logfile %u", + clogFileSize, logFilePtr.i); + progError(__LINE__, NDBD_EXIT_MEMALLOC, buf); + } + }//for logFilePtr.i = clogFileFileSize - 1; ptrAss(logFilePtr, logFileRecord); @@ -17044,41 +17118,31 @@ void Dblqh::initFragrec(Signal* signal, * ========================================================================= */ void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors) { - LogFileRecordPtr iglLogFilePtr; - UintR tiglLoop; - UintR tiglIndex; - - tiglLoop = 0; - iglLogFilePtr.i = logFilePtr.i; - iglLogFilePtr.p = logFilePtr.p; -IGL_LOOP: - for (tiglIndex = 0; tiglIndex <= ZNO_MBYTES_IN_FILE - 1; tiglIndex++) { - arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE); - iglLogFilePtr.p->logMaxGciCompleted[tiglIndex] = - logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + ZNO_MBYTES_IN_FILE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE); - iglLogFilePtr.p->logMaxGciStarted[tiglIndex] = - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - ZNO_MBYTES_IN_FILE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (2 * ZNO_MBYTES_IN_FILE)) + (tiglLoop * ZFD_PART_SIZE)) + - tiglIndex, ZPAGE_SIZE); - iglLogFilePtr.p->logLastPrepRef[tiglIndex] = - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (2 * ZNO_MBYTES_IN_FILE)) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex]; - }//for - tiglLoop = tiglLoop + 1; - if (tiglLoop < noFdDescriptors) { + LogFileRecordPtr filePtr = logFilePtr; + Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE; + for (Uint32 fd = 0; fd < noFdDescriptors; fd++) + { jam(); - iglLogFilePtr.i = iglLogFilePtr.p->prevLogFile; - ptrCheckGuard(iglLogFilePtr, clogFileFileSize, logFileRecord); - goto IGL_LOOP; - }//if + for (Uint32 mb = 0; mb < clogFileSize; mb++) + { + jam(); + Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb; + Uint32 pos1 = pos0 + clogFileSize; + Uint32 pos2 = pos1 + clogFileSize; + arrGuard(pos0, ZPAGE_SIZE); + arrGuard(pos1, ZPAGE_SIZE); + arrGuard(pos2, ZPAGE_SIZE); + filePtr.p->logMaxGciCompleted[mb] = logPagePtr.p->logPageWord[pos0]; + filePtr.p->logMaxGciStarted[mb] = logPagePtr.p->logPageWord[pos1]; + filePtr.p->logLastPrepRef[mb] = logPagePtr.p->logPageWord[pos2]; + } + if (fd + 1 < noFdDescriptors) + { + jam(); + filePtr.i = filePtr.p->prevLogFile; + ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord); + } + } }//Dblqh::initGciInLogFileRec() /* ========================================================================== @@ -18331,7 +18395,7 @@ void Dblqh::writeNextLog(Signal* signal) ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE); logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] 
= ZNEXT_MBYTE_TYPE; - if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) { jam(); /* -------------------------------------------------- */ /* CALCULATE THE NEW REMAINING WORDS WHEN */ @@ -18420,7 +18484,7 @@ void Dblqh::writeNextLog(Signal* signal) systemError(signal, __LINE__); }//if }//if - if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) { jam(); twnlNextMbyte = 0; if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) { diff --git a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am index c7c477a512c..b545096dc83 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am +++ b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am @@ -16,7 +16,7 @@ EXTRA_PROGRAMS = ndbd_redo_log_reader ndbd_redo_log_reader_SOURCES = redoLogReader/records.cpp \ - redoLogReader/redoLogFileReader.cpp + redoLogReader/reader.cpp include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp similarity index 100% rename from storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp rename to storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index adc6d1e3ed4..56ecc8ddc39 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -277,6 +277,14 @@ void Ndbcntr::execSTTOR(Signal* signal) break; case ZSTART_PHASE_1: jam(); + { + Uint32 db_watchdog_interval = 0; + const ndb_mgm_configuration_iterator * p = + m_ctx.m_config.getOwnConfigIterator(); + ndb_mgm_get_int_parameter(p, CFG_DB_WATCHDOG_INTERVAL, &db_watchdog_interval); + ndbrequire(db_watchdog_interval); + update_watch_dog_timer(db_watchdog_interval); + } startPhase1Lab(signal); break; case ZSTART_PHASE_2: @@ -1410,6 +1418,13 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal) { jamEntry(); + if (ERROR_INSERTED(1001)) + { + sendSignalWithDelay(reference(), GSN_NODE_FAILREP, signal, 100, + signal->getLength()); + return; + } + const NodeFailRep * nodeFail = (NodeFailRep *)&signal->theData[0]; NdbNodeBitmask allFailed; allFailed.assign(NdbNodeBitmask::Size, nodeFail->theNodes); @@ -2734,16 +2749,34 @@ void Ndbcntr::execSTART_ORD(Signal* signal){ c_missra.execSTART_ORD(signal); } +#define CLEAR_DX 13 +#define CLEAR_LCP 3 + void -Ndbcntr::clearFilesystem(Signal* signal){ +Ndbcntr::clearFilesystem(Signal* signal) +{ + const Uint32 lcp = c_fsRemoveCount >= CLEAR_DX; + FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); req->userReference = reference(); req->userPointer = 0; req->directory = 1; req->ownDirectory = 1; - FsOpenReq::setVersion(req->fileNumber, 3); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can by any... - FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount); + + if (lcp == 0) + { + FsOpenReq::setVersion(req->fileNumber, 3); + FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can by any... 
+ FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount); + } + else + { + FsOpenReq::setVersion(req->fileNumber, 5); + FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); + FsOpenReq::v5_setLcpNo(req->fileNumber, c_fsRemoveCount - CLEAR_DX); + FsOpenReq::v5_setTableId(req->fileNumber, 0); + FsOpenReq::v5_setFragmentId(req->fileNumber, 0); + } sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, FsRemoveReq::SignalLength, JBA); c_fsRemoveCount++; @@ -2752,12 +2785,12 @@ Ndbcntr::clearFilesystem(Signal* signal){ void Ndbcntr::execFSREMOVECONF(Signal* signal){ jamEntry(); - if(c_fsRemoveCount == 13){ + if(c_fsRemoveCount == CLEAR_DX + CLEAR_LCP){ jam(); sendSttorry(signal); } else { jam(); - ndbrequire(c_fsRemoveCount < 13); + ndbrequire(c_fsRemoveCount < CLEAR_DX + CLEAR_LCP); clearFilesystem(signal); }//if } diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp index 5f93ee31bc7..cf18bf34040 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp @@ -163,7 +163,12 @@ AsyncFile::run() theStartFlag = true; // Create write buffer for bigger writes theWriteBufferSize = WRITEBUFFERSIZE; - theWriteBuffer = (char *) ndbd_malloc(theWriteBufferSize); + theWriteBufferUnaligned = (char *) ndbd_malloc(theWriteBufferSize + + NDB_O_DIRECT_WRITE_ALIGNMENT-1); + theWriteBuffer = (char *) + (((UintPtr)theWriteBufferUnaligned + NDB_O_DIRECT_WRITE_ALIGNMENT - 1) & + ~(UintPtr)(NDB_O_DIRECT_WRITE_ALIGNMENT - 1)); + NdbMutex_Unlock(theStartMutexPtr); NdbCondition_Signal(theStartConditionPtr); @@ -247,6 +252,78 @@ AsyncFile::run() static char g_odirect_readbuf[2*GLOBAL_PAGE_SIZE -1]; #endif +int +AsyncFile::check_odirect_write(Uint32 flags, int& new_flags, int mode) +{ + assert(new_flags & (O_CREAT | O_TRUNC)); +#ifdef O_DIRECT + int ret; + char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); + while (((ret = ::write(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && + (errno == EINTR)); + if (ret == -1) + { + new_flags &= ~O_DIRECT; + ndbout_c("%s Failed to write using O_DIRECT, disabling", + theFileName.c_str()); + } + + close(theFd); + theFd = ::open(theFileName.c_str(), new_flags, mode); + if (theFd == -1) + return errno; +#endif + + return 0; +} + +int +AsyncFile::check_odirect_read(Uint32 flags, int &new_flags, int mode) +{ +#ifdef O_DIRECT + int ret; + char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); + while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && + (errno == EINTR)); + if (ret == -1) + { + ndbout_c("%s Failed to read using O_DIRECT, disabling", + theFileName.c_str()); + goto reopen; + } + + if(lseek(theFd, 0, SEEK_SET) != 0) + { + return errno; + } + + if ((flags & FsOpenReq::OM_CHECK_SIZE) == 0) + { + struct stat buf; + if ((fstat(theFd, &buf) == -1)) + { + return errno; + } + else if ((buf.st_size % GLOBAL_PAGE_SIZE) != 0) + { + ndbout_c("%s filesize not a multiple of %d, disabling O_DIRECT", + theFileName.c_str(), GLOBAL_PAGE_SIZE); + goto reopen; + } + } + + return 0; + +reopen: + close(theFd); + new_flags &= ~O_DIRECT; + theFd = ::open(theFileName.c_str(), new_flags, mode); + if (theFd == -1) + return errno; +#endif + return 0; +} + void AsyncFile::openReq(Request* request) { m_auto_sync_freq = 0; @@ -312,7 +389,7 @@ void AsyncFile::openReq(Request* request) } #else Uint32 flags = request->par.open.flags; - Uint32 new_flags = 0; + int new_flags = 0; // 
Convert file open flags from Solaris to Liux if (flags & FsOpenReq::OM_CREATE) @@ -343,10 +420,6 @@ void AsyncFile::openReq(Request* request) { new_flags |= O_DIRECT; } -#elif defined O_SYNC - { - flags |= FsOpenReq::OM_SYNC; - } #endif if ((flags & FsOpenReq::OM_SYNC) && ! (flags & FsOpenReq::OM_INIT)) @@ -355,15 +428,19 @@ void AsyncFile::openReq(Request* request) new_flags |= O_SYNC; #endif } - + + const char * rw = ""; switch(flags & 0x3){ case FsOpenReq::OM_READONLY: + rw = "r"; new_flags |= O_RDONLY; break; case FsOpenReq::OM_WRITEONLY: + rw = "w"; new_flags |= O_WRONLY; break; case FsOpenReq::OM_READWRITE: + rw = "rw"; new_flags |= O_RDWR; break; default: @@ -404,11 +481,6 @@ no_odirect: if (new_flags & O_DIRECT) { new_flags &= ~O_DIRECT; - flags |= FsOpenReq::OM_SYNC; -#ifdef O_SYNC - if (! (flags & FsOpenReq::OM_INIT)) - new_flags |= O_SYNC; -#endif goto no_odirect; } #endif @@ -421,11 +493,6 @@ no_odirect: else if (new_flags & O_DIRECT) { new_flags &= ~O_DIRECT; - flags |= FsOpenReq::OM_SYNC; -#ifdef O_SYNC - if (! (flags & FsOpenReq::OM_INIT)) - new_flags |= O_SYNC; -#endif goto no_odirect; } #endif @@ -512,7 +579,6 @@ no_odirect: { ndbout_c("error on first write(%d), disable O_DIRECT", err); new_flags &= ~O_DIRECT; - flags |= FsOpenReq::OM_SYNC; close(theFd); theFd = ::open(theFileName.c_str(), new_flags, mode); if (theFd != -1) @@ -532,26 +598,32 @@ no_odirect: else if (flags & FsOpenReq::OM_DIRECT) { #ifdef O_DIRECT - do { - int ret; - char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); - while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && (errno == EINTR)); - if (ret == -1) - { - ndbout_c("%s Failed to read using O_DIRECT, disabling", theFileName.c_str()); - flags |= FsOpenReq::OM_SYNC; - flags |= FsOpenReq::OM_INIT; - break; - } - if(lseek(theFd, 0, SEEK_SET) != 0) - { - request->error = errno; - return; - } - } while (0); + if (flags & (FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE)) + { + request->error = check_odirect_write(flags, new_flags, mode); + } + else + { + request->error = check_odirect_read(flags, new_flags, mode); + } + + if (request->error) + return; #endif } - +#ifdef VM_TRACE + if (flags & FsOpenReq::OM_DIRECT) + { +#ifdef O_DIRECT + ndbout_c("%s %s O_DIRECT: %d", + theFileName.c_str(), rw, + !!(new_flags & O_DIRECT)); +#else + ndbout_c("%s %s O_DIRECT: 0", + theFileName.c_str(), rw); +#endif + } +#endif if ((flags & FsOpenReq::OM_SYNC) && (flags & FsOpenReq::OM_INIT)) { #ifdef O_SYNC @@ -562,6 +634,10 @@ no_odirect: new_flags &= ~(O_CREAT | O_TRUNC); new_flags |= O_SYNC; theFd = ::open(theFileName.c_str(), new_flags, mode); + if (theFd == -1) + { + request->error = errno; + } #endif } #endif @@ -1079,7 +1155,8 @@ AsyncFile::rmrfReq(Request * request, char * path, bool removePath){ void AsyncFile::endReq() { // Thread is ended with return - if (theWriteBuffer) ndbd_free(theWriteBuffer, theWriteBufferSize); + if (theWriteBufferUnaligned) + ndbd_free(theWriteBufferUnaligned, theWriteBufferSize); } diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp index cc667225ce2..d8d585c47f7 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp @@ -234,9 +234,13 @@ private: bool theStartFlag; int theWriteBufferSize; char* theWriteBuffer; + void* theWriteBufferUnaligned; size_t m_write_wo_sync; // Writes wo/ sync size_t m_auto_sync_freq; // Auto sync freq in bytes + + int 
check_odirect_read(Uint32 flags, int&new_flags, int mode); + int check_odirect_write(Uint32 flags, int&new_flags, int mode); public: SimulatedBlock& m_fs; Ptr m_page_ptr; diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp index 44f8a8ab05b..26bf8878852 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp @@ -652,7 +652,7 @@ AsyncFile* Ndbfs::createAsyncFile(){ // Check limit of open files - if (m_maxFiles !=0 && theFiles.size()+1 == m_maxFiles) { + if (m_maxFiles !=0 && theFiles.size() == m_maxFiles) { // Print info about all open files for (unsigned i = 0; i < theFiles.size(); i++){ AsyncFile* file = theFiles[i]; diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp index 57563d3c6d4..7296cd56c3f 100644 --- a/storage/ndb/src/kernel/blocks/pgman.cpp +++ b/storage/ndb/src/kernel/blocks/pgman.cpp @@ -123,8 +123,8 @@ Pgman::execREAD_CONFIG_REQ(Signal* signal) if (page_buffer > 0) { page_buffer /= GLOBAL_PAGE_SIZE; // in pages - m_page_entry_pool.setSize(100*page_buffer); m_param.m_max_pages = page_buffer; + m_page_entry_pool.setSize(m_param.m_lirs_stack_mult * page_buffer); m_param.m_max_hot_pages = (page_buffer * 9) / 10; } @@ -141,6 +141,7 @@ Pgman::execREAD_CONFIG_REQ(Signal* signal) Pgman::Param::Param() : m_max_pages(64), // smallish for testing + m_lirs_stack_mult(10), m_max_hot_pages(56), m_max_loop_count(256), m_max_io_waits(64), @@ -301,6 +302,9 @@ Pgman::get_sublist_no(Page_state state) { return Page_entry::SL_LOCKED; } + if (state == Page_entry::ONSTACK) { + return Page_entry::SL_IDLE; + } return Page_entry::SL_OTHER; } @@ -415,15 +419,55 @@ Pgman::get_page_entry(Ptr& ptr, Uint32 file_no, Uint32 page_no) { if (find_page_entry(ptr, file_no, page_no)) { + jam(); ndbrequire(ptr.p->m_state != 0); m_stats.m_page_hits++; + +#ifdef VM_TRACE + debugOut << "PGMAN: get_page_entry: found" << endl; + debugOut << "PGMAN: " << ptr << endl; +#endif return true; } + if (m_page_entry_pool.getNoOfFree() == 0) + { + jam(); + Page_sublist& pl_idle = *m_page_sublist[Page_entry::SL_IDLE]; + Ptr idle_ptr; + if (pl_idle.first(idle_ptr)) + { + jam(); + +#ifdef VM_TRACE + debugOut << "PGMAN: get_page_entry: re-use idle entry" << endl; + debugOut << "PGMAN: " << idle_ptr << endl; +#endif + + Page_state state = idle_ptr.p->m_state; + ndbrequire(state == Page_entry::ONSTACK); + + Page_stack& pl_stack = m_page_stack; + ndbrequire(pl_stack.hasPrev(idle_ptr)); + pl_stack.remove(idle_ptr); + state &= ~ Page_entry::ONSTACK; + set_page_state(idle_ptr, state); + ndbrequire(idle_ptr.p->m_state == 0); + + release_page_entry(idle_ptr); + } + } + if (seize_page_entry(ptr, file_no, page_no)) { + jam(); ndbrequire(ptr.p->m_state == 0); m_stats.m_page_faults++; + +#ifdef VM_TRACE + debugOut << "PGMAN: get_page_entry: seize" << endl; + debugOut << "PGMAN: " << ptr << endl; +#endif return true; } @@ -624,6 +668,7 @@ Pgman::lirs_reference(Ptr ptr) jam(); move_cleanup_ptr(ptr); pl_queue.remove(ptr); + state &= ~ Page_entry::ONQUEUE; } if (state & Page_entry::BOUND) { @@ -654,6 +699,12 @@ Pgman::lirs_reference(Ptr ptr) pl_stack.add(ptr); state |= Page_entry::ONSTACK; state |= Page_entry::HOT; + // it could be on queue already + if (state & Page_entry::ONQUEUE) { + jam(); + pl_queue.remove(ptr); + state &= ~Page_entry::ONQUEUE; + } } set_page_state(ptr, state); @@ -902,9 +953,11 @@ Pgman::process_map(Signal* signal) #ifdef VM_TRACE debugOut << "PGMAN: >process_map" << endl; 
#endif - int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits; - if (max_count > 0) + int max_count = 0; + if (m_param.m_max_io_waits > m_stats.m_current_io_waits) { + max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits; max_count = max_count / 2 + 1; + } Page_sublist& pl_map = *m_page_sublist[Page_entry::SL_MAP]; while (! pl_map.isEmpty() && --max_count >= 0) @@ -1056,15 +1109,10 @@ Pgman::process_cleanup(Signal* signal) } int max_loop_count = m_param.m_max_loop_count; - int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits; - - if (max_count > 0) - { + int max_count = 0; + if (m_param.m_max_io_waits > m_stats.m_current_io_waits) { + max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits; max_count = max_count / 2 + 1; - /* - * Possibly add code here to avoid writing too rapidly. May be - * unnecessary since only cold pages are cleaned. - */ } Ptr ptr = m_cleanup_ptr; @@ -1166,9 +1214,12 @@ bool Pgman::process_lcp(Signal* signal) { Page_hashlist& pl_hash = m_page_hashlist; - int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits; - if (max_count > 0) + + int max_count = 0; + if (m_param.m_max_io_waits > m_stats.m_current_io_waits) { + max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits; max_count = max_count / 2 + 1; + } #ifdef VM_TRACE debugOut @@ -1927,6 +1978,8 @@ Pgman::verify_page_entry(Ptr ptr) break; case Page_entry::SL_LOCKED: break; + case Page_entry::SL_IDLE: + break; case Page_entry::SL_OTHER: break; default: @@ -1973,8 +2026,11 @@ Pgman::verify_page_lists() ndbrequire(stack_count == pl_stack.count() || dump_page_lists()); ndbrequire(queue_count == pl_queue.count() || dump_page_lists()); + Uint32 hot_count = 0; Uint32 hot_bound_count = 0; Uint32 cold_bound_count = 0; + Uint32 stack_request_count = 0; + Uint32 queue_request_count = 0; Uint32 i1 = RNIL; for (pl_stack.first(ptr); ptr.i != RNIL; pl_stack.next(ptr)) @@ -1985,9 +2041,13 @@ Pgman::verify_page_lists() ndbrequire(state & Page_entry::ONSTACK || dump_page_lists()); if (! 
pl_stack.hasPrev(ptr)) ndbrequire(state & Page_entry::HOT || dump_page_lists()); - if (state & Page_entry::HOT && - state & Page_entry::BOUND) - hot_bound_count++; + if (state & Page_entry::HOT) { + hot_count++; + if (state & Page_entry::BOUND) + hot_bound_count++; + } + if (state & Page_entry::REQUEST) + stack_request_count++; } Uint32 i2 = RNIL; @@ -1999,6 +2059,8 @@ Pgman::verify_page_lists() ndbrequire(state & Page_entry::ONQUEUE || dump_page_lists()); ndbrequire(state & Page_entry::BOUND || dump_page_lists()); cold_bound_count++; + if (state & Page_entry::REQUEST) + queue_request_count++; } Uint32 tot_bound_count = @@ -2031,7 +2093,11 @@ Pgman::verify_page_lists() << " cache:" << m_stats.m_num_pages << "(" << locked_bound_count << "L)" << " stack:" << pl_stack.count() + << " hot:" << hot_count + << " hot_bound:" << hot_bound_count + << " stack_request:" << stack_request_count << " queue:" << pl_queue.count() + << " queue_request:" << queue_request_count << " queuewait:" << queuewait_count << endl; debugOut << "PGMAN:"; @@ -2139,6 +2205,8 @@ Pgman::get_sublist_name(Uint32 list_no) return "busy"; case Page_entry::SL_LOCKED: return "locked"; + case Page_entry::SL_IDLE: + return "idle"; case Page_entry::SL_OTHER: return "other"; } diff --git a/storage/ndb/src/kernel/blocks/pgman.hpp b/storage/ndb/src/kernel/blocks/pgman.hpp index 07029d1c3e5..e3bf0fa5780 100644 --- a/storage/ndb/src/kernel/blocks/pgman.hpp +++ b/storage/ndb/src/kernel/blocks/pgman.hpp @@ -325,8 +325,9 @@ private: ,SL_CALLBACK_IO = 4 ,SL_BUSY = 5 ,SL_LOCKED = 6 - ,SL_OTHER = 7 - ,SUBLIST_COUNT = 8 + ,SL_IDLE = 7 + ,SL_OTHER = 8 + ,SUBLIST_COUNT = 9 }; Uint16 m_file_no; // disk page address set at seize @@ -401,6 +402,7 @@ private: struct Param { Param(); Uint32 m_max_pages; // max number of cache pages + Uint32 m_lirs_stack_mult; // in m_max_pages (around 3-10) Uint32 m_max_hot_pages; // max hot cache pages (up to 99%) Uint32 m_max_loop_count; // limit purely local loops Uint32 m_max_io_waits; diff --git a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp index d4a2414ef2f..2d40cd79daa 100644 --- a/storage/ndb/src/kernel/blocks/restore.cpp +++ b/storage/ndb/src/kernel/blocks/restore.cpp @@ -557,6 +557,9 @@ Restore::restore_next(Signal* signal, FilePtr file_ptr) case BackupFormat::GCP_ENTRY: parse_gcp_entry(signal, file_ptr, data, len); break; + case BackupFormat::EMPTY_ENTRY: + // skip + break; case 0x4e444242: // 'NDBB' if (check_file_version(signal, ntohl(* (data+2))) == 0) { diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp index ebdd4c97aab..72770d35cde 100644 --- a/storage/ndb/src/kernel/vm/Configuration.cpp +++ b/storage/ndb/src/kernel/vm/Configuration.cpp @@ -443,6 +443,11 @@ Configuration::setupConfiguration(){ "TimeBetweenWatchDogCheck missing"); } + if(iter.get(CFG_DB_WATCHDOG_INTERVAL_INITIAL, &_timeBetweenWatchDogCheckInitial)){ + ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, "Invalid configuration fetched", + "TimeBetweenWatchDogCheckInitial missing"); + } + /** * Get paths */ @@ -462,9 +467,12 @@ Configuration::setupConfiguration(){ * Create the watch dog thread */ { - Uint32 t = _timeBetweenWatchDogCheck; + if (_timeBetweenWatchDogCheckInitial < _timeBetweenWatchDogCheck) + _timeBetweenWatchDogCheckInitial = _timeBetweenWatchDogCheck; + + Uint32 t = _timeBetweenWatchDogCheckInitial; t = globalEmulatorData.theWatchDog ->setCheckInterval(t); - _timeBetweenWatchDogCheck = t; + _timeBetweenWatchDogCheckInitial = t; } 
ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config); diff --git a/storage/ndb/src/kernel/vm/Configuration.hpp b/storage/ndb/src/kernel/vm/Configuration.hpp index 934261e40af..918a889a171 100644 --- a/storage/ndb/src/kernel/vm/Configuration.hpp +++ b/storage/ndb/src/kernel/vm/Configuration.hpp @@ -84,6 +84,7 @@ private: Uint32 _maxErrorLogs; Uint32 _lockPagesInMainMemory; Uint32 _timeBetweenWatchDogCheck; + Uint32 _timeBetweenWatchDogCheckInitial; ndb_mgm_configuration * m_ownConfig; ndb_mgm_configuration * m_clusterConfig; diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp index 3125fc33258..7ad1d486a02 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,9 @@ #include #include +#include +extern EventLogger g_eventLogger; + #define ljamEntry() jamEntryLine(30000 + __LINE__) #define ljam() jamLine(30000 + __LINE__) @@ -655,14 +659,20 @@ SimulatedBlock::getBatSize(Uint16 blockNo){ return sb->theBATSize; } +void* SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId) +{ + return allocRecordAligned(type, s, n, 0, 0, clear, paramId); +} + void* -SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId) +SimulatedBlock::allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align, bool clear, Uint32 paramId) { void * p = NULL; - size_t size = n*s; - Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s); - refresh_watch_dog(); + Uint32 over_alloc = unaligned_buffer ? (align - 1) : 0; + size_t size = n*s + over_alloc; + Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s) + over_alloc; + refresh_watch_dog(9); if (real_size > 0){ #ifdef VM_TRACE_MEM ndbout_c("%s::allocRecord(%s, %u, %u) = %llu bytes", @@ -696,14 +706,24 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, U char * ptr = (char*)p; const Uint32 chunk = 128 * 1024; while(size > chunk){ - refresh_watch_dog(); + refresh_watch_dog(9); memset(ptr, 0, chunk); ptr += chunk; size -= chunk; } - refresh_watch_dog(); + refresh_watch_dog(9); memset(ptr, 0, size); } + if (unaligned_buffer) + { + *unaligned_buffer = p; + p = (void *)(((UintPtr)p + over_alloc) & ~(UintPtr)(over_alloc)); +#ifdef VM_TRACE + g_eventLogger.info("'%s' (%u) %llu %llu, alignment correction %u bytes", + type, align, (Uint64)p, (Uint64)p+n*s, + (Uint32)((UintPtr)p - (UintPtr)*unaligned_buffer)); +#endif + } } return p; } @@ -720,9 +740,16 @@ SimulatedBlock::deallocRecord(void ** ptr, } void -SimulatedBlock::refresh_watch_dog() +SimulatedBlock::refresh_watch_dog(Uint32 place) { - globalData.incrementWatchDogCounter(1); + globalData.incrementWatchDogCounter(place); +} + +void +SimulatedBlock::update_watch_dog_timer(Uint32 interval) +{ + extern EmulatorData globalEmulatorData; + globalEmulatorData.theWatchDog->setCheckInterval(interval); } void diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp index 37a8dde5956..86e26986f93 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -334,7 +334,8 @@ protected: * Refresh Watch Dog in initialising code * */ - void refresh_watch_dog(); + void refresh_watch_dog(Uint32 place = 1); + void update_watch_dog_timer(Uint32 interval); /** * Prog error @@ -377,6 +378,7 @@ 
protected: * */ void* allocRecord(const char * type, size_t s, size_t n, bool clear = true, Uint32 paramId = 0); + void* allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align = NDB_O_DIRECT_WRITE_ALIGNMENT, bool clear = true, Uint32 paramId = 0); /** * Deallocate record diff --git a/storage/ndb/src/kernel/vm/WatchDog.cpp b/storage/ndb/src/kernel/vm/WatchDog.cpp index d1abb709b1e..a7f5e8f5c2b 100644 --- a/storage/ndb/src/kernel/vm/WatchDog.cpp +++ b/storage/ndb/src/kernel/vm/WatchDog.cpp @@ -16,6 +16,7 @@ #include #include +#include #include "WatchDog.hpp" #include "GlobalData.hpp" @@ -24,6 +25,8 @@ #include #include +#include + extern EventLogger g_eventLogger; extern "C" @@ -71,66 +74,115 @@ WatchDog::doStop(){ } } +const char *get_action(Uint32 IPValue) +{ + const char *action; + switch (IPValue) { + case 1: + action = "Job Handling"; + break; + case 2: + action = "Scanning Timers"; + break; + case 3: + action = "External I/O"; + break; + case 4: + action = "Print Job Buffers at crash"; + break; + case 5: + action = "Checking connections"; + break; + case 6: + action = "Performing Send"; + break; + case 7: + action = "Polling for Receive"; + break; + case 8: + action = "Performing Receive"; + break; + case 9: + action = "Allocating memory"; + break; + default: + action = "Unknown place"; + break; + }//switch + return action; +} + void -WatchDog::run(){ - unsigned int anIPValue; - unsigned int alerts = 0; +WatchDog::run() +{ + unsigned int anIPValue, sleep_time; unsigned int oldIPValue = 0; - + unsigned int theIntervalCheck = theInterval; + struct MicroSecondTimer start_time, last_time, now; + NdbTick_getMicroTimer(&start_time); + last_time = start_time; + // WatchDog for the single threaded NDB - while(!theStop){ - Uint32 tmp = theInterval / 500; - tmp= (tmp ? 
tmp : 1); - - while(!theStop && tmp > 0){ - NdbSleep_MilliSleep(500); - tmp--; - } - + while (!theStop) + { + sleep_time= 100; + + NdbSleep_MilliSleep(sleep_time); if(theStop) break; + NdbTick_getMicroTimer(&now); + if (NdbTick_getMicrosPassed(last_time, now)/1000 > sleep_time*2) + { + struct tms my_tms; + times(&my_tms); + g_eventLogger.info("Watchdog: User time: %llu System time: %llu", + (Uint64)my_tms.tms_utime, + (Uint64)my_tms.tms_stime); + g_eventLogger.warning("Watchdog: Warning overslept %u ms, expected %u ms.", + NdbTick_getMicrosPassed(last_time, now)/1000, + sleep_time); + } + last_time = now; + // Verify that the IP thread is not stuck in a loop anIPValue = *theIPValue; - if(anIPValue != 0) { + if (anIPValue != 0) + { oldIPValue = anIPValue; globalData.incrementWatchDogCounter(0); - alerts = 0; - } else { - const char *last_stuck_action; - alerts++; - switch (oldIPValue) { - case 1: - last_stuck_action = "Job Handling"; - break; - case 2: - last_stuck_action = "Scanning Timers"; - break; - case 3: - last_stuck_action = "External I/O"; - break; - case 4: - last_stuck_action = "Print Job Buffers at crash"; - break; - case 5: - last_stuck_action = "Checking connections"; - break; - case 6: - last_stuck_action = "Performing Send"; - break; - case 7: - last_stuck_action = "Polling for Receive"; - break; - case 8: - last_stuck_action = "Performing Receive"; - break; - default: - last_stuck_action = "Unknown place"; - break; - }//switch - g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action); - if(alerts == 3){ - shutdownSystem(last_stuck_action); + NdbTick_getMicroTimer(&start_time); + theIntervalCheck = theInterval; + } + else + { + int warn = 1; + Uint32 elapsed = NdbTick_getMicrosPassed(start_time, now)/1000; + /* + oldIPValue == 9 indicates malloc going on, this can take some time + so only warn if we pass the watchdog interval + */ + if (oldIPValue == 9) + if (elapsed < theIntervalCheck) + warn = 0; + else + theIntervalCheck += theInterval; + + if (warn) + { + const char *last_stuck_action = get_action(oldIPValue); + g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action); + { + struct tms my_tms; + times(&my_tms); + g_eventLogger.info("Watchdog: User time: %llu System time: %llu", + (Uint64)my_tms.tms_utime, + (Uint64)my_tms.tms_stime); + } + if (elapsed > 3 * theInterval) + { + shutdownSystem(last_stuck_action); + } } } } diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index e27e55d2a13..d6ecfd9b900 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -579,6 +579,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "70", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_WATCHDOG_INTERVAL_INITIAL, + "TimeBetweenWatchDogCheckInitial", + DB_TOKEN, + "Time between execution checks inside a database node in the early start phases when memory is allocated", + ConfigInfo::CI_USED, + true, + ConfigInfo::CI_INT, + "6000", + "70", + STR_VALUE(MAX_INT_RNIL) }, + { CFG_DB_STOP_ON_ERROR, "StopOnError", @@ -879,6 +891,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "3", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_REDOLOG_FILE_SIZE, + "FragmentLogFileSize", + DB_TOKEN, + "Size of each Redo log file", + ConfigInfo::CI_USED, + false, + ConfigInfo::CI_INT, + "16M", + "4M", + "1G" }, + { CFG_DB_MAX_OPEN_FILES, "MaxNoOfOpenFiles", @@ -1309,6 +1333,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "0", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_O_DIRECT, 
+ "ODirect", + DB_TOKEN, + "Use O_DIRECT file write/read when possible", + ConfigInfo::CI_USED, + true, + ConfigInfo::CI_BOOL, + "false", + "false", + "true"}, + /*************************************************************************** * API ***************************************************************************/ diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index 8c40ddf2116..b10859c3180 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -179,7 +179,7 @@ ErrorBundle ErrorCodes[] = { { 873, DMEC, TR, "Out of attrinfo records for scan in tuple manager" }, { 899, DMEC, TR, "Rowid already allocated" }, { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" }, - { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" }, + { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (increase FragmentLogFileSize)" }, { 1222, DMEC, TR, "Out of transaction markers in LQH" }, { 4021, DMEC, TR, "Out of Send Buffer space in NDB API" }, { 4022, DMEC, TR, "Out of Send Buffer space in NDB API" }, diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index 85dbc2aab2a..97b831963fc 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -1629,6 +1629,85 @@ runBug28023(NDBT_Context* ctx, NDBT_Step* step) return NDBT_FAILED; } } + + return NDBT_OK; +} + + +int +runBug28717(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + Ndb* pNdb = GETNDB(step); + NdbRestarter res; + + if (res.getNumDbNodes() < 4) + { + return NDBT_OK; + } + + int master = res.getMasterNodeId(); + int node0 = res.getRandomNodeOtherNodeGroup(master, rand()); + int node1 = res.getRandomNodeSameNodeGroup(node0, rand()); + + ndbout_c("master: %d node0: %d node1: %d", master, node0, node1); + + if (res.restartOneDbNode(node0, false, true, true)) + { + return NDBT_FAILED; + } + + { + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 }; + NdbLogEventHandle handle = + ndb_mgm_create_logevent_handle(res.handle, filter); + + + int dump[] = { DumpStateOrd::DihStartLcpImmediately }; + struct ndb_logevent event; + + for (Uint32 i = 0; i<3; i++) + { + res.dumpStateOneNode(master, dump, 1); + while(ndb_logevent_get_next(handle, &event, 0) >= 0 && + event.type != NDB_LE_LocalCheckpointStarted); + while(ndb_logevent_get_next(handle, &event, 0) >= 0 && + event.type != NDB_LE_LocalCheckpointCompleted); + } + } + + if (res.waitNodesNoStart(&node0, 1)) + return NDBT_FAILED; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + + if (res.dumpStateOneNode(node0, val2, 2)) + return NDBT_FAILED; + + if (res.insertErrorInNode(node0, 5010)) + return NDBT_FAILED; + + if (res.insertErrorInNode(node1, 1001)) + return NDBT_FAILED; + + if (res.startNodes(&node0, 1)) + return NDBT_FAILED; + + NdbSleep_SecSleep(3); + + if (res.insertErrorInNode(node1, 0)) + return NDBT_FAILED; + + if (res.waitNodesNoStart(&node0, 1)) + return NDBT_FAILED; + + if (res.startNodes(&node0, 1)) + return NDBT_FAILED; + + if (res.waitClusterStarted()) + return NDBT_FAILED; return NDBT_OK; } @@ -1993,6 +2072,9 @@ TESTCASE("Bug27466", ""){ TESTCASE("Bug28023", ""){ INITIALIZER(runBug28023); } +TESTCASE("Bug28717", ""){ + INITIALIZER(runBug28717); +} 
NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/run-test/conf-dl145a.cnf b/storage/ndb/test/run-test/conf-dl145a.cnf index ea344f1a62a..5f61bee755d 100644 --- a/storage/ndb/test/run-test/conf-dl145a.cnf +++ b/storage/ndb/test/run-test/conf-dl145a.cnf @@ -21,3 +21,6 @@ BackupMemory = 64M MaxNoOfConcurrentScans = 100 MaxNoOfSavedMessages= 1000 SendBufferMemory = 2M +NoOfFragmentLogFiles = 4 +FragmentLogFileSize = 64M + diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index 2cd3942bf07..6ce2da47670 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -567,6 +567,10 @@ max-time: 1500 cmd: testDict args: -n CreateAndDrop +max-time: 1000 +cmd: testNodeRestart +args: -n Bug28717 T1 + max-time: 1500 cmd: testDict args: -n CreateAndDropAtRandom -l 200 T1 @@ -706,7 +710,7 @@ args: -n ExecuteAsynch T1 max-time: 1000 cmd: testNdbApi -args: -n BugBug28443 +args: -n Bug28443 #max-time: 500 #cmd: testInterpreter diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index f99cacfc613..516dfe24855 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -873,13 +873,32 @@ bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId) debug << "RestoreDataIterator::getNextFragment" << endl; - if (buffer_read(&Header, sizeof(Header), 1) != 1){ + while (1) + { + /* read first part of header */ + if (buffer_read(&Header, 8, 1) != 1) + { + ret = 0; + return false; + } // if + + /* skip if EMPTY_ENTRY */ + Header.SectionType = ntohl(Header.SectionType); + Header.SectionLength = ntohl(Header.SectionLength); + if (Header.SectionType == BackupFormat::EMPTY_ENTRY) + { + void *tmp; + buffer_get_ptr(&tmp, Header.SectionLength*4-8, 1); + continue; + } + break; + } + /* read rest of header */ + if (buffer_read(((char*)&Header)+8, sizeof(Header)-8, 1) != 1) + { ret = 0; return false; - } // if - - Header.SectionType = ntohl(Header.SectionType); - Header.SectionLength = ntohl(Header.SectionLength); + } Header.TableId = ntohl(Header.TableId); Header.FragmentNo = ntohl(Header.FragmentNo); Header.ChecksumType = ntohl(Header.ChecksumType); diff --git a/unittest/Makefile.am b/unittest/Makefile.am index 6197586b008..65fa615fb98 100644 --- a/unittest/Makefile.am +++ b/unittest/Makefile.am @@ -25,3 +25,6 @@ test: test-verbose: HARNESS_VERBOSE=1 perl unit.pl run $(unittests) + +# Don't update the files from bitkeeper +%::SCCS/s.% diff --git a/unittest/examples/Makefile.am b/unittest/examples/Makefile.am index f3183225888..94032c00928 100644 --- a/unittest/examples/Makefile.am +++ b/unittest/examples/Makefile.am @@ -22,3 +22,5 @@ LDADD = -lmytap noinst_PROGRAMS = simple-t skip-t todo-t skip_all-t no_plan-t core-t +# Don't update the files from bitkeeper +%::SCCS/s.% diff --git a/unittest/mysys/Makefile.am b/unittest/mysys/Makefile.am index 54b3d203e10..be91ef31c9d 100644 --- a/unittest/mysys/Makefile.am +++ b/unittest/mysys/Makefile.am @@ -23,3 +23,5 @@ LDADD = $(top_builddir)/unittest/mytap/libmytap.a \ noinst_PROGRAMS = bitmap-t base64-t my_atomic-t +# Don't update the files from bitkeeper +%::SCCS/s.% diff --git a/unittest/mytap/Makefile.am b/unittest/mytap/Makefile.am index 2f77c4e3e0d..c02bcd3b49d 100644 --- a/unittest/mytap/Makefile.am +++ b/unittest/mytap/Makefile.am @@ -21,3 +21,6 @@ noinst_HEADERS = tap.h libmytap_a_SOURCES = tap.c SUBDIRS = . 
t + +# Don't update the files from bitkeeper +%::SCCS/s.% diff --git a/unittest/mytap/t/Makefile.am b/unittest/mytap/t/Makefile.am index 576bbafa299..e89a088bb3a 100644 --- a/unittest/mytap/t/Makefile.am +++ b/unittest/mytap/t/Makefile.am @@ -21,3 +21,5 @@ LDADD = -lmytap noinst_PROGRAMS = basic-t +# Don't update the files from bitkeeper +%::SCCS/s.%
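
The user-visible knobs introduced by this patch are the FragmentLogFileSize and ODirect parameters registered in ConfigInfo.cpp and the TimeBetweenWatchDogCheckInitial interval read in Configuration.cpp. For illustration only, a data-node defaults section of the cluster configuration could exercise them as sketched below, in the same key/value style as the conf-dl145a.cnf hunk above; the parameter names come from the diff, while the concrete values are assumptions chosen inside the declared limits (FragmentLogFileSize 4M-1G, default 16M; ODirect default false; TimeBetweenWatchDogCheckInitial default 6000 ms).

[ndbd default]
NoOfFragmentLogFiles = 4
# Redo log files grow from the previously fixed 16 MB to 64 MB each (illustrative value)
FragmentLogFileSize = 64M
# Open redo log files with O_DIRECT where the platform and file system allow it
ODirect = true
# Allow a longer watchdog check interval while memory is allocated in the early start phases (illustrative value)
TimeBetweenWatchDogCheckInitial = 60000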