Merge trift2.:/MySQL/M51/mysql-5.1

into  trift2.:/MySQL/M51/push-5.1
joerg@trift2. 2007-06-06 19:59:18 +02:00
commit 449cd28598
82 changed files with 1024 additions and 475 deletions

View file

@ -105,6 +105,12 @@ check_cpu () {
*Athlon*64*)
cpu_arg="athlon64";
;;
*Turion*)
cpu_arg="athlon64";
;;
*Opteron*)
cpu_arg="athlon64";
;;
*Athlon*)
cpu_arg="athlon";
;;

View file

@ -136,8 +136,10 @@ test-bt:
@PERL@ ./mysql-test-run.pl --force --comment=rpl --suite=rpl
-cd mysql-test ; MTR_BUILD_THREAD=auto \
@PERL@ ./mysql-test-run.pl --force --comment=partitions --suite=parts
-cd mysql-test ; MTR_BUILD_THREAD=auto \
@PERL@ ./mysql-test-run.pl --force --comment=rowlock --suite=row_lock
# Re-enable the "rowlock" suite when bug#28685 is fixed
# -cd mysql-test ; MTR_BUILD_THREAD=auto \
# @PERL@ ./mysql-test-run.pl --force --comment=rowlock --suite=row_lock
# Re-enable the "jp" suite when bug#28563 is fixed
# -cd mysql-test ; MTR_BUILD_THREAD=auto \

View file

@ -35,7 +35,8 @@
Supply your own create and query SQL statements, with 50 clients
querying (200 selects for each):
mysqlslap --create="CREATE TABLE A (a int);INSERT INTO A (23)" \
mysqlslap --delimiter=";" \
--create="CREATE TABLE A (a int);INSERT INTO A VALUES (23)" \
--query="SELECT * FROM A" --concurrency=50 --iterations=200
Let the program build the query SQL statement with a table of two int
@ -554,7 +555,7 @@ static struct my_option my_long_options[] =
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"host", 'h', "Connect to host.", (uchar**) &host, (uchar**) &host, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"iterations", 'i', "Number of times too run the tests.", (uchar**) &iterations,
{"iterations", 'i', "Number of times to run the tests.", (uchar**) &iterations,
(uchar**) &iterations, 0, GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0},
{"number-char-cols", 'x',
"Number of VARCHAR columns to create table with if specifying --auto-generate-sql ",

View file

@ -7,7 +7,7 @@ AC_DEFUN([AC_SYS_COMPILER_FLAG],
AC_CACHE_VAL(mysql_cv_option_$2,
[
CFLAGS="[$]OLD_CFLAGS $1"
AC_TRY_RUN([int main(){exit(0);}],mysql_cv_option_$2=yes,mysql_cv_option_$2=no,mysql_cv_option_$2=no)
AC_TRY_LINK([int main(){exit(0);}],mysql_cv_option_$2=yes,mysql_cv_option_$2=no,mysql_cv_option_$2=no)
])
CFLAGS="[$]OLD_CFLAGS"

View file

@ -450,29 +450,6 @@ AC_DEFINE([HAVE_BOOL], [1], [bool is not defined by all C++ compilators])
fi
])dnl
AC_DEFUN([MYSQL_STACK_DIRECTION],
[AC_CACHE_CHECK(stack direction for C alloca, ac_cv_c_stack_direction,
[AC_TRY_RUN([#include <stdlib.h>
int find_stack_direction ()
{
static char *addr = 0;
auto char dummy;
if (addr == 0)
{
addr = &dummy;
return find_stack_direction ();
}
else
return (&dummy > addr) ? 1 : -1;
}
int main ()
{
exit (find_stack_direction() < 0);
}], ac_cv_c_stack_direction=1, ac_cv_c_stack_direction=-1,
ac_cv_c_stack_direction=0)])
AC_DEFINE_UNQUOTED(STACK_DIRECTION, $ac_cv_c_stack_direction)
])dnl
AC_DEFUN([MYSQL_CHECK_LONGLONG_TO_FLOAT],
[
AC_MSG_CHECKING(if conversion of longlong to float works)
@ -488,7 +465,9 @@ int main()
fprintf(file,"%g\n",f);
fclose(file);
return (0);
}], ac_cv_conv_longlong_to_float=`cat conftestval`, ac_cv_conv_longlong_to_float=0, ifelse([$2], , , ac_cv_conv_longlong_to_float=$2))])dnl
}], ac_cv_conv_longlong_to_float=`cat conftestval`,
ac_cv_conv_longlong_to_float=0,
ac_cv_conv_longlong_to_float="yes")])dnl # Cross compiling, assume can convert
if test "$ac_cv_conv_longlong_to_float" = "1" -o "$ac_cv_conv_longlong_to_float" = "yes"
then
ac_cv_conv_longlong_to_float=yes

View file

@ -230,14 +230,8 @@ AC_CHECK_PROGS(YACC, ['bison -y -p MYSQL'])
AC_CHECK_PROG(PDFMANUAL, pdftex, manual.pdf)
AC_CHECK_PROG(DVIS, tex, manual.dvi)
AC_MSG_CHECKING("return type of sprintf")
#check the return type of sprintf
case $SYSTEM_TYPE in
*netware*)
AC_DEFINE(SPRINTF_RETURNS_INT, [1]) AC_MSG_RESULT("int")
;;
*)
AC_MSG_CHECKING("return type of sprintf")
AC_TRY_RUN([
int main()
{
@ -263,10 +257,12 @@ AC_TRY_RUN([
[AC_DEFINE(SPRINTF_RETURNS_PTR, [1], [Broken sprintf])
AC_MSG_RESULT("ptr")],
[AC_DEFINE(SPRINTF_RETURNS_GARBAGE, [1], [Broken sprintf])
AC_MSG_RESULT("garbage")])
])
;;
esac
AC_MSG_RESULT("garbage")]
)],
# Cross compile, assume POSIX
[AC_DEFINE(SPRINTF_RETURNS_INT, [1], [POSIX sprintf])
AC_MSG_RESULT("int (we assume)")]
)
AC_PATH_PROG(uname_prog, uname, no)
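Note (illustrative, not part of this commit): the new cross-compilation branch above simply assumes the POSIX behaviour the runtime probe would otherwise detect, namely that sprintf() returns the number of characters written rather than a pointer into the buffer. A minimal C++ sketch of that contract:

    #include <cassert>
    #include <cstdio>
    #include <cstring>

    int main() {
      char buf[32];
      // POSIX/ISO C: sprintf returns the character count, excluding the '\0'.
      int n = std::sprintf(buf, "%s-%d", "redo", 42);
      assert(n == (int) std::strlen(buf));   // 7 == strlen("redo-42")
      std::printf("sprintf returned %d\n", n);
      return 0;
    }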
@ -1667,6 +1663,12 @@ AC_ARG_WITH(client-ldflags,
[CLIENT_EXTRA_LDFLAGS=])
AC_SUBST(CLIENT_EXTRA_LDFLAGS)
AC_ARG_WITH(mysqld-libs,
[ --with-mysqld-libs Extra libraries to link with for mysqld],
[MYSQLD_EXTRA_LIBS=$withval],
[MYSQLD_EXTRA_LIBS=])
AC_SUBST(MYSQLD_EXTRA_LIBS)
AC_ARG_WITH(lib-ccflags,
[ --with-lib-ccflags Extra CC options for libraries],
[LIB_EXTRA_CCFLAGS=$withval],
@ -1784,8 +1786,6 @@ MYSQL_TYPE_ACCEPT
#---END:
# Figure out what type of struct rlimit to use with setrlimit
MYSQL_TYPE_STRUCT_RLIMIT
# Find where the stack goes
MYSQL_STACK_DIRECTION
# We want to skip alloca on irix unconditionally. It may work on some version..
MYSQL_FUNC_ALLOCA
# Do struct timespec have members tv_sec or ts_sec

View file

@ -112,9 +112,6 @@ extern "C" {
/* signal by closing the sockets */
#define SIGNAL_WITH_VIO_CLOSE 1
/* On NetWare, stack grows towards lower address*/
#define STACK_DIRECTION -1
/* On NetWare, we need to set stack size for threads, otherwise default 16K is used */
#define NW_THD_STACKSIZE 65536

View file

@ -250,8 +250,6 @@ inline double ulonglong2double(ulonglong value)
#endif
#define STACK_DIRECTION -1
/* Optimized store functions for Intel x86 */
#ifndef _WIN64

View file

@ -71,7 +71,7 @@ dist-hook:
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(distdir)/std_data/ndb_backup50
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(distdir)/std_data/ndb_backup51
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(distdir)/lib
-rm -rf `find $(distdir)/suite -type d -name SCCS`
-rm -rf `find $(distdir)/suite -type d -name SCCS` $(distdir)/suite/row_lock
install-data-local:
$(mkinstalldirs) \
@ -113,7 +113,7 @@ install-data-local:
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup50
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup51
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(DESTDIR)$(testdir)/lib
for f in `(cd $(srcdir); find suite -type f | grep -v SCCS)`; \
for f in `(cd $(srcdir); find suite -type f | egrep -v 'SCCS|row_lock')`; \
do \
d=$(DESTDIR)$(testdir)/`dirname $$f`; \
mkdir -p $$d ; \

View file

@ -1,3 +1,4 @@
--source include/have_log_bin.inc
--source include/not_embedded.inc
--source ./include/have_federated_db.inc

View file

@ -3815,8 +3815,7 @@ sub mysqld_arguments ($$$$) {
"%s--log-slow-queries=%s-slow.log", $prefix, $log_base_path);
# Check if "extra_opt" contains --skip-log-bin
my $skip_binlog= grep(/^--skip-log-bin/, @$extra_opt);
my $skip_binlog= grep(/^--skip-log-bin/, @$extra_opt, @opt_extra_mysqld_opt);
if ( $mysqld->{'type'} eq 'master' )
{
if (! ($opt_skip_master_binlog || $skip_binlog) )

View file

@ -10,7 +10,8 @@ DataDir= CHOOSE_FILESYSTEM
MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 3
NoOfFragmentLogFiles= 8
FragmentLogFileSize= 6M
DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
#

View file

@ -10,8 +10,10 @@ DataDir= CHOOSE_FILESYSTEM
MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 3
NoOfFragmentLogFiles= 4
FragmentLogFileSize=12M
DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
ODirect= 1
# the following parametes just function as a small regression
# test that the parameter exists
InitialNoOfOpenFiles= 27

View file

@ -1,14 +1,12 @@
drop table if exists t1;
SHOW GLOBAL VARIABLES LIKE "%_format%";
SHOW GLOBAL VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format";
Variable_name Value
binlog_format <format>
date_format %d.%m.%Y
datetime_format %Y-%m-%d %H:%i:%s
default_week_format 0
time_format %H.%i.%s
SHOW SESSION VARIABLES LIKE "%_format%";
SHOW SESSION VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format";
Variable_name Value
binlog_format <format>
date_format %d.%m.%Y
datetime_format %Y-%m-%d %H:%i:%s
default_week_format 0
@ -30,9 +28,8 @@ set datetime_format= '%H:%i:%s %Y-%m-%d';
set datetime_format= '%H:%i:%s.%f %m-%d-%Y';
set datetime_format= '%h:%i:%s %p %Y-%m-%d';
set datetime_format= '%h:%i:%s.%f %p %Y-%m-%d';
SHOW SESSION VARIABLES LIKE "%format";
SHOW SESSION VARIABLES WHERE Variable_name LIKE "%format" AND Variable_name != "binlog_format";
Variable_name Value
binlog_format <format>
date_format %m-%d-%Y
datetime_format %h:%i:%s.%f %p %Y-%m-%d
default_week_format 0

View file

@ -1,2 +1,2 @@
Variable_name Value
have_log_bin ON
log_bin ON

View file

@ -770,35 +770,6 @@ c abc ab
d ab ab
e abc abc
DROP TABLE t1;
End of 5.0 tests
CREATE TABLE t1 (a VARCHAR(255) NOT NULL,
CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;
CREATE TABLE t2(a VARCHAR(255) NOT NULL,
b VARCHAR(255) NOT NULL,
c VARCHAR(255) NOT NULL,
CONSTRAINT pk_b_c_id PRIMARY KEY (b,c),
CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb;
drop table t1, t2;
create table t1 (a int not null primary key, b int) engine=ndb;
insert into t1 values(1,1),(2,2),(3,3);
create table t2 like t1;
insert into t2 select * from t1;
select * from t1 order by a;
a b
1 1
2 2
3 3
select * from t2 order by a;
a b
1 1
2 2
3 3
drop table t1, t2;
create table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
create table t2 like t1;
rename table t1 to t10, t2 to t20;
drop table t10,t20;
create table t1 (a int not null primary key, b int not null) engine=ndb;
create table t2 (a int not null primary key, b int not null) engine=ndb;
insert into t1 values (1,10), (2,20), (3,30);
@ -867,7 +838,36 @@ select * from t1 order by a;
a b
1 10
2 10
3 1
3 30
4 1
drop table t1,t2;
End of 5.0 tests
CREATE TABLE t1 (a VARCHAR(255) NOT NULL,
CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;
CREATE TABLE t2(a VARCHAR(255) NOT NULL,
b VARCHAR(255) NOT NULL,
c VARCHAR(255) NOT NULL,
CONSTRAINT pk_b_c_id PRIMARY KEY (b,c),
CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb;
drop table t1, t2;
create table t1 (a int not null primary key, b int) engine=ndb;
insert into t1 values(1,1),(2,2),(3,3);
create table t2 like t1;
insert into t2 select * from t1;
select * from t1 order by a;
a b
1 1
2 2
3 3
select * from t2 order by a;
a b
1 1
2 2
3 3
drop table t1, t2;
create table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
create table t2 like t1;
rename table t1 to t10, t2 to t20;
drop table t10,t20;
End of 5.1 tests

View file

@ -6,10 +6,8 @@
drop table if exists t1;
--enable_warnings
--replace_result ROW <format> STATEMENT <format> MIXED <format>
SHOW GLOBAL VARIABLES LIKE "%_format%";
--replace_result ROW <format> STATEMENT <format> MIXED <format>
SHOW SESSION VARIABLES LIKE "%_format%";
SHOW GLOBAL VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format";
SHOW SESSION VARIABLES WHERE Variable_name LIKE "%_format%" AND Variable_name != "binlog_format";
#
# Test setting a lot of different formats to see which formats are accepted and
@ -36,8 +34,7 @@ set datetime_format= '%H:%i:%s.%f %m-%d-%Y';
set datetime_format= '%h:%i:%s %p %Y-%m-%d';
set datetime_format= '%h:%i:%s.%f %p %Y-%m-%d';
--replace_result ROW <format> STATEMENT <format> MIXED <format>
SHOW SESSION VARIABLES LIKE "%format";
SHOW SESSION VARIABLES WHERE Variable_name LIKE "%format" AND Variable_name != "binlog_format";
--error 1231
SET time_format='%h:%i:%s';

View file

@ -43,7 +43,6 @@ rpl_ndb_ddl : BUG#28798 2007-05-31 lars Valgrind failure in NDB
#rpl_ndb_dd_advance : Bug#25913 rpl_ndb_dd_advance fails randomly
rpl_ndb_stm_innodb : Bug#26783
ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms
im_options_set : Bug#20294: Instance manager tests fail randomly

View file

@ -3,6 +3,8 @@
# We verify that we did not introduce a deadlock.
# This is intended to mimick how mysqldump and innobackup work.
-- source include/have_log_bin.inc
# And it requires InnoDB
-- source include/not_embedded.inc
-- source include/have_innodb.inc

View file

@ -2,6 +2,8 @@
# TODO: Need to look at making a row based version once the new row based client is completed. [jbm]
-- source include/have_binlog_format_mixed_or_statement.inc
-- source include/have_log_bin.inc
# Embedded server doesn't support binlogging
-- source include/not_embedded.inc

View file

@ -1,6 +1,9 @@
# Embedded server doesn't support external clients
--source include/not_embedded.inc
# Binlog is required
--source include/have_log_bin.inc
--disable_warnings
DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3;
drop database if exists mysqldump_test_db;

View file

@ -1,3 +1,5 @@
-- source include/have_log_bin.inc
# This test should work in embedded server after mysqltest is fixed
-- source include/not_embedded.inc

View file

@ -752,46 +752,6 @@ INSERT INTO t1 VALUES
SELECT * FROM t1 ORDER BY a;
DROP TABLE t1;
# End of 5.0 tests
--echo End of 5.0 tests
#
# Bug #18483 Cannot create table with FK constraint
# ndb does not support foreign key constraint, it is silently ignored
# in line with other storage engines
#
CREATE TABLE t1 (a VARCHAR(255) NOT NULL,
CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;
CREATE TABLE t2(a VARCHAR(255) NOT NULL,
b VARCHAR(255) NOT NULL,
c VARCHAR(255) NOT NULL,
CONSTRAINT pk_b_c_id PRIMARY KEY (b,c),
CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb;
drop table t1, t2;
# bug#24301
create table t1 (a int not null primary key, b int) engine=ndb;
insert into t1 values(1,1),(2,2),(3,3);
create table t2 like t1;
insert into t2 select * from t1;
select * from t1 order by a;
select * from t2 order by a;
drop table t1, t2;
# create table if not exists
--disable_warnings
create table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
--enable_warnings
# create like
create table t2 like t1;
# multi rename
rename table t1 to t10, t2 to t20;
drop table t10,t20;
# delete
create table t1 (a int not null primary key, b int not null) engine=ndb;
create table t2 (a int not null primary key, b int not null) engine=ndb;
@ -832,4 +792,44 @@ update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
select * from t1 order by a;
drop table t1,t2;
# End of 5.0 tests
--echo End of 5.0 tests
#
# Bug #18483 Cannot create table with FK constraint
# ndb does not support foreign key constraint, it is silently ignored
# in line with other storage engines
#
CREATE TABLE t1 (a VARCHAR(255) NOT NULL,
CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;
CREATE TABLE t2(a VARCHAR(255) NOT NULL,
b VARCHAR(255) NOT NULL,
c VARCHAR(255) NOT NULL,
CONSTRAINT pk_b_c_id PRIMARY KEY (b,c),
CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb;
drop table t1, t2;
# bug#24301
create table t1 (a int not null primary key, b int) engine=ndb;
insert into t1 values(1,1),(2,2),(3,3);
create table t2 like t1;
insert into t2 select * from t1;
select * from t1 order by a;
select * from t2 order by a;
drop table t1, t2;
# create table if not exists
--disable_warnings
create table t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
create table if not exists t1 (a int not null primary key, b int not null default 0, c varchar(254)) engine=ndb;
--enable_warnings
# create like
create table t2 like t1;
# multi rename
rename table t1 to t10, t2 to t20;
drop table t10,t20;
--echo End of 5.1 tests

View file

@ -1,4 +1,5 @@
-- source include/not_embedded.inc
-- source include/have_log_bin.inc
#
# SQL Syntax for Prepared Statements test
#

View file

@ -2,6 +2,7 @@
# tests that require InnoDB...
#
-- source include/have_log_bin.inc
-- source include/have_innodb.inc
--disable_warnings

View file

@ -3,6 +3,7 @@
# TODO: Create row based version once $MYSQL_BINLOG has new RB version
# Embedded server does not support binlogging
--source include/not_embedded.inc
--source include/have_log_bin.inc
# Check that user variables are binlogged correctly (BUG#3875)
create table t1 (a varchar(50));

View file

@ -325,6 +325,9 @@ static int keycache_pthread_cond_signal(pthread_cond_t *cond);
#endif /* defined(KEYCACHE_DEBUG) */
#if !defined(DBUG_OFF)
#if defined(inline)
#undef inline
#endif
#define inline /* disabled inline for easier debugging */
static int fail_block(BLOCK_LINK *block);
static int fail_hlink(HASH_LINK *hlink);

View file

@ -36,3 +36,6 @@ noinst_LIBRARIES = @plugin_daemon_example_static_target@
libdaemon_example_a_CXXFLAGS = $(AM_CFLAGS)
libdaemon_example_a_CFLAGS = $(AM_CFLAGS)
libdaemon_example_a_SOURCES= daemon_example.cc
# Don't update the files from bitkeeper
%::SCCS/s.%

View file

@ -22,3 +22,6 @@ pkglib_LTLIBRARIES= mypluglib.la
mypluglib_la_SOURCES= plugin_example.c
mypluglib_la_LDFLAGS= -module -rpath $(pkglibdir)
mypluglib_la_CFLAGS= -DMYSQL_DYNAMIC_PLUGIN
# Don't update the files from bitkeeper
%::SCCS/s.%

View file

@ -38,7 +38,8 @@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@pstack_libs@ \
@mysql_plugin_libs@ \
$(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \
$(yassl_libs) $(openssl_libs)
$(yassl_libs) $(openssl_libs) \
@MYSQLD_EXTRA_LIBS@
noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
item_strfunc.h item_timefunc.h \

View file

@ -278,11 +278,6 @@ inline
int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
bool force_release)
{
#ifdef NOT_USED
int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
h->release_completed_operations(trans, force_release);
return h->m_ignore_no_key ?
execute_no_commit_ignore_no_key(h,trans) :
@ -294,11 +289,6 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
inline
int execute_commit(ha_ndbcluster *h, NdbTransaction *trans)
{
#ifdef NOT_USED
int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
return trans->execute(NdbTransaction::Commit,
NdbOperation::AbortOnError,
h->m_force_send);
@ -307,11 +297,6 @@ int execute_commit(ha_ndbcluster *h, NdbTransaction *trans)
inline
int execute_commit(THD *thd, NdbTransaction *trans)
{
#ifdef NOT_USED
int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
return trans->execute(NdbTransaction::Commit,
NdbOperation::AbortOnError,
thd->variables.ndb_force_send);
@ -321,11 +306,6 @@ inline
int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans,
bool force_release)
{
#ifdef NOT_USED
int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
h->release_completed_operations(trans, force_release);
return trans->execute(NdbTransaction::NoCommit,
NdbOperation::AO_IgnoreError,
@ -2925,7 +2905,8 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
* If IGNORE the ignore constraint violations on primary and unique keys,
* but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE
*/
if (m_ignore_dup_key && thd->lex->sql_command == SQLCOM_UPDATE)
if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
{
int peek_res= peek_indexed_rows(new_data, pk_update);
@ -4267,8 +4248,6 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd,
extern MASTER_INFO *active_mi;
static int ndbcluster_update_apply_status(THD *thd, int do_update)
{
return 0;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
Ndb *ndb= thd_ndb->ndb;
NDBDICT *dict= ndb->getDictionary();

View file

@ -40,6 +40,7 @@ C_MODE_END
/* the number of digits that my_decimal can possibly contain */
#define DECIMAL_MAX_POSSIBLE_PRECISION (DECIMAL_BUFF_LENGTH * 9)
/*
maximum guaranteed precision of number in decimal digits (number of our
digits * number of decimal digits in one our big digit - number of decimal

View file

@ -5022,17 +5022,14 @@ bool check_merge_table_access(THD *thd, char *db,
Check stack size; Send error if there isn't enough stack to continue
****************************************************************************/
#if STACK_DIRECTION < 0
#define used_stack(A,B) (long) (A - B)
#else
#define used_stack(A,B) (long) (B - A)
#endif
#ifndef EMBEDDED_LIBRARY
#define used_stack(A,B) (long)(A > B ? A - B : B - A)
#ifndef DBUG_OFF
long max_stack_used;
#endif
#ifndef EMBEDDED_LIBRARY
/*
Note: The 'buf' parameter is necessary, even if it is unused here.
- fix_fields functions has a "dummy" buffer large enough for the
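Note (a sketch, not the server code): STACK_DIRECTION can be dropped because the reworked used_stack() above just takes the absolute distance between two stack addresses, which is correct whether the stack grows towards lower or higher addresses:

    #include <cstdio>

    // Approximate stack usage as the absolute distance between two stack
    // addresses; no compile-time knowledge of the growth direction is needed.
    static long used_stack(const char *a, const char *b) {
      return (long) (a > b ? a - b : b - a);
    }

    static void leaf(const char *outer) {
      char here;
      std::printf("approx stack used: %ld bytes\n", used_stack(outer, &here));
    }

    int main() {
      char top;
      leaf(&top);
      return 0;
    }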

View file

@ -64,6 +64,7 @@
#define CFG_DB_FILESYSTEM_PATH 125
#define CFG_DB_NO_REDOLOG_FILES 126
#define CFG_DB_REDOLOG_FILE_SIZE 140
#define CFG_DB_LCP_DISC_PAGES_TUP 127
#define CFG_DB_LCP_DISC_PAGES_TUP_SR 128
@ -81,6 +82,8 @@
#define CFG_DB_BACKUP_WRITE_SIZE 136
#define CFG_DB_BACKUP_MAX_WRITE_SIZE 139
#define CFG_DB_WATCHDOG_INTERVAL_INITIAL 141
#define CFG_LOG_DESTINATION 147
#define CFG_DB_DISCLESS 148
@ -113,6 +116,8 @@
#define CFG_DB_MEMREPORT_FREQUENCY 166
#define CFG_DB_O_DIRECT 168
#define CFG_DB_SGA 198 /* super pool mem */
#define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */

View file

@ -144,4 +144,6 @@ extern "C" {
#define MAX(x,y) (((x)>(y))?(x):(y))
#endif
#define NDB_O_DIRECT_WRITE_ALIGNMENT 512
#endif

View file

@ -37,9 +37,6 @@ NDB_TICKS NdbTick_CurrentMillisecond(void);
*/
int NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros);
/*#define TIME_MEASUREMENT*/
#ifdef TIME_MEASUREMENT
struct MicroSecondTimer {
NDB_TICKS seconds;
NDB_TICKS micro_seconds;
@ -54,7 +51,6 @@ struct MicroSecondTimer {
NDB_TICKS NdbTick_getMicrosPassed(struct MicroSecondTimer start,
struct MicroSecondTimer stop);
int NdbTick_getMicroTimer(struct MicroSecondTimer* time_now);
#endif
#ifdef __cplusplus
}

View file

@ -1,6 +1,6 @@
TARGET = mgmapi_logevent
SRCS = $(TARGET).cpp
OBJS = $(TARGET).o
SRCS = main.cpp
OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
$(TARGET).o: $(SRCS)
$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS)
clean:

View file

@ -1,6 +1,6 @@
TARGET = mgmapi_logevent2
SRCS = $(TARGET).cpp
OBJS = $(TARGET).o
SRCS = main.cpp
OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
$(TARGET).o: $(SRCS)
$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS)
clean:

View file

@ -1,6 +1,6 @@
TARGET = ndbapi_simple_dual
SRCS = $(TARGET).cpp
OBJS = $(TARGET).o
SRCS = main.cpp
OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
$(TARGET).o: $(SRCS)
$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
clean:

View file

@ -1,6 +1,6 @@
TARGET = ndbapi_simple_index
SRCS = $(TARGET).cpp
OBJS = $(TARGET).o
SRCS = main.cpp
OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
$(TARGET).o: $(SRCS)
$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS)
clean:

View file

@ -15,7 +15,7 @@
#include <ndb_global.h>
#include "NdbTick.h"
#include <NdbTick.h>
#define NANOSEC_PER_SEC 1000000000
#define MICROSEC_PER_SEC 1000000
@ -71,7 +71,6 @@ NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){
}
#endif
#ifdef TIME_MEASUREMENT
int
NdbTick_getMicroTimer(struct MicroSecondTimer* input_timer)
{
@ -102,4 +101,3 @@ NdbTick_getMicrosPassed(struct MicroSecondTimer start,
}
return ret_value;
}
#endif

View file

@ -818,6 +818,7 @@ TransporterRegistry::performReceive()
{
Uint32 * ptr;
Uint32 sz = t->getReceiveData(&ptr);
transporter_recv_from(callbackObj, nodeId);
Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]);
t->updateReceiveDataPtr(szUsed);
}

View file

@ -1,5 +1,5 @@
Next QMGR 1
Next NDBCNTR 1001
Next NDBCNTR 1002
Next NDBFS 2000
Next DBACC 3002
Next DBTUP 4029
@ -523,3 +523,4 @@ Dbtup:
NDBCNTR:
1000: Crash insertion on SystemError::CopyFragRef
1001: Delay sending NODE_FAILREP (to own node), until error is cleared

View file

@ -2771,6 +2771,8 @@ Backup::openFiles(Signal* signal, BackupRecordPtr ptr)
c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
filePtr.p->m_flags |= BackupFile::BF_OPENING;
if (c_defaults.m_o_direct)
req->fileFlags |= FsOpenReq::OM_DIRECT;
req->userPointer = filePtr.i;
FsOpenReq::setVersion(req->fileNumber, 2);
FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
@ -3745,12 +3747,31 @@ Backup::OperationRecord::newFragment(Uint32 tableId, Uint32 fragNo)
}
bool
Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo)
Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record)
{
Uint32 * tmp;
const Uint32 footSz = sizeof(BackupFormat::DataFile::FragmentFooter) >> 2;
Uint32 sz = footSz + 1;
if(dataBuffer.getWritePtr(&tmp, footSz + 1)) {
if (fill_record)
{
Uint32 * new_tmp;
if (!dataBuffer.getWritePtr(&tmp, sz))
return false;
new_tmp = tmp + sz;
if ((UintPtr)new_tmp & (sizeof(Page32)-1))
{
/* padding is needed to get full write */
new_tmp += 2 /* to fit empty header minimum 2 words*/;
new_tmp = (Uint32 *)(((UintPtr)new_tmp + sizeof(Page32)-1) &
~(UintPtr)(sizeof(Page32)-1));
/* new write sz */
sz = new_tmp - tmp;
}
}
if(dataBuffer.getWritePtr(&tmp, sz)) {
jam();
* tmp = 0; // Finish record stream
tmp++;
@ -3762,7 +3783,17 @@ Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo)
foot->FragmentNo = htonl(fragNo);
foot->NoOfRecords = htonl(noOfRecords);
foot->Checksum = htonl(0);
dataBuffer.updateWritePtr(footSz + 1);
if (sz != footSz + 1)
{
tmp += footSz;
memset(tmp, 0, (sz - footSz - 1) * 4);
*tmp = htonl(BackupFormat::EMPTY_ENTRY);
tmp++;
*tmp = htonl(sz - footSz - 1);
}
dataBuffer.updateWritePtr(sz);
return true;
}//if
return false;
@ -3864,8 +3895,13 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
return;
}//if
BackupRecordPtr ptr LINT_SET_PTR;
c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
OperationRecord & op = filePtr.p->operation;
if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo)) {
if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo,
c_defaults.m_o_direct))
{
jam();
signal->theData[0] = BackupContinueB::BUFFER_FULL_FRAG_COMPLETE;
signal->theData[1] = filePtr.i;
@ -3875,9 +3911,6 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD;
BackupRecordPtr ptr LINT_SET_PTR;
c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
if (ptr.p->is_lcp())
{
ptr.p->slaveState.setState(STOPPING);
@ -4914,6 +4947,8 @@ Backup::lcp_open_file(Signal* signal, BackupRecordPtr ptr)
FsOpenReq::OM_CREATE |
FsOpenReq::OM_APPEND |
FsOpenReq::OM_AUTOSYNC;
if (c_defaults.m_o_direct)
req->fileFlags |= FsOpenReq::OM_DIRECT;
FsOpenReq::v2_setCount(req->fileNumber, 0xFFFFFFFF);
req->auto_sync_size = c_defaults.m_disk_synch_size;
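Note (a sketch; padded_size and the toy 512-byte page are stand-ins, the real unit is sizeof(Page32)): with O_DIRECT enabled the fragment-footer write above is rounded up to the next page boundary, and the slack is described by a 2-word EMPTY_ENTRY header (section type plus length) so a reader of the backup file can skip it. The rounding rule, in isolation:

    #include <cstdint>
    #include <cstdio>

    // Round the write size (in 32-bit words) up so that the write pointer ends
    // on a page boundary, reserving 2 words for the empty-entry header.
    static uint32_t padded_size(uint32_t *tmp, uint32_t sz, uintptr_t page_bytes) {
      uint32_t *new_tmp = tmp + sz;
      if ((uintptr_t) new_tmp & (page_bytes - 1)) {     // not on a page boundary
        new_tmp += 2;                                   // room for the header
        new_tmp = (uint32_t *) (((uintptr_t) new_tmp + page_bytes - 1) &
                                ~(page_bytes - 1));
        sz = (uint32_t) (new_tmp - tmp);                // padded size in words
      }
      return sz;
    }

    int main() {
      alignas(512) static uint32_t buf[256];            // page-aligned toy buffer
      std::printf("9-word footer padded to %u words\n",
                  padded_size(buf, 9, 512));            // -> 128 (one 512-byte page)
      return 0;
    }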

View file

@ -240,7 +240,7 @@ public:
* Once per fragment
*/
bool newFragment(Uint32 tableId, Uint32 fragNo);
bool fragComplete(Uint32 tableId, Uint32 fragNo);
bool fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record);
/**
* Once per scan frag (next) req/conf
@ -534,6 +534,7 @@ public:
Uint32 m_disk_write_speed;
Uint32 m_disk_synch_size;
Uint32 m_diskless;
Uint32 m_o_direct;
};
/**

View file

@ -32,7 +32,8 @@ struct BackupFormat {
TABLE_LIST = 4,
TABLE_DESCRIPTION = 5,
GCP_ENTRY = 6,
FRAGMENT_INFO = 7
FRAGMENT_INFO = 7,
EMPTY_ENTRY = 8
};
struct FileHeader {
@ -93,6 +94,13 @@ struct BackupFormat {
Uint32 NoOfRecords;
Uint32 Checksum;
};
/* optional padding for O_DIRECT */
struct EmptyEntry {
Uint32 SectionType;
Uint32 SectionLength;
/* not used data */
};
};
/**

View file

@ -148,10 +148,13 @@ Backup::execREAD_CONFIG_REQ(Signal* signal)
c_defaults.m_disk_write_speed = 10 * (1024 * 1024);
c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024);
c_defaults.m_disk_synch_size = 4 * (1024 * 1024);
c_defaults.m_o_direct = true;
Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS,
&c_defaults.m_diskless));
ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT,
&c_defaults.m_o_direct);
ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR,
&c_defaults.m_disk_write_speed_sr);
ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED,
@ -204,7 +207,7 @@ Backup::execREAD_CONFIG_REQ(Signal* signal)
/ sizeof(Page32);
// We need to allocate an additional of 2 pages. 1 page because of a bug in
// ArrayPool and another one for DICTTAINFO.
c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2);
c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true);
{ // Init all tables
SLList<Table> tables(c_tablePool);

View file

@ -270,8 +270,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){
* ptr = &Tp[Tr];
DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d",
Tr, Tw, Ts, Tm, sz1, * sz));
DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d",
Tr, Tmw, Ts, Tm, sz1, * sz));
return true;
}
@ -279,8 +279,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){
if(!m_eof){
* _eof = false;
DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> false",
Tr, Tw, Ts, Tm, sz1));
DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> false",
Tr, Tmw, Ts, Tm, sz1));
return false;
}
@ -289,8 +289,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){
* _eof = true;
* ptr = &Tp[Tr];
DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d eof",
Tr, Tw, Ts, Tm, sz1, * sz));
DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d eof",
Tr, Tmw, Ts, Tm, sz1, * sz));
return false;
}
@ -316,13 +316,13 @@ FsBuffer::getWritePtr(Uint32 ** ptr, Uint32 sz){
if(sz1 > sz){ // Note at least 1 word of slack
* ptr = &Tp[Tw];
DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> true",
sz, Tr, Tw, Ts, sz1));
DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> true",
sz, Tw, sz1));
return true;
}
DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> false",
sz, Tr, Tw, Ts, sz1));
DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> false",
sz, Tw, sz1));
return false;
}
@ -339,11 +339,15 @@ FsBuffer::updateWritePtr(Uint32 sz){
m_free -= sz;
if(Tnew < Ts){
m_writeIndex = Tnew;
DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d",
sz, m_writeIndex));
return;
}
memcpy(Tp, &Tp[Ts], (Tnew - Ts) << 2);
m_writeIndex = Tnew - Ts;
DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d",
sz, m_writeIndex));
}
inline

View file

@ -698,6 +698,9 @@ void Dbdict::execFSCLOSECONF(Signal* signal)
case FsConnectRecord::OPEN_READ_SCHEMA2:
openSchemaFile(signal, 1, fsPtr.i, false, false);
break;
case FsConnectRecord::OPEN_READ_TAB_FILE2:
openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
break;
default:
jamLine((fsPtr.p->fsState & 0xFFF));
ndbrequire(false);
@ -1073,8 +1076,11 @@ void Dbdict::readTableConf(Signal* signal,
void Dbdict::readTableRef(Signal* signal,
FsConnectRecordPtr fsPtr)
{
/**
* First close corrupt file
*/
fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
return;
}//Dbdict::readTableRef()

View file

@ -4741,12 +4741,18 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
jam();
const Uint32 nodeId = failedNodePtr.i;
if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){
if (isMaster() && c_lcpState.m_participatingLQH.get(failedNodePtr.i))
{
/*----------------------------------------------------*/
/* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE */
/* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */
/* THE NODE HAVE MISSED A LOCAL CHECKPOINT. */
/*----------------------------------------------------*/
/**
* Bug#28717, Only master should do this, as this status is copied
* to other nodes
*/
switch (failedNodePtr.p->activeStatus) {
case Sysfile::NS_Active:
jam();

View file

@ -71,7 +71,6 @@ class Dbtup;
/* CONSTANTS OF THE LOG PAGES */
/* ------------------------------------------------------------------------- */
#define ZPAGE_HEADER_SIZE 32
#define ZNO_MBYTES_IN_FILE 16
#define ZPAGE_SIZE 8192
#define ZPAGES_IN_MBYTE 32
#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
@ -115,9 +114,6 @@ class Dbtup;
/* ------------------------------------------------------------------------- */
/* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. */
/* ------------------------------------------------------------------------- */
#define ZOPEN_READ 0
#define ZOPEN_WRITE 1
#define ZOPEN_READ_WRITE 2
#define ZVAR_NO_LOG_PAGE_WORD 1
#define ZLIST_OF_PAIRS 0
#define ZLIST_OF_PAIRS_SYNCH 16
@ -142,7 +138,7 @@ class Dbtup;
/* IN THE MBYTE. */
/* ------------------------------------------------------------------------- */
#define ZFD_HEADER_SIZE 3
#define ZFD_PART_SIZE 48
#define ZFD_MBYTE_SIZE 3
#define ZLOG_HEAD_SIZE 8
#define ZNEXT_LOG_SIZE 2
#define ZABORT_LOG_SIZE 3
@ -169,7 +165,6 @@ class Dbtup;
#define ZPOS_LOG_TYPE 0
#define ZPOS_NO_FD 1
#define ZPOS_FILE_NO 2
#define ZMAX_LOG_FILES_IN_PAGE_ZERO 40
/* ------------------------------------------------------------------------- */
/* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */
/* LOG RECORD. */
@ -1437,17 +1432,17 @@ public:
* header of each log file. That information is used during
* system restart to find the tail of the log.
*/
UintR logLastPrepRef[16];
UintR *logLastPrepRef;
/**
* The max global checkpoint completed before the mbyte in the
* log file was started. One variable per mbyte.
*/
UintR logMaxGciCompleted[16];
UintR *logMaxGciCompleted;
/**
* The max global checkpoint started before the mbyte in the log
* file was started. One variable per mbyte.
*/
UintR logMaxGciStarted[16];
UintR *logMaxGciStarted;
/**
* This variable contains the file name as needed by the file
* system when opening the file.
@ -2163,6 +2158,7 @@ private:
void execSTART_RECREF(Signal* signal);
void execGCP_SAVEREQ(Signal* signal);
void execFSOPENREF(Signal* signal);
void execFSOPENCONF(Signal* signal);
void execFSCLOSECONF(Signal* signal);
void execFSWRITECONF(Signal* signal);
@ -2671,6 +2667,8 @@ private:
LogPartRecord *logPartRecord;
LogPartRecordPtr logPartPtr;
UintR clogPartFileSize;
Uint32 clogFileSize; // In MBYTE
Uint32 cmaxLogFilesInPageZero; //
// Configurable
LogFileRecord *logFileRecord;
@ -2678,13 +2676,15 @@ private:
UintR cfirstfreeLogFile;
UintR clogFileFileSize;
#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */
#define ZLFO_MIN_FILE_SIZE 256
// RedoBuffer/32K minimum ZLFO_MIN_FILE_SIZE
LogFileOperationRecord *logFileOperationRecord;
LogFileOperationRecordPtr lfoPtr;
UintR cfirstfreeLfo;
UintR clfoFileSize;
LogPageRecord *logPageRecord;
void *logPageRecordUnaligned;
LogPageRecordPtr logPagePtr;
UintR cfirstfreeLogPage;
UintR clogPageFileSize;
@ -2695,7 +2695,7 @@ private:
UintR cfirstfreePageRef;
UintR cpageRefFileSize;
#define ZSCANREC_FILE_SIZE 100
// Configurable
ArrayPool<ScanRecord> c_scanRecordPool;
ScanRecordPtr scanptr;
UintR cscanNoFreeRec;
@ -2888,6 +2888,7 @@ private:
UintR ctransidHash[1024];
Uint32 c_diskless;
Uint32 c_o_direct;
Uint32 c_error_insert_table_id;
public:

View file

@ -30,11 +30,11 @@ void Dblqh::initData()
cgcprecFileSize = ZGCPREC_FILE_SIZE;
chostFileSize = MAX_NDB_NODES;
clcpFileSize = ZNO_CONCURRENT_LCP;
clfoFileSize = ZLFO_FILE_SIZE;
clfoFileSize = 0;
clogFileFileSize = 0;
clogPartFileSize = ZLOG_PART_FILE_SIZE;
cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
cscanrecFileSize = ZSCANREC_FILE_SIZE;
cscanrecFileSize = 0;
ctabrecFileSize = 0;
ctcConnectrecFileSize = 0;
ctcNodeFailrecFileSize = MAX_NDB_NODES;
@ -49,6 +49,7 @@ void Dblqh::initData()
logFileRecord = 0;
logFileOperationRecord = 0;
logPageRecord = 0;
logPageRecordUnaligned= 0;
pageRefRecord = 0;
tablerec = 0;
tcConnectionrec = 0;
@ -60,6 +61,8 @@ void Dblqh::initData()
cLqhTimeOutCheckCount = 0;
cbookedAccOps = 0;
m_backup_ptr = RNIL;
clogFileSize = 16;
cmaxLogFilesInPageZero = 40;
}//Dblqh::initData()
void Dblqh::initRecords()
@ -105,10 +108,13 @@ void Dblqh::initRecords()
sizeof(LogFileOperationRecord),
clfoFileSize);
logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord",
sizeof(LogPageRecord),
clogPageFileSize,
false);
logPageRecord =
(LogPageRecord*)allocRecordAligned("LogPageRecord",
sizeof(LogPageRecord),
clogPageFileSize,
&logPageRecordUnaligned,
NDB_O_DIRECT_WRITE_ALIGNMENT,
false);
pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
sizeof(PageRefRecord),
@ -260,6 +266,7 @@ Dblqh::Dblqh(Block_context& ctx):
addRecSignal(GSN_START_FRAGREQ, &Dblqh::execSTART_FRAGREQ);
addRecSignal(GSN_START_RECREF, &Dblqh::execSTART_RECREF);
addRecSignal(GSN_GCP_SAVEREQ, &Dblqh::execGCP_SAVEREQ);
addRecSignal(GSN_FSOPENREF, &Dblqh::execFSOPENREF, true);
addRecSignal(GSN_FSOPENCONF, &Dblqh::execFSOPENCONF);
addRecSignal(GSN_FSCLOSECONF, &Dblqh::execFSCLOSECONF);
addRecSignal(GSN_FSWRITECONF, &Dblqh::execFSWRITECONF);
@ -377,7 +384,7 @@ Dblqh::~Dblqh()
sizeof(LogFileOperationRecord),
clfoFileSize);
deallocRecord((void**)&logPageRecord,
deallocRecord((void**)&logPageRecordUnaligned,
"LogPageRecord",
sizeof(LogPageRecord),
clogPageFileSize);
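Note (a rough sketch; alloc_aligned is a made-up helper, the real allocRecordAligned/deallocRecord are NDB kernel functions whose bodies are not shown in this diff): keeping the raw allocation in logPageRecordUnaligned while handing out a pointer rounded up to NDB_O_DIRECT_WRITE_ALIGNMENT (512 bytes) is the usual over-allocate-and-round pattern:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Return a pointer aligned to 'alignment' within a slightly larger raw
    // allocation; the raw pointer is kept in *unaligned for the later free().
    static void *alloc_aligned(size_t bytes, size_t alignment, void **unaligned) {
      *unaligned = std::malloc(bytes + alignment);
      if (*unaligned == nullptr)
        return nullptr;
      uintptr_t p = (uintptr_t) *unaligned;
      p = (p + alignment - 1) & ~(uintptr_t) (alignment - 1);
      return (void *) p;
    }

    int main() {
      void *raw = nullptr;
      void *pages = alloc_aligned(8 * 32768, 512, &raw);  // 512 = O_DIRECT alignment
      std::printf("aligned: %p (raw %p)\n", pages, raw);
      std::free(raw);                                     // free via the raw pointer
      return 0;
    }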

View file

@ -1023,6 +1023,11 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
clogPageFileSize+= (16 - mega_byte_part);
}
/* maximum number of log file operations */
clfoFileSize = clogPageFileSize;
if (clfoFileSize < ZLFO_MIN_FILE_SIZE)
clfoFileSize = ZLFO_MIN_FILE_SIZE;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT,
&ctcConnectrecFileSize));
@ -1031,14 +1036,44 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless));
c_o_direct = true;
ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, &c_o_direct);
Uint32 tmp= 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &tmp));
c_fragment_pool.setSize(tmp);
if (!ndb_mgm_get_int_parameter(p, CFG_DB_REDOLOG_FILE_SIZE,
&clogFileSize))
{
// convert to mbyte
clogFileSize = (clogFileSize + 1024*1024 - 1) / (1024 * 1024);
ndbrequire(clogFileSize >= 4 && clogFileSize <= 1024);
}
cmaxLogFilesInPageZero = (ZPAGE_SIZE - ZPAGE_HEADER_SIZE - 128) /
(ZFD_MBYTE_SIZE * clogFileSize);
/**
* "Old" cmaxLogFilesInPageZero was 40
* Each FD need 3 words per mb, require that they can fit into 1 page
* (atleast 1 FD)
* Is also checked in ConfigInfo.cpp (max FragmentLogFileSize = 1Gb)
* 1Gb = 1024Mb => 3(ZFD_MBYTE_SIZE) * 1024 < 8192 (ZPAGE_SIZE)
*/
if (cmaxLogFilesInPageZero > 40)
{
jam();
cmaxLogFilesInPageZero = 40;
}
else
{
ndbrequire(cmaxLogFilesInPageZero);
}
initRecords();
initialiseRecordsLab(signal, 0, ref, senderData);
return;
}//Dblqh::execSIZEALT_REP()
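Note (a worked example of the formula above, not shipped code; the file sizes are sample values): with the default 16 MB fragment log file the budget still allows the historic 40 descriptors in page zero, and even the 1 GB maximum enforced in ConfigInfo.cpp leaves room for at least one:

    #include <cstdio>

    // Each log file needs ZFD_MBYTE_SIZE (3) words per megabyte of file size,
    // and all descriptors must fit into one 8192-word page minus header/slack.
    int main() {
      const unsigned ZPAGE_SIZE = 8192, ZPAGE_HEADER_SIZE = 32, ZFD_MBYTE_SIZE = 3;
      const unsigned sizesMb[] = {16, 64, 1024};         // FragmentLogFileSize in MB
      for (unsigned i = 0; i < 3; i++) {
        unsigned clogFileSize = sizesMb[i];
        unsigned maxFiles = (ZPAGE_SIZE - ZPAGE_HEADER_SIZE - 128) /
                            (ZFD_MBYTE_SIZE * clogFileSize);
        if (maxFiles > 40) maxFiles = 40;                // keep the old upper bound
        std::printf("%4u MB log file -> %u descriptors in page zero\n",
                    clogFileSize, maxFiles);             // prints 40, 40, 2
      }
      return 0;
    }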
@ -11788,9 +11823,9 @@ void Dblqh::sendStartLcp(Signal* signal)
Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
const LogPartRecordPtr &sltLogPartPtr)
{
Uint32 hf = sltCurrLogFilePtr.p->fileNo*ZNO_MBYTES_IN_FILE+sltCurrLogFilePtr.p->currentMbyte;
Uint32 tf = sltLogPartPtr.p->logTailFileNo*ZNO_MBYTES_IN_FILE+sltLogPartPtr.p->logTailMbyte;
Uint32 sz = sltLogPartPtr.p->noLogFiles*ZNO_MBYTES_IN_FILE;
Uint32 hf = sltCurrLogFilePtr.p->fileNo*clogFileSize+sltCurrLogFilePtr.p->currentMbyte;
Uint32 tf = sltLogPartPtr.p->logTailFileNo*clogFileSize+sltLogPartPtr.p->logTailMbyte;
Uint32 sz = sltLogPartPtr.p->noLogFiles*clogFileSize;
if (tf > hf) hf += sz;
return sz-(hf-tf);
}
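Note (illustrative numbers only): head and tail are expressed in megabytes of the circular redo log, and the formula above handles wrap-around by shifting the head one full lap forward when it is behind the tail:

    #include <cstdio>

    // Remaining redo log, mirroring Dblqh::remainingLogSize() with sample values.
    int main() {
      const unsigned clogFileSize = 6, noLogFiles = 8;       // e.g. 8 x 6 MB files
      unsigned hf = 2 * clogFileSize + 3;                    // head: file 2, mbyte 3
      unsigned tf = 7 * clogFileSize + 1;                    // tail: file 7, mbyte 1
      unsigned sz = noLogFiles * clogFileSize;
      if (tf > hf) hf += sz;                                 // wrap-around
      std::printf("remaining: %u of %u MB\n", sz - (hf - tf), sz);  // 28 of 48
      return 0;
    }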
@ -11848,7 +11883,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
/* ------------------------------------------------------------------------- */
SLT_LOOP:
for (tsltIndex = tsltStartMbyte;
tsltIndex <= ZNO_MBYTES_IN_FILE - 1;
tsltIndex <= clogFileSize - 1;
tsltIndex++) {
if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) {
/* ------------------------------------------------------------------------- */
@ -11864,7 +11899,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
/* ------------------------------------------------------------------------- */
/*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. */
/* ------------------------------------------------------------------------- */
tsltMbyte = ZNO_MBYTES_IN_FILE - 1;
tsltMbyte = clogFileSize - 1;
sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile;
ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
}//if
@ -11902,7 +11937,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo;
UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte;
arrGuard(tsltMbyte, 16);
arrGuard(tsltMbyte, clogFileSize);
sltLogPartPtr.p->logTailFileNo =
sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16;
/* ------------------------------------------------------------------------- */
@ -12402,6 +12437,26 @@ void Dblqh::execFSOPENCONF(Signal* signal)
}//switch
}//Dblqh::execFSOPENCONF()
void
Dblqh::execFSOPENREF(Signal* signal)
{
jamEntry();
FsRef* ref = (FsRef*)signal->getDataPtr();
Uint32 err = ref->errorCode;
if (err == FsRef::fsErrInvalidFileSize)
{
char buf[256];
BaseString::snprintf(buf, sizeof(buf),
"Invalid file size for redo logfile, "
" size only changable with --initial");
progError(__LINE__,
NDBD_EXIT_INVALID_CONFIG,
buf);
return;
}
SimulatedBlock::execFSOPENREF(signal);
}
/* ************>> */
/* FSREADCONF > */
@ -13047,7 +13102,7 @@ void Dblqh::openFileInitLab(Signal* signal)
{
logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT;
seizeLogpage(signal);
writeSinglePage(signal, (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE) - 1,
writeSinglePage(signal, (clogFileSize * ZPAGES_IN_MBYTE) - 1,
ZPAGE_SIZE - 1, __LINE__);
lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END;
return;
@ -13110,7 +13165,7 @@ void Dblqh::writeInitMbyteLab(Signal* signal)
{
releaseLfo(signal);
logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1;
if (logFilePtr.p->currentMbyte == ZNO_MBYTES_IN_FILE) {
if (logFilePtr.p->currentMbyte == clogFileSize) {
jam();
releaseLogpage(signal);
logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT;
@ -13230,7 +13285,7 @@ void Dblqh::initLogfile(Signal* signal, Uint32 fileNo)
logFilePtr.p->lastPageWritten = 0;
logFilePtr.p->logPageZero = RNIL;
logFilePtr.p->currentMbyte = 0;
for (tilIndex = 0; tilIndex <= 15; tilIndex++) {
for (tilIndex = 0; tilIndex < clogFileSize; tilIndex++) {
logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1;
logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1;
logFilePtr.p->logLastPrepRef[tilIndex] = 0;
@ -13281,8 +13336,14 @@ void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr)
signal->theData[3] = olfLogFilePtr.p->fileName[1];
signal->theData[4] = olfLogFilePtr.p->fileName[2];
signal->theData[5] = olfLogFilePtr.p->fileName[3];
signal->theData[6] = ZOPEN_READ_WRITE | FsOpenReq::OM_AUTOSYNC;
signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE;
if (c_o_direct)
signal->theData[6] |= FsOpenReq::OM_DIRECT;
req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord);
Uint64 sz = clogFileSize;
sz *= 1024; sz *= 1024;
req->file_size_hi = sz >> 32;
req->file_size_lo = sz & 0xFFFFFFFF;
sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
}//Dblqh::openFileRw()
@ -13301,7 +13362,9 @@ void Dblqh::openLogfileInit(Signal* signal)
signal->theData[3] = logFilePtr.p->fileName[1];
signal->theData[4] = logFilePtr.p->fileName[2];
signal->theData[5] = logFilePtr.p->fileName[3];
signal->theData[6] = 0x302 | FsOpenReq::OM_AUTOSYNC;
signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE | FsOpenReq::OM_AUTOSYNC;
if (c_o_direct)
signal->theData[6] |= FsOpenReq::OM_DIRECT;
req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord);
sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
}//Dblqh::openLogfileInit()
@ -13337,8 +13400,14 @@ void Dblqh::openNextLogfile(Signal* signal)
signal->theData[3] = onlLogFilePtr.p->fileName[1];
signal->theData[4] = onlLogFilePtr.p->fileName[2];
signal->theData[5] = onlLogFilePtr.p->fileName[3];
signal->theData[6] = 2 | FsOpenReq::OM_AUTOSYNC;
signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE;
if (c_o_direct)
signal->theData[6] |= FsOpenReq::OM_DIRECT;
req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord);
Uint64 sz = clogFileSize;
sz *= 1024; sz *= 1024;
req->file_size_hi = sz >> 32;
req->file_size_lo = sz & 0xFFFFFFFF;
sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
}//if
}//Dblqh::openNextLogfile()
@ -13469,7 +13538,7 @@ void Dblqh::writeFileDescriptor(Signal* signal)
/* -------------------------------------------------- */
/* START BY WRITING TO LOG FILE RECORD */
/* -------------------------------------------------- */
arrGuard(logFilePtr.p->currentMbyte, 16);
arrGuard(logFilePtr.p->currentMbyte, clogFileSize);
logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
logPartPtr.p->logPartNewestCompletedGCI;
logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci;
@ -13495,10 +13564,7 @@ void Dblqh::writeFileDescriptor(Signal* signal)
/* ------------------------------------------------------------------------- */
void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType)
{
LogFileRecordPtr wmoLogFilePtr;
UintR twmoNoLogDescriptors;
UintR twmoLoop;
UintR twmoIndex;
/* -------------------------------------------------- */
/* WRITE HEADER INFORMATION IN THE NEW FILE. */
@ -13506,52 +13572,44 @@ void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType)
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE;
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] =
logFilePtr.p->fileNo;
if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) {
jam();
twmoNoLogDescriptors = ZMAX_LOG_FILES_IN_PAGE_ZERO;
twmoNoLogDescriptors = cmaxLogFilesInPageZero;
} else {
jam();
twmoNoLogDescriptors = logPartPtr.p->noLogFiles;
}//if
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] =
twmoNoLogDescriptors;
wmoLogFilePtr.i = logFilePtr.i;
twmoLoop = 0;
WMO_LOOP:
jam();
if (twmoLoop < twmoNoLogDescriptors) {
jam();
ptrCheckGuard(wmoLogFilePtr, clogFileFileSize, logFileRecord);
for (twmoIndex = 0; twmoIndex <= ZNO_MBYTES_IN_FILE - 1; twmoIndex++) {
{
Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE;
LogFileRecordPtr filePtr = logFilePtr;
for (Uint32 fd = 0; fd < twmoNoLogDescriptors; fd++)
{
jam();
arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(twmoLoop * ZFD_PART_SIZE)) + twmoIndex, ZPAGE_SIZE);
logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(twmoLoop * ZFD_PART_SIZE)) + twmoIndex] =
wmoLogFilePtr.p->logMaxGciCompleted[twmoIndex];
arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) +
twmoIndex, ZPAGE_SIZE);
logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + twmoIndex] =
wmoLogFilePtr.p->logMaxGciStarted[twmoIndex];
arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) +
twmoIndex, ZPAGE_SIZE);
logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + twmoIndex] =
wmoLogFilePtr.p->logLastPrepRef[twmoIndex];
}//for
wmoLogFilePtr.i = wmoLogFilePtr.p->prevLogFile;
twmoLoop = twmoLoop + 1;
goto WMO_LOOP;
}//if
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
(ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(ZFD_PART_SIZE * twmoNoLogDescriptors);
arrGuard(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX], ZPAGE_SIZE);
logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
ZNEXT_LOG_RECORD_TYPE;
ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord);
for (Uint32 mb = 0; mb < clogFileSize; mb ++)
{
jam();
Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb;
Uint32 pos1 = pos0 + clogFileSize;
Uint32 pos2 = pos1 + clogFileSize;
arrGuard(pos0, ZPAGE_SIZE);
arrGuard(pos1, ZPAGE_SIZE);
arrGuard(pos2, ZPAGE_SIZE);
logPagePtr.p->logPageWord[pos0] = filePtr.p->logMaxGciCompleted[mb];
logPagePtr.p->logPageWord[pos1] = filePtr.p->logMaxGciStarted[mb];
logPagePtr.p->logPageWord[pos2] = filePtr.p->logLastPrepRef[mb];
}
filePtr.i = filePtr.p->prevLogFile;
}
pos += (twmoNoLogDescriptors * ZFD_MBYTE_SIZE * clogFileSize);
arrGuard(pos, ZPAGE_SIZE);
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = pos;
logPagePtr.p->logPageWord[pos] = ZNEXT_LOG_RECORD_TYPE;
}
/* ------------------------------------------------------- */
/* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */
/* LOG FILE. THIS HAS SPECIAL SIGNIFANCE TO FIND */
@ -13696,9 +13754,9 @@ void Dblqh::openSrLastFileLab(Signal* signal)
void Dblqh::readSrLastFileLab(Signal* signal)
{
logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) {
jam();
initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
initGciInLogFileRec(signal, cmaxLogFilesInPageZero);
} else {
jam();
initGciInLogFileRec(signal, logPartPtr.p->noLogFiles);
@ -13723,7 +13781,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1;
}//if
}//if
arrGuard(logFilePtr.p->currentMbyte, 16);
arrGuard(logFilePtr.p->currentMbyte, clogFileSize);
logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED];
logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] =
@ -13731,7 +13789,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF];
releaseLogpage(signal);
if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) {
jam();
logFilePtr.p->currentMbyte++;
readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte);
@ -13745,21 +13803,21 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
* ---------------------------------------------------------------------- */
if (logPartPtr.p->lastMbyte == ZNIL) {
jam();
logPartPtr.p->lastMbyte = ZNO_MBYTES_IN_FILE - 1;
logPartPtr.p->lastMbyte = clogFileSize - 1;
}//if
}//if
logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
closeFile(signal, logFilePtr, __LINE__);
if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) {
Uint32 fileNo;
if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) {
jam();
fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero;
} else {
jam();
fileNo =
(logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
ZMAX_LOG_FILES_IN_PAGE_ZERO;
cmaxLogFilesInPageZero;
}//if
if (fileNo == 0) {
jam();
@ -13769,11 +13827,11 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
* -------------------------------------------------------------------- */
fileNo = 1;
logPartPtr.p->srRemainingFiles =
logPartPtr.p->noLogFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
logPartPtr.p->noLogFiles - (cmaxLogFilesInPageZero - 1);
} else {
jam();
logPartPtr.p->srRemainingFiles =
logPartPtr.p->noLogFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
logPartPtr.p->noLogFiles - cmaxLogFilesInPageZero;
}//if
LogFileRecordPtr locLogFilePtr;
findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
@ -13798,9 +13856,9 @@ void Dblqh::openSrNextFileLab(Signal* signal)
void Dblqh::readSrNextFileLab(Signal* signal)
{
if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) {
jam();
initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
initGciInLogFileRec(signal, cmaxLogFilesInPageZero);
} else {
jam();
initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles);
@ -13808,16 +13866,16 @@ void Dblqh::readSrNextFileLab(Signal* signal)
releaseLogpage(signal);
logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
closeFile(signal, logFilePtr, __LINE__);
if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) {
Uint32 fileNo;
if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) {
jam();
fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero;
} else {
jam();
fileNo =
(logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
ZMAX_LOG_FILES_IN_PAGE_ZERO;
cmaxLogFilesInPageZero;
}//if
if (fileNo == 0) {
jam();
@ -13826,11 +13884,11 @@ void Dblqh::readSrNextFileLab(Signal* signal)
* -------------------------------------------------------------------- */
fileNo = 1;
logPartPtr.p->srRemainingFiles =
logPartPtr.p->srRemainingFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
logPartPtr.p->srRemainingFiles - (cmaxLogFilesInPageZero - 1);
} else {
jam();
logPartPtr.p->srRemainingFiles =
logPartPtr.p->srRemainingFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
logPartPtr.p->srRemainingFiles - cmaxLogFilesInPageZero;
}//if
LogFileRecordPtr locLogFilePtr;
findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
@ -14701,7 +14759,7 @@ void Dblqh::srLogLimits(Signal* signal)
* EXECUTED.
* ----------------------------------------------------------------------- */
while(true) {
ndbrequire(tmbyte < 16);
ndbrequire(tmbyte < clogFileSize);
if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) {
if (logFilePtr.p->logMaxGciCompleted[tmbyte] < logPartPtr.p->logLastGci) {
jam();
@ -14742,7 +14800,7 @@ void Dblqh::srLogLimits(Signal* signal)
if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) {
if (tmbyte == 0) {
jam();
tmbyte = ZNO_MBYTES_IN_FILE - 1;
tmbyte = clogFileSize - 1;
logFilePtr.i = logFilePtr.p->prevLogFile;
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
} else {
@ -15136,7 +15194,7 @@ void Dblqh::execSr(Signal* signal)
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD];
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
(ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(noFdDescriptors * ZFD_PART_SIZE);
(noFdDescriptors * ZFD_MBYTE_SIZE * clogFileSize);
}
break;
/* ========================================================================= */
@ -15176,11 +15234,11 @@ void Dblqh::execSr(Signal* signal)
/*---------------------------------------------------------------------------*/
/* START EXECUTION OF A NEW MBYTE IN THE LOG. */
/*---------------------------------------------------------------------------*/
if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) {
jam();
logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE;
} else {
ndbrequire(logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1));
ndbrequire(logFilePtr.p->currentMbyte == (clogFileSize - 1));
jam();
/*---------------------------------------------------------------------------*/
/* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */
@ -15375,7 +15433,7 @@ void Dblqh::invalidateLogAfterLastGCI(Signal* signal) {
jam();
releaseLfo(signal);
releaseLogpage(signal);
if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) {
if (logPartPtr.p->invalidatePageNo < (clogFileSize * ZPAGES_IN_MBYTE - 1)) {
// We continue in this file.
logPartPtr.p->invalidatePageNo++;
} else {
@ -16716,6 +16774,22 @@ void Dblqh::initialiseLogFile(Signal* signal)
ptrAss(logFilePtr, logFileRecord);
logFilePtr.p->nextLogFile = logFilePtr.i + 1;
logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE;
logFilePtr.p->logLastPrepRef = new Uint32[clogFileSize];
logFilePtr.p->logMaxGciCompleted = new Uint32[clogFileSize];
logFilePtr.p->logMaxGciStarted = new Uint32[clogFileSize];
if (logFilePtr.p->logLastPrepRef == 0 ||
logFilePtr.p->logMaxGciCompleted == 0 ||
logFilePtr.p->logMaxGciStarted == 0)
{
char buf[256];
BaseString::snprintf(buf, sizeof(buf),
"Failed to alloc mbyte(%u) arrays for logfile %u",
clogFileSize, logFilePtr.i);
progError(__LINE__, NDBD_EXIT_MEMALLOC, buf);
}
}//for
logFilePtr.i = clogFileFileSize - 1;
ptrAss(logFilePtr, logFileRecord);
@ -17044,41 +17118,31 @@ void Dblqh::initFragrec(Signal* signal,
* ========================================================================= */
void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors)
{
LogFileRecordPtr iglLogFilePtr;
UintR tiglLoop;
UintR tiglIndex;
tiglLoop = 0;
iglLogFilePtr.i = logFilePtr.i;
iglLogFilePtr.p = logFilePtr.p;
IGL_LOOP:
for (tiglIndex = 0; tiglIndex <= ZNO_MBYTES_IN_FILE - 1; tiglIndex++) {
arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
iglLogFilePtr.p->logMaxGciCompleted[tiglIndex] =
logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + ZNO_MBYTES_IN_FILE) +
(tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
iglLogFilePtr.p->logMaxGciStarted[tiglIndex] =
logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
ZNO_MBYTES_IN_FILE) +
(tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(2 * ZNO_MBYTES_IN_FILE)) + (tiglLoop * ZFD_PART_SIZE)) +
tiglIndex, ZPAGE_SIZE);
iglLogFilePtr.p->logLastPrepRef[tiglIndex] =
logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
(2 * ZNO_MBYTES_IN_FILE)) +
(tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
}//for
tiglLoop = tiglLoop + 1;
if (tiglLoop < noFdDescriptors) {
LogFileRecordPtr filePtr = logFilePtr;
Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE;
for (Uint32 fd = 0; fd < noFdDescriptors; fd++)
{
jam();
iglLogFilePtr.i = iglLogFilePtr.p->prevLogFile;
ptrCheckGuard(iglLogFilePtr, clogFileFileSize, logFileRecord);
goto IGL_LOOP;
}//if
for (Uint32 mb = 0; mb < clogFileSize; mb++)
{
jam();
Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb;
Uint32 pos1 = pos0 + clogFileSize;
Uint32 pos2 = pos1 + clogFileSize;
arrGuard(pos0, ZPAGE_SIZE);
arrGuard(pos1, ZPAGE_SIZE);
arrGuard(pos2, ZPAGE_SIZE);
filePtr.p->logMaxGciCompleted[mb] = logPagePtr.p->logPageWord[pos0];
filePtr.p->logMaxGciStarted[mb] = logPagePtr.p->logPageWord[pos1];
filePtr.p->logLastPrepRef[mb] = logPagePtr.p->logPageWord[pos2];
}
if (fd + 1 < noFdDescriptors)
{
jam();
filePtr.i = filePtr.p->prevLogFile;
ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord);
}
}
}//Dblqh::initGciInLogFileRec()
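For readers not familiar with the redo log page-zero format, the indexing used in the rewritten initGciInLogFileRec() can be restated as a small standalone sketch. This is an illustration only: it assumes ZFD_MBYTE_SIZE counts the three per-mbyte word arrays (max GCI completed, max GCI started, last prep ref) and uses made-up values for the page and FD header sizes; only the offset arithmetic mirrors the patch.

    // Minimal sketch (not part of the patch) of the page-zero layout read by
    // initGciInLogFileRec().  Header constants below are illustrative assumptions.
    #include <cstdio>

    typedef unsigned Uint32;

    static const Uint32 ZPAGE_HEADER_SIZE = 32;  // assumed value
    static const Uint32 ZFD_HEADER_SIZE   = 3;   // assumed value
    static const Uint32 ZFD_MBYTE_SIZE    = 3;   // three word arrays per descriptor

    // Word offsets inside page zero for file descriptor 'fd', mbyte 'mb',
    // with a configurable log file size of 'clogFileSize' mbytes.
    static void fd_offsets(Uint32 fd, Uint32 mb, Uint32 clogFileSize,
                           Uint32 &maxGciCompleted, Uint32 &maxGciStarted,
                           Uint32 &lastPrepRef)
    {
      const Uint32 base = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE;
      maxGciCompleted = base + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb;
      maxGciStarted   = maxGciCompleted + clogFileSize;
      lastPrepRef     = maxGciStarted + clogFileSize;
    }

    int main()
    {
      Uint32 p0, p1, p2;
      fd_offsets(1, 0, 16, p0, p1, p2);  // second descriptor, first mbyte, 16M file
      printf("%u %u %u\n", p0, p1, p2);
      return 0;
    }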
/* ==========================================================================
@ -18331,7 +18395,7 @@ void Dblqh::writeNextLog(Signal* signal)
ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE);
logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
ZNEXT_MBYTE_TYPE;
if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) {
jam();
/* -------------------------------------------------- */
/* CALCULATE THE NEW REMAINING WORDS WHEN */
@ -18420,7 +18484,7 @@ void Dblqh::writeNextLog(Signal* signal)
systemError(signal, __LINE__);
}//if
}//if
if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) {
jam();
twnlNextMbyte = 0;
if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) {

View file

@ -16,7 +16,7 @@
EXTRA_PROGRAMS = ndbd_redo_log_reader
ndbd_redo_log_reader_SOURCES = redoLogReader/records.cpp \
redoLogReader/redoLogFileReader.cpp
redoLogReader/reader.cpp
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am

View file

@ -277,6 +277,14 @@ void Ndbcntr::execSTTOR(Signal* signal)
break;
case ZSTART_PHASE_1:
jam();
{
Uint32 db_watchdog_interval = 0;
const ndb_mgm_configuration_iterator * p =
m_ctx.m_config.getOwnConfigIterator();
ndb_mgm_get_int_parameter(p, CFG_DB_WATCHDOG_INTERVAL, &db_watchdog_interval);
ndbrequire(db_watchdog_interval);
update_watch_dog_timer(db_watchdog_interval);
}
startPhase1Lab(signal);
break;
case ZSTART_PHASE_2:
@ -1410,6 +1418,13 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal)
{
jamEntry();
if (ERROR_INSERTED(1001))
{
sendSignalWithDelay(reference(), GSN_NODE_FAILREP, signal, 100,
signal->getLength());
return;
}
const NodeFailRep * nodeFail = (NodeFailRep *)&signal->theData[0];
NdbNodeBitmask allFailed;
allFailed.assign(NdbNodeBitmask::Size, nodeFail->theNodes);
@ -2734,16 +2749,34 @@ void Ndbcntr::execSTART_ORD(Signal* signal){
c_missra.execSTART_ORD(signal);
}
#define CLEAR_DX 13
#define CLEAR_LCP 3
void
Ndbcntr::clearFilesystem(Signal* signal){
Ndbcntr::clearFilesystem(Signal* signal)
{
const Uint32 lcp = c_fsRemoveCount >= CLEAR_DX;
FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend();
req->userReference = reference();
req->userPointer = 0;
req->directory = 1;
req->ownDirectory = 1;
FsOpenReq::setVersion(req->fileNumber, 3);
FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can be any...
FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount);
if (lcp == 0)
{
FsOpenReq::setVersion(req->fileNumber, 3);
FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can be any...
FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount);
}
else
{
FsOpenReq::setVersion(req->fileNumber, 5);
FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
FsOpenReq::v5_setLcpNo(req->fileNumber, c_fsRemoveCount - CLEAR_DX);
FsOpenReq::v5_setTableId(req->fileNumber, 0);
FsOpenReq::v5_setFragmentId(req->fileNumber, 0);
}
sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal,
FsRemoveReq::SignalLength, JBA);
c_fsRemoveCount++;
@ -2752,12 +2785,12 @@ Ndbcntr::clearFilesystem(Signal* signal){
void
Ndbcntr::execFSREMOVECONF(Signal* signal){
jamEntry();
if(c_fsRemoveCount == 13){
if(c_fsRemoveCount == CLEAR_DX + CLEAR_LCP){
jam();
sendSttorry(signal);
} else {
jam();
ndbrequire(c_fsRemoveCount < 13);
ndbrequire(c_fsRemoveCount < CLEAR_DX + CLEAR_LCP);
clearFilesystem(signal);
}//if
}
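The extended clearFilesystem() now walks CLEAR_DX + CLEAR_LCP targets, issuing one FSREMOVEREQ per FSREMOVECONF. A throwaway sketch of the resulting schedule, with the signalling omitted and the directory naming purely illustrative:

    // Illustrative only: the order in which clearFilesystem() visits its targets.
    #include <cstdio>
    typedef unsigned Uint32;

    #define CLEAR_DX  13
    #define CLEAR_LCP 3

    int main()
    {
      for (Uint32 step = 0; step < CLEAR_DX + CLEAR_LCP; step++)
      {
        if (step < CLEAR_DX)
          printf("step %2u: remove file-system directory for disk %u (v3, S_CTL)\n",
                 step, step);
        else
          printf("step %2u: remove LCP directory %u (v5, S_DATA)\n",
                 step, step - CLEAR_DX);
      }
      return 0;
    }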

View file

@ -163,7 +163,12 @@ AsyncFile::run()
theStartFlag = true;
// Create write buffer for bigger writes
theWriteBufferSize = WRITEBUFFERSIZE;
theWriteBuffer = (char *) ndbd_malloc(theWriteBufferSize);
theWriteBufferUnaligned = (char *) ndbd_malloc(theWriteBufferSize +
NDB_O_DIRECT_WRITE_ALIGNMENT-1);
theWriteBuffer = (char *)
(((UintPtr)theWriteBufferUnaligned + NDB_O_DIRECT_WRITE_ALIGNMENT - 1) &
~(UintPtr)(NDB_O_DIRECT_WRITE_ALIGNMENT - 1));
NdbMutex_Unlock(theStartMutexPtr);
NdbCondition_Signal(theStartConditionPtr);
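theWriteBuffer is now carved out of a slightly larger unaligned allocation using the usual align-up idiom. A self-contained sketch of the same computation follows; the alignment value is an assumption of the sketch, not taken from the headers.

    // Stand-alone illustration of the align-up idiom used for theWriteBuffer.
    #include <cstdio>
    #include <cstdint>
    #include <cstdlib>

    #define NDB_O_DIRECT_WRITE_ALIGNMENT 512   /* assumed value for the sketch */

    int main()
    {
      const size_t want = 256 * 1024;
      // Over-allocate by alignment-1 bytes, then round the pointer up.
      void *unaligned = malloc(want + NDB_O_DIRECT_WRITE_ALIGNMENT - 1);
      char *aligned = (char *)
        (((uintptr_t)unaligned + NDB_O_DIRECT_WRITE_ALIGNMENT - 1) &
         ~(uintptr_t)(NDB_O_DIRECT_WRITE_ALIGNMENT - 1));
      printf("unaligned=%p aligned=%p\n", unaligned, (void *)aligned);
      free(unaligned);   // always free the original pointer, not the aligned one
      return 0;
    }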
@ -247,6 +252,78 @@ AsyncFile::run()
static char g_odirect_readbuf[2*GLOBAL_PAGE_SIZE -1];
#endif
int
AsyncFile::check_odirect_write(Uint32 flags, int& new_flags, int mode)
{
assert(new_flags & (O_CREAT | O_TRUNC));
#ifdef O_DIRECT
int ret;
char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1));
while (((ret = ::write(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) &&
(errno == EINTR));
if (ret == -1)
{
new_flags &= ~O_DIRECT;
ndbout_c("%s Failed to write using O_DIRECT, disabling",
theFileName.c_str());
}
close(theFd);
theFd = ::open(theFileName.c_str(), new_flags, mode);
if (theFd == -1)
return errno;
#endif
return 0;
}
int
AsyncFile::check_odirect_read(Uint32 flags, int &new_flags, int mode)
{
#ifdef O_DIRECT
int ret;
char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1));
while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) &&
(errno == EINTR));
if (ret == -1)
{
ndbout_c("%s Failed to read using O_DIRECT, disabling",
theFileName.c_str());
goto reopen;
}
if(lseek(theFd, 0, SEEK_SET) != 0)
{
return errno;
}
if ((flags & FsOpenReq::OM_CHECK_SIZE) == 0)
{
struct stat buf;
if ((fstat(theFd, &buf) == -1))
{
return errno;
}
else if ((buf.st_size % GLOBAL_PAGE_SIZE) != 0)
{
ndbout_c("%s filesize not a multiple of %d, disabling O_DIRECT",
theFileName.c_str(), GLOBAL_PAGE_SIZE);
goto reopen;
}
}
return 0;
reopen:
close(theFd);
new_flags &= ~O_DIRECT;
theFd = ::open(theFileName.c_str(), new_flags, mode);
if (theFd == -1)
return errno;
#endif
return 0;
}
void AsyncFile::openReq(Request* request)
{
m_auto_sync_freq = 0;
@ -312,7 +389,7 @@ void AsyncFile::openReq(Request* request)
}
#else
Uint32 flags = request->par.open.flags;
Uint32 new_flags = 0;
int new_flags = 0;
// Convert file open flags from Solaris to Linux
if (flags & FsOpenReq::OM_CREATE)
@ -343,10 +420,6 @@ void AsyncFile::openReq(Request* request)
{
new_flags |= O_DIRECT;
}
#elif defined O_SYNC
{
flags |= FsOpenReq::OM_SYNC;
}
#endif
if ((flags & FsOpenReq::OM_SYNC) && ! (flags & FsOpenReq::OM_INIT))
@ -355,15 +428,19 @@ void AsyncFile::openReq(Request* request)
new_flags |= O_SYNC;
#endif
}
const char * rw = "";
switch(flags & 0x3){
case FsOpenReq::OM_READONLY:
rw = "r";
new_flags |= O_RDONLY;
break;
case FsOpenReq::OM_WRITEONLY:
rw = "w";
new_flags |= O_WRONLY;
break;
case FsOpenReq::OM_READWRITE:
rw = "rw";
new_flags |= O_RDWR;
break;
default:
@ -404,11 +481,6 @@ no_odirect:
if (new_flags & O_DIRECT)
{
new_flags &= ~O_DIRECT;
flags |= FsOpenReq::OM_SYNC;
#ifdef O_SYNC
if (! (flags & FsOpenReq::OM_INIT))
new_flags |= O_SYNC;
#endif
goto no_odirect;
}
#endif
@ -421,11 +493,6 @@ no_odirect:
else if (new_flags & O_DIRECT)
{
new_flags &= ~O_DIRECT;
flags |= FsOpenReq::OM_SYNC;
#ifdef O_SYNC
if (! (flags & FsOpenReq::OM_INIT))
new_flags |= O_SYNC;
#endif
goto no_odirect;
}
#endif
@ -512,7 +579,6 @@ no_odirect:
{
ndbout_c("error on first write(%d), disable O_DIRECT", err);
new_flags &= ~O_DIRECT;
flags |= FsOpenReq::OM_SYNC;
close(theFd);
theFd = ::open(theFileName.c_str(), new_flags, mode);
if (theFd != -1)
@ -532,26 +598,32 @@ no_odirect:
else if (flags & FsOpenReq::OM_DIRECT)
{
#ifdef O_DIRECT
do {
int ret;
char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1));
while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && (errno == EINTR));
if (ret == -1)
{
ndbout_c("%s Failed to read using O_DIRECT, disabling", theFileName.c_str());
flags |= FsOpenReq::OM_SYNC;
flags |= FsOpenReq::OM_INIT;
break;
}
if(lseek(theFd, 0, SEEK_SET) != 0)
{
request->error = errno;
return;
}
} while (0);
if (flags & (FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE))
{
request->error = check_odirect_write(flags, new_flags, mode);
}
else
{
request->error = check_odirect_read(flags, new_flags, mode);
}
if (request->error)
return;
#endif
}
#ifdef VM_TRACE
if (flags & FsOpenReq::OM_DIRECT)
{
#ifdef O_DIRECT
ndbout_c("%s %s O_DIRECT: %d",
theFileName.c_str(), rw,
!!(new_flags & O_DIRECT));
#else
ndbout_c("%s %s O_DIRECT: 0",
theFileName.c_str(), rw);
#endif
}
#endif
if ((flags & FsOpenReq::OM_SYNC) && (flags & FsOpenReq::OM_INIT))
{
#ifdef O_SYNC
@ -562,6 +634,10 @@ no_odirect:
new_flags &= ~(O_CREAT | O_TRUNC);
new_flags |= O_SYNC;
theFd = ::open(theFileName.c_str(), new_flags, mode);
if (theFd == -1)
{
request->error = errno;
}
#endif
}
#endif
@ -1079,7 +1155,8 @@ AsyncFile::rmrfReq(Request * request, char * path, bool removePath){
void AsyncFile::endReq()
{
// Thread is ended with return
if (theWriteBuffer) ndbd_free(theWriteBuffer, theWriteBufferSize);
if (theWriteBufferUnaligned)
ndbd_free(theWriteBufferUnaligned, theWriteBufferSize);
}

View file

@ -234,9 +234,13 @@ private:
bool theStartFlag;
int theWriteBufferSize;
char* theWriteBuffer;
void* theWriteBufferUnaligned;
size_t m_write_wo_sync; // Writes wo/ sync
size_t m_auto_sync_freq; // Auto sync freq in bytes
int check_odirect_read(Uint32 flags, int&new_flags, int mode);
int check_odirect_write(Uint32 flags, int&new_flags, int mode);
public:
SimulatedBlock& m_fs;
Ptr<GlobalPage> m_page_ptr;

View file

@ -652,7 +652,7 @@ AsyncFile*
Ndbfs::createAsyncFile(){
// Check limit of open files
if (m_maxFiles !=0 && theFiles.size()+1 == m_maxFiles) {
if (m_maxFiles !=0 && theFiles.size() == m_maxFiles) {
// Print info about all open files
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];

View file

@ -123,8 +123,8 @@ Pgman::execREAD_CONFIG_REQ(Signal* signal)
if (page_buffer > 0)
{
page_buffer /= GLOBAL_PAGE_SIZE; // in pages
m_page_entry_pool.setSize(100*page_buffer);
m_param.m_max_pages = page_buffer;
m_page_entry_pool.setSize(m_param.m_lirs_stack_mult * page_buffer);
m_param.m_max_hot_pages = (page_buffer * 9) / 10;
}
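For orientation, the arithmetic in execREAD_CONFIG_REQ() worked through with example numbers; the page size and buffer size below are assumptions of this sketch, and the stack multiplier is the Param default shown in the next hunk.

    // Illustrative only: turning a disk page buffer size into pool limits.
    #include <cstdio>
    typedef unsigned long long Uint64;
    typedef unsigned Uint32;

    int main()
    {
      const Uint64 GLOBAL_PAGE_SIZE = 32 * 1024;      // assumed 32 KB disk pages
      const Uint32 lirs_stack_mult  = 10;             // Param default (next hunk)
      Uint64 page_buffer = 64ULL * 1024 * 1024;       // assume a 64M page buffer
      page_buffer /= GLOBAL_PAGE_SIZE;                // -> 2048 cache pages
      Uint64 pool_entries  = lirs_stack_mult * page_buffer;   // 20480 page entries
      Uint64 max_hot_pages = (page_buffer * 9) / 10;           // 1843 hot pages
      printf("entries=%llu hot=%llu\n", pool_entries, max_hot_pages);
      return 0;
    }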
@ -141,6 +141,7 @@ Pgman::execREAD_CONFIG_REQ(Signal* signal)
Pgman::Param::Param() :
m_max_pages(64), // smallish for testing
m_lirs_stack_mult(10),
m_max_hot_pages(56),
m_max_loop_count(256),
m_max_io_waits(64),
@ -301,6 +302,9 @@ Pgman::get_sublist_no(Page_state state)
{
return Page_entry::SL_LOCKED;
}
if (state == Page_entry::ONSTACK) {
return Page_entry::SL_IDLE;
}
return Page_entry::SL_OTHER;
}
@ -415,15 +419,55 @@ Pgman::get_page_entry(Ptr<Page_entry>& ptr, Uint32 file_no, Uint32 page_no)
{
if (find_page_entry(ptr, file_no, page_no))
{
jam();
ndbrequire(ptr.p->m_state != 0);
m_stats.m_page_hits++;
#ifdef VM_TRACE
debugOut << "PGMAN: get_page_entry: found" << endl;
debugOut << "PGMAN: " << ptr << endl;
#endif
return true;
}
if (m_page_entry_pool.getNoOfFree() == 0)
{
jam();
Page_sublist& pl_idle = *m_page_sublist[Page_entry::SL_IDLE];
Ptr<Page_entry> idle_ptr;
if (pl_idle.first(idle_ptr))
{
jam();
#ifdef VM_TRACE
debugOut << "PGMAN: get_page_entry: re-use idle entry" << endl;
debugOut << "PGMAN: " << idle_ptr << endl;
#endif
Page_state state = idle_ptr.p->m_state;
ndbrequire(state == Page_entry::ONSTACK);
Page_stack& pl_stack = m_page_stack;
ndbrequire(pl_stack.hasPrev(idle_ptr));
pl_stack.remove(idle_ptr);
state &= ~ Page_entry::ONSTACK;
set_page_state(idle_ptr, state);
ndbrequire(idle_ptr.p->m_state == 0);
release_page_entry(idle_ptr);
}
}
if (seize_page_entry(ptr, file_no, page_no))
{
jam();
ndbrequire(ptr.p->m_state == 0);
m_stats.m_page_faults++;
#ifdef VM_TRACE
debugOut << "PGMAN: get_page_entry: seize" << endl;
debugOut << "PGMAN: " << ptr << endl;
#endif
return true;
}
@ -624,6 +668,7 @@ Pgman::lirs_reference(Ptr<Page_entry> ptr)
jam();
move_cleanup_ptr(ptr);
pl_queue.remove(ptr);
state &= ~ Page_entry::ONQUEUE;
}
if (state & Page_entry::BOUND)
{
@ -654,6 +699,12 @@ Pgman::lirs_reference(Ptr<Page_entry> ptr)
pl_stack.add(ptr);
state |= Page_entry::ONSTACK;
state |= Page_entry::HOT;
// it could be on queue already
if (state & Page_entry::ONQUEUE) {
jam();
pl_queue.remove(ptr);
state &= ~Page_entry::ONQUEUE;
}
}
set_page_state(ptr, state);
@ -902,9 +953,11 @@ Pgman::process_map(Signal* signal)
#ifdef VM_TRACE
debugOut << "PGMAN: >process_map" << endl;
#endif
int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
if (max_count > 0)
int max_count = 0;
if (m_param.m_max_io_waits > m_stats.m_current_io_waits) {
max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
max_count = max_count / 2 + 1;
}
Page_sublist& pl_map = *m_page_sublist[Page_entry::SL_MAP];
while (! pl_map.isEmpty() && --max_count >= 0)
@ -1056,15 +1109,10 @@ Pgman::process_cleanup(Signal* signal)
}
int max_loop_count = m_param.m_max_loop_count;
int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
if (max_count > 0)
{
int max_count = 0;
if (m_param.m_max_io_waits > m_stats.m_current_io_waits) {
max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
max_count = max_count / 2 + 1;
/*
* Possibly add code here to avoid writing too rapidly. May be
* unnecessary since only cold pages are cleaned.
*/
}
Ptr<Page_entry> ptr = m_cleanup_ptr;
@ -1166,9 +1214,12 @@ bool
Pgman::process_lcp(Signal* signal)
{
Page_hashlist& pl_hash = m_page_hashlist;
int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
if (max_count > 0)
int max_count = 0;
if (m_param.m_max_io_waits > m_stats.m_current_io_waits) {
max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
max_count = max_count / 2 + 1;
}
#ifdef VM_TRACE
debugOut
@ -1927,6 +1978,8 @@ Pgman::verify_page_entry(Ptr<Page_entry> ptr)
break;
case Page_entry::SL_LOCKED:
break;
case Page_entry::SL_IDLE:
break;
case Page_entry::SL_OTHER:
break;
default:
@ -1973,8 +2026,11 @@ Pgman::verify_page_lists()
ndbrequire(stack_count == pl_stack.count() || dump_page_lists());
ndbrequire(queue_count == pl_queue.count() || dump_page_lists());
Uint32 hot_count = 0;
Uint32 hot_bound_count = 0;
Uint32 cold_bound_count = 0;
Uint32 stack_request_count = 0;
Uint32 queue_request_count = 0;
Uint32 i1 = RNIL;
for (pl_stack.first(ptr); ptr.i != RNIL; pl_stack.next(ptr))
@ -1985,9 +2041,13 @@ Pgman::verify_page_lists()
ndbrequire(state & Page_entry::ONSTACK || dump_page_lists());
if (! pl_stack.hasPrev(ptr))
ndbrequire(state & Page_entry::HOT || dump_page_lists());
if (state & Page_entry::HOT &&
state & Page_entry::BOUND)
hot_bound_count++;
if (state & Page_entry::HOT) {
hot_count++;
if (state & Page_entry::BOUND)
hot_bound_count++;
}
if (state & Page_entry::REQUEST)
stack_request_count++;
}
Uint32 i2 = RNIL;
@ -1999,6 +2059,8 @@ Pgman::verify_page_lists()
ndbrequire(state & Page_entry::ONQUEUE || dump_page_lists());
ndbrequire(state & Page_entry::BOUND || dump_page_lists());
cold_bound_count++;
if (state & Page_entry::REQUEST)
queue_request_count++;
}
Uint32 tot_bound_count =
@ -2031,7 +2093,11 @@ Pgman::verify_page_lists()
<< " cache:" << m_stats.m_num_pages
<< "(" << locked_bound_count << "L)"
<< " stack:" << pl_stack.count()
<< " hot:" << hot_count
<< " hot_bound:" << hot_bound_count
<< " stack_request:" << stack_request_count
<< " queue:" << pl_queue.count()
<< " queue_request:" << queue_request_count
<< " queuewait:" << queuewait_count << endl;
debugOut << "PGMAN:";
@ -2139,6 +2205,8 @@ Pgman::get_sublist_name(Uint32 list_no)
return "busy";
case Page_entry::SL_LOCKED:
return "locked";
case Page_entry::SL_IDLE:
return "idle";
case Page_entry::SL_OTHER:
return "other";
}
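The guarded max_count computation now appears three times above (process_map, process_cleanup, process_lcp). A minimal helper-style restatement of the pattern, purely for illustration; Pgman itself keeps the code inline.

    // Issue at most half of the remaining I/O-wait slots (plus one),
    // and nothing at all once the budget is exhausted.
    #include <cstdio>

    static int compute_max_count(int max_io_waits, int current_io_waits)
    {
      int max_count = 0;
      if (max_io_waits > current_io_waits)
      {
        max_count = max_io_waits - current_io_waits;
        max_count = max_count / 2 + 1;
      }
      return max_count;
    }

    int main()
    {
      printf("%d %d\n", compute_max_count(64, 10), compute_max_count(64, 70));
      return 0;
    }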

View file

@ -325,8 +325,9 @@ private:
,SL_CALLBACK_IO = 4
,SL_BUSY = 5
,SL_LOCKED = 6
,SL_OTHER = 7
,SUBLIST_COUNT = 8
,SL_IDLE = 7
,SL_OTHER = 8
,SUBLIST_COUNT = 9
};
Uint16 m_file_no; // disk page address set at seize
@ -401,6 +402,7 @@ private:
struct Param {
Param();
Uint32 m_max_pages; // max number of cache pages
Uint32 m_lirs_stack_mult; // in m_max_pages (around 3-10)
Uint32 m_max_hot_pages; // max hot cache pages (up to 99%)
Uint32 m_max_loop_count; // limit purely local loops
Uint32 m_max_io_waits;

View file

@ -557,6 +557,9 @@ Restore::restore_next(Signal* signal, FilePtr file_ptr)
case BackupFormat::GCP_ENTRY:
parse_gcp_entry(signal, file_ptr, data, len);
break;
case BackupFormat::EMPTY_ENTRY:
// skip
break;
case 0x4e444242: // 'NDBB'
if (check_file_version(signal, ntohl(* (data+2))) == 0)
{

View file

@ -443,6 +443,11 @@ Configuration::setupConfiguration(){
"TimeBetweenWatchDogCheck missing");
}
if(iter.get(CFG_DB_WATCHDOG_INTERVAL_INITIAL, &_timeBetweenWatchDogCheckInitial)){
ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, "Invalid configuration fetched",
"TimeBetweenWatchDogCheckInitial missing");
}
/**
* Get paths
*/
@ -462,9 +467,12 @@ Configuration::setupConfiguration(){
* Create the watch dog thread
*/
{
Uint32 t = _timeBetweenWatchDogCheck;
if (_timeBetweenWatchDogCheckInitial < _timeBetweenWatchDogCheck)
_timeBetweenWatchDogCheckInitial = _timeBetweenWatchDogCheck;
Uint32 t = _timeBetweenWatchDogCheckInitial;
t = globalEmulatorData.theWatchDog ->setCheckInterval(t);
_timeBetweenWatchDogCheck = t;
_timeBetweenWatchDogCheckInitial = t;
}
ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config);

View file

@ -84,6 +84,7 @@ private:
Uint32 _maxErrorLogs;
Uint32 _lockPagesInMainMemory;
Uint32 _timeBetweenWatchDogCheck;
Uint32 _timeBetweenWatchDogCheckInitial;
ndb_mgm_configuration * m_ownConfig;
ndb_mgm_configuration * m_clusterConfig;

View file

@ -19,6 +19,7 @@
#include <NdbOut.hpp>
#include <GlobalData.hpp>
#include <Emulator.hpp>
#include <WatchDog.hpp>
#include <ErrorHandlingMacros.hpp>
#include <TimeQueue.hpp>
#include <TransporterRegistry.hpp>
@ -38,6 +39,9 @@
#include <AttributeDescriptor.hpp>
#include <NdbSqlUtil.hpp>
#include <EventLogger.hpp>
extern EventLogger g_eventLogger;
#define ljamEntry() jamEntryLine(30000 + __LINE__)
#define ljam() jamLine(30000 + __LINE__)
@ -655,14 +659,20 @@ SimulatedBlock::getBatSize(Uint16 blockNo){
return sb->theBATSize;
}
void* SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId)
{
return allocRecordAligned(type, s, n, 0, 0, clear, paramId);
}
void*
SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId)
SimulatedBlock::allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align, bool clear, Uint32 paramId)
{
void * p = NULL;
size_t size = n*s;
Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s);
refresh_watch_dog();
Uint32 over_alloc = unaligned_buffer ? (align - 1) : 0;
size_t size = n*s + over_alloc;
Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s) + over_alloc;
refresh_watch_dog(9);
if (real_size > 0){
#ifdef VM_TRACE_MEM
ndbout_c("%s::allocRecord(%s, %u, %u) = %llu bytes",
@ -696,14 +706,24 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, U
char * ptr = (char*)p;
const Uint32 chunk = 128 * 1024;
while(size > chunk){
refresh_watch_dog();
refresh_watch_dog(9);
memset(ptr, 0, chunk);
ptr += chunk;
size -= chunk;
}
refresh_watch_dog();
refresh_watch_dog(9);
memset(ptr, 0, size);
}
if (unaligned_buffer)
{
*unaligned_buffer = p;
p = (void *)(((UintPtr)p + over_alloc) & ~(UintPtr)(over_alloc));
#ifdef VM_TRACE
g_eventLogger.info("'%s' (%u) %llu %llu, alignment correction %u bytes",
type, align, (Uint64)p, (Uint64)p+n*s,
(Uint32)((UintPtr)p - (UintPtr)*unaligned_buffer));
#endif
}
}
return p;
}
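allocRecordAligned() clears large allocations in 128 KB steps and pokes the watchdog between steps, so long memset runs do not trip the new "Allocating memory" check. A condensed sketch of that pattern; the callback parameter is an invention of the sketch, the block itself calls refresh_watch_dog(9) directly.

    // Clear a large region in chunks, refreshing the watchdog between chunks.
    #include <string.h>

    static void clear_in_chunks(char *ptr, size_t size, void (*refresh)(unsigned))
    {
      const size_t chunk = 128 * 1024;
      while (size > chunk)
      {
        refresh(9);            // place code 9 = "Allocating memory"
        memset(ptr, 0, chunk);
        ptr += chunk;
        size -= chunk;
      }
      refresh(9);
      memset(ptr, 0, size);
    }

    static void noop(unsigned) {}

    int main()
    {
      static char buf[512 * 1024];
      clear_in_chunks(buf, sizeof(buf), noop);
      return 0;
    }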
@ -720,9 +740,16 @@ SimulatedBlock::deallocRecord(void ** ptr,
}
void
SimulatedBlock::refresh_watch_dog()
SimulatedBlock::refresh_watch_dog(Uint32 place)
{
globalData.incrementWatchDogCounter(1);
globalData.incrementWatchDogCounter(place);
}
void
SimulatedBlock::update_watch_dog_timer(Uint32 interval)
{
extern EmulatorData globalEmulatorData;
globalEmulatorData.theWatchDog->setCheckInterval(interval);
}
void

View file

@ -334,7 +334,8 @@ protected:
* Refresh Watch Dog in initialising code
*
*/
void refresh_watch_dog();
void refresh_watch_dog(Uint32 place = 1);
void update_watch_dog_timer(Uint32 interval);
/**
* Prog error
@ -377,6 +378,7 @@ protected:
*
*/
void* allocRecord(const char * type, size_t s, size_t n, bool clear = true, Uint32 paramId = 0);
void* allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align = NDB_O_DIRECT_WRITE_ALIGNMENT, bool clear = true, Uint32 paramId = 0);
/**
* Deallocate record

View file

@ -16,6 +16,7 @@
#include <ndb_global.h>
#include <my_pthread.h>
#include <sys/times.h>
#include "WatchDog.hpp"
#include "GlobalData.hpp"
@ -24,6 +25,8 @@
#include <ErrorHandlingMacros.hpp>
#include <EventLogger.hpp>
#include <NdbTick.h>
extern EventLogger g_eventLogger;
extern "C"
@ -71,66 +74,115 @@ WatchDog::doStop(){
}
}
const char *get_action(Uint32 IPValue)
{
const char *action;
switch (IPValue) {
case 1:
action = "Job Handling";
break;
case 2:
action = "Scanning Timers";
break;
case 3:
action = "External I/O";
break;
case 4:
action = "Print Job Buffers at crash";
break;
case 5:
action = "Checking connections";
break;
case 6:
action = "Performing Send";
break;
case 7:
action = "Polling for Receive";
break;
case 8:
action = "Performing Receive";
break;
case 9:
action = "Allocating memory";
break;
default:
action = "Unknown place";
break;
}//switch
return action;
}
void
WatchDog::run(){
unsigned int anIPValue;
unsigned int alerts = 0;
WatchDog::run()
{
unsigned int anIPValue, sleep_time;
unsigned int oldIPValue = 0;
unsigned int theIntervalCheck = theInterval;
struct MicroSecondTimer start_time, last_time, now;
NdbTick_getMicroTimer(&start_time);
last_time = start_time;
// WatchDog for the single threaded NDB
while(!theStop){
Uint32 tmp = theInterval / 500;
tmp= (tmp ? tmp : 1);
while(!theStop && tmp > 0){
NdbSleep_MilliSleep(500);
tmp--;
}
while (!theStop)
{
sleep_time= 100;
NdbSleep_MilliSleep(sleep_time);
if(theStop)
break;
NdbTick_getMicroTimer(&now);
if (NdbTick_getMicrosPassed(last_time, now)/1000 > sleep_time*2)
{
struct tms my_tms;
times(&my_tms);
g_eventLogger.info("Watchdog: User time: %llu System time: %llu",
(Uint64)my_tms.tms_utime,
(Uint64)my_tms.tms_stime);
g_eventLogger.warning("Watchdog: Warning overslept %u ms, expected %u ms.",
NdbTick_getMicrosPassed(last_time, now)/1000,
sleep_time);
}
last_time = now;
// Verify that the IP thread is not stuck in a loop
anIPValue = *theIPValue;
if(anIPValue != 0) {
if (anIPValue != 0)
{
oldIPValue = anIPValue;
globalData.incrementWatchDogCounter(0);
alerts = 0;
} else {
const char *last_stuck_action;
alerts++;
switch (oldIPValue) {
case 1:
last_stuck_action = "Job Handling";
break;
case 2:
last_stuck_action = "Scanning Timers";
break;
case 3:
last_stuck_action = "External I/O";
break;
case 4:
last_stuck_action = "Print Job Buffers at crash";
break;
case 5:
last_stuck_action = "Checking connections";
break;
case 6:
last_stuck_action = "Performing Send";
break;
case 7:
last_stuck_action = "Polling for Receive";
break;
case 8:
last_stuck_action = "Performing Receive";
break;
default:
last_stuck_action = "Unknown place";
break;
}//switch
g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
if(alerts == 3){
shutdownSystem(last_stuck_action);
NdbTick_getMicroTimer(&start_time);
theIntervalCheck = theInterval;
}
else
{
int warn = 1;
Uint32 elapsed = NdbTick_getMicrosPassed(start_time, now)/1000;
/*
oldIPValue == 9 indicates that malloc is going on; this can take some
time, so only warn once we have passed the watchdog interval
*/
if (oldIPValue == 9)
if (elapsed < theIntervalCheck)
warn = 0;
else
theIntervalCheck += theInterval;
if (warn)
{
const char *last_stuck_action = get_action(oldIPValue);
g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
{
struct tms my_tms;
times(&my_tms);
g_eventLogger.info("Watchdog: User time: %llu System time: %llu",
(Uint64)my_tms.tms_utime,
(Uint64)my_tms.tms_stime);
}
if (elapsed > 3 * theInterval)
{
shutdownSystem(last_stuck_action);
}
}
}
}
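The escalation rules added to WatchDog::run() can be summarised as a pure decision function. This is a hypothetical condensation for illustration, not the actual class API.

    // 0 = stay silent, 1 = warn, 2 = warn and then shut down.
    #include <cstdio>
    typedef unsigned Uint32;

    static int watchdog_decision(Uint32 elapsed_ms, Uint32 interval_ms,
                                 Uint32 &interval_check_ms, Uint32 place)
    {
      if (place == 9 && elapsed_ms < interval_check_ms)
        return 0;                           // slow memory allocation is tolerated
      if (place == 9)
        interval_check_ms += interval_ms;   // next malloc warning one interval later
      if (elapsed_ms > 3 * interval_ms)
        return 2;                           // stuck for more than three intervals
      return 1;
    }

    int main()
    {
      Uint32 check = 6000;
      printf("%d\n", watchdog_decision(12500, 6000, check, 9));  // prints 1: warn
      return 0;
    }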

View file

@ -579,6 +579,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"70",
STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_WATCHDOG_INTERVAL_INITIAL,
"TimeBetweenWatchDogCheckInitial",
DB_TOKEN,
"Time between execution checks inside a database node in the early start phases when memory is allocated",
ConfigInfo::CI_USED,
true,
ConfigInfo::CI_INT,
"6000",
"70",
STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_STOP_ON_ERROR,
"StopOnError",
@ -879,6 +891,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"3",
STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_REDOLOG_FILE_SIZE,
"FragmentLogFileSize",
DB_TOKEN,
"Size of each Redo log file",
ConfigInfo::CI_USED,
false,
ConfigInfo::CI_INT,
"16M",
"4M",
"1G" },
{
CFG_DB_MAX_OPEN_FILES,
"MaxNoOfOpenFiles",
@ -1309,6 +1333,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"0",
STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_O_DIRECT,
"ODirect",
DB_TOKEN,
"Use O_DIRECT file write/read when possible",
ConfigInfo::CI_USED,
true,
ConfigInfo::CI_BOOL,
"false",
"false",
"true"},
/***************************************************************************
* API
***************************************************************************/
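For reference, the three DB parameters introduced above would typically be set in the data node section of the cluster configuration file. An illustrative fragment using the defaults and limits from the table; the section name follows common cluster config files and is not part of this patch.

    [ndbd default]
    # Documented default, in ms; used while memory is being allocated
    TimeBetweenWatchDogCheckInitial = 6000
    # Allowed range 4M .. 1G
    FragmentLogFileSize = 16M
    # Use O_DIRECT file access when possible
    ODirect = false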

View file

@ -179,7 +179,7 @@ ErrorBundle ErrorCodes[] = {
{ 873, DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
{ 899, DMEC, TR, "Rowid already allocated" },
{ 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
{ 1220, DMEC, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
{ 1220, DMEC, TR, "REDO log files overloaded, consult online manual (increase FragmentLogFileSize)" },
{ 1222, DMEC, TR, "Out of transaction markers in LQH" },
{ 4021, DMEC, TR, "Out of Send Buffer space in NDB API" },
{ 4022, DMEC, TR, "Out of Send Buffer space in NDB API" },

View file

@ -1629,6 +1629,85 @@ runBug28023(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_FAILED;
}
}
return NDBT_OK;
}
int
runBug28717(NDBT_Context* ctx, NDBT_Step* step)
{
int result = NDBT_OK;
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
Ndb* pNdb = GETNDB(step);
NdbRestarter res;
if (res.getNumDbNodes() < 4)
{
return NDBT_OK;
}
int master = res.getMasterNodeId();
int node0 = res.getRandomNodeOtherNodeGroup(master, rand());
int node1 = res.getRandomNodeSameNodeGroup(node0, rand());
ndbout_c("master: %d node0: %d node1: %d", master, node0, node1);
if (res.restartOneDbNode(node0, false, true, true))
{
return NDBT_FAILED;
}
{
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 };
NdbLogEventHandle handle =
ndb_mgm_create_logevent_handle(res.handle, filter);
int dump[] = { DumpStateOrd::DihStartLcpImmediately };
struct ndb_logevent event;
for (Uint32 i = 0; i<3; i++)
{
res.dumpStateOneNode(master, dump, 1);
while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
event.type != NDB_LE_LocalCheckpointStarted);
while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
event.type != NDB_LE_LocalCheckpointCompleted);
}
}
if (res.waitNodesNoStart(&node0, 1))
return NDBT_FAILED;
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
if (res.dumpStateOneNode(node0, val2, 2))
return NDBT_FAILED;
if (res.insertErrorInNode(node0, 5010))
return NDBT_FAILED;
if (res.insertErrorInNode(node1, 1001))
return NDBT_FAILED;
if (res.startNodes(&node0, 1))
return NDBT_FAILED;
NdbSleep_SecSleep(3);
if (res.insertErrorInNode(node1, 0))
return NDBT_FAILED;
if (res.waitNodesNoStart(&node0, 1))
return NDBT_FAILED;
if (res.startNodes(&node0, 1))
return NDBT_FAILED;
if (res.waitClusterStarted())
return NDBT_FAILED;
return NDBT_OK;
}
@ -1993,6 +2072,9 @@ TESTCASE("Bug27466", ""){
TESTCASE("Bug28023", ""){
INITIALIZER(runBug28023);
}
TESTCASE("Bug28717", ""){
INITIALIZER(runBug28717);
}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){

View file

@ -21,3 +21,6 @@ BackupMemory = 64M
MaxNoOfConcurrentScans = 100
MaxNoOfSavedMessages= 1000
SendBufferMemory = 2M
NoOfFragmentLogFiles = 4
FragmentLogFileSize = 64M

View file

@ -567,6 +567,10 @@ max-time: 1500
cmd: testDict
args: -n CreateAndDrop
max-time: 1000
cmd: testNodeRestart
args: -n Bug28717 T1
max-time: 1500
cmd: testDict
args: -n CreateAndDropAtRandom -l 200 T1
@ -706,7 +710,7 @@ args: -n ExecuteAsynch T1
max-time: 1000
cmd: testNdbApi
args: -n BugBug28443
args: -n Bug28443
#max-time: 500
#cmd: testInterpreter

View file

@ -873,13 +873,32 @@ bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId)
debug << "RestoreDataIterator::getNextFragment" << endl;
if (buffer_read(&Header, sizeof(Header), 1) != 1){
while (1)
{
/* read first part of header */
if (buffer_read(&Header, 8, 1) != 1)
{
ret = 0;
return false;
} // if
/* skip if EMPTY_ENTRY */
Header.SectionType = ntohl(Header.SectionType);
Header.SectionLength = ntohl(Header.SectionLength);
if (Header.SectionType == BackupFormat::EMPTY_ENTRY)
{
void *tmp;
buffer_get_ptr(&tmp, Header.SectionLength*4-8, 1);
continue;
}
break;
}
/* read rest of header */
if (buffer_read(((char*)&Header)+8, sizeof(Header)-8, 1) != 1)
{
ret = 0;
return false;
} // if
Header.SectionType = ntohl(Header.SectionType);
Header.SectionLength = ntohl(Header.SectionLength);
}
Header.TableId = ntohl(Header.TableId);
Header.FragmentNo = ntohl(Header.FragmentNo);
Header.ChecksumType = ntohl(Header.ChecksumType);
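The EMPTY_ENTRY skip above depends on SectionLength being counted in 32-bit words and including the two-word (SectionType, SectionLength) prefix that has already been consumed. A tiny illustration of that arithmetic; the helper name is invented for the sketch.

    // Bytes still to consume from a section whose 8-byte prefix has been read.
    #include <cstdio>
    typedef unsigned Uint32;

    static inline Uint32 remaining_section_bytes(Uint32 sectionLengthInWords)
    {
      return sectionLengthInWords * 4 - 8;   // mirrors Header.SectionLength*4 - 8
    }

    int main()
    {
      printf("%u\n", remaining_section_bytes(2));  // a 2-word section has no payload
      return 0;
    }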

View file

@ -25,3 +25,6 @@ test:
test-verbose:
HARNESS_VERBOSE=1 perl unit.pl run $(unittests)
# Don't update the files from bitkeeper
%::SCCS/s.%

View file

@ -22,3 +22,5 @@ LDADD = -lmytap
noinst_PROGRAMS = simple-t skip-t todo-t skip_all-t no_plan-t core-t
# Don't update the files from bitkeeper
%::SCCS/s.%

View file

@ -23,3 +23,5 @@ LDADD = $(top_builddir)/unittest/mytap/libmytap.a \
noinst_PROGRAMS = bitmap-t base64-t my_atomic-t
# Don't update the files from bitkeeper
%::SCCS/s.%

View file

@ -21,3 +21,6 @@ noinst_HEADERS = tap.h
libmytap_a_SOURCES = tap.c
SUBDIRS = . t
# Don't update the files from bitkeeper
%::SCCS/s.%

View file

@ -21,3 +21,5 @@ LDADD = -lmytap
noinst_PROGRAMS = basic-t
# Don't update the files from bitkeeper
%::SCCS/s.%