Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb

into  whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-engines


extra/perror.c:
  Auto merged
mysql-test/t/ndb_basic.test:
  Auto merged
mysql-test/t/ndb_insert.test:
  Auto merged
sql/ha_ndbcluster.cc:
  Auto merged
sql/handler.cc:
  Auto merged
sql/handler.h:
  Auto merged
sql/mysqld.cc:
  Auto merged
storage/ndb/src/common/transporter/Packer.cpp:
  Auto merged
storage/ndb/src/common/transporter/TCP_Transporter.hpp:
  Auto merged
storage/ndb/src/common/transporter/TransporterRegistry.cpp:
  Auto merged
storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp:
  Auto merged
storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp:
  Auto merged
storage/ndb/src/mgmclient/main.cpp:
  Auto merged
storage/ndb/src/ndbapi/NdbBlob.cpp:
  Auto merged
storage/ndb/test/ndbapi/testNdbApi.cpp:
  Auto merged
storage/ndb/test/run-test/daily-basic-tests.txt:
  Auto merged
storage/ndb/tools/restore/consumer_restore.cpp:
  Auto merged
mysql-test/t/disabled.def:
  manual merge
This commit is contained in:
unknown 2007-06-17 17:21:27 +02:00
commit 8f7696d266
49 changed files with 1540 additions and 376 deletions

View file

@ -25,6 +25,7 @@
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
#include "../storage/ndb/src/ndbapi/ndberror.c"
#include "../storage/ndb/src/kernel/error/ndbd_exit_codes.c"
#include "../storage/ndb/include/mgmapi/mgmapi_error.h"
#endif
static my_bool verbose, print_all_codes;
@ -32,6 +33,20 @@ static my_bool verbose, print_all_codes;
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
static my_bool ndb_code;
static char ndb_string[1024];
int mgmapi_error_string(int err_no, char *str, int size)
{
int i;
for (i= 0; i < ndb_mgm_noOfErrorMsgs; i++)
{
if ((int)ndb_mgm_error_msgs[i].code == err_no)
{
my_snprintf(str, size-1, "%s", ndb_mgm_error_msgs[i].msg);
str[size-1]= '\0';
return 0;
}
}
return -1;
}
#endif
static struct my_option my_long_options[] =
@ -238,8 +253,9 @@ int main(int argc,char *argv[])
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
if (ndb_code)
{
if ((ndb_error_string(code, ndb_string, sizeof(ndb_string)) < 0) &&
(ndbd_exit_string(code, ndb_string, sizeof(ndb_string)) < 0))
if ((ndb_error_string(code, ndb_string, sizeof(ndb_string)) < 0) &&
(ndbd_exit_string(code, ndb_string, sizeof(ndb_string)) < 0) &&
(mgmapi_error_string(code, ndb_string, sizeof(ndb_string)) < 0))
{
msg= 0;
}

View file

@ -0,0 +1,64 @@
use test;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
Connected to Management Server at: :
Waiting for completed, this may take several minutes
Node : Backup started from node
Node : Backup started from node completed
StartGCP: StopGCP:
#Records: #LogRecords:
Data: bytes Log: bytes
create table t1
(pk int key
,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64)
,b1 TINYINT, b2 TINYINT UNSIGNED
,c1 SMALLINT, c2 SMALLINT UNSIGNED
,d1 INT, d2 INT UNSIGNED
,e1 BIGINT, e2 BIGINT UNSIGNED
,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY
,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY
,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255)
,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000)
) engine ndb;
insert into t1 values
(1
,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001
,127, 255
,32767, 65535
,2147483647, 4294967295
,9223372036854775807, 18446744073709551615
,'1','12345678901234567890123456789012','123456789'
,'1','12345678901234567890123456789012','123456789'
,0x12,0x123456789abcdef0, 0x012345
,0x12,0x123456789abcdef0, 0x00123450
);
insert into t1 values
(2
,0, 0, 0, 0, 0
,-128, 0
,-32768, 0
,-2147483648, 0
,-9223372036854775808, 0
,'','',''
,'','',''
,0x0,0x0,0x0
,0x0,0x0,0x0
);
insert into t1 values
(3
,NULL,NULL,NULL,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
);
Connected to Management Server at: :
Waiting for completed, this may take several minutes
Node : Backup started from node
Node : Backup started from node completed
StartGCP: StopGCP:
#Records: #LogRecords:
Data: bytes Log: bytes

View file

@ -6,6 +6,34 @@ attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
drop table t1;
SHOW GLOBAL STATUS LIKE 'ndb%';
Variable_name Value
Ndb_cluster_node_id #
Ndb_config_from_host #
Ndb_config_from_port #
Ndb_number_of_data_nodes #
SHOW GLOBAL VARIABLES LIKE 'ndb%';
Variable_name Value
ndb_autoincrement_prefetch_sz #
ndb_cache_check_time #
ndb_connectstring #
ndb_extra_logging #
ndb_force_send #
ndb_index_stat_cache_entries #
ndb_index_stat_enable #
ndb_index_stat_update_freq #
ndb_report_thresh_binlog_epoch_slip #
ndb_report_thresh_binlog_mem_usage #
ndb_use_copying_alter_table #
ndb_use_exact_count #
ndb_use_transactions #
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 0 PRIMARY 1 pk1 A 0 NULL NULL BTREE

View file

@ -657,3 +657,172 @@ a b
2 NULL
3 NULL
drop table t1;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
1 1 0
11 2 1
21 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_offset=5;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
27 4 3
35 5 4
99 6 5
105 7 6
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
7
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_increment=2;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
1 1 0
3 2 1
5 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
7 1 0
8 2 1
9 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 3;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
15 1 0
25 2 1
35 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 5;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 100;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
105 1 0
115 2 1
125 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;

View file

@ -18,7 +18,7 @@ CREATE TABLE `t2_c` (
PRIMARY KEY (`capgotod`),
KEY `i quadaddsvr` (`gotod`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST');
INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,'');
CREATE TABLE `t3_c` (
`CapGoaledatta` smallint(5) unsigned NOT NULL default '0',
`capgotod` smallint(5) unsigned NOT NULL default '0',
@ -154,15 +154,15 @@ count(*)
5
select count(*) from t2;
count(*)
6
7
select count(*) from t2_c;
count(*)
6
7
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
6
7
select count(*) from t3;
count(*)
4
@ -286,15 +286,15 @@ count(*)
5
select count(*) from t2;
count(*)
6
7
select count(*) from t2_c;
count(*)
6
7
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
6
7
select count(*) from t3;
count(*)
4
@ -386,15 +386,15 @@ count(*)
5
select count(*) from t2;
count(*)
6
7
select count(*) from t2_c;
count(*)
6
7
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
6
7
select count(*) from t3;
count(*)
4

View file

@ -47,3 +47,4 @@ ndb_partition_error2 : HF is not sure if the test can work as intended on all
im_options_set : Bug#20294: Instance manager tests fail randomly
im_options_unset : Bug#20294: Instance manager tests fail randomly
mysql_upgrade : Bug#28560 test links to /usr/local/mysql/lib libraries, causes non-determinism and failures on ABI breakage

View file

@ -0,0 +1,66 @@
-- source include/have_ndb.inc
-- source include/ndb_default_cluster.inc
-- source include/not_embedded.inc
--disable_warnings
use test;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
--enable_warnings
#NO.1 test output of backup
--exec $NDB_MGM --no-defaults -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g'
create table t1
(pk int key
,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64)
,b1 TINYINT, b2 TINYINT UNSIGNED
,c1 SMALLINT, c2 SMALLINT UNSIGNED
,d1 INT, d2 INT UNSIGNED
,e1 BIGINT, e2 BIGINT UNSIGNED
,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY
,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY
,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255)
,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000)
) engine ndb;
insert into t1 values
(1
,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001
,127, 255
,32767, 65535
,2147483647, 4294967295
,9223372036854775807, 18446744073709551615
,'1','12345678901234567890123456789012','123456789'
,'1','12345678901234567890123456789012','123456789'
,0x12,0x123456789abcdef0, 0x012345
,0x12,0x123456789abcdef0, 0x00123450
);
insert into t1 values
(2
,0, 0, 0, 0, 0
,-128, 0
,-32768, 0
,-2147483648, 0
,-9223372036854775808, 0
,'','',''
,'','',''
,0x0,0x0,0x0
,0x0,0x0,0x0
);
insert into t1 values
(3
,NULL,NULL,NULL,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
);
#NO.2 test output of backup after some simple SQL operations
--exec $NDB_MGM --no-defaults -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g'

View file

@ -6,22 +6,30 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
drop database if exists mysqltest;
--enable_warnings
## workaround for bug#16445
## remove to reproduce bug and run tests from ndb start
## and with ndb_autodiscover disabled. Fails on Linux 50 % of the times
#CREATE TABLE t1 (
# pk1 INT NOT NULL PRIMARY KEY,
# attr1 INT NOT NULL,
# attr2 INT,
# attr3 VARCHAR(10)
#) ENGINE=ndbcluster;
#drop table t1;
# workaround for bug#16445
# remove to reproduce bug and run tests from ndb start
# and with ndb_autodiscover disabled. Fails on Linux 50 % of the times
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
drop table t1;
#
# Basic test to show that the NDB
# table handler is working
#
#
# Show status and variables
#
--replace_column 2 #
SHOW GLOBAL STATUS LIKE 'ndb%';
--replace_column 2 #
SHOW GLOBAL VARIABLES LIKE 'ndb%';
#
# Create a normal table with primary key
#

View file

@ -639,4 +639,141 @@ insert ignore into t1 values (1,0), (2,0), (2,null), (3,null);
select * from t1 order by a;
drop table t1;
# Bug#26342 auto_increment_increment AND auto_increment_offset REALLY REALLY anger NDB cluster
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_offset=5;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_increment=2;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 3;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 5;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 100;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
# End of 4.1 tests

View file

@ -33,7 +33,7 @@ CREATE TABLE `t2_c` (
PRIMARY KEY (`capgotod`),
KEY `i quadaddsvr` (`gotod`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST');
INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,'');
# Added ROW_FORMAT=FIXED to use below to see that setting is preserved
# by restore

View file

@ -6036,7 +6036,7 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values)
{
{
int cache_size;
Uint64 auto_value;
DBUG_ENTER("get_auto_increment");
@ -6060,7 +6060,7 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
Ndb_tuple_id_range_guard g(m_share);
if (m_skip_auto_increment &&
ndb->readAutoIncrementValue(m_table, g.range, auto_value) ||
ndb->getAutoIncrementValue(m_table, g.range, auto_value, cache_size))
ndb->getAutoIncrementValue(m_table, g.range, auto_value, cache_size, increment, offset))
{
if (--retries &&
ndb->getNdbError().status == NdbError::TemporaryError);

View file

@ -1158,13 +1158,14 @@ void clean_up(bool print_message)
if (cleanup_done++)
return; /* purecov: inspected */
logger.cleanup_base();
/*
make sure that handlers finish up
what they have that is dependent on the binlog
*/
ha_binlog_end(current_thd);
logger.cleanup_base();
injector::free_instance();
mysql_bin_log.cleanup();

163
storage/ndb/MAINTAINERS Normal file
View file

@ -0,0 +1,163 @@
MySQL Cluster MAINTAINERS
-------------------------
This is a list of knowledgeable people in parts of the NDB code.
In changing that area of code, you probably want to talk to the
people who know a lot about it to look over the patch.
When sending patches and queries, always CC the mailing list.
If no list specified, assume internals@lists.mysql.com
P: Person
M: Mail
L: Mailing list
W: Web page with status/info
C: Comment
SRC: Source directory (relative to this directory)
T: SCM tree type and location
S: Status, one of:
Supported: Somebody is paid to maintain this.
Maintained: Not their primary job, but maintained.
Orphan: No current obvious maintainer.
Obsolete: Replaced by something else.
-------------------------------------------------------------
Binlog Injector
SRC: ha_ndbcluster_binlog.cc
C: see also row based replication
P: Stewart Smith
M: stewart@mysql.com
C: Original author
P: Tomas Ulin
M: tomas@mysql.com
C: Lots of updates
P: Martin Skold
M: martin@mysql.com
C: Metadata ops
S: Supported
BLOBs
SRC: ha_ndbcluster.cc
SRC: src/ndbapi/NdbBlob*
P: Pekka
M: pekka@mysql.com
S: Supported
cpcd/cpcc
SRC: src/cw/cpcd
SRC: src/cw/cpcc
C: Maintained only as part of autotest
P: Jonas Oreland
M: jonas@mysql.com
S: Maintained
cpcc-win32
SRC: src/cw/cpcc-win32
S: Obsolete
Handler
SRC: ha_ndbcluster.cc
P: Martin Skold
M: martin@mysql.com
S: Supported
Management Server
SRC: src/mgmsrv/
P: Stewart Smith
M: stewart@mysql.com
S: Supported
Management Client
SRC: src/mgmclient/
P: Stewart Smith
M: stewart@mysql.com
S: Supported
Management API
SRC: src/mgmapi/
P: Stewart Smith
M: stewart@mysql.com
S: Supported
NDB API Examples
SRC: ndbapi-examples/
P: Tomas Ulin
M: tomas@mysql.com
C: Originally by Lars
P: Lars Thalmann
M: lars@mysql.com
S: Maintained
NDB API NdbRecord Examples
SRC: ndbapi-examples/
P: Kristian Nielsen
M: knielsen@mysql.com
S: Maintained
tsman
C: Disk Data (Table Space MANager)
SRC: src/kernel/blocks/tsman.cpp
SRC: src/kernel/blocks/tsman.hpp
P: Jonas Oreland
M: jonas@mysql.com
S: Supported
lgman
C: Disk Data (LoG MANager)
SRC: src/kernel/blocks/lgman.cpp
SRC: src/kernel/blocks/lgman.hpp
P: Jonas Oreland
M: jonas@mysql.com
S: Supported
pgman
C: Disk Data (PaGe MANager)
SRC: src/kernel/blocks/lgman.cpp
SRC: src/kernel/blocks/lgman.hpp
P: Jonas Oreland
M: jonas@mysql.com
S: Supported
SUMA
C: SUbscription MAnager
C: Used for replication
SRC: src/kernel/blocks/suma/
P: Tomas Ulin
M: tomas@mysql.com
P: Jonas Oreland
M: jonas@mysql.com
S: Supported
TRIX
C: TRiggers and IndeXs (but only online Index build)
SRC: src/kernel/blocks/trix
P: Martin Skold
M: mskold@mysql.com
S: Supported
QMGR
C: Cluster (with a Q) ManaGeR
C: Heartbeats etc
SRC: src/kernel/blocks/qmgr
S: Supported
NDBFS
C: NDB FileSystem
C: File System abstraction
SRC: src/kernel/blocks/ndbfs
S: Supported
TRIX
C: TRiggers and IndeXs (but only online Index build)
SRC: src/kernel/blocks/trix
S: Supported
TRIX
C: TRiggers and IndeXs (but only online Index build)
SRC: src/kernel/blocks/trix
S: Supported

View file

@ -45,6 +45,7 @@ ndbapi/ndberror.h
mgmapiinclude_HEADERS = \
mgmapi/mgmapi.h \
mgmapi/mgmapi_error.h \
mgmapi/mgmapi_debug.h \
mgmapi/mgmapi_config_parameters.h \
mgmapi/mgmapi_config_parameters_debug.h \

View file

@ -173,5 +173,5 @@ private:
STATIC_CONST(MAX_TEXT_LENGTH = 256);
};
extern void getRestartAction(Uint32 action, BaseString &str);
#endif

View file

@ -18,6 +18,7 @@
#include "mgmapi_config_parameters.h"
#include "ndb_logevent.h"
#include "mgmapi_error.h"
#define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1
#define NDB_MGM_MAX_LOGLEVEL 15
@ -211,105 +212,6 @@ extern "C" {
#endif
};
/**
* Error codes
*/
enum ndb_mgm_error {
/** Not an error */
NDB_MGM_NO_ERROR = 0,
/* Request for service errors */
/** Supplied connectstring is illegal */
NDB_MGM_ILLEGAL_CONNECT_STRING = 1001,
/** Supplied NdbMgmHandle is illegal */
NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005,
/** Illegal reply from server */
NDB_MGM_ILLEGAL_SERVER_REPLY = 1006,
/** Illegal number of nodes */
NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007,
/** Illegal node status */
NDB_MGM_ILLEGAL_NODE_STATUS = 1008,
/** Memory allocation error */
NDB_MGM_OUT_OF_MEMORY = 1009,
/** Management server not connected */
NDB_MGM_SERVER_NOT_CONNECTED = 1010,
/** Could not connect to socket */
NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011,
/** Could not bind local address */
NDB_MGM_BIND_ADDRESS = 1012,
/* Alloc node id failures */
/** Generic error, retry may succeed */
NDB_MGM_ALLOCID_ERROR = 1101,
/** Non retriable error */
NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102,
/* Service errors - Start/Stop Node or System */
/** Start failed */
NDB_MGM_START_FAILED = 2001,
/** Stop failed */
NDB_MGM_STOP_FAILED = 2002,
/** Restart failed */
NDB_MGM_RESTART_FAILED = 2003,
/* Service errors - Backup */
/** Unable to start backup */
NDB_MGM_COULD_NOT_START_BACKUP = 3001,
/** Unable to abort backup */
NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002,
/* Service errors - Single User Mode */
/** Unable to enter single user mode */
NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001,
/** Unable to exit single user mode */
NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002,
/* Usage errors */
/** Usage error */
NDB_MGM_USAGE_ERROR = 5001
};
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
struct Ndb_Mgm_Error_Msg {
enum ndb_mgm_error code;
const char * msg;
};
const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = {
{ NDB_MGM_NO_ERROR, "No error" },
/* Request for service errors */
{ NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
{ NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
{ NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
{ NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
{ NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
{ NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
{ NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
{ NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
/* Service errors - Start/Stop Node or System */
{ NDB_MGM_START_FAILED, "Start failed" },
{ NDB_MGM_STOP_FAILED, "Stop failed" },
{ NDB_MGM_RESTART_FAILED, "Restart failed" },
/* Service errors - Backup */
{ NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
{ NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
/* Service errors - Single User Mode */
{ NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
"Could not enter single user mode" },
{ NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
"Could not exit single user mode" },
/* Usage errors */
{ NDB_MGM_USAGE_ERROR,
"Usage error" }
};
const int ndb_mgm_noOfErrorMsgs =
sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);
#endif
/**
* Status of a node in the cluster.
*

View file

@ -118,6 +118,8 @@
#define CFG_DB_O_DIRECT 168
#define CFG_DB_MAX_ALLOCATE 169
#define CFG_DB_SGA 198 /* super pool mem */
#define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */

View file

@ -0,0 +1,121 @@
/* Copyright (C) 2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef MGMAPI_ERROR_H
#define MGMAPI_ERROR_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* Error codes
*/
enum ndb_mgm_error {
/** Not an error */
NDB_MGM_NO_ERROR = 0,
/* Request for service errors */
/** Supplied connectstring is illegal */
NDB_MGM_ILLEGAL_CONNECT_STRING = 1001,
/** Supplied NdbMgmHandle is illegal */
NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005,
/** Illegal reply from server */
NDB_MGM_ILLEGAL_SERVER_REPLY = 1006,
/** Illegal number of nodes */
NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007,
/** Illegal node status */
NDB_MGM_ILLEGAL_NODE_STATUS = 1008,
/** Memory allocation error */
NDB_MGM_OUT_OF_MEMORY = 1009,
/** Management server not connected */
NDB_MGM_SERVER_NOT_CONNECTED = 1010,
/** Could not connect to socket */
NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011,
/** Could not bind local address */
NDB_MGM_BIND_ADDRESS = 1012,
/* Alloc node id failures */
/** Generic error, retry may succeed */
NDB_MGM_ALLOCID_ERROR = 1101,
/** Non retriable error */
NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102,
/* Service errors - Start/Stop Node or System */
/** Start failed */
NDB_MGM_START_FAILED = 2001,
/** Stop failed */
NDB_MGM_STOP_FAILED = 2002,
/** Restart failed */
NDB_MGM_RESTART_FAILED = 2003,
/* Service errors - Backup */
/** Unable to start backup */
NDB_MGM_COULD_NOT_START_BACKUP = 3001,
/** Unable to abort backup */
NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002,
/* Service errors - Single User Mode */
/** Unable to enter single user mode */
NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001,
/** Unable to exit single user mode */
NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002,
/* Usage errors */
/** Usage error */
NDB_MGM_USAGE_ERROR = 5001
};
struct Ndb_Mgm_Error_Msg {
enum ndb_mgm_error code;
const char * msg;
};
const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = {
{ NDB_MGM_NO_ERROR, "No error" },
/* Request for service errors */
{ NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
{ NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
{ NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
{ NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
{ NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
{ NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
{ NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
{ NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
/* Service errors - Start/Stop Node or System */
{ NDB_MGM_START_FAILED, "Start failed" },
{ NDB_MGM_STOP_FAILED, "Stop failed" },
{ NDB_MGM_RESTART_FAILED, "Restart failed" },
/* Service errors - Backup */
{ NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
{ NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
/* Service errors - Single User Mode */
{ NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
"Could not enter single user mode" },
{ NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
"Could not exit single user mode" },
/* Usage errors */
{ NDB_MGM_USAGE_ERROR,
"Usage error" }
};
const int ndb_mgm_noOfErrorMsgs =
sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);
#ifdef __cplusplus
}
#endif
#endif

View file

@ -1055,6 +1055,7 @@ class Ndb
friend class NdbDictInterface;
friend class NdbBlob;
friend class NdbImpl;
friend class Ndb_internal;
#endif
public:
@ -1488,12 +1489,15 @@ public:
int initAutoIncrement();
int getAutoIncrementValue(const char* aTableName,
Uint64 & tupleId, Uint32 cacheSize);
Uint64 & tupleId, Uint32 cacheSize,
Uint64 step = 1, Uint64 start = 1);
int getAutoIncrementValue(const NdbDictionary::Table * aTable,
Uint64 & tupleId, Uint32 cacheSize);
Uint64 & tupleId, Uint32 cacheSize,
Uint64 step = 1, Uint64 start = 1);
int getAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 & tupleId,
Uint32 cacheSize);
Uint32 cacheSize,
Uint64 step = 1, Uint64 start = 1);
int readAutoIncrementValue(const char* aTableName,
Uint64 & tupleId);
int readAutoIncrementValue(const NdbDictionary::Table * aTable,
@ -1510,7 +1514,7 @@ public:
private:
int getTupleIdFromNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & tupleId,
Uint32 cacheSize);
Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1);
int readTupleIdFromNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & tupleId);
int setTupleIdInNdb(const NdbTableImpl* table,

View file

@ -1042,6 +1042,13 @@ protected:
*/
Int8 m_abortOption;
/*
* For blob impl, option to not propagate error to trans level.
* Could be AO_IgnoreError variant if we want it public.
* Ignored unless AO_IgnoreError is also set.
*/
Int8 m_noErrorPropagation;
friend struct Ndb_free_list_t<NdbOperation>;
};

View file

@ -448,6 +448,41 @@ Backup::execDUMP_STATE_ORD(Signal* signal)
filePtr.p->m_flags);
}
}
ndbout_c("m_curr_disk_write_speed: %u m_words_written_this_period: %u m_overflow_disk_write: %u",
m_curr_disk_write_speed, m_words_written_this_period, m_overflow_disk_write);
ndbout_c("m_reset_delay_used: %u m_reset_disk_speed_time: %llu",
m_reset_delay_used, (Uint64)m_reset_disk_speed_time);
for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr))
{
ndbout_c("BackupRecord %u: BackupId: %u MasterRef: %x ClientRef: %x",
ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef);
ndbout_c(" State: %u", ptr.p->slaveState.getState());
ndbout_c(" noOfByte: %llu noOfRecords: %llu",
ptr.p->noOfBytes, ptr.p->noOfRecords);
ndbout_c(" noOfLogBytes: %llu noOfLogRecords: %llu",
ptr.p->noOfLogBytes, ptr.p->noOfLogRecords);
ndbout_c(" errorCode: %u", ptr.p->errorCode);
BackupFilePtr filePtr;
for(ptr.p->files.first(filePtr); filePtr.i != RNIL;
ptr.p->files.next(filePtr))
{
ndbout_c(" file %u: type: %u flags: H'%x tableId: %u fragmentId: %u",
filePtr.i, filePtr.p->fileType, filePtr.p->m_flags,
filePtr.p->tableId, filePtr.p->fragmentNo);
}
if (ptr.p->slaveState.getState() == SCANNING && ptr.p->dataFilePtr != RNIL)
{
c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
OperationRecord & op = filePtr.p->operation;
Uint32 *tmp = NULL;
Uint32 sz = 0;
bool eof = FALSE;
bool ready = op.dataBuffer.getReadPtr(&tmp, &sz, &eof);
ndbout_c("ready: %s eof: %s", ready ? "TRUE" : "FALSE", eof ? "TRUE" : "FALSE");
}
}
return;
}
if(signal->theData[0] == 24){
/**

View file

@ -2700,6 +2700,10 @@ private:
ArrayPool<Page> c_page_pool;
Uint32 cnoOfAllocatedPages;
Uint32 m_max_allocate_pages;
/* read ahead in pages during disk order scan */
Uint32 m_max_page_read_ahead;
Tablerec *tablerec;
Uint32 cnoOfTablerec;

View file

@ -74,6 +74,10 @@ Dbtup::reportMemoryUsage(Signal* signal, int incDec){
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
}
#ifdef VM_TRACE
extern Uint32 fc_left, fc_right, fc_remove;
#endif
void
Dbtup::execDUMP_STATE_ORD(Signal* signal)
{
@ -155,12 +159,20 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
return;
}//if
#endif
#if defined VM_TRACE && 0
if (type == 1211){
ndbout_c("Startar modul test av Page Manager");
#if defined VM_TRACE
if (type == 1211 || type == 1212 || type == 1213){
Uint32 seed = time(0);
if (signal->getLength() > 1)
seed = signal->theData[1];
ndbout_c("Startar modul test av Page Manager (seed: 0x%x)", seed);
srand(seed);
Vector<Chunk> chunks;
const Uint32 LOOPS = 1000;
Uint32 sum_req = 0;
Uint32 sum_conf = 0;
Uint32 sum_loop = 0;
Uint32 max_loop = 0;
for(Uint32 i = 0; i<LOOPS; i++){
// Case
@ -177,8 +189,15 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
if(chunks.size() == 0 && c == 0){
c = 1 + rand() % 2;
}
if (type == 1211)
ndbout_c("loop=%d case=%d free=%d alloc=%d", i, c, free, alloc);
ndbout_c("loop=%d case=%d free=%d alloc=%d", i, c, free, alloc);
if (type == 1213)
{
c = 1;
alloc = 2 + (sum_conf >> 3) + (sum_conf >> 4);
}
switch(c){
case 0:{ // Release
const int ch = rand() % chunks.size();
@ -190,23 +209,33 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
case 2: { // Seize(n) - fail
alloc += free;
// Fall through
sum_req += free;
goto doalloc;
}
case 1: { // Seize(n) (success)
sum_req += alloc;
doalloc:
Chunk chunk;
allocConsPages(alloc, chunk.pageCount, chunk.pageId);
ndbrequire(chunk.pageCount <= alloc);
if(chunk.pageCount != 0){
chunks.push_back(chunk);
if(chunk.pageCount != alloc) {
ndbout_c(" Tried to allocate %d - only allocated %d - free: %d",
alloc, chunk.pageCount, free);
if (type == 1211)
ndbout_c(" Tried to allocate %d - only allocated %d - free: %d",
alloc, chunk.pageCount, free);
}
} else {
ndbout_c(" Failed to alloc %d pages with %d pages free",
alloc, free);
}
sum_conf += chunk.pageCount;
Uint32 tot = fc_left + fc_right + fc_remove;
sum_loop += tot;
if (tot > max_loop)
max_loop = tot;
for(Uint32 i = 0; i<chunk.pageCount; i++){
PagePtr pagePtr;
pagePtr.i = chunk.pageId + i;
@ -225,6 +254,10 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
returnCommonArea(chunk.pageId, chunk.pageCount);
chunks.erase(chunks.size() - 1);
}
ndbout_c("Got %u%% of requested allocs, loops : %u 100*avg: %u max: %u",
(100 * sum_conf) / sum_req, sum_loop, 100*sum_loop / LOOPS,
max_loop);
}
#endif
}//Dbtup::execDUMP_STATE_ORD()

View file

@ -305,6 +305,12 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
Uint32 noOfTriggers= 0;
Uint32 tmp= 0;
if (ndb_mgm_get_int_parameter(p, CFG_DB_MAX_ALLOCATE, &tmp))
tmp = 32 * 1024 * 1024;
m_max_allocate_pages = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE;
tmp = 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp));
initPageRangeSize(tmp);
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &cnoOfTablerec));
@ -338,6 +344,18 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_BATCH_SIZE, &nScanBatch));
c_scanLockPool.setSize(nScanOp * nScanBatch);
/* read ahead for disk scan can not be more that disk page buffer */
{
Uint64 tmp = 64*1024*1024;
ndb_mgm_get_int64_parameter(p, CFG_DB_DISK_PAGE_BUFFER_MEMORY, &tmp);
m_max_page_read_ahead = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; // in pages
// never read ahead more than 32 pages
if (m_max_page_read_ahead > 32)
m_max_page_read_ahead = 32;
}
ScanOpPtr lcp;
ndbrequire(c_scanOpPool.seize(lcp));
new (lcp.p) ScanOp();

View file

@ -146,10 +146,17 @@ void Dbtup::initializePage()
cnoOfAllocatedPages = tmp; // Is updated by returnCommonArea
}//Dbtup::initializePage()
#ifdef VM_TRACE
Uint32 fc_left, fc_right, fc_remove;
#endif
void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
Uint32& noOfPagesAllocated,
Uint32& allocPageRef)
{
#ifdef VM_TRACE
fc_left = fc_right = fc_remove = 0;
#endif
if (noOfPagesToAllocate == 0){
jam();
noOfPagesAllocated = 0;
@ -228,7 +235,10 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef,
{
PagePtr pageFirstPtr, pageLastPtr;
Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
while (allocPageRef > 0) {
Uint32 loop = 0;
while (allocPageRef > 0 &&
++loop < 16)
{
jam();
pageLastPtr.i = allocPageRef - 1;
c_page_pool.getPtr(pageLastPtr);
@ -256,6 +266,9 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef,
remainAllocate -= listSize;
}//if
}//if
#ifdef VM_TRACE
fc_left++;
#endif
}//while
}//Dbtup::findFreeLeftNeighbours()
@ -269,7 +282,10 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef,
jam();
return;
}//if
while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize()) {
Uint32 loop = 0;
while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize() &&
++loop < 16)
{
jam();
pageFirstPtr.i = allocPageRef + noPagesAllocated;
c_page_pool.getPtr(pageFirstPtr);
@ -296,24 +312,37 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef,
remainAllocate -= listSize;
}//if
}//if
#ifdef VM_TRACE
fc_right++;
#endif
}//while
}//Dbtup::findFreeRightNeighbours()
void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList)
{
cnoOfAllocatedPages -= (1 << insList);
PagePtr pageLastPtr, pageInsPtr;
PagePtr pageLastPtr, pageInsPtr, pageHeadPtr;
pageHeadPtr.i = cfreepageList[insList];
c_page_pool.getPtr(pageInsPtr, insPageRef);
ndbrequire(insList < 16);
pageLastPtr.i = (pageInsPtr.i + (1 << insList)) - 1;
pageInsPtr.p->next_cluster_page = cfreepageList[insList];
pageInsPtr.p->page_state = ZFREE_COMMON;
pageInsPtr.p->next_cluster_page = pageHeadPtr.i;
pageInsPtr.p->prev_cluster_page = RNIL;
pageInsPtr.p->last_cluster_page = pageLastPtr.i;
cfreepageList[insList] = pageInsPtr.i;
if (pageHeadPtr.i != RNIL)
{
jam();
c_page_pool.getPtr(pageHeadPtr);
pageHeadPtr.p->prev_cluster_page = pageInsPtr.i;
}
c_page_pool.getPtr(pageLastPtr);
pageLastPtr.p->page_state = ZFREE_COMMON;
pageLastPtr.p->first_cluster_page = pageInsPtr.i;
pageLastPtr.p->next_page = RNIL;
}//Dbtup::insertCommonArea()
@ -321,12 +350,13 @@ void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList)
void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list)
{
cnoOfAllocatedPages += (1 << list);
PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, pageSearchPtr, remPagePtr;
PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, remPagePtr;
c_page_pool.getPtr(remPagePtr, remPageRef);
ndbrequire(list < 16);
if (cfreepageList[list] == remPagePtr.i) {
jam();
ndbassert(remPagePtr.p->prev_cluster_page == RNIL);
cfreepageList[list] = remPagePtr.p->next_cluster_page;
pageNextPtr.i = cfreepageList[list];
if (pageNextPtr.i != RNIL) {
@ -335,30 +365,25 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list)
pageNextPtr.p->prev_cluster_page = RNIL;
}//if
} else {
pageSearchPtr.i = cfreepageList[list];
while (true) {
jam();
c_page_pool.getPtr(pageSearchPtr);
pagePrevPtr = pageSearchPtr;
pageSearchPtr.i = pageSearchPtr.p->next_cluster_page;
if (pageSearchPtr.i == remPagePtr.i) {
jam();
break;
}//if
}//while
pagePrevPtr.i = remPagePtr.p->prev_cluster_page;
pageNextPtr.i = remPagePtr.p->next_cluster_page;
c_page_pool.getPtr(pagePrevPtr);
pagePrevPtr.p->next_cluster_page = pageNextPtr.i;
if (pageNextPtr.i != RNIL) {
if (pageNextPtr.i != RNIL)
{
jam();
c_page_pool.getPtr(pageNextPtr);
pageNextPtr.p->prev_cluster_page = pagePrevPtr.i;
}//if
}
}//if
remPagePtr.p->next_cluster_page= RNIL;
remPagePtr.p->last_cluster_page= RNIL;
remPagePtr.p->prev_cluster_page= RNIL;
remPagePtr.p->page_state = ~ZFREE_COMMON;
pageLastPtr.i = (remPagePtr.i + (1 << list)) - 1;
c_page_pool.getPtr(pageLastPtr);
pageLastPtr.p->first_cluster_page= RNIL;
pageLastPtr.p->page_state = ~ZFREE_COMMON;
}//Dbtup::removeCommonArea()

View file

@ -432,6 +432,11 @@ void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
// We will grow by 18.75% plus two more additional pages to grow
// a little bit quicker in the beginning.
/* -----------------------------------------------------------------*/
if (noAllocPages > m_max_allocate_pages)
{
noAllocPages = m_max_allocate_pages;
}
Uint32 allocated = allocFragPages(regFragPtr, noAllocPages);
regFragPtr->noOfPagesToGrow += allocated;
}//Dbtup::allocMoreFragPages()

View file

@ -687,13 +687,74 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
// move to next extent
jam();
pos.m_extent_info_ptr_i = ext_ptr.i;
Extent_info* ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i);
ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i);
key.m_file_no = ext->m_key.m_file_no;
key.m_page_no = ext->m_first_page_no;
}
}
key.m_page_idx = 0;
pos.m_get = ScanPos::Get_page_dd;
/*
read ahead for scan in disk order
do read ahead every 8:th page
*/
if ((bits & ScanOp::SCAN_DD) &&
(((key.m_page_no - ext->m_first_page_no) & 7) == 0))
{
jam();
// initialize PGMAN request
Page_cache_client::Request preq;
preq.m_page = pos.m_key;
preq.m_callback = TheNULLCallback;
// set maximum read ahead
Uint32 read_ahead = m_max_page_read_ahead;
while (true)
{
// prepare page read ahead in current extent
Uint32 page_no = preq.m_page.m_page_no;
Uint32 page_no_limit = page_no + read_ahead;
Uint32 limit = ext->m_first_page_no + alloc.m_extent_size;
if (page_no_limit > limit)
{
jam();
// read ahead crosses extent, set limit for this extent
read_ahead = page_no_limit - limit;
page_no_limit = limit;
// and make sure we only read one extra extent next time around
if (read_ahead > alloc.m_extent_size)
read_ahead = alloc.m_extent_size;
}
else
{
jam();
read_ahead = 0; // no more to read ahead after this
}
// do read ahead pages for this extent
while (page_no < page_no_limit)
{
// page request to PGMAN
jam();
preq.m_page.m_page_no = page_no;
int flags = 0;
// ignore result
m_pgman.get_page(signal, preq, flags);
jamEntry();
page_no++;
}
if (!read_ahead || !list.next(ext_ptr))
{
// no more extents after this or read ahead done
jam();
break;
}
// move to next extent and initialize PGMAN request accordingly
Extent_info* ext = c_extent_pool.getPtr(ext_ptr.i);
preq.m_page.m_file_no = ext->m_key.m_file_no;
preq.m_page.m_page_no = ext->m_first_page_no;
}
} // if ScanOp::SCAN_DD read ahead
}
/*FALLTHRU*/
case ScanPos::Get_page_dd:
@ -726,6 +787,7 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
safe_cast(&Dbtup::disk_page_tup_scan_callback);
int flags = 0;
int res = m_pgman.get_page(signal, preq, flags);
jamEntry();
if (res == 0) {
jam();
// request queued

View file

@ -122,7 +122,7 @@ Pgman::execREAD_CONFIG_REQ(Signal* signal)
if (page_buffer > 0)
{
page_buffer /= GLOBAL_PAGE_SIZE; // in pages
page_buffer = (page_buffer + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; // in pages
m_param.m_max_pages = page_buffer;
m_page_entry_pool.setSize(m_param.m_lirs_stack_mult * page_buffer);
m_param.m_max_hot_pages = (page_buffer * 9) / 10;
@ -144,7 +144,7 @@ Pgman::Param::Param() :
m_lirs_stack_mult(10),
m_max_hot_pages(56),
m_max_loop_count(256),
m_max_io_waits(64),
m_max_io_waits(256),
m_stats_loop_delay(1000),
m_cleanup_loop_delay(200),
m_lcp_loop_delay(0)

View file

@ -1658,6 +1658,11 @@ SimulatedBlock::sendFragmentedSignal(NodeReceiverGroup rg,
}
SimulatedBlock::Callback SimulatedBlock::TheEmptyCallback = {0, 0};
void
SimulatedBlock::TheNULLCallbackFunction(class Signal*, Uint32, Uint32)
{ abort(); /* should never be called */ }
SimulatedBlock::Callback SimulatedBlock::TheNULLCallback =
{ &SimulatedBlock::TheNULLCallbackFunction, 0 };
void
SimulatedBlock::sendFragmentedSignal(BlockReference ref,

View file

@ -131,6 +131,8 @@ public:
virtual const char* get_filename(Uint32 fd) const { return "";}
protected:
static Callback TheEmptyCallback;
void TheNULLCallbackFunction(class Signal*, Uint32, Uint32);
static Callback TheNULLCallback;
void execute(Signal* signal, Callback & c, Uint32 returnCode);
@ -599,6 +601,8 @@ inline
void
SimulatedBlock::execute(Signal* signal, Callback & c, Uint32 returnCode){
CallbackFunction fun = c.m_callbackFunction;
if (fun == TheNULLCallback.m_callbackFunction)
return;
ndbrequire(fun != 0);
c.m_callbackFunction = NULL;
(this->*fun)(signal, c.m_callbackData, returnCode);

View file

@ -524,7 +524,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET;
Uint32 i;
SocketClient s(0, 0);
s.set_connect_timeout(handle->timeout);
s.set_connect_timeout((handle->timeout+999)/1000);
if (!s.init())
{
fprintf(handle->errstream,

View file

@ -18,6 +18,7 @@
#include <Vector.hpp>
#include <mgmapi.h>
#include <util/BaseString.hpp>
#include <ndbd_exit_codes.h>
class MgmtSrvr;
@ -704,6 +705,133 @@ CommandInterpreter::printError()
}
}
/*
 * Format and print a single cluster log event (received from mgmsrv)
 * to the console.  Only NDB_MGM_EVENT_CATEGORY_BACKUP and
 * NDB_MGM_EVENT_CATEGORY_STARTUP events are handled; all other event
 * types fall through to the default case and print nothing.
 *
 * Fix: corrected user-facing typo "Occured" -> "Occurred" in the
 * forced-shutdown start-phase message.
 */
/*
 * Field-access helper macros for the ndb_logevent union.  EVENT is
 * re-#defined immediately before each case label to name the union
 * member that matches that event type, so Q(field) expands to
 * event-><member>.field for the current case.
 */
#define make_uint64(a,b) (((Uint64)(a)) + (((Uint64)(b)) << 32))
#define Q64(a) make_uint64(event->EVENT.a ## _lo, event->EVENT.a ## _hi)
#define R event->source_nodeid
#define Q(a) event->EVENT.a
#define QVERSION getMajor(Q(version)), getMinor(Q(version)), getBuild(Q(version))
#define NDB_LE_(a) NDB_LE_ ## a
static void
printLogEvent(struct ndb_logevent* event)
{
  switch (event->type) {
    /**
     * NDB_MGM_EVENT_CATEGORY_BACKUP
     */
#undef EVENT
#define EVENT BackupStarted
  case NDB_LE_BackupStarted:
    ndbout_c("Node %u: Backup %d started from node %d",
             R, Q(backup_id), Q(starting_node));
    break;
#undef EVENT
#define EVENT BackupFailedToStart
  case NDB_LE_BackupFailedToStart:
    ndbout_c("Node %u: Backup request from %d failed to start. Error: %d",
             R, Q(starting_node), Q(error));
    break;
#undef EVENT
#define EVENT BackupCompleted
  case NDB_LE_BackupCompleted:
    ndbout_c("Node %u: Backup %u started from node %u completed\n"
             " StartGCP: %u StopGCP: %u\n"
             " #Records: %u #LogRecords: %u\n"
             " Data: %u bytes Log: %u bytes", R,
             Q(backup_id), Q(starting_node),
             Q(start_gci), Q(stop_gci),
             Q(n_records), Q(n_log_records),
             Q(n_bytes), Q(n_log_bytes));
    break;
#undef EVENT
#define EVENT BackupAborted
  case NDB_LE_BackupAborted:
    ndbout_c("Node %u: Backup %d started from %d has been aborted. Error: %d",
             R, Q(backup_id), Q(starting_node), Q(error));
    break;
    /**
     * NDB_MGM_EVENT_CATEGORY_STARTUP
     */
#undef EVENT
#define EVENT NDBStartStarted
  case NDB_LE_NDBStartStarted:
    ndbout_c("Node %u: Start initiated (version %d.%d.%d)",
             R, QVERSION);
    break;
#undef EVENT
#define EVENT NDBStartCompleted
  case NDB_LE_NDBStartCompleted:
    ndbout_c("Node %u: Started (version %d.%d.%d)",
             R, QVERSION);
    break;
#undef EVENT
#define EVENT NDBStopStarted
  case NDB_LE_NDBStopStarted:
    ndbout_c("Node %u: %s shutdown initiated", R,
             (Q(stoptype) == 1 ? "Cluster" : "Node"));
    break;
#undef EVENT
#define EVENT NDBStopCompleted
  case NDB_LE_NDBStopCompleted:
    {
      BaseString action_str("");
      BaseString signum_str("");
      getRestartAction(Q(action), action_str);
      if (Q(signum))
        signum_str.appfmt(" Initiated by signal %d.",
                          Q(signum));
      ndbout_c("Node %u: Node shutdown completed%s.%s",
               R, action_str.c_str(), signum_str.c_str());
    }
    break;
#undef EVENT
#define EVENT NDBStopForced
  case NDB_LE_NDBStopForced:
    {
      BaseString action_str("");
      BaseString reason_str("");
      BaseString sphase_str("");
      int signum = Q(signum);
      int error = Q(error);
      int sphase = Q(sphase);
      int extra = Q(extra);
      getRestartAction(Q(action), action_str);
      if (signum)
        reason_str.appfmt(" Initiated by signal %d.", signum);
      if (error)
      {
        /* Translate the ndbd exit code into message, classification
           and status strings for the operator. */
        ndbd_exit_classification cl;
        ndbd_exit_status st;
        const char *msg = ndbd_exit_message(error, &cl);
        const char *cl_msg = ndbd_exit_classification_message(cl, &st);
        const char *st_msg = ndbd_exit_status_message(st);
        reason_str.appfmt(" Caused by error %d: \'%s(%s). %s\'.",
                          error, msg, cl_msg, st_msg);
        if (extra != 0)
          reason_str.appfmt(" (extra info %d)", extra);
      }
      /* sphase == 255 means "not during a start phase" -- omit the note */
      if (sphase < 255)
        sphase_str.appfmt(" Occurred during startphase %u.", sphase);
      ndbout_c("Node %u: Forced node shutdown completed%s.%s%s",
               R, action_str.c_str(), sphase_str.c_str(),
               reason_str.c_str());
    }
    break;
#undef EVENT
#define EVENT StopAborted
  case NDB_LE_NDBStopAborted:
    ndbout_c("Node %u: Node shutdown aborted", R);
    break;
    /**
     * default nothing to print
     */
  default:
    break;
  }
}
//*****************************************************************************
//*****************************************************************************
@ -720,30 +848,21 @@ event_thread_run(void* p)
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
1, NDB_MGM_EVENT_CATEGORY_STARTUP,
0 };
int fd = ndb_mgm_listen_event(handle, filter);
if (fd != NDB_INVALID_SOCKET)
NdbLogEventHandle log_handle= NULL;
struct ndb_logevent log_event;
log_handle= ndb_mgm_create_logevent_handle(handle, filter);
if (log_handle)
{
do_event_thread= 1;
char *tmp= 0;
char buf[1024];
do {
SocketInputStream in(fd,2000);
if((tmp = in.gets(buf, sizeof(buf))))
{
const char ping_token[]= "<PING>";
if (memcmp(ping_token,tmp,sizeof(ping_token)-1))
if(tmp && strlen(tmp))
{
Guard g(printmutex);
ndbout << tmp;
}
}
else if(in.timedout() && ndb_mgm_check_connection(handle)<0)
{
break;
}
if (ndb_logevent_get_next(log_handle, &log_event, 2000) <= 0)
continue;
Guard g(printmutex);
printLogEvent(&log_event);
} while(do_event_thread);
NDB_CLOSE_SOCKET(fd);
ndb_mgm_destroy_logevent_handle(&log_handle);
}
else
{
@ -1007,6 +1126,7 @@ CommandInterpreter::execute_impl(const char *_line, bool interactive)
DBUG_RETURN(true);
}
else if(strcasecmp(firstToken, "ENTER") == 0 &&
allAfterFirstToken != NULL &&
allAfterFirstToken != NULL &&
strncasecmp(allAfterFirstToken, "SINGLE USER MODE ",
sizeof("SINGLE USER MODE") - 1) == 0){
@ -2476,8 +2596,7 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive)
{
struct ndb_mgm_reply reply;
unsigned int backupId;
int fd = -1;
Vector<BaseString> args;
{
BaseString(parameters).split(args);
@ -2494,8 +2613,6 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive)
if (sz == 2 && args[1] == "NOWAIT")
{
flags = 0;
result = ndb_mgm_start_backup(m_mgmsrv, 0, &backupId, &reply);
goto END_BACKUP;
}
else if (sz == 1 || (sz == 3 && args[1] == "WAIT" && args[2] == "COMPLETED"))
{
@ -2513,62 +2630,74 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive)
return -1;
}
/**
* If interactive, the event listener thread is already running
*/
NdbLogEventHandle log_handle= NULL;
struct ndb_logevent log_event;
if (flags == 2 && !interactive)
{
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0, 0 };
fd = ndb_mgm_listen_event(m_mgmsrv, filter);
if (fd < 0)
log_handle = ndb_mgm_create_logevent_handle(m_mgmsrv, filter);
if (!log_handle)
{
ndbout << "Initializing start of backup failed" << endl;
printError();
return fd;
return -1;
}
}
result = ndb_mgm_start_backup(m_mgmsrv, flags, &backupId, &reply);
END_BACKUP:
if (result != 0) {
ndbout << "Backup failed" << endl;
printError();
if (fd >= 0)
close(fd);
if (log_handle)
ndb_mgm_destroy_logevent_handle(&log_handle);
return result;
}
if (fd >= 0)
/**
* If interactive, event listener thread is already running
*/
if (log_handle && !interactive)
{
char *tmp;
char buf[1024];
{
SocketInputStream in(fd);
int count = 0;
do {
tmp = in.gets(buf, 1024);
if(tmp)
{
ndbout << tmp;
unsigned int id;
if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){
count++;
}
}
} while(count < 2);
}
SocketInputStream in(fd, 10);
int count = 0;
int retry = 0;
do {
tmp = in.gets(buf, 1024);
if(tmp && tmp[0] != 0)
if (ndb_logevent_get_next(log_handle, &log_event, 60000) > 0)
{
ndbout << tmp;
int print = 0;
switch (log_event.type) {
case NDB_LE_BackupStarted:
if (log_event.BackupStarted.backup_id == backupId)
print = 1;
break;
case NDB_LE_BackupCompleted:
if (log_event.BackupCompleted.backup_id == backupId)
print = 1;
break;
case NDB_LE_BackupAborted:
if (log_event.BackupAborted.backup_id == backupId)
print = 1;
break;
default:
break;
}
if (print)
{
Guard g(m_print_mutex);
printLogEvent(&log_event);
count++;
}
}
} while(tmp && tmp[0] != 0);
close(fd);
else
{
retry++;
}
} while(count < 2 && retry < 3);
if (retry >= 3)
ndbout << "get backup event failed for " << retry << " times" << endl;
ndb_mgm_destroy_logevent_handle(&log_handle);
}
return 0;

View file

@ -21,7 +21,8 @@ libndbmgmclient_la_LIBADD = ../mgmapi/libmgmapi.la \
../common/logger/liblogger.la \
../common/portlib/libportlib.la \
../common/util/libgeneral.la \
../common/portlib/libportlib.la
../common/portlib/libportlib.la \
../common/debugger/libtrace.la
ndb_mgm_SOURCES = main.cpp

View file

@ -23,6 +23,8 @@ extern "C" {
#elif !defined(__NETWARE__)
#include <readline/readline.h>
extern "C" int add_history(const char *command); /* From readline directory */
extern "C" int read_history(const char *command);
extern "C" int write_history(const char *command);
#define HAVE_READLINE
#endif
}
@ -155,10 +157,35 @@ int main(int argc, char** argv){
signal(SIGPIPE, handler);
com = new Ndb_mgmclient(opt_connect_str,1);
int ret= 0;
BaseString histfile;
if (!opt_execute_str)
{
#ifdef HAVE_READLINE
char *histfile_env= getenv("NDB_MGM_HISTFILE");
if (histfile_env)
histfile.assign(histfile_env,strlen(histfile_env));
else if(getenv("HOME"))
{
histfile.assign(getenv("HOME"),strlen(getenv("HOME")));
histfile.append("/.ndb_mgm_history");
}
if (histfile.length())
read_history(histfile.c_str());
#endif
ndbout << "-- NDB Cluster -- Management Client --" << endl;
while(read_and_execute(_try_reconnect));
#ifdef HAVE_READLINE
if (histfile.length())
{
BaseString histfile_tmp;
histfile_tmp.assign(histfile);
histfile_tmp.append(".TMP");
if(!write_history(histfile_tmp.c_str()))
my_rename(histfile_tmp.c_str(), histfile.c_str(), MYF(MY_WME));
}
#endif
}
else
{

View file

@ -1321,6 +1321,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"0",
STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_MAX_ALLOCATE,
"MaxAllocate",
DB_TOKEN,
"Maximum size of allocation to use when allocating memory for tables",
ConfigInfo::CI_USED,
false,
ConfigInfo::CI_INT,
"32M",
"1M",
"1G" },
{
CFG_DB_MEMREPORT_FREQUENCY,
"MemReportFrequency",

View file

@ -18,6 +18,7 @@
#include "MgmtSrvr.hpp"
#include "MgmtErrorReporter.hpp"
#include "ndb_mgmd_error.h"
#include <ConfigRetriever.hpp>
#include <NdbOut.hpp>
@ -239,13 +240,6 @@ MgmtSrvr::stopEventLog()
// Nothing yet
}
class ErrorItem
{
public:
int _errorCode;
const char * _errorText;
};
bool
MgmtSrvr::setEventLogFilter(int severity, int enable)
{
@ -268,62 +262,6 @@ MgmtSrvr::isEventLogFilterEnabled(int severity)
return g_eventLogger.isEnable((Logger::LoggerLevel)severity);
}
static ErrorItem errorTable[] =
{
{MgmtSrvr::NO_CONTACT_WITH_PROCESS, "No contact with the process (dead ?)."},
{MgmtSrvr::PROCESS_NOT_CONFIGURED, "The process is not configured."},
{MgmtSrvr::WRONG_PROCESS_TYPE,
"The process has wrong type. Expected a DB process."},
{MgmtSrvr::COULD_NOT_ALLOCATE_MEMORY, "Could not allocate memory."},
{MgmtSrvr::SEND_OR_RECEIVE_FAILED, "Send to process or receive failed."},
{MgmtSrvr::INVALID_LEVEL, "Invalid level. Should be between 1 and 30."},
{MgmtSrvr::INVALID_ERROR_NUMBER, "Invalid error number. Should be >= 0."},
{MgmtSrvr::INVALID_TRACE_NUMBER, "Invalid trace number."},
{MgmtSrvr::NOT_IMPLEMENTED, "Not implemented."},
{MgmtSrvr::INVALID_BLOCK_NAME, "Invalid block name"},
{MgmtSrvr::CONFIG_PARAM_NOT_EXIST,
"The configuration parameter does not exist for the process type."},
{MgmtSrvr::CONFIG_PARAM_NOT_UPDATEABLE,
"The configuration parameter is not possible to update."},
{MgmtSrvr::VALUE_WRONG_FORMAT_INT_EXPECTED,
"Incorrect value. Expected integer."},
{MgmtSrvr::VALUE_TOO_LOW, "Value is too low."},
{MgmtSrvr::VALUE_TOO_HIGH, "Value is too high."},
{MgmtSrvr::VALUE_WRONG_FORMAT_BOOL_EXPECTED,
"Incorrect value. Expected TRUE or FALSE."},
{MgmtSrvr::CONFIG_FILE_OPEN_WRITE_ERROR,
"Could not open configuration file for writing."},
{MgmtSrvr::CONFIG_FILE_OPEN_READ_ERROR,
"Could not open configuration file for reading."},
{MgmtSrvr::CONFIG_FILE_WRITE_ERROR,
"Write error when writing configuration file."},
{MgmtSrvr::CONFIG_FILE_READ_ERROR,
"Read error when reading configuration file."},
{MgmtSrvr::CONFIG_FILE_CLOSE_ERROR, "Could not close configuration file."},
{MgmtSrvr::CONFIG_CHANGE_REFUSED_BY_RECEIVER,
"The change was refused by the receiving process."},
{MgmtSrvr::COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM,
"The change could not be synced against physical medium."},
{MgmtSrvr::CONFIG_FILE_CHECKSUM_ERROR,
"The config file is corrupt. Checksum error."},
{MgmtSrvr::NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE,
"It is not possible to send an update of a configuration variable "
"to this kind of process."},
{MgmtSrvr::NODE_SHUTDOWN_IN_PROGESS, "Node shutdown in progress" },
{MgmtSrvr::SYSTEM_SHUTDOWN_IN_PROGRESS, "System shutdown in progress" },
{MgmtSrvr::NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH,
"Node shutdown would cause system crash" },
{MgmtSrvr::UNSUPPORTED_NODE_SHUTDOWN,
"Unsupported multi node shutdown. Abort option required." },
{MgmtSrvr::NODE_NOT_API_NODE, "The specified node is not an API node." },
{MgmtSrvr::OPERATION_NOT_ALLOWED_START_STOP,
"Operation not allowed while nodes are starting or stopping."},
{MgmtSrvr::NO_CONTACT_WITH_DB_NODES, "No contact with database nodes" }
};
int MgmtSrvr::translateStopRef(Uint32 errCode)
{
switch(errCode){
@ -343,8 +281,6 @@ int MgmtSrvr::translateStopRef(Uint32 errCode)
return 4999;
}
static int noOfErrorCodes = sizeof(errorTable) / sizeof(ErrorItem);
int
MgmtSrvr::getNodeCount(enum ndb_mgm_node_type type) const
{
@ -1969,18 +1905,8 @@ MgmtSrvr::dumpState(int nodeId, const Uint32 args[], Uint32 no)
const char* MgmtSrvr::getErrorText(int errorCode, char *buf, int buf_sz)
{
for (int i = 0; i < noOfErrorCodes; ++i) {
if (errorCode == errorTable[i]._errorCode) {
BaseString::snprintf(buf, buf_sz, errorTable[i]._errorText);
buf[buf_sz-1]= 0;
return buf;
}
}
ndb_error_string(errorCode, buf, buf_sz);
buf[buf_sz-1]= 0;
return buf;
}

View file

@ -148,45 +148,6 @@ public:
*/
bool isEventLogFilterEnabled(int severity);
STATIC_CONST( NO_CONTACT_WITH_PROCESS = 5000 );
STATIC_CONST( PROCESS_NOT_CONFIGURED = 5001 );
STATIC_CONST( WRONG_PROCESS_TYPE = 5002 );
STATIC_CONST( COULD_NOT_ALLOCATE_MEMORY = 5003 );
STATIC_CONST( SEND_OR_RECEIVE_FAILED = 5005 );
STATIC_CONST( INVALID_LEVEL = 5006 );
STATIC_CONST( INVALID_ERROR_NUMBER = 5007 );
STATIC_CONST( INVALID_TRACE_NUMBER = 5008 );
STATIC_CONST( NOT_IMPLEMENTED = 5009 );
STATIC_CONST( INVALID_BLOCK_NAME = 5010 );
STATIC_CONST( CONFIG_PARAM_NOT_EXIST = 5011 );
STATIC_CONST( CONFIG_PARAM_NOT_UPDATEABLE = 5012 );
STATIC_CONST( VALUE_WRONG_FORMAT_INT_EXPECTED = 5013 );
STATIC_CONST( VALUE_TOO_LOW = 5014 );
STATIC_CONST( VALUE_TOO_HIGH = 5015 );
STATIC_CONST( VALUE_WRONG_FORMAT_BOOL_EXPECTED = 5016 );
STATIC_CONST( CONFIG_FILE_OPEN_WRITE_ERROR = 5017 );
STATIC_CONST( CONFIG_FILE_OPEN_READ_ERROR = 5018 );
STATIC_CONST( CONFIG_FILE_WRITE_ERROR = 5019 );
STATIC_CONST( CONFIG_FILE_READ_ERROR = 5020 );
STATIC_CONST( CONFIG_FILE_CLOSE_ERROR = 5021 );
STATIC_CONST( CONFIG_CHANGE_REFUSED_BY_RECEIVER = 5022 );
STATIC_CONST( COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM = 5023 );
STATIC_CONST( CONFIG_FILE_CHECKSUM_ERROR = 5024 );
STATIC_CONST( NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE = 5025 );
STATIC_CONST( NODE_SHUTDOWN_IN_PROGESS = 5026 );
STATIC_CONST( SYSTEM_SHUTDOWN_IN_PROGRESS = 5027 );
STATIC_CONST( NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH = 5028 );
STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 );
STATIC_CONST( UNSUPPORTED_NODE_SHUTDOWN = 5031 );
STATIC_CONST( NODE_NOT_API_NODE = 5062 );
STATIC_CONST( OPERATION_NOT_ALLOWED_START_STOP = 5063 );
/**
* This enum specifies the different signal loggig modes possible to set
* with the setSignalLoggingMode method.

View file

@ -0,0 +1,33 @@
/* Copyright (C) 2007 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef NDB_MGMD_ERROR_H
#define NDB_MGMD_ERROR_H
/*
 * Error codes returned by the management server (MgmtSrvr), in the
 * 5000-range.  NODE_SHUTDOWN_IN_PROGESS keeps its historical
 * misspelling ("PROGESS") for source compatibility.
 */
#define NO_CONTACT_WITH_PROCESS 5000 /* no contact with the process (dead?) */
#define WRONG_PROCESS_TYPE 5002 /* expected a DB process */
#define SEND_OR_RECEIVE_FAILED 5005
#define INVALID_ERROR_NUMBER 5007 /* error number must be >= 0 */
#define INVALID_TRACE_NUMBER 5008
#define INVALID_BLOCK_NAME 5010
#define NODE_SHUTDOWN_IN_PROGESS 5026
#define SYSTEM_SHUTDOWN_IN_PROGRESS 5027
#define NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH 5028
#define NO_CONTACT_WITH_DB_NODES 5030
#define UNSUPPORTED_NODE_SHUTDOWN 5031 /* multi-node shutdown needs abort option */
#define NODE_NOT_API_NODE 5062 /* specified node is not an API node */
#define OPERATION_NOT_ALLOWED_START_STOP 5063 /* nodes are starting or stopping */
#endif

View file

@ -754,17 +754,27 @@ Ndb::getNodeId()
}
/****************************************************************************
Uint64 getTupleIdFromNdb( Uint32 aTableId, Uint32 cacheSize );
Uint64 getAutoIncrementValue( const char* aTableName,
Uint64 & autoValue,
Uint32 cacheSize,
Uint64 step,
Uint64 start);
Parameters: aTableId : The TableId.
cacheSize: Prefetch this many values
Remark: Returns a new TupleId to the application.
The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
Parameters: aTableName (IN) : The table name.
autoValue (OUT) : Returns new autoincrement value
cacheSize (IN) : Prefetch this many values
step (IN) : Specifies the step between the
autoincrement values.
start (IN) : Start value for first value
Remark: Returns a new autoincrement value to the application.
The autoincrement values can be increased by steps
(default 1) and a number of values can be prefetched
by specifying cacheSize (default 10).
****************************************************************************/
int
Ndb::getAutoIncrementValue(const char* aTableName,
Uint64 & tupleId, Uint32 cacheSize)
Uint64 & autoValue, Uint32 cacheSize,
Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@ -778,15 +788,16 @@ Ndb::getAutoIncrementValue(const char* aTableName,
}
const NdbTableImpl* table = info->m_table_impl;
TupleIdRange & range = info->m_tuple_id_range;
if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %lu", (ulong) tupleId));
DBUG_PRINT("info", ("value %lu", (ulong) autoValue));
DBUG_RETURN(0);
}
int
Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
Uint64 & tupleId, Uint32 cacheSize)
Uint64 & autoValue, Uint32 cacheSize,
Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@ -801,51 +812,86 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
DBUG_RETURN(-1);
}
TupleIdRange & range = info->m_tuple_id_range;
if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_PRINT("info", ("value %lu", (ulong)autoValue));
DBUG_RETURN(0);
}
int
Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 & tupleId,
Uint32 cacheSize)
TupleIdRange & range, Uint64 & autoValue,
Uint32 cacheSize, Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
assert(aTable != 0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_PRINT("info", ("value %lu", (ulong)autoValue));
DBUG_RETURN(0);
}
int
Ndb::getTupleIdFromNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & tupleId, Uint32 cacheSize)
TupleIdRange & range, Uint64 & tupleId,
Uint32 cacheSize, Uint64 step, Uint64 start)
{
/*
Returns a new TupleId to the application.
The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
In most cases step= start= 1, in which case we get:
1,2,3,4,5,...
If step=10 and start=5 and first number is 1, we get:
5,15,25,35,...
*/
DBUG_ENTER("Ndb::getTupleIdFromNdb");
if (range.m_first_tuple_id != range.m_last_tuple_id)
/*
Check if the next value can be taken from the pre-fetched
sequence.
*/
if (range.m_first_tuple_id != range.m_last_tuple_id &&
range.m_first_tuple_id + step <= range.m_last_tuple_id)
{
assert(range.m_first_tuple_id < range.m_last_tuple_id);
tupleId = ++range.m_first_tuple_id;
DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId));
range.m_first_tuple_id += step;
tupleId = range.m_first_tuple_id;
DBUG_PRINT("info", ("Next cached value %lu", (ulong) tupleId));
}
else
{
/*
If start value is greater than step it is ignored
*/
Uint64 offset = (start > step) ? 1 : start;
/*
Pre-fetch a number of values depending on cacheSize
*/
if (cacheSize == 0)
cacheSize = 1;
DBUG_PRINT("info", ("reading %u values from database", (uint)cacheSize));
/*
* reserve next cacheSize entries in db. adds cacheSize to NEXTID
* and returns first tupleId in the new range.
* and returns first tupleId in the new range. If tupleId's are
* incremented in steps then multiply the cacheSize with step size.
*/
Uint64 opValue = cacheSize;
Uint64 opValue = cacheSize * step;
if (opTupleIdOnNdb(table, range, opValue, 0) == -1)
DBUG_RETURN(-1);
tupleId = opValue;
DBUG_PRINT("info", ("Next value fetched from database %lu", (ulong) opValue));
DBUG_PRINT("info", ("Increasing %lu by offset %lu, increment is %lu", (ulong) (ulong) opValue, (ulong) offset, (ulong) step));
Uint64 current, next;
Uint64 div = ((Uint64) (opValue + step - offset)) / step;
next = div * step + offset;
current = (next < step) ? next : next - step;
tupleId = (opValue <= current) ? current : next;
DBUG_PRINT("info", ("Returning %lu", (ulong) tupleId));
range.m_first_tuple_id = tupleId;
}
DBUG_RETURN(0);
}

View file

@ -1262,6 +1262,7 @@ NdbBlob::deletePartsUnknown(Uint32 part)
DBUG_RETURN(-1);
}
tOp->m_abortOption= NdbOperation::AO_IgnoreError;
tOp->m_noErrorPropagation = true;
n++;
}
DBUG_PRINT("info", ("bat=%u", bat));
@ -1598,6 +1599,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
}
if (isWriteOp()) {
tOp->m_abortOption = NdbOperation::AO_IgnoreError;
tOp->m_noErrorPropagation = true;
}
theHeadInlineReadOp = tOp;
// execute immediately
@ -1644,6 +1646,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
}
if (isWriteOp()) {
tOp->m_abortOption = NdbOperation::AO_IgnoreError;
tOp->m_noErrorPropagation = true;
}
theHeadInlineReadOp = tOp;
// execute immediately

View file

@ -41,6 +41,7 @@
#include <NdbEventOperation.hpp>
#include "NdbEventOperationImpl.hpp"
#include <signaldata/AlterTable.hpp>
#include "ndb_internal.hpp"
#include <EventLogger.hpp>
extern EventLogger g_eventLogger;
@ -2838,7 +2839,7 @@ send_report:
data[5]= apply_gci >> 32;
data[6]= latest_gci & ~(Uint32)0;
data[7]= latest_gci >> 32;
m_ndb->theImpl->send_event_report(data,8);
Ndb_internal::send_event_report(m_ndb, data,8);
#ifdef VM_TRACE
assert(m_total_alloc >= m_free_data_sz);
#endif

View file

@ -76,7 +76,8 @@ NdbOperation::NdbOperation(Ndb* aNdb, NdbOperation::Type aType) :
m_keyInfoGSN(GSN_KEYINFO),
m_attrInfoGSN(GSN_ATTRINFO),
theBlobList(NULL),
m_abortOption(-1)
m_abortOption(-1),
m_noErrorPropagation(false)
{
theReceiver.init(NdbReceiver::NDB_OPERATION, this);
theError.code = 0;
@ -101,7 +102,8 @@ NdbOperation::setErrorCode(int anErrorCode)
theError.code = anErrorCode;
theNdbCon->theErrorLine = theErrorLine;
theNdbCon->theErrorOperation = this;
theNdbCon->setOperationErrorCode(anErrorCode);
if (!(m_abortOption == AO_IgnoreError && m_noErrorPropagation))
theNdbCon->setOperationErrorCode(anErrorCode);
}
/******************************************************************************
@ -116,6 +118,7 @@ NdbOperation::setErrorCodeAbort(int anErrorCode)
theError.code = anErrorCode;
theNdbCon->theErrorLine = theErrorLine;
theNdbCon->theErrorOperation = this;
// ignore m_noErrorPropagation
theNdbCon->setOperationErrorCodeAbort(anErrorCode);
}
@ -161,6 +164,7 @@ NdbOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection){
theMagicNumber = 0xABCDEF01;
theBlobList = NULL;
m_abortOption = -1;
m_noErrorPropagation = false;
m_no_disk_flag = 1;
tSignal = theNdb->getSignal();

View file

@ -0,0 +1,26 @@
/* Copyright (C) 2007 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "NdbImpl.hpp"
/*
  Ndb_internal: a private bridge class that lets selected friend classes
  (currently only NdbEventBuffer) reach Ndb's internal NdbImpl object
  without exposing NdbImpl in a public header.  It is not instantiable
  by anyone else (private constructor/destructor, no public members).
*/
class Ndb_internal
{
private:
friend class NdbEventBuffer;
Ndb_internal() {}
virtual ~Ndb_internal() {}
/* Forward an event report of `length` words to the given Ndb's
   private implementation (NdbImpl::send_event_report). */
static int send_event_report(Ndb *ndb, Uint32 *data, Uint32 length)
{ return ndb->theImpl->send_event_report(data, length); }
};

View file

@ -19,6 +19,9 @@
#include <ndberror.h>
#include <m_string.h>
#include "../mgmsrv/ndb_mgmd_error.h"
typedef struct ErrorBundle {
int code;
int mysql_code;
@ -619,6 +622,33 @@ ErrorBundle ErrorCodes[] = {
{ 4273, DMEC, IE, "No blob table in dict cache" },
{ 4274, DMEC, IE, "Corrupted main table PK in blob operation" },
{ 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" },
{ NO_CONTACT_WITH_PROCESS, DMEC, AE,
"No contact with the process (dead ?)."},
{ WRONG_PROCESS_TYPE, DMEC, AE,
"The process has wrong type. Expected a DB process."},
{ SEND_OR_RECEIVE_FAILED, DMEC, AE,
"Send to process or receive failed."},
{ INVALID_ERROR_NUMBER, DMEC, AE,
"Invalid error number. Should be >= 0."},
{ INVALID_TRACE_NUMBER, DMEC, AE,
"Invalid trace number."},
{ INVALID_BLOCK_NAME, DMEC, AE,
"Invalid block name"},
{ NODE_SHUTDOWN_IN_PROGESS, DMEC, AE,
"Node shutdown in progress" },
{ SYSTEM_SHUTDOWN_IN_PROGRESS, DMEC, AE,
"System shutdown in progress" },
{ NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH, DMEC, AE,
"Node shutdown would cause system crash" },
{ UNSUPPORTED_NODE_SHUTDOWN, DMEC, AE,
"Unsupported multi node shutdown. Abort option required." },
{ NODE_NOT_API_NODE, DMEC, AE,
"The specified node is not an API node." },
{ OPERATION_NOT_ALLOWED_START_STOP, DMEC, AE,
"Operation not allowed while nodes are starting or stopping."},
{ NO_CONTACT_WITH_DB_NODES, DMEC, AE,
"No contact with database nodes" }
};
static

View file

@ -212,6 +212,76 @@ int runTestApiSession(NDBT_Context* ctx, NDBT_Step* step)
}
}
/*
  MGMAPI connect-timeout test.
  Two scenarios, each with a 3000 ms handle timeout:
    1) connect to the real management server (should succeed quickly);
    2) connect to an unreachable host ("1.1.1.1") and verify the call
       returns within the configured timeout.
  Each attempt passes only if it completes in under 4 wall-clock seconds
  (measured with gettimeofday at one-second granularity; tv_usec is
  deliberately ignored, so the bound is approximate).
  Returns NDBT_OK if both attempts finish in time, NDBT_FAILED otherwise.
*/
int runTestApiConnectTimeout(NDBT_Context* ctx, NDBT_Step* step)
{
char *mgm= ctx->getRemoteMgm();
int result= NDBT_FAILED;
/* NOTE(review): cc, mgmd_nodeid and reply appear unused in this
   function — possibly copied from a sibling test; confirm and prune. */
int cc= 0;
int mgmd_nodeid= 0;
ndb_mgm_reply reply;

NdbMgmHandle h;
h= ndb_mgm_create_handle();
ndb_mgm_set_connectstring(h, mgm);

/* Scenario 1: valid connect string, expect a fast successful connect. */
ndbout << "TEST connect timeout" << endl;

ndb_mgm_set_timeout(h, 3000);

struct timeval tstart, tend;
int secs;
timerclear(&tstart);
timerclear(&tend);
gettimeofday(&tstart,NULL);

/* ndb_mgm_connect(h, no_retries=0, retry_delay=0, verbose=0):
   return value intentionally ignored; only elapsed time is checked. */
ndb_mgm_connect(h,0,0,0);

gettimeofday(&tend,NULL);

secs= tend.tv_sec - tstart.tv_sec;
ndbout << "Took about: " << secs <<" seconds"<<endl;

if(secs < 4)
result= NDBT_OK;
else
goto done;

ndb_mgm_set_connectstring(h, mgm);

/* NOTE(review): this banner duplicates the one above and the connect
   string was just re-set on a handle that is destroyed on the next
   line — looks like leftover from copy/paste; confirm intent. */
ndbout << "TEST connect timeout" << endl;

ndb_mgm_destroy_handle(&h);

/* Scenario 2: unreachable host; connect must give up within timeout. */
h= ndb_mgm_create_handle();
ndb_mgm_set_connectstring(h, "1.1.1.1");

ndbout << "TEST connect timeout (invalid host)" << endl;

ndb_mgm_set_timeout(h, 3000);
timerclear(&tstart);
timerclear(&tend);
gettimeofday(&tstart,NULL);

ndb_mgm_connect(h,0,0,0);

gettimeofday(&tend,NULL);

secs= tend.tv_sec - tstart.tv_sec;
ndbout << "Took about: " << secs <<" seconds"<<endl;

if(secs < 4)
result= NDBT_OK;
else
result= NDBT_FAILED;

done:
/* Disconnect is issued even if the last connect failed; the MGMAPI
   handle is then destroyed unconditionally. */
ndb_mgm_disconnect(h);
ndb_mgm_destroy_handle(&h);

return result;
}
int runTestApiTimeoutBasic(NDBT_Context* ctx, NDBT_Step* step)
{
char *mgm= ctx->getRemoteMgm();
@ -727,6 +797,11 @@ TESTCASE("ApiSessionFailure",
"Test failures in MGMAPI session"){
INITIALIZER(runTestApiSession);
}
TESTCASE("ApiConnectTimeout",
"Connect timeout tests for MGMAPI"){
INITIALIZER(runTestApiConnectTimeout);
}
TESTCASE("ApiTimeoutBasic",
"Basic timeout tests for MGMAPI"){

View file

@ -898,6 +898,10 @@ max-time: 120
cmd: testMgm
args: -n ApiSessionFailure T1
max-time: 15
cmd: testMgm
args: -n ApiConnectTimeout T1
max-time: 120
cmd: testMgm
args: -n ApiTimeoutBasic T1

View file

@ -30,9 +30,11 @@ int main(int argc, const char** argv){
const char* _tabname = NULL;
int _help = 0;
int _batch = 512;
const char* db = "TEST_DB";
struct getargs args[] = {
{ "batch", 'b', arg_integer, &_batch, "Number of operations in each transaction", "batch" },
{ "database", 'd', arg_string, &db, "Database", "" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@ -55,7 +57,7 @@ int main(int argc, const char** argv){
{
return NDBT_ProgramExit(NDBT_FAILED);
}
Ndb MyNdb(&con, "TEST_DB" );
Ndb MyNdb(&con, db);
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());

View file

@ -62,13 +62,13 @@ foreach my $node (@nodes)
(($config_get_fs)?" with filesystem":"").
"\n\n";
my $recurse= ($config_get_fs)?'-r ':'';
system 'scp '.$recurse.$config_username.config($node,'host').
system 'scp -p '.$recurse.$config_username.config($node,'host').
':'.config($node,'datadir')."/ndb_".$node."* ".
"$reportdir/\n";
}
print "\n\n Copying configuration file...\n\n\t$config_file\n\n";
system "cp $config_file $reportdir/";
system "cp -p $config_file $reportdir/";
my $r = system 'bzip2 2>&1 > /dev/null < /dev/null';
my $outfile;

View file

@ -1159,19 +1159,22 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
char * dataPtr = attr_data->string_value;
Uint32 length = 0;
const unsigned char * src = (const unsigned char *)dataPtr;
switch(attr_desc->m_column->getType()){
case NdbDictionary::Column::Varchar:
case NdbDictionary::Column::Varbinary:
length = src[0] + 1;
break;
case NdbDictionary::Column::Longvarchar:
case NdbDictionary::Column::Longvarbinary:
length = src[0] + (src[1] << 8) + 2;
break;
default:
length = attr_data->size;
break;
if (!attr_data->null)
{
const unsigned char * src = (const unsigned char *)dataPtr;
switch(attr_desc->m_column->getType()){
case NdbDictionary::Column::Varchar:
case NdbDictionary::Column::Varbinary:
length = src[0] + 1;
break;
case NdbDictionary::Column::Longvarchar:
case NdbDictionary::Column::Longvarbinary:
length = src[0] + (src[1] << 8) + 2;
break;
default:
length = attr_data->size;
break;
}
}
if (j == 0 && tup.getTable()->have_auto_inc(i))
tup.getTable()->update_max_auto_val(dataPtr,size*arraySize);