Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.1

into  whalegate.ndb.mysql.com:/home/tomas/cge-5.1
This commit is contained in:
tomas@whalegate.ndb.mysql.com 2007-12-11 21:02:29 +01:00
commit 2a01585422
67 changed files with 2032 additions and 961 deletions

View file

@ -973,14 +973,7 @@ SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
--echo --echo
--echo ** update from master ** --echo ** update from master **
connection master; connection master;
####################################### UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
# This test should be uncommented
# once bug30674 is patched
#######################################
#***************************
#UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
#***************************
--replace_column 5 CURRENT_TIMESTAMP --replace_column 5 CURRENT_TIMESTAMP
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;

View file

@ -5,10 +5,6 @@ connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
# Check that server1 has NDB support # Check that server1 has NDB support
connection server1; connection server1;
disable_query_log; disable_query_log;
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
flush tables;
--require r/true.require --require r/true.require
select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster'; select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
--source include/ndb_not_readonly.inc --source include/ndb_not_readonly.inc
@ -17,14 +13,32 @@ enable_query_log;
# Check that server2 has NDB support # Check that server2 has NDB support
connection server2; connection server2;
disable_query_log; disable_query_log;
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
flush tables;
--require r/true.require --require r/true.require
select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster'; select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
--source include/ndb_not_readonly.inc --source include/ndb_not_readonly.inc
enable_query_log; enable_query_log;
# Set the default connection to 'server1' # cleanup
connection server1;
disable_query_log;
disable_warnings;
--error 0,1051
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush tables;
flush status;
enable_warnings;
enable_query_log;
connection server2;
disable_query_log;
disable_warnings;
--error 0,1051
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush tables;
flush status;
enable_warnings;
enable_query_log;
# Set the default connection
connection server1; connection server1;

View file

@ -129,7 +129,15 @@ our $opt_vs_config = $ENV{'MTR_VS_CONFIG'};
our $default_vardir; our $default_vardir;
our $opt_usage; our $opt_usage;
our $opt_suites= "main,binlog,rpl,rpl_ndb,ndb"; # Default suites to run our $opt_suites;
our $opt_suites_default= "main,binlog,rpl,rpl_ndb,ndb"; # Default suites to run
our @extra_suites=
(
["mysql-5.1-new-ndb", "ndb_team"],
["mysql-5.1-telco-6.2", "ndb_team"],
["mysql-5.1-telco-6.3", "ndb_team"],
);
our $opt_script_debug= 0; # Script debugging, enable with --script-debug our $opt_script_debug= 0; # Script debugging, enable with --script-debug
our $opt_verbose= 0; # Verbose output, enable with --verbose our $opt_verbose= 0; # Verbose output, enable with --verbose
@ -397,6 +405,20 @@ sub main () {
else else
{ {
# Figure out which tests we are going to run # Figure out which tests we are going to run
if (!$opt_suites)
{
# use default and add any extra_suites as defined
$opt_suites= $opt_suites_default;
my $ddd= basename(dirname($glob_mysql_test_dir));
foreach my $extra_suite (@extra_suites)
{
if ($extra_suite->[0] eq "$ddd")
{
$opt_suites= "$extra_suite->[1],$opt_suites";
}
}
}
my $tests= collect_test_cases($opt_suites); my $tests= collect_test_cases($opt_suites);
# Turn off NDB and other similar options if no tests use it # Turn off NDB and other similar options if no tests use it
@ -5195,7 +5217,7 @@ Options to control what test suites or cases to run
start-from=PREFIX Run test cases starting from test prefixed with PREFIX start-from=PREFIX Run test cases starting from test prefixed with PREFIX
suite[s]=NAME1,..,NAMEN Collect tests in suites from the comma separated suite[s]=NAME1,..,NAMEN Collect tests in suites from the comma separated
list of suite names. list of suite names.
The default is: "$opt_suites" The default is: "$opt_suites_default"
skip-rpl Skip the replication test cases. skip-rpl Skip the replication test cases.
skip-im Don't start IM, and skip the IM test cases skip-im Don't start IM, and skip the IM test cases
big-test Set the environment variable BIG_TEST, which can be big-test Set the environment variable BIG_TEST, which can be

View file

@ -0,0 +1,445 @@
DROP TABLE IF EXISTS t1,t2;
DROP TABLE IF EXISTS t1;
set @old_auto_increment_offset = @@session.auto_increment_offset;
set @old_auto_increment_increment = @@session.auto_increment_increment;
set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
flush status;
create table t1 (a int not null auto_increment primary key) engine ndb;
insert into t1 values (NULL);
select * from t1 order by a;
a
1
update t1 set a = 5 where a = 1;
insert into t1 values (NULL);
select * from t1 order by a;
a
5
6
insert into t1 values (7);
insert into t1 values (NULL);
select * from t1 order by a;
a
5
6
7
8
insert into t1 values (2);
insert into t1 values (NULL);
select * from t1 order by a;
a
2
5
6
7
8
9
update t1 set a = 4 where a = 2;
insert into t1 values (NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
10
delete from t1 where a = 10;
insert into t1 values (NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
11
replace t1 values (NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
replace t1 values (15);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
15
replace into t1 values (NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
15
16
replace t1 values (15);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
15
16
insert ignore into t1 values (NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
15
16
17
insert ignore into t1 values (15), (NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
15
16
17
18
insert into t1 values (15)
on duplicate key update a = 20;
insert into t1 values (NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
16
17
18
20
21
insert into t1 values (NULL) on duplicate key update a = 30;
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
16
17
18
20
21
22
insert into t1 values (30) on duplicate key update a = 40;
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
16
17
18
20
21
22
30
insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL);
select * from t1 order by a;
a
4
5
6
7
8
9
11
12
16
17
18
20
21
22
30
600
601
602
610
611
drop table t1;
create table t1 (a int not null primary key,
b int not null unique auto_increment) engine ndb;
insert into t1 values (1, NULL);
insert into t1 values (3, NULL);
update t1 set b = 3 where a = 3;
insert into t1 values (4, NULL);
select * from t1 order by a;
a b
1 1
3 3
4 4
drop table t1;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
1 1 0
11 2 1
21 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_offset=5;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
27 4 3
35 5 4
99 6 5
105 7 6
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
7
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_increment=2;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
1 1 0
3 2 1
5 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
7 1 0
8 2 1
9 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 3;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
15 1 0
25 2 1
35 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 5;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 100;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
105 1 0
115 2 1
125 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
set ndb_autoincrement_prefetch_sz = 32;
drop table if exists t1;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
set ndb_autoincrement_prefetch_sz = 32;
create table t1 (a int not null auto_increment primary key) engine ndb;
insert into t1 values (NULL);
insert into t1 values (NULL);
select * from t1 order by a;
a
1
33
insert into t1 values (20);
insert into t1 values (NULL);
select * from t1 order by a;
a
1
20
33
34
insert into t1 values (35);
insert into t1 values (NULL);
insert into t1 values (NULL);
ERROR 23000: Duplicate entry '35' for key 'PRIMARY'
select * from t1 order by a;
a
1
20
21
33
34
35
insert into t1 values (100);
insert into t1 values (NULL);
insert into t1 values (NULL);
select * from t1 order by a;
a
1
20
21
22
33
34
35
100
101
set auto_increment_offset = @old_auto_increment_offset;
set auto_increment_increment = @old_auto_increment_increment;
set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;
drop table t1;

View file

@ -869,6 +869,30 @@ a b
3 30 3 30
4 1 4 1
drop table t1,t2; drop table t1,t2;
create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
insert into t1 values
('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
replace into t1 values ('a', '-a');
replace into t1 values ('b', '-b');
replace into t1 values ('c', '-c');
replace into t1 values ('aa', '-aa');
replace into t1 values ('bb', '-bb');
replace into t1 values ('cc', '-cc');
replace into t1 values ('aaa', '-aaa');
replace into t1 values ('bbb', '-bbb');
replace into t1 values ('ccc', '-ccc');
select * from t1 order by 1,2;
a b
a -a
aa -aa
aaa -aaa
b -b
bb -bb
bbb -bbb
c -c
cc -cc
ccc -ccc
drop table t1;
End of 5.0 tests End of 5.0 tests
CREATE TABLE t1 (a VARCHAR(255) NOT NULL, CREATE TABLE t1 (a VARCHAR(255) NOT NULL,
CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb; CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;

View file

@ -568,3 +568,24 @@ select count(*) from t1;
count(*) count(*)
0 0
drop table t1; drop table t1;
create table t1(
a int,
blob_nn blob not null,
text_nn text not null,
blob_nl blob,
text_nl text,
primary key(a)
) engine=ndb;
insert into t1(a) values (1);
Warnings:
Warning 1364 Field 'blob_nn' doesn't have a default value
Warning 1364 Field 'text_nn' doesn't have a default value
insert into t1(a, text_nl) values (2, 'MySQL Cluster NDB');
Warnings:
Warning 1364 Field 'blob_nn' doesn't have a default value
Warning 1364 Field 'text_nn' doesn't have a default value
select a, length(blob_nn), length(text_nn), blob_nl, text_nl from t1 order by a;
a length(blob_nn) length(text_nn) blob_nl text_nl
1 0 0 NULL NULL
2 0 0 NULL MySQL Cluster NDB
drop table t1;

View file

@ -112,9 +112,9 @@ unique key(a)
) engine=ndb; ) engine=ndb;
insert into t1 values(1, 'aAa'); insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa'); insert into t1 values(2, 'aaa');
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry 'aaa' for key 'a'
insert into t1 values(3, 'AAA'); insert into t1 values(3, 'AAA');
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry 'AAA' for key 'a'
select * from t1 order by p; select * from t1 order by p;
p a p a
1 aAa 1 aAa
@ -138,9 +138,9 @@ unique key(a)
) engine=ndb; ) engine=ndb;
insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f'); insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f');
insert into t1 values(99,'b'); insert into t1 values(99,'b');
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry 'b' for key 'a'
insert into t1 values(99,'a '); insert into t1 values(99,'a ');
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry 'a ' for key 'a'
select a,length(a) from t1 order by a; select a,length(a) from t1 order by a;
a length(a) a length(a)
A 1 A 1

View file

@ -22,7 +22,7 @@ select * from t1 where b = 4 order by a;
a b c a b c
3 4 6 3 4 6
insert into t1 values(8, 2, 3); insert into t1 values(8, 2, 3);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '2' for key 'ib'
select * from t1 order by a; select * from t1 order by a;
a b c a b c
1 2 3 1 2 3
@ -93,7 +93,7 @@ a b c
1 1 1 1 1 1
4 4 NULL 4 4 NULL
insert into t1 values(5,1,1); insert into t1 values(5,1,1);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '1-1' for key 'bc'
drop table t1; drop table t1;
CREATE TABLE t2 ( CREATE TABLE t2 (
a int unsigned NOT NULL PRIMARY KEY, a int unsigned NOT NULL PRIMARY KEY,
@ -116,7 +116,7 @@ select * from t2 where b = 4 order by a;
a b c a b c
3 4 6 3 4 6
insert into t2 values(8, 2, 3); insert into t2 values(8, 2, 3);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '2-3' for key 'b'
select * from t2 order by a; select * from t2 order by a;
a b c a b c
1 2 3 1 2 3
@ -139,7 +139,7 @@ a b c
8 2 3 8 2 3
create unique index bi using hash on t2(b); create unique index bi using hash on t2(b);
insert into t2 values(9, 3, 1); insert into t2 values(9, 3, 1);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '3' for key 'bi'
alter table t2 drop index bi; alter table t2 drop index bi;
insert into t2 values(9, 3, 1); insert into t2 values(9, 3, 1);
select * from t2 order by a; select * from t2 order by a;
@ -229,7 +229,7 @@ pk a
3 NULL 3 NULL
4 4 4 4
insert into t1 values (5,0); insert into t1 values (5,0);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '0' for key 'a'
select * from t1 order by pk; select * from t1 order by pk;
pk a pk a
-1 NULL -1 NULL
@ -262,7 +262,7 @@ pk a b c
0 NULL 18 NULL 0 NULL 18 NULL
1 3 19 abc 1 3 19 abc
insert into t2 values(2,3,19,'abc'); insert into t2 values(2,3,19,'abc');
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '3-abc' for key 'si'
select * from t2 order by pk; select * from t2 order by pk;
pk a b c pk a b c
-1 1 17 NULL -1 1 17 NULL
@ -682,7 +682,7 @@ create table t1 (a int primary key, b varchar(1000) not null, unique key (b))
engine=ndb charset=utf8; engine=ndb charset=utf8;
insert into t1 values (1, repeat(_utf8 0xe288ab6474, 200)); insert into t1 values (1, repeat(_utf8 0xe288ab6474, 200));
insert into t1 values (2, repeat(_utf8 0xe288ab6474, 200)); insert into t1 values (2, repeat(_utf8 0xe288ab6474, 200));
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫d' for key 'b'
select a, sha1(b) from t1; select a, sha1(b) from t1;
a sha1(b) a sha1(b)
1 08f5d02c8b8bc244f275bdfc22c42c5cab0d9d7d 1 08f5d02c8b8bc244f275bdfc22c42c5cab0d9d7d

View file

@ -657,172 +657,3 @@ a b
2 NULL 2 NULL
3 NULL 3 NULL
drop table t1; drop table t1;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
1 1 0
11 2 1
21 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_offset=5;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
27 4 3
35 5 4
99 6 5
105 7 6
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
7
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_increment=2;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
1 1 0
3 2 1
5 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
7 1 0
8 2 1
9 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 3;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
15 1 0
25 2 1
35 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 5;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
5 1 0
15 2 1
25 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 100;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
pk b c
105 1 0
115 2 1
125 3 2
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
COUNT(t1.pk)
3
DROP TABLE t1, t2;

View file

@ -1,4 +1,5 @@
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
flush status;
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
flush status; flush status;
create table t1 (a int) engine=ndbcluster; create table t1 (a int) engine=ndbcluster;

View file

@ -1,4 +1,5 @@
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
flush status;
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
flush status; flush status;
create table t1 (a int) engine=ndbcluster; create table t1 (a int) engine=ndbcluster;

View file

@ -28,7 +28,7 @@ pk1 b c
2 2 2 2 2 2
4 1 1 4 1 1
UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4; UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4;
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '2' for key 'c'
UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4; UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4;
select * from t1 order by pk1; select * from t1 order by pk1;
pk1 b c pk1 b c
@ -62,9 +62,9 @@ INSERT INTO t3 VALUES (2, 2);
UPDATE t1 SET a = 1; UPDATE t1 SET a = 1;
UPDATE t1 SET a = 1 ORDER BY a; UPDATE t1 SET a = 1 ORDER BY a;
UPDATE t2 SET a = 1; UPDATE t2 SET a = 1;
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '1-2' for key 'a'
UPDATE t2 SET a = 1 ORDER BY a; UPDATE t2 SET a = 1 ORDER BY a;
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '1-2' for key 'a'
UPDATE t3 SET a = 1; UPDATE t3 SET a = 1;
ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY' ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY'
UPDATE t3 SET a = 1 ORDER BY a; UPDATE t3 SET a = 1 ORDER BY a;

View file

@ -12,10 +12,7 @@
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms
ndb_binlog_basic : Bug #32759 2007-11-27 mats ndb_binlog_basic assert failure 'thd->transaction.stmt.modified_non_trans_table'
# the below testcase have been reworked to avoid the bug, test contains comment, keep bug open # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open
#ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events
#ndb_binlog_discover : bug#21806 2006-08-24 #ndb_binlog_discover : bug#21806 2006-08-24
ndb_backup_print : Bug#32357: ndb_backup_print test fails sometimes in pushbuild
ndb_dd_backuprestore : Bug#32659 ndb_dd_backuprestore.test fails randomly

View file

@ -0,0 +1,293 @@
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
--disable_warnings
connection server1;
DROP TABLE IF EXISTS t1,t2;
connection server2;
DROP TABLE IF EXISTS t1;
connection server1;
--enable_warnings
set @old_auto_increment_offset = @@session.auto_increment_offset;
set @old_auto_increment_increment = @@session.auto_increment_increment;
set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
flush status;
create table t1 (a int not null auto_increment primary key) engine ndb;
# Step 1: Verify simple insert
insert into t1 values (NULL);
select * from t1 order by a;
# Step 2: Verify simple update with higher than highest value causes
# next insert to use updated_value + 1
update t1 set a = 5 where a = 1;
insert into t1 values (NULL);
select * from t1 order by a;
# Step 3: Verify insert that inserts higher than highest value causes
# next insert to use inserted_value + 1
insert into t1 values (7);
insert into t1 values (NULL);
select * from t1 order by a;
# Step 4: Verify that insert into hole, lower than highest value doesn't
# affect next insert
insert into t1 values (2);
insert into t1 values (NULL);
select * from t1 order by a;
# Step 5: Verify that update into hole, lower than highest value doesn't
# affect next insert
update t1 set a = 4 where a = 2;
insert into t1 values (NULL);
select * from t1 order by a;
# Step 6: Verify that delete of highest value doesn't cause the next
# insert to reuse this value
delete from t1 where a = 10;
insert into t1 values (NULL);
select * from t1 order by a;
# Step 7: Verify that REPLACE has the same effect as INSERT
replace t1 values (NULL);
select * from t1 order by a;
replace t1 values (15);
select * from t1 order by a;
replace into t1 values (NULL);
select * from t1 order by a;
# Step 8: Verify that REPLACE has the same effect as UPDATE
replace t1 values (15);
select * from t1 order by a;
# Step 9: Verify that IGNORE doesn't affect auto_increment
insert ignore into t1 values (NULL);
select * from t1 order by a;
insert ignore into t1 values (15), (NULL);
select * from t1 order by a;
# Step 10: Verify that on duplicate key as UPDATE behaves as an
# UPDATE
insert into t1 values (15)
on duplicate key update a = 20;
insert into t1 values (NULL);
select * from t1 order by a;
# Step 11: Verify that on duplicate key as INSERT behaves as INSERT
insert into t1 values (NULL) on duplicate key update a = 30;
select * from t1 order by a;
insert into t1 values (30) on duplicate key update a = 40;
select * from t1 order by a;
#Step 12: Vefify INSERT IGNORE (bug#32055)
insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL);
select * from t1 order by a;
drop table t1;
#Step 13: Verify auto_increment of unique key
create table t1 (a int not null primary key,
b int not null unique auto_increment) engine ndb;
insert into t1 values (1, NULL);
insert into t1 values (3, NULL);
update t1 set b = 3 where a = 3;
insert into t1 values (4, NULL);
select * from t1 order by a;
drop table t1;
#Step 14: Verify that auto_increment_increment and auto_increment_offset
# work as expected
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_offset=5;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_increment=2;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 3;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 5;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 100;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
#Step 15: Now verify that behaviour on multiple MySQL Servers behave
# properly. Start by dropping table and recreating it to start
# counters and id caches from zero again.
--disable_warnings
connection server2;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
set ndb_autoincrement_prefetch_sz = 32;
drop table if exists t1;
connection server1;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
set ndb_autoincrement_prefetch_sz = 32;
--enable_warnings
create table t1 (a int not null auto_increment primary key) engine ndb;
# Basic test, ensure that the second server gets a new range.
#Generate record with key = 1
insert into t1 values (NULL);
connection server2;
#Generate record with key = 33
insert into t1 values (NULL);
connection server1;
select * from t1 order by a;
#This insert should not affect the range of the second server
insert into t1 values (20);
connection server2;
insert into t1 values (NULL);
select * from t1 order by a;
connection server1;
#This insert should remove cached values but also skip values already
#taken by server2, given that there is no method of communicating with
#the other server it should also cause a conflict
connection server1;
insert into t1 values (35);
insert into t1 values (NULL);
connection server2;
--error ER_DUP_ENTRY
insert into t1 values (NULL);
select * from t1 order by a;
insert into t1 values (100);
insert into t1 values (NULL);
connection server1;
insert into t1 values (NULL);
select * from t1 order by a;
set auto_increment_offset = @old_auto_increment_offset;
set auto_increment_increment = @old_auto_increment_increment;
set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;
drop table t1;

View file

@ -800,9 +800,27 @@ update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
select * from t1 order by a; select * from t1 order by a;
drop table t1,t2; drop table t1,t2;
# End of 5.0 tests #
--echo End of 5.0 tests # Bug#31635
#
create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
insert into t1 values
('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
replace into t1 values ('a', '-a');
replace into t1 values ('b', '-b');
replace into t1 values ('c', '-c');
replace into t1 values ('aa', '-aa');
replace into t1 values ('bb', '-bb');
replace into t1 values ('cc', '-cc');
replace into t1 values ('aaa', '-aaa');
replace into t1 values ('bbb', '-bbb');
replace into t1 values ('ccc', '-ccc');
select * from t1 order by 1,2;
drop table t1;
--echo End of 5.0 tests
# #
# Bug #18483 Cannot create table with FK constraint # Bug #18483 Cannot create table with FK constraint

View file

@ -497,3 +497,23 @@ select count(*) from t1;
drop table t1; drop table t1;
# End of 4.1 tests # End of 4.1 tests
# bug # 30674 :
# NOT NULL Blobs should default to zero-length. Not NULL TEXT
# should default to zero-chars
create table t1(
a int,
blob_nn blob not null,
text_nn text not null,
blob_nl blob,
text_nl text,
primary key(a)
) engine=ndb;
insert into t1(a) values (1);
insert into t1(a, text_nl) values (2, 'MySQL Cluster NDB');
select a, length(blob_nn), length(text_nn), blob_nl, text_nl from t1 order by a;
drop table t1;

View file

@ -638,142 +638,4 @@ create table t1(a int primary key, b int, unique key(b)) engine=ndb;
insert ignore into t1 values (1,0), (2,0), (2,null), (3,null); insert ignore into t1 values (1,0), (2,0), (2,null), (3,null);
select * from t1 order by a; select * from t1 order by a;
drop table t1; drop table t1;
# Bug#26342 auto_increment_increment AND auto_increment_offset REALLY REALLY anger NDB cluster
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_offset=5;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_increment=2;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 3;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 5;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
CREATE TABLE t1 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
CREATE TABLE t2 (
pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b INT NOT NULL,
c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 100;
SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;
# End of 4.1 tests # End of 4.1 tests

View file

@ -4,11 +4,11 @@
--disable_warnings --disable_warnings
connection server2; connection server2;
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
flush status;
connection server1; connection server1;
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
--enable_warnings
flush status; flush status;
--enable_warnings
# Create test tables on server1 # Create test tables on server1
create table t1 (a int) engine=ndbcluster; create table t1 (a int) engine=ndbcluster;

View file

@ -6,11 +6,12 @@
--disable_warnings --disable_warnings
connection server2; connection server2;
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
flush status;
connection server1; connection server1;
drop table if exists t1, t2, t3, t4; drop table if exists t1, t2, t3, t4;
flush status;
--enable_warnings --enable_warnings
flush status;
# Create test tables on server1 # Create test tables on server1
create table t1 (a int) engine=ndbcluster; create table t1 (a int) engine=ndbcluster;

View file

@ -1089,18 +1089,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;
@ -2229,18 +2230,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;
@ -3369,18 +3371,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;

View file

@ -1089,18 +1089,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;
@ -2229,18 +2230,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;
@ -3369,18 +3371,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;

View file

@ -1086,6 +1086,9 @@ Modified #
Created # Created #
Security_type DEFINER Security_type DEFINER
Comment Comment
character_set_client latin1
collation_connection latin1_swedish_ci
Database Collation latin1_swedish_ci
-------- switch to slave -------- -------- switch to slave --------
SHOW PROCEDURE STATUS LIKE 'p1'; SHOW PROCEDURE STATUS LIKE 'p1';
@ -1097,6 +1100,9 @@ Modified #
Created # Created #
Security_type DEFINER Security_type DEFINER
Comment Comment
character_set_client latin1
collation_connection latin1_swedish_ci
Database Collation latin1_swedish_ci
-------- switch to master ------- -------- switch to master -------
@ -1149,6 +1155,9 @@ Modified #
Created # Created #
Security_type DEFINER Security_type DEFINER
Comment I have been altered Comment I have been altered
character_set_client latin1
collation_connection latin1_swedish_ci
Database Collation latin1_swedish_ci
-------- switch to slave -------- -------- switch to slave --------
SHOW PROCEDURE STATUS LIKE 'p1'; SHOW PROCEDURE STATUS LIKE 'p1';
@ -1160,6 +1169,9 @@ Modified #
Created # Created #
Security_type DEFINER Security_type DEFINER
Comment I have been altered Comment I have been altered
character_set_client latin1
collation_connection latin1_swedish_ci
Database Collation latin1_swedish_ci
-------- switch to master ------- -------- switch to master -------
@ -1251,13 +1263,13 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded)
-------- switch to master ------- -------- switch to master -------
SHOW CREATE VIEW v1; SHOW CREATE VIEW v1;
View Create View View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci
-------- switch to slave -------- -------- switch to slave --------
SHOW CREATE VIEW v1; SHOW CREATE VIEW v1;
View Create View View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci
-------- switch to master ------- -------- switch to master -------
@ -1302,13 +1314,13 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded)
-------- switch to master ------- -------- switch to master -------
SHOW CREATE VIEW v1; SHOW CREATE VIEW v1;
View Create View View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci
-------- switch to slave -------- -------- switch to slave --------
SHOW CREATE VIEW v1; SHOW CREATE VIEW v1;
View Create View View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci
-------- switch to master ------- -------- switch to master -------
@ -1402,13 +1414,13 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded)
-------- switch to master ------- -------- switch to master -------
SHOW TRIGGERS; SHOW TRIGGERS;
Trigger Event Table Statement Timing Created sql_mode Definer Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation
trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci
-------- switch to slave -------- -------- switch to slave --------
SHOW TRIGGERS; SHOW TRIGGERS;
Trigger Event Table Statement Timing Created sql_mode Definer Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation
trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci
-------- switch to master ------- -------- switch to master -------
@ -1453,11 +1465,11 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded)
-------- switch to master ------- -------- switch to master -------
SHOW TRIGGERS; SHOW TRIGGERS;
Trigger Event Table Statement Timing Created sql_mode Definer Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation
-------- switch to slave -------- -------- switch to slave --------
SHOW TRIGGERS; SHOW TRIGGERS;
Trigger Event Table Statement Timing Created sql_mode Definer Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation
-------- switch to master ------- -------- switch to master -------

View file

@ -1089,18 +1089,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;
@ -2229,18 +2230,19 @@ c1 hex(c4) c5
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 QA
** update from master ** ** update from master **
UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3;
SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c6 c7 c1 hex(c4) c5 c6 c7
1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP
2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP
3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP 3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP
** Check slave ** ** Check slave **
SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1;
c1 hex(c4) c5 c1 hex(c4) c5
1 62316231623162316231623162316231 Kyle 1 62316231623162316231623162316231 Kyle
2 62316231623162316231623162316231 JOE 2 62316231623162316231623162316231 JOE
3 62316231623162316231623162316231 QA 3 62316231623162316231623162316231 TEST
DROP TABLE t18; DROP TABLE t18;

View file

@ -389,9 +389,9 @@ INSERT INTO t8 VALUES (99,99,99);
INSERT INTO t8 VALUES (99,22,33); INSERT INTO t8 VALUES (99,22,33);
ERROR 23000: Duplicate entry '99' for key 'PRIMARY' ERROR 23000: Duplicate entry '99' for key 'PRIMARY'
INSERT INTO t8 VALUES (11,99,33); INSERT INTO t8 VALUES (11,99,33);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '99' for key 'b'
INSERT INTO t8 VALUES (11,22,99); INSERT INTO t8 VALUES (11,22,99);
ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' ERROR 23000: Duplicate entry '99' for key 'c'
SELECT * FROM t8 ORDER BY a; SELECT * FROM t8 ORDER BY a;
a b c a b c
99 99 99 99 99 99

View file

@ -14,7 +14,6 @@
rpl_ndb_2innodb : Bug #32648 Test failure between NDB Cluster and other engines rpl_ndb_2innodb : Bug #32648 Test failure between NDB Cluster and other engines
rpl_ndb_2myisam : Bug #32648 Test failure between NDB Cluster and other engines rpl_ndb_2myisam : Bug #32648 Test failure between NDB Cluster and other engines
rpl_ndb_2other : Bug #32648 Test failure between NDB Cluster and other engines rpl_ndb_2other : Bug #32648 Test failure between NDB Cluster and other engines
rpl_ndb_ddl : BUG#28798 2007-05-31 lars Valgrind failure in NDB
rpl_ndb_ctype_ucs2_def : BUG#27404 util thd mysql_parse sig11 when mysqld default multibyte charset rpl_ndb_ctype_ucs2_def : BUG#27404 util thd mysql_parse sig11 when mysqld default multibyte charset
rpl_ndb_extraColMaster : BUG#30854 : Tables name show as binary in slave err msg on vm-win2003-64-b and Solaris rpl_ndb_extraColMaster : BUG#30854 : Tables name show as binary in slave err msg on vm-win2003-64-b and Solaris
rpl_ndb_mix_innodb : Bug #32720 Test rpl_ndb_mix_innodb fails on SPARC and PowerPC rpl_ndb_mix_innodb : Bug #32720 Test rpl_ndb_mix_innodb fails on SPARC and PowerPC

View file

@ -588,6 +588,24 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
err.code, res)); err.code, res));
if (res == HA_ERR_FOUND_DUPP_KEY) if (res == HA_ERR_FOUND_DUPP_KEY)
{ {
char *error_data= err.details;
uint dupkey= MAX_KEY;
for (uint i= 0; i < MAX_KEY; i++)
{
if (m_index[i].type == UNIQUE_INDEX ||
m_index[i].type == UNIQUE_ORDERED_INDEX)
{
const NDBINDEX *unique_index=
(const NDBINDEX *) m_index[i].unique_index;
if (unique_index &&
(char *) unique_index->getObjectId() == error_data)
{
dupkey= i;
break;
}
}
}
if (m_rows_to_insert == 1) if (m_rows_to_insert == 1)
{ {
/* /*
@ -595,7 +613,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
violations here, so we need to return MAX_KEY for non-primary violations here, so we need to return MAX_KEY for non-primary
to signal that key is unknown to signal that key is unknown
*/ */
m_dupkey= err.code == 630 ? table_share->primary_key : MAX_KEY; m_dupkey= err.code == 630 ? table_share->primary_key : dupkey;
} }
else else
{ {
@ -618,7 +636,7 @@ bool ha_ndbcluster::get_error_message(int error,
DBUG_ENTER("ha_ndbcluster::get_error_message"); DBUG_ENTER("ha_ndbcluster::get_error_message");
DBUG_PRINT("enter", ("error: %d", error)); DBUG_PRINT("enter", ("error: %d", error));
Ndb *ndb= get_ndb(); Ndb *ndb= check_ndb_in_thd(current_thd);
if (!ndb) if (!ndb)
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
@ -2704,6 +2722,29 @@ int ha_ndbcluster::full_table_scan(uchar *buf)
DBUG_RETURN(next_result(buf)); DBUG_RETURN(next_result(buf));
} }
int
ha_ndbcluster::set_auto_inc(Field *field)
{
DBUG_ENTER("ha_ndbcluster::set_auto_inc");
Ndb *ndb= get_ndb();
bool read_bit= bitmap_is_set(table->read_set, field->field_index);
bitmap_set_bit(table->read_set, field->field_index);
Uint64 next_val= (Uint64) field->val_int() + 1;
if (!read_bit)
bitmap_clear_bit(table->read_set, field->field_index);
#ifndef DBUG_OFF
char buff[22];
DBUG_PRINT("info",
("Trying to set next auto increment value to %s",
llstr(next_val, buff)));
#endif
Ndb_tuple_id_range_guard g(m_share);
if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
== -1)
ERR_RETURN(ndb->getNdbError());
DBUG_RETURN(0);
}
/* /*
Insert one record into NDB Insert one record into NDB
*/ */
@ -2910,18 +2951,11 @@ int ha_ndbcluster::write_row(uchar *record)
} }
if ((has_auto_increment) && (m_skip_auto_increment)) if ((has_auto_increment) && (m_skip_auto_increment))
{ {
Ndb *ndb= get_ndb(); int ret_val;
Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; if ((ret_val= set_auto_inc(table->next_number_field)))
#ifndef DBUG_OFF {
char buff[22]; DBUG_RETURN(ret_val);
DBUG_PRINT("info", }
("Trying to set next auto increment value to %s",
llstr(next_val, buff)));
#endif
Ndb_tuple_id_range_guard g(m_share);
if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
== -1)
ERR_RETURN(ndb->getNdbError());
} }
m_skip_auto_increment= TRUE; m_skip_auto_increment= TRUE;
@ -3046,6 +3080,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
// Insert new row // Insert new row
DBUG_PRINT("info", ("delete succeded")); DBUG_PRINT("info", ("delete succeded"));
m_primary_key_update= TRUE; m_primary_key_update= TRUE;
/*
If we are updating a primary key with auto_increment
then we need to update the auto_increment counter
*/
if (table->found_next_number_field &&
bitmap_is_set(table->write_set,
table->found_next_number_field->field_index) &&
(error= set_auto_inc(table->found_next_number_field)))
{
DBUG_RETURN(error);
}
insert_res= write_row(new_data); insert_res= write_row(new_data);
m_primary_key_update= FALSE; m_primary_key_update= FALSE;
if (insert_res) if (insert_res)
@ -3068,7 +3113,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
DBUG_PRINT("info", ("delete+insert succeeded")); DBUG_PRINT("info", ("delete+insert succeeded"));
DBUG_RETURN(0); DBUG_RETURN(0);
} }
/*
If we are updating a unique key with auto_increment
then we need to update the auto_increment counter
*/
if (table->found_next_number_field &&
bitmap_is_set(table->write_set,
table->found_next_number_field->field_index) &&
(error= set_auto_inc(table->found_next_number_field)))
{
DBUG_RETURN(error);
}
if (cursor) if (cursor)
{ {
/* /*
@ -4478,9 +4533,11 @@ int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb)
// store thread specific data first to set the right context // store thread specific data first to set the right context
m_force_send= thd->variables.ndb_force_send; m_force_send= thd->variables.ndb_force_send;
m_ha_not_exact_count= !thd->variables.ndb_use_exact_count; m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
m_autoincrement_prefetch= m_autoincrement_prefetch=
(ha_rows) thd->variables.ndb_autoincrement_prefetch_sz; (thd->variables.ndb_autoincrement_prefetch_sz >
NDB_DEFAULT_AUTO_PREFETCH) ?
(ha_rows) thd->variables.ndb_autoincrement_prefetch_sz
: (ha_rows) NDB_DEFAULT_AUTO_PREFETCH;
m_active_trans= thd_ndb->trans; m_active_trans= thd_ndb->trans;
DBUG_ASSERT(m_active_trans); DBUG_ASSERT(m_active_trans);
// Start of transaction // Start of transaction
@ -6163,8 +6220,9 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong *first_value, ulonglong *first_value,
ulonglong *nb_reserved_values) ulonglong *nb_reserved_values)
{ {
int cache_size; uint cache_size;
Uint64 auto_value; Uint64 auto_value;
THD *thd= current_thd;
DBUG_ENTER("get_auto_increment"); DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
Ndb *ndb= get_ndb(); Ndb *ndb= get_ndb();
@ -6174,11 +6232,14 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
/* We guessed too low */ /* We guessed too low */
m_rows_to_insert+= m_autoincrement_prefetch; m_rows_to_insert+= m_autoincrement_prefetch;
} }
cache_size= uint remaining= m_rows_to_insert - m_rows_inserted;
(int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? uint min_prefetch=
m_rows_to_insert - m_rows_inserted : (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ?
((m_rows_to_insert > m_autoincrement_prefetch) ? thd->variables.ndb_autoincrement_prefetch_sz
m_rows_to_insert : m_autoincrement_prefetch)); : remaining;
cache_size= ((remaining < m_autoincrement_prefetch) ?
min_prefetch
: remaining);
uint retries= NDB_AUTO_INCREMENT_RETRIES; uint retries= NDB_AUTO_INCREMENT_RETRIES;
int retry_sleep= 30; /* 30 milliseconds, transaction */ int retry_sleep= 30; /* 30 milliseconds, transaction */
for (;;) for (;;)
@ -6265,7 +6326,7 @@ ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg):
m_dupkey((uint) -1), m_dupkey((uint) -1),
m_ha_not_exact_count(FALSE), m_ha_not_exact_count(FALSE),
m_force_send(TRUE), m_force_send(TRUE),
m_autoincrement_prefetch((ha_rows) 32), m_autoincrement_prefetch((ha_rows) NDB_DEFAULT_AUTO_PREFETCH),
m_transaction_on(TRUE), m_transaction_on(TRUE),
m_cond(NULL), m_cond(NULL),
m_multi_cursor(NULL) m_multi_cursor(NULL)

View file

@ -31,6 +31,8 @@
#include <ndbapi_limits.h> #include <ndbapi_limits.h>
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
#define NDB_DEFAULT_AUTO_PREFETCH 32
class Ndb; // Forward declaration class Ndb; // Forward declaration
class NdbOperation; // Forward declaration class NdbOperation; // Forward declaration
@ -446,6 +448,7 @@ private:
uint errcode); uint errcode);
int peek_indexed_rows(const uchar *record, NDB_WRITE_OP write_op); int peek_indexed_rows(const uchar *record, NDB_WRITE_OP write_op);
int fetch_next(NdbScanOperation* op); int fetch_next(NdbScanOperation* op);
int set_auto_inc(Field *field);
int next_result(uchar *buf); int next_result(uchar *buf);
int define_read_attrs(uchar* buf, NdbOperation* op); int define_read_attrs(uchar* buf, NdbOperation* op);
int filtered_scan(const uchar *key, uint key_len, int filtered_scan(const uchar *key, uint key_len,

View file

@ -241,18 +241,22 @@ static void dbug_print_table(const char *info, TABLE *table)
static void run_query(THD *thd, char *buf, char *end, static void run_query(THD *thd, char *buf, char *end,
const int *no_print_error, my_bool disable_binlog) const int *no_print_error, my_bool disable_binlog)
{ {
ulong save_query_length= thd->query_length; ulong save_thd_query_length= thd->query_length;
char *save_query= thd->query; char *save_thd_query= thd->query;
ulong save_thread_id= thd->variables.pseudo_thread_id; ulong save_thread_id= thd->variables.pseudo_thread_id;
struct system_status_var save_thd_status_var= thd->status_var;
THD_TRANS save_thd_transaction_all= thd->transaction.all;
THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt;
ulonglong save_thd_options= thd->options; ulonglong save_thd_options= thd->options;
DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options)); DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options));
NET save_net= thd->net; NET save_thd_net= thd->net;
const char* found_semicolon= NULL; const char* found_semicolon= NULL;
bzero((char*) &thd->net, sizeof(NET)); bzero((char*) &thd->net, sizeof(NET));
thd->query_length= end - buf; thd->query_length= end - buf;
thd->query= buf; thd->query= buf;
thd->variables.pseudo_thread_id= thread_id; thd->variables.pseudo_thread_id= thread_id;
thd->transaction.stmt.modified_non_trans_table= FALSE;
if (disable_binlog) if (disable_binlog)
thd->options&= ~OPTION_BIN_LOG; thd->options&= ~OPTION_BIN_LOG;
@ -275,10 +279,13 @@ static void run_query(THD *thd, char *buf, char *end,
} }
thd->options= save_thd_options; thd->options= save_thd_options;
thd->query_length= save_query_length; thd->query_length= save_thd_query_length;
thd->query= save_query; thd->query= save_thd_query;
thd->variables.pseudo_thread_id= save_thread_id; thd->variables.pseudo_thread_id= save_thread_id;
thd->net= save_net; thd->status_var= save_thd_status_var;
thd->transaction.all= save_thd_transaction_all;
thd->transaction.stmt= save_thd_transaction_stmt;
thd->net= save_thd_net;
if (thd == injector_thd) if (thd == injector_thd)
{ {
@ -777,8 +784,9 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd)
" end_pos BIGINT UNSIGNED NOT NULL, " " end_pos BIGINT UNSIGNED NOT NULL, "
" PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB"); " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");
const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
701, 701,
702,
4009, 4009,
0}; // do not print error 701 etc 0}; // do not print error 701 etc
run_query(thd, buf, end, no_print_error, TRUE); run_query(thd, buf, end, no_print_error, TRUE);
@ -837,8 +845,9 @@ static int ndbcluster_create_schema_table(THD *thd)
" type INT UNSIGNED NOT NULL," " type INT UNSIGNED NOT NULL,"
" PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB"); " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB");
const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
701, 701,
702,
4009, 4009,
0}; // do not print error 701 etc 0}; // do not print error 701 etc
run_query(thd, buf, end, no_print_error, TRUE); run_query(thd, buf, end, no_print_error, TRUE);
@ -3587,6 +3596,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Thd_ndb *thd_ndb=0; Thd_ndb *thd_ndb=0;
int ndb_update_ndb_binlog_index= 1; int ndb_update_ndb_binlog_index= 1;
injector *inj= injector::instance(); injector *inj= injector::instance();
uint incident_id= 0;
#ifdef RUN_NDB_BINLOG_TIMER #ifdef RUN_NDB_BINLOG_TIMER
Timer main_timer; Timer main_timer;
@ -3693,18 +3703,64 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
pthread_mutex_unlock(&injector_mutex); pthread_mutex_unlock(&injector_mutex);
pthread_cond_signal(&injector_cond); pthread_cond_signal(&injector_cond);
/*
wait for mysql server to start (so that the binlog is started
and thus can receive the first GAP event)
*/
pthread_mutex_lock(&LOCK_server_started);
while (!mysqld_server_started)
{
struct timespec abstime;
set_timespec(abstime, 1);
pthread_cond_timedwait(&COND_server_started, &LOCK_server_started,
&abstime);
if (ndbcluster_terminating)
{
pthread_mutex_unlock(&LOCK_server_started);
pthread_mutex_lock(&LOCK_ndb_util_thread);
goto err;
}
}
pthread_mutex_unlock(&LOCK_server_started);
restart: restart:
/* /*
Main NDB Injector loop Main NDB Injector loop
*/ */
while (ndb_binlog_running)
{ {
/* /*
Always insert a GAP event as we cannot know what has happened in the cluster check if it is the first log, if so we do not insert a GAP event
while not being connected. as there is really no log to have a GAP in
*/ */
LEX_STRING const msg= { C_STRING_WITH_LEN("Cluster connect") }; if (incident_id == 0)
inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg); {
LOG_INFO log_info;
mysql_bin_log.get_current_log(&log_info);
int len= strlen(log_info.log_file_name);
uint no= 0;
if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
no == 1)
{
/* this is the fist log, so skip GAP event */
break;
}
}
/*
Always insert a GAP event as we cannot know what has happened
in the cluster while not being connected.
*/
LEX_STRING const msg[2]=
{
{ C_STRING_WITH_LEN("mysqld startup") },
{ C_STRING_WITH_LEN("cluster disconnect")}
};
IF_DBUG(int error=)
inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]);
DBUG_ASSERT(!error);
break;
} }
incident_id= 1;
{ {
thd->proc_info= "Waiting for ndbcluster to start"; thd->proc_info= "Waiting for ndbcluster to start";

View file

@ -5534,13 +5534,8 @@ master-ssl",
{"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, {"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
"Specify number of autoincrement values that are prefetched.", "Specify number of autoincrement values that are prefetched.",
(uchar**) &global_system_variables.ndb_autoincrement_prefetch_sz, (uchar**) &global_system_variables.ndb_autoincrement_prefetch_sz,
(uchar**) &global_system_variables.ndb_autoincrement_prefetch_sz, (uchar**) &max_system_variables.ndb_autoincrement_prefetch_sz,
0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0}, 0, GET_ULONG, REQUIRED_ARG, 1, 1, 256, 0, 0, 0},
{"ndb-distribution", OPT_NDB_DISTRIBUTION,
"Default distribution for new tables in ndb",
(uchar**) &opt_ndb_distribution,
(uchar**) &opt_ndb_distribution,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"ndb-force-send", OPT_NDB_FORCE_SEND, {"ndb-force-send", OPT_NDB_FORCE_SEND,
"Force send of buffers to ndb immediately without waiting for " "Force send of buffers to ndb immediately without waiting for "
"other threads.", "other threads.",

View file

@ -40,12 +40,13 @@ class TcKeyRef {
friend bool printTCKEYREF(FILE *, const Uint32 *, Uint32, Uint16); friend bool printTCKEYREF(FILE *, const Uint32 *, Uint32, Uint16);
public: public:
STATIC_CONST( SignalLength = 4 ); STATIC_CONST( SignalLength = 5 );
private: private:
Uint32 connectPtr; Uint32 connectPtr;
Uint32 transId[2]; Uint32 transId[2];
Uint32 errorCode; Uint32 errorCode;
Uint32 errorData;
}; };
#endif #endif

View file

@ -38,12 +38,13 @@ class TcRollbackRep {
friend bool printTCROLBACKREP(FILE *, const Uint32 *, Uint32, Uint16); friend bool printTCROLBACKREP(FILE *, const Uint32 *, Uint32, Uint16);
public: public:
STATIC_CONST( SignalLength = 4 ); STATIC_CONST( SignalLength = 5 );
private: private:
Uint32 connectPtr; Uint32 connectPtr;
Uint32 transId[2]; Uint32 transId[2];
Uint32 returnCode; Uint32 returnCode;
Uint32 errorData;
}; };
#endif #endif

View file

@ -1020,7 +1020,7 @@ public:
* Get the name of the table being indexed * Get the name of the table being indexed
*/ */
const char * getTable() const; const char * getTable() const;
/** /**
* Get the number of columns in the index * Get the number of columns in the index
*/ */

View file

@ -70,7 +70,7 @@ Transporter::Transporter(TransporterRegistry &t_reg,
signalIdUsed = _signalId; signalIdUsed = _signalId;
m_connected = false; m_connected = false;
m_timeOutMillis = 1000; m_timeOutMillis = 30000;
m_connect_address.s_addr= 0; m_connect_address.s_addr= 0;
if(s_port<0) if(s_port<0)
@ -101,7 +101,7 @@ Transporter::connect_server(NDB_SOCKET_TYPE sockfd) {
if(m_connected) if(m_connected)
{ {
DBUG_RETURN(true); // TODO assert(0); DBUG_RETURN(false); // TODO assert(0);
} }
{ {

View file

@ -758,7 +758,8 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis)
TCP_Transporter * t = theTCPTransporters[i]; TCP_Transporter * t = theTCPTransporters[i];
// If the transporter is connected // If the transporter is connected
if (t->isConnected()) { NodeId nodeId = t->getRemoteNodeId();
if (is_connected(nodeId) && t->isConnected()) {
const NDB_SOCKET_TYPE socket = t->getSocket(); const NDB_SOCKET_TYPE socket = t->getSocket();
// Find the highest socket value. It will be used by select // Find the highest socket value. It will be used by select

View file

@ -5,7 +5,7 @@ Next DBACC 3002
Next DBTUP 4029 Next DBTUP 4029
Next DBLQH 5047 Next DBLQH 5047
Next DBDICT 6008 Next DBDICT 6008
Next DBDIH 7193 Next DBDIH 7195
Next DBTC 8054 Next DBTC 8054
Next CMVMI 9000 Next CMVMI 9000
Next BACKUP 10038 Next BACKUP 10038
@ -81,6 +81,11 @@ Delay GCP_SAVEREQ by 10 secs
7185: Dont reply to COPY_GCI_REQ where reason == GCP 7185: Dont reply to COPY_GCI_REQ where reason == GCP
7193: Dont send LCP_FRAG_ORD to self, and crash when sending first
LCP_FRAG_ORD(last)
7194: Force removeNodeFromStored to complete in the middle of MASTER_LCPCONF
ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING: ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
----------------------------------------------------------------- -----------------------------------------------------------------

View file

@ -1026,8 +1026,9 @@ Backup::execINCL_NODEREQ(Signal* signal)
break; break;
}//if }//if
}//for }//for
signal->theData[0] = reference(); signal->theData[0] = inclNode;
sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB); signal->theData[1] = reference();
sendSignal(senderRef, GSN_INCL_NODECONF, signal, 2, JBB);
} }
/***************************************************************************** /*****************************************************************************

View file

@ -421,9 +421,10 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
// Uint32 noOfNodes = closeCom->noOfNodes; // Uint32 noOfNodes = closeCom->noOfNodes;
jamEntry(); jamEntry();
for (unsigned i = 0; i < MAX_NODES; i++){ for (unsigned i = 0; i < MAX_NODES; i++)
if(NodeBitmask::get(closeCom->theNodes, i)){ {
if(NodeBitmask::get(closeCom->theNodes, i))
{
jam(); jam();
//----------------------------------------------------- //-----------------------------------------------------
@ -437,7 +438,9 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
globalTransporterRegistry.do_disconnect(i); globalTransporterRegistry.do_disconnect(i);
} }
} }
if (failNo != 0) {
if (failNo != 0)
{
jam(); jam();
signal->theData[0] = userRef; signal->theData[0] = userRef;
signal->theData[1] = failNo; signal->theData[1] = failNo;
@ -456,13 +459,21 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
jamEntry(); jamEntry();
const Uint32 len = signal->getLength(); const Uint32 len = signal->getLength();
if(len == 2){ if(len == 2)
{
#ifdef ERROR_INSERT #ifdef ERROR_INSERT
if (! ((ERROR_INSERTED(9000) || ERROR_INSERTED(9002)) if (! ((ERROR_INSERTED(9000) || ERROR_INSERTED(9002))
&& c_error_9000_nodes_mask.get(tStartingNode))) && c_error_9000_nodes_mask.get(tStartingNode)))
#endif #endif
{ {
if (globalData.theStartLevel != NodeState::SL_STARTED &&
(getNodeInfo(tStartingNode).m_type != NodeInfo::DB &&
getNodeInfo(tStartingNode).m_type != NodeInfo::MGM))
{
jam();
goto done;
}
globalTransporterRegistry.do_connect(tStartingNode); globalTransporterRegistry.do_connect(tStartingNode);
globalTransporterRegistry.setIOState(tStartingNode, HaltIO); globalTransporterRegistry.setIOState(tStartingNode, HaltIO);
@ -475,9 +486,11 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
//----------------------------------------------------- //-----------------------------------------------------
} }
} else { } else {
for(unsigned int i = 1; i < MAX_NODES; i++ ) { for(unsigned int i = 1; i < MAX_NODES; i++ )
{
jam(); jam();
if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2){ if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2)
{
jam(); jam();
#ifdef ERROR_INSERT #ifdef ERROR_INSERT
@ -496,6 +509,7 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
} }
} }
done:
if (userRef != 0) { if (userRef != 0) {
jam(); jam();
signal->theData[0] = tStartingNode; signal->theData[0] = tStartingNode;
@ -536,24 +550,10 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
setNodeInfo(hostId).m_connectCount++; setNodeInfo(hostId).m_connectCount++;
const NodeInfo::NodeType type = getNodeInfo(hostId).getType(); const NodeInfo::NodeType type = getNodeInfo(hostId).getType();
ndbrequire(type != NodeInfo::INVALID); ndbrequire(type != NodeInfo::INVALID);
if(type == NodeInfo::DB || globalData.theStartLevel == NodeState::SL_STARTED){
jam();
DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
rep->nodeId = hostId;
rep->err = errNo;
sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
DisconnectRep::SignalLength, JBA);
} else if((globalData.theStartLevel == NodeState::SL_CMVMI ||
globalData.theStartLevel == NodeState::SL_STARTING)
&& type == NodeInfo::MGM) {
/**
* Someone disconnected during cmvmi period
*/
jam();
globalTransporterRegistry.do_connect(hostId);
}
sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
DisconnectRep::SignalLength, JBA);
cancelSubscription(hostId); cancelSubscription(hostId);
signal->theData[0] = NDB_LE_Disconnected; signal->theData[0] = NDB_LE_Disconnected;
@ -587,6 +587,8 @@ void Cmvmi::execCONNECT_REP(Signal *signal){
*/ */
if(type == NodeInfo::MGM){ if(type == NodeInfo::MGM){
jam(); jam();
signal->theData[0] = hostId;
sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
} else { } else {
/** /**
* Dont allow api nodes to connect * Dont allow api nodes to connect
@ -802,6 +804,8 @@ Cmvmi::execSTART_ORD(Signal* signal) {
} }
} }
} }
EXECUTE_DIRECT(QMGR, GSN_START_ORD, signal, 1);
return ; return ;
} }
@ -829,9 +833,6 @@ Cmvmi::execSTART_ORD(Signal* signal) {
* *
* Do Restart * Do Restart
*/ */
globalScheduler.clear();
globalTimeQueue.clear();
// Disconnect all nodes as part of the system restart. // Disconnect all nodes as part of the system restart.
// We need to ensure that we are starting up // We need to ensure that we are starting up

View file

@ -3825,8 +3825,9 @@ void Dbdict::execINCL_NODEREQ(Signal* signal)
c_nodes.getPtr(nodePtr); c_nodes.getPtr(nodePtr);
ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD); ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD);
nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE; nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
signal->theData[0] = reference(); signal->theData[0] = nodePtr.i;
sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB); signal->theData[1] = reference();
sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB);
c_aliveNodes.set(nodePtr.i); c_aliveNodes.set(nodePtr.i);
}//execINCL_NODEREQ() }//execINCL_NODEREQ()

View file

@ -1310,7 +1310,17 @@ private:
LcpStatus lcpStatus; LcpStatus lcpStatus;
Uint32 lcpStatusUpdatedPlace; Uint32 lcpStatusUpdatedPlace;
struct Save {
LcpStatus m_status;
Uint32 m_place;
} m_saveState[10];
void setLcpStatus(LcpStatus status, Uint32 line){ void setLcpStatus(LcpStatus status, Uint32 line){
for (Uint32 i = 9; i > 0; i--)
m_saveState[i] = m_saveState[i-1];
m_saveState[0].m_status = lcpStatus;
m_saveState[0].m_place = lcpStatusUpdatedPlace;
lcpStatus = status; lcpStatus = status;
lcpStatusUpdatedPlace = line; lcpStatusUpdatedPlace = line;
} }

View file

@ -2135,12 +2135,9 @@ void Dbdih::gcpBlockedLab(Signal* signal)
/*---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/
void Dbdih::execINCL_NODECONF(Signal* signal) void Dbdih::execINCL_NODECONF(Signal* signal)
{ {
Uint32 TsendNodeId;
Uint32 TstartNode_or_blockref;
jamEntry(); jamEntry();
TstartNode_or_blockref = signal->theData[0]; Uint32 TstartNode = signal->theData[0];
TsendNodeId = signal->theData[1]; Uint32 TsendNodeId_or_blockref = signal->theData[1];
Uint32 blocklist[6]; Uint32 blocklist[6];
blocklist[0] = clocallqhblockref; blocklist[0] = clocallqhblockref;
@ -2152,9 +2149,21 @@ void Dbdih::execINCL_NODECONF(Signal* signal)
for (Uint32 i = 0; blocklist[i] != 0; i++) for (Uint32 i = 0; blocklist[i] != 0; i++)
{ {
if (TstartNode_or_blockref == blocklist[i]) if (TsendNodeId_or_blockref == blocklist[i])
{ {
jam(); jam();
if (TstartNode != c_nodeStartSlave.nodeId)
{
jam();
warningEvent("Recevied INCL_NODECONF for %u from %s"
" while %u is starting",
TstartNode,
getBlockName(refToBlock(TsendNodeId_or_blockref)),
c_nodeStartSlave.nodeId);
return;
}
if (getNodeStatus(c_nodeStartSlave.nodeId) == NodeRecord::ALIVE && if (getNodeStatus(c_nodeStartSlave.nodeId) == NodeRecord::ALIVE &&
blocklist[i+1] != 0) blocklist[i+1] != 0)
{ {
@ -2182,10 +2191,21 @@ void Dbdih::execINCL_NODECONF(Signal* signal)
} }
} }
} }
if (c_nodeStartMaster.startNode != TstartNode)
{
jam();
warningEvent("Recevied INCL_NODECONF for %u from %u"
" while %u is starting",
TstartNode,
TsendNodeId_or_blockref,
c_nodeStartMaster.startNode);
return;
}
ndbrequire(cmasterdihref = reference()); ndbrequire(cmasterdihref = reference());
receiveLoopMacro(INCL_NODEREQ, TsendNodeId); receiveLoopMacro(INCL_NODEREQ, TsendNodeId_or_blockref);
CRASH_INSERTION(7128); CRASH_INSERTION(7128);
/*-------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------*/
// Now that we have included the starting node in the node lists in the // Now that we have included the starting node in the node lists in the
@ -5181,11 +5201,19 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
} }
jam(); jam();
signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
signal->theData[1] = failedNodePtr.i; if (!ERROR_INSERTED(7194))
signal->theData[2] = 0; // Tab id {
sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
signal->theData[1] = failedNodePtr.i;
signal->theData[2] = 0; // Tab id
sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
}
else
{
ndbout_c("7194 Not starting ZREMOVE_NODE_FROM_TABLE");
}
setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE); setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
}//Dbdih::startRemoveFailedNode() }//Dbdih::startRemoveFailedNode()
@ -6114,12 +6142,22 @@ Dbdih::checkEmptyLcpComplete(Signal *signal){
signal->theData[0] = 7012; signal->theData[0] = 7012;
execDUMP_STATE_ORD(signal); execDUMP_STATE_ORD(signal);
if (ERROR_INSERTED(7194))
{
ndbout_c("7194 starting ZREMOVE_NODE_FROM_TABLE");
signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
signal->theData[1] = c_lcpMasterTakeOverState.failedNodeId;
signal->theData[2] = 0; // Tab id
sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
}
c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__); c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0]; MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
req->masterRef = reference(); req->masterRef = reference();
req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId; req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ); sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);
} else { } else {
sendMASTER_LCPCONF(signal); sendMASTER_LCPCONF(signal);
} }
@ -6141,7 +6179,7 @@ void Dbdih::execMASTER_LCPREQ(Signal* signal)
jam(); jam();
ndbout_c("resending GSN_MASTER_LCPREQ"); ndbout_c("resending GSN_MASTER_LCPREQ");
sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal, sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal,
signal->getLength(), 50); 50, signal->getLength());
return; return;
} }
Uint32 failedNodeId = req->failedNodeId; Uint32 failedNodeId = req->failedNodeId;
@ -6432,6 +6470,15 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
{ {
const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0]; const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
jamEntry(); jamEntry();
if (ERROR_INSERTED(7194))
{
ndbout_c("delaying MASTER_LCPCONF due to error 7194");
sendSignalWithDelay(reference(), GSN_MASTER_LCPCONF, signal,
300, signal->getLength());
return;
}
Uint32 senderNodeId = conf->senderNodeId; Uint32 senderNodeId = conf->senderNodeId;
MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState; MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
const Uint32 failedNodeId = conf->failedNodeId; const Uint32 failedNodeId = conf->failedNodeId;
@ -6566,7 +6613,6 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
#endif #endif
c_lcpState.keepGci = SYSFILE->keepGCI; c_lcpState.keepGci = SYSFILE->keepGCI;
c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
startLcpRoundLoopLab(signal, 0, 0); startLcpRoundLoopLab(signal, 0, 0);
break; break;
} }
@ -10538,6 +10584,8 @@ void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
if(ERROR_INSERTED(7075)){ if(ERROR_INSERTED(7075)){
continue; continue;
} }
CRASH_INSERTION(7193);
BlockReference ref = calcLqhBlockRef(nodePtr.i); BlockReference ref = calcLqhBlockRef(nodePtr.i);
sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB); sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
} }
@ -10650,6 +10698,12 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
Uint32 started = lcpReport->maxGciStarted; Uint32 started = lcpReport->maxGciStarted;
Uint32 completed = lcpReport->maxGciCompleted; Uint32 completed = lcpReport->maxGciCompleted;
if (started > c_lcpState.lcpStopGcp)
{
jam();
c_lcpState.lcpStopGcp = started;
}
if(tableDone){ if(tableDone){
jam(); jam();
@ -10765,6 +10819,13 @@ Dbdih::checkLcpAllTablesDoneInLqh(){
CRASH_INSERTION2(7017, !isMaster()); CRASH_INSERTION2(7017, !isMaster());
c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__); c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);
if (ERROR_INSERTED(7194))
{
ndbout_c("CLEARING 7194");
CLEAR_ERROR_INSERT_VALUE;
}
return true; return true;
} }
@ -10954,6 +11015,11 @@ Dbdih::sendLCP_FRAG_ORD(Signal* signal,
BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode); BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);
if (ERROR_INSERTED(7193) && replicaPtr.p->procNode == getOwnNodeId())
{
return;
}
LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0]; LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
lcpFragOrd->tableId = info.tableId; lcpFragOrd->tableId = info.tableId;
lcpFragOrd->fragmentId = info.fragId; lcpFragOrd->fragmentId = info.fragId;
@ -11178,7 +11244,12 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal)
signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type
signal->theData[1] = SYSFILE->latestLCP_ID; signal->theData[1] = SYSFILE->latestLCP_ID;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
c_lcpState.lcpStopGcp = c_newest_restorable_gci;
if (c_newest_restorable_gci > c_lcpState.lcpStopGcp)
{
jam();
c_lcpState.lcpStopGcp = c_newest_restorable_gci;
}
/** /**
* Start checking for next LCP * Start checking for next LCP
@ -12048,13 +12119,12 @@ void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
lcpNo = fmgReplicaPtr.p->nextLcp; lcpNo = fmgReplicaPtr.p->nextLcp;
do { do {
ndbrequire(lcpNo < MAX_LCP_STORED); ndbrequire(lcpNo < MAX_LCP_STORED);
if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID && if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID)
fmgReplicaPtr.p->maxGciStarted[lcpNo] < c_newest_restorable_gci)
{ {
jam(); jam();
keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo]; keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];
oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo]; oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo];
ndbrequire(((int)oldestRestorableGci) >= 0); ndbassert(fmgReplicaPtr.p->maxGciStarted[lcpNo] <c_newest_restorable_gci);
return; return;
} else { } else {
jam(); jam();
@ -12956,6 +13026,7 @@ void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr)
void Dbdih::nodeResetStart() void Dbdih::nodeResetStart()
{ {
jam(); jam();
c_nodeStartSlave.nodeId = 0;
c_nodeStartMaster.startNode = RNIL; c_nodeStartMaster.startNode = RNIL;
c_nodeStartMaster.failNr = cfailurenr; c_nodeStartMaster.failNr = cfailurenr;
c_nodeStartMaster.activeState = false; c_nodeStartMaster.activeState = false;
@ -14500,6 +14571,14 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
("immediateLcpStart = %d masterLcpNodeId = %d", ("immediateLcpStart = %d masterLcpNodeId = %d",
c_lcpState.immediateLcpStart, c_lcpState.immediateLcpStart,
refToNode(c_lcpState.m_masterLcpDihRef)); refToNode(c_lcpState.m_masterLcpDihRef));
for (Uint32 i = 0; i<10; i++)
{
infoEvent("%u : status: %u place: %u", i,
c_lcpState.m_saveState[i].m_status,
c_lcpState.m_saveState[i].m_place);
}
infoEvent("-- Node %d LCP STATE --", getOwnNodeId()); infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
} }

View file

@ -585,7 +585,6 @@ public:
enum ExecSrStatus { enum ExecSrStatus {
IDLE = 0, IDLE = 0,
ACTIVE_REMOVE_AFTER = 1,
ACTIVE = 2 ACTIVE = 2
}; };
/** /**
@ -869,11 +868,6 @@ public:
* heard of. * heard of.
*/ */
Uint8 fragDistributionKey; Uint8 fragDistributionKey;
/**
* The identity of the next local checkpoint this fragment
* should perform.
*/
Uint8 nextLcp;
/** /**
* How many local checkpoints does the fragment contain * How many local checkpoints does the fragment contain
*/ */
@ -2097,10 +2091,6 @@ private:
void execEXEC_SRCONF(Signal* signal); void execEXEC_SRCONF(Signal* signal);
void execREAD_PSEUDO_REQ(Signal* signal); void execREAD_PSEUDO_REQ(Signal* signal);
void build_acc(Signal*, Uint32 fragPtrI);
void execBUILDINDXREF(Signal*signal);
void execBUILDINDXCONF(Signal*signal);
void execDUMP_STATE_ORD(Signal* signal); void execDUMP_STATE_ORD(Signal* signal);
void execACC_ABORTCONF(Signal* signal); void execACC_ABORTCONF(Signal* signal);
void execNODE_FAILREP(Signal* signal); void execNODE_FAILREP(Signal* signal);
@ -2780,7 +2770,13 @@ private:
/*THIS VARIABLE KEEPS TRACK OF HOW MANY FRAGMENTS THAT PARTICIPATE IN */ /*THIS VARIABLE KEEPS TRACK OF HOW MANY FRAGMENTS THAT PARTICIPATE IN */
/*EXECUTING THE LOG. IF ZERO WE DON'T NEED TO EXECUTE THE LOG AT ALL. */ /*EXECUTING THE LOG. IF ZERO WE DON'T NEED TO EXECUTE THE LOG AT ALL. */
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */
UintR cnoFragmentsExecSr; Uint32 cnoFragmentsExecSr;
/**
* This is no of sent GSN_EXEC_FRAGREQ during this log phase
*/
Uint32 cnoOutstandingExecFragReq;
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */
/*THIS VARIABLE KEEPS TRACK OF WHICH OF THE FIRST TWO RESTART PHASES THAT */ /*THIS VARIABLE KEEPS TRACK OF WHICH OF THE FIRST TWO RESTART PHASES THAT */
/*HAVE COMPLETED. */ /*HAVE COMPLETED. */
@ -2801,7 +2797,6 @@ private:
DLFifoList<Fragrecord> c_lcp_waiting_fragments; // StartFragReq'ed DLFifoList<Fragrecord> c_lcp_waiting_fragments; // StartFragReq'ed
DLFifoList<Fragrecord> c_lcp_restoring_fragments; // Restoring as we speek DLFifoList<Fragrecord> c_lcp_restoring_fragments; // Restoring as we speek
DLFifoList<Fragrecord> c_lcp_complete_fragments; // Restored DLFifoList<Fragrecord> c_lcp_complete_fragments; // Restored
DLFifoList<Fragrecord> c_redo_complete_fragments; // Redo'ed
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */
/*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */ /*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */

View file

@ -168,7 +168,6 @@ Dblqh::Dblqh(Block_context& ctx):
c_lcp_waiting_fragments(c_fragment_pool), c_lcp_waiting_fragments(c_fragment_pool),
c_lcp_restoring_fragments(c_fragment_pool), c_lcp_restoring_fragments(c_fragment_pool),
c_lcp_complete_fragments(c_fragment_pool), c_lcp_complete_fragments(c_fragment_pool),
c_redo_complete_fragments(c_fragment_pool),
m_commitAckMarkerHash(m_commitAckMarkerPool), m_commitAckMarkerHash(m_commitAckMarkerPool),
c_scanTakeOverHash(c_scanRecordPool) c_scanTakeOverHash(c_scanRecordPool)
{ {
@ -295,9 +294,6 @@ Dblqh::Dblqh(Block_context& ctx):
addRecSignal(GSN_READ_PSEUDO_REQ, &Dblqh::execREAD_PSEUDO_REQ); addRecSignal(GSN_READ_PSEUDO_REQ, &Dblqh::execREAD_PSEUDO_REQ);
addRecSignal(GSN_BUILDINDXREF, &Dblqh::execBUILDINDXREF);
addRecSignal(GSN_BUILDINDXCONF, &Dblqh::execBUILDINDXCONF);
addRecSignal(GSN_DEFINE_BACKUP_REF, &Dblqh::execDEFINE_BACKUP_REF); addRecSignal(GSN_DEFINE_BACKUP_REF, &Dblqh::execDEFINE_BACKUP_REF);
addRecSignal(GSN_DEFINE_BACKUP_CONF, &Dblqh::execDEFINE_BACKUP_CONF); addRecSignal(GSN_DEFINE_BACKUP_CONF, &Dblqh::execDEFINE_BACKUP_CONF);

View file

@ -356,7 +356,6 @@ void Dblqh::execCONTINUEB(Signal* signal)
break; break;
case ZSR_PHASE3_START: case ZSR_PHASE3_START:
jam(); jam();
signal->theData[0] = data0;
srPhase3Start(signal); srPhase3Start(signal);
return; return;
break; break;
@ -428,25 +427,25 @@ void Dblqh::execCONTINUEB(Signal* signal)
if (fragptr.i != RNIL) if (fragptr.i != RNIL)
{ {
jam(); jam();
c_redo_complete_fragments.getPtr(fragptr); c_lcp_complete_fragments.getPtr(fragptr);
signal->theData[0] = fragptr.p->tabRef; signal->theData[0] = fragptr.p->tabRef;
signal->theData[1] = fragptr.p->fragId; signal->theData[1] = fragptr.p->fragId;
sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
Ptr<Fragrecord> save = fragptr; Ptr<Fragrecord> save = fragptr;
c_redo_complete_fragments.next(fragptr); c_lcp_complete_fragments.next(fragptr);
signal->theData[0] = ZENABLE_EXPAND_CHECK; signal->theData[0] = ZENABLE_EXPAND_CHECK;
signal->theData[1] = fragptr.i; signal->theData[1] = fragptr.i;
sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB);
c_redo_complete_fragments.remove(save); c_lcp_complete_fragments.remove(save);
return; return;
} }
else else
{ {
jam(); jam();
cstartRecReq = 2; cstartRecReq = 2;
ndbrequire(c_redo_complete_fragments.isEmpty()); ndbrequire(c_lcp_complete_fragments.isEmpty());
StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
conf->startingNodeId = getOwnNodeId(); conf->startingNodeId = getOwnNodeId();
sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
@ -495,8 +494,9 @@ void Dblqh::execINCL_NODEREQ(Signal* signal)
cnodeStatus[i] = ZNODE_UP; cnodeStatus[i] = ZNODE_UP;
}//if }//if
}//for }//for
signal->theData[0] = cownref; signal->theData[0] = nodeId;
sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB); signal->theData[1] = cownref;
sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB);
return; return;
}//Dblqh::execINCL_NODEREQ() }//Dblqh::execINCL_NODEREQ()
@ -1121,7 +1121,6 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
Uint32 minRowsHigh = req->minRowsHigh; Uint32 minRowsHigh = req->minRowsHigh;
Uint32 tschemaVersion = req->schemaVersion; Uint32 tschemaVersion = req->schemaVersion;
Uint32 ttupKeyLength = req->keyLength; Uint32 ttupKeyLength = req->keyLength;
Uint32 nextLcp = req->nextLCP;
Uint32 noOfKeyAttr = req->noOfKeyAttr; Uint32 noOfKeyAttr = req->noOfKeyAttr;
Uint32 noOfCharsets = req->noOfCharsets; Uint32 noOfCharsets = req->noOfCharsets;
Uint32 checksumIndicator = req->checksumIndicator; Uint32 checksumIndicator = req->checksumIndicator;
@ -1214,7 +1213,6 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
fragptr.p->lcpFlag = Fragrecord::LCP_STATE_FALSE; fragptr.p->lcpFlag = Fragrecord::LCP_STATE_FALSE;
}//if }//if
fragptr.p->nextLcp = nextLcp;
//---------------------------------------------- //----------------------------------------------
// For node restarts it is not necessarily zero // For node restarts it is not necessarily zero
//---------------------------------------------- //----------------------------------------------
@ -8939,6 +8937,9 @@ void Dblqh::storedProcConfScanLab(Signal* signal)
case Fragrecord::REMOVING: case Fragrecord::REMOVING:
jam(); jam();
default: default:
jamLine(fragptr.p->fragStatus);
ndbout_c("fragptr.p->fragStatus: %u",
fragptr.p->fragStatus);
ndbrequire(false); ndbrequire(false);
break; break;
}//switch }//switch
@ -14141,15 +14142,12 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal)
if (lcpNo == (MAX_LCP_STORED - 1)) { if (lcpNo == (MAX_LCP_STORED - 1)) {
jam(); jam();
fragptr.p->lcpId[lcpNo] = lcpId; fragptr.p->lcpId[lcpNo] = lcpId;
fragptr.p->nextLcp = 0;
} else if (lcpNo < (MAX_LCP_STORED - 1)) { } else if (lcpNo < (MAX_LCP_STORED - 1)) {
jam(); jam();
fragptr.p->lcpId[lcpNo] = lcpId; fragptr.p->lcpId[lcpNo] = lcpId;
fragptr.p->nextLcp = lcpNo + 1;
} else { } else {
ndbrequire(lcpNo == ZNIL); ndbrequire(lcpNo == ZNIL);
jam(); jam();
fragptr.p->nextLcp = 0;
}//if }//if
fragptr.p->srNoLognodes = noOfLogNodes; fragptr.p->srNoLognodes = noOfLogNodes;
fragptr.p->logFlag = Fragrecord::STATE_FALSE; fragptr.p->logFlag = Fragrecord::STATE_FALSE;
@ -14181,19 +14179,9 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal)
*/ */
c_lcp_complete_fragments.add(fragptr); c_lcp_complete_fragments.add(fragptr);
if(lcpNo == ZNIL) signal->theData[0] = tabptr.i;
{ signal->theData[1] = fragId;
signal->theData[0] = tabptr.i; sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
signal->theData[1] = fragId;
sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
}
if (getNodeState().getNodeRestartInProgress())
{
jam();
fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
}
c_tup->disk_restart_lcp_id(tabptr.i, fragId, RNIL); c_tup->disk_restart_lcp_id(tabptr.i, fragId, RNIL);
jamEntry(); jamEntry();
return; return;
@ -14395,65 +14383,9 @@ void Dblqh::execSTART_RECCONF(Signal* signal)
return; return;
} }
c_lcp_complete_fragments.first(fragptr);
build_acc(signal, fragptr.i);
return;
}//Dblqh::execSTART_RECCONF()
void
Dblqh::build_acc(Signal* signal, Uint32 fragPtrI)
{
fragptr.i = fragPtrI;
while(fragptr.i != RNIL)
{
c_lcp_complete_fragments.getPtr(fragptr);
tabptr.i = fragptr.p->tabRef;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
if(true || fragptr.i != tabptr.p->fragrec[0])
{
// Only need to send 1 build per table, TUP will rebuild all
fragptr.i = fragptr.p->nextList;
continue;
}
BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
req->setUserRef(reference());
req->setConnectionPtr(fragptr.i);
req->setRequestType(BuildIndxReq::RT_SYSTEMRESTART);
req->setBuildId(0); // not used
req->setBuildKey(0); // not used
req->setIndexType(RNIL);
req->setIndexId(RNIL);
req->setTableId(tabptr.i);
req->setParallelism(0);
sendSignal(DBTUP_REF, GSN_BUILDINDXREQ, signal,
BuildIndxReq::SignalLength, JBB);
return;
}
startExecSr(signal); startExecSr(signal);
} }
void
Dblqh::execBUILDINDXREF(Signal* signal)
{
ndbrequire(false);
}
void
Dblqh::execBUILDINDXCONF(Signal* signal)
{
BuildIndxConf* conf = (BuildIndxConf*)signal->getDataPtrSend();
Uint32 fragPtrI = conf->getConnectionPtr();
fragptr.i = fragPtrI;
c_fragment_pool.getPtr(fragptr);
infoEvent("LQH: primary key index %u rebuild done", fragptr.p->tabRef);
build_acc(signal, fragptr.p->nextList);
}
/* ***************> */ /* ***************> */
/* START_RECREF > */ /* START_RECREF > */
/* ***************> */ /* ***************> */
@ -14472,9 +14404,9 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal)
fragptr.i = signal->theData[0]; fragptr.i = signal->theData[0];
Uint32 next = RNIL; Uint32 next = RNIL;
if (fragptr.i == RNIL) { if (fragptr.i == RNIL)
{
jam(); jam();
ndbrequire(cnoOfNodes < MAX_NDB_NODES);
/* ---------------------------------------------------------------------- /* ----------------------------------------------------------------------
* NO MORE FRAGMENTS TO START EXECUTING THE LOG ON. * NO MORE FRAGMENTS TO START EXECUTING THE LOG ON.
* SEND EXEC_SRREQ TO ALL LQH TO INDICATE THAT THIS NODE WILL * SEND EXEC_SRREQ TO ALL LQH TO INDICATE THAT THIS NODE WILL
@ -14490,10 +14422,15 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal)
} else { } else {
jam(); jam();
c_lcp_complete_fragments.getPtr(fragptr); c_lcp_complete_fragments.getPtr(fragptr);
if (fragptr.p->srNoLognodes > csrPhasesCompleted) { next = fragptr.p->nextList;
if (fragptr.p->srNoLognodes > csrPhasesCompleted)
{
jam(); jam();
cnoOutstandingExecFragReq++;
Uint32 index = csrPhasesCompleted; Uint32 index = csrPhasesCompleted;
arrGuard(index, 4); arrGuard(index, MAX_LOG_EXEC);
BlockReference ref = calcLqhBlockRef(fragptr.p->srLqhLognode[index]); BlockReference ref = calcLqhBlockRef(fragptr.p->srLqhLognode[index]);
fragptr.p->srStatus = Fragrecord::SS_STARTED; fragptr.p->srStatus = Fragrecord::SS_STARTED;
@ -14512,34 +14449,7 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal)
sendSignal(ref, GSN_EXEC_FRAGREQ, signal, sendSignal(ref, GSN_EXEC_FRAGREQ, signal,
ExecFragReq::SignalLength, JBB); ExecFragReq::SignalLength, JBB);
next = fragptr.p->nextList; }
} else {
jam();
/* --------------------------------------------------------------------
* THIS FRAGMENT IS NOW FINISHED WITH THE SYSTEM RESTART. IT DOES
* NOT NEED TO PARTICIPATE IN ANY MORE PHASES. REMOVE IT FROM THE
* LIST OF COMPLETED FRAGMENTS TO EXECUTE THE LOG ON.
* ALSO SEND START_FRAGCONF TO DIH AND SET THE STATE TO ACTIVE ON THE
* FRAGMENT.
* ------------------------------------------------------------------- */
next = fragptr.p->nextList;
c_lcp_complete_fragments.remove(fragptr);
c_redo_complete_fragments.add(fragptr);
if (!getNodeState().getNodeRestartInProgress())
{
fragptr.p->logFlag = Fragrecord::STATE_TRUE;
fragptr.p->fragStatus = Fragrecord::FSACTIVE;
}
else
{
fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
}
signal->theData[0] = fragptr.p->srUserptr;
signal->theData[1] = cownNodeid;
sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB);
} //if
signal->theData[0] = next; signal->theData[0] = next;
sendSignal(cownref, GSN_START_EXEC_SR, signal, 1, JBB); sendSignal(cownref, GSN_START_EXEC_SR, signal, 1, JBB);
}//if }//if
@ -14560,24 +14470,8 @@ void Dblqh::execEXEC_FRAGREQ(Signal* signal)
tabptr.i = execFragReq->tableId; tabptr.i = execFragReq->tableId;
Uint32 fragId = execFragReq->fragId; Uint32 fragId = execFragReq->fragId;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
if (!getFragmentrec(signal, fragId)) { ndbrequire(getFragmentrec(signal, fragId));
jam();
if (!insertFragrec(signal, fragId)) {
jam();
sendExecFragRefLab(signal);
return;
}//if
initFragrec(signal, tabptr.i, fragId, ZLOG_NODE);
fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
} else {
jam();
if (fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER) {
jam();
fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
} else {
jam();
}//if
}//if
ndbrequire(fragptr.p->execSrNoReplicas < 4); ndbrequire(fragptr.p->execSrNoReplicas < 4);
fragptr.p->execSrBlockref[fragptr.p->execSrNoReplicas] = execFragReq->userRef; fragptr.p->execSrBlockref[fragptr.p->execSrNoReplicas] = execFragReq->userRef;
fragptr.p->execSrUserptr[fragptr.p->execSrNoReplicas] = execFragReq->userPtr; fragptr.p->execSrUserptr[fragptr.p->execSrNoReplicas] = execFragReq->userPtr;
@ -14610,6 +14504,21 @@ void Dblqh::execEXEC_FRAGCONF(Signal* signal)
fragptr.i = signal->theData[0]; fragptr.i = signal->theData[0];
c_fragment_pool.getPtr(fragptr); c_fragment_pool.getPtr(fragptr);
fragptr.p->srStatus = Fragrecord::SS_COMPLETED; fragptr.p->srStatus = Fragrecord::SS_COMPLETED;
ndbrequire(cnoOutstandingExecFragReq);
cnoOutstandingExecFragReq--;
if (fragptr.p->srNoLognodes == csrPhasesCompleted + 1)
{
jam();
fragptr.p->logFlag = Fragrecord::STATE_TRUE;
fragptr.p->fragStatus = Fragrecord::FSACTIVE;
signal->theData[0] = fragptr.p->srUserptr;
signal->theData[1] = cownNodeid;
sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB);
}
return; return;
}//Dblqh::execEXEC_FRAGCONF() }//Dblqh::execEXEC_FRAGCONF()
@ -14633,6 +14542,7 @@ void Dblqh::execEXEC_SRCONF(Signal* signal)
Uint32 nodeId = signal->theData[0]; Uint32 nodeId = signal->theData[0];
arrGuard(nodeId, MAX_NDB_NODES); arrGuard(nodeId, MAX_NDB_NODES);
m_sr_exec_sr_conf.set(nodeId); m_sr_exec_sr_conf.set(nodeId);
if (!m_sr_nodes.equal(m_sr_exec_sr_conf)) if (!m_sr_nodes.equal(m_sr_exec_sr_conf))
{ {
jam(); jam();
@ -14653,16 +14563,8 @@ void Dblqh::execEXEC_SRCONF(Signal* signal)
* NOW CHECK IF ALL FRAGMENTS IN THIS PHASE HAVE COMPLETED. IF SO START THE * NOW CHECK IF ALL FRAGMENTS IN THIS PHASE HAVE COMPLETED. IF SO START THE
* NEXT PHASE. * NEXT PHASE.
* ----------------------------------------------------------------------- */ * ----------------------------------------------------------------------- */
c_lcp_complete_fragments.first(fragptr); ndbrequire(cnoOutstandingExecFragReq == 0);
while (fragptr.i != RNIL)
{
jam();
if(fragptr.p->srStatus != Fragrecord::SS_COMPLETED)
{
return;
}
c_lcp_complete_fragments.next(fragptr);
}
execSrCompletedLab(signal); execSrCompletedLab(signal);
return; return;
}//Dblqh::execEXEC_SRCONF() }//Dblqh::execEXEC_SRCONF()
@ -14718,6 +14620,7 @@ void Dblqh::execSrCompletedLab(Signal* signal)
* THERE ARE YET MORE PHASES TO RESTART. * THERE ARE YET MORE PHASES TO RESTART.
* WE MUST INITIALISE DATA FOR NEXT PHASE AND SEND START SIGNAL. * WE MUST INITIALISE DATA FOR NEXT PHASE AND SEND START SIGNAL.
* --------------------------------------------------------------------- */ * --------------------------------------------------------------------- */
csrPhaseStarted = ZSR_PHASE1_COMPLETED; // Set correct state first...
startExecSr(signal); startExecSr(signal);
}//if }//if
return; return;
@ -14791,7 +14694,8 @@ void Dblqh::srPhase3Start(Signal* signal)
UintR tsrPhaseStarted; UintR tsrPhaseStarted;
jamEntry(); jamEntry();
tsrPhaseStarted = signal->theData[0];
tsrPhaseStarted = signal->theData[1];
if (csrPhaseStarted == ZSR_NO_PHASE_STARTED) { if (csrPhaseStarted == ZSR_NO_PHASE_STARTED) {
jam(); jam();
csrPhaseStarted = tsrPhaseStarted; csrPhaseStarted = tsrPhaseStarted;
@ -15968,18 +15872,6 @@ void Dblqh::sendExecConf(Signal* signal)
sendSignal(fragptr.p->execSrBlockref[i], GSN_EXEC_FRAGCONF, sendSignal(fragptr.p->execSrBlockref[i], GSN_EXEC_FRAGCONF,
signal, 1, JBB); signal, 1, JBB);
}//for }//for
if (fragptr.p->execSrStatus == Fragrecord::ACTIVE) {
jam();
fragptr.p->execSrStatus = Fragrecord::IDLE;
} else {
ndbrequire(fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER);
jam();
Uint32 fragId = fragptr.p->fragId;
tabptr.i = fragptr.p->tabRef;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
c_lcp_complete_fragments.remove(fragptr);
deleteFragrec(fragId);
}//if
fragptr.p->execSrNoReplicas = 0; fragptr.p->execSrNoReplicas = 0;
}//if }//if
loopCount++; loopCount++;
@ -16007,17 +15899,10 @@ void Dblqh::sendExecConf(Signal* signal)
void Dblqh::srPhase3Comp(Signal* signal) void Dblqh::srPhase3Comp(Signal* signal)
{ {
jamEntry(); jamEntry();
ndbrequire(cnoOfNodes < MAX_NDB_NODES);
for (Uint32 i = 0; i < cnoOfNodes; i++) { signal->theData[0] = cownNodeid;
jam(); NodeReceiverGroup rg(DBLQH, m_sr_nodes);
if (cnodeStatus[i] == ZNODE_UP) { sendSignal(rg, GSN_EXEC_SRCONF, signal, 1, JBB);
jam();
ndbrequire(cnodeData[i] < MAX_NDB_NODES);
BlockReference ref = calcLqhBlockRef(cnodeData[i]);
signal->theData[0] = cownNodeid;
sendSignal(ref, GSN_EXEC_SRCONF, signal, 1, JBB);
}//if
}//for
return; return;
}//Dblqh::srPhase3Comp() }//Dblqh::srPhase3Comp()
@ -16259,7 +16144,7 @@ void Dblqh::srFourthComp(Signal* signal)
if(cstartType == NodeState::ST_SYSTEM_RESTART) if(cstartType == NodeState::ST_SYSTEM_RESTART)
{ {
jam(); jam();
if (c_redo_complete_fragments.first(fragptr)) if (c_lcp_complete_fragments.first(fragptr))
{ {
jam(); jam();
signal->theData[0] = ZENABLE_EXPAND_CHECK; signal->theData[0] = ZENABLE_EXPAND_CHECK;
@ -17367,7 +17252,6 @@ void Dblqh::initFragrec(Signal* signal,
fragptr.p->maxGciInLcp = 0; fragptr.p->maxGciInLcp = 0;
fragptr.p->copyFragState = ZIDLE; fragptr.p->copyFragState = ZIDLE;
fragptr.p->newestGci = cnewestGci; fragptr.p->newestGci = cnewestGci;
fragptr.p->nextLcp = 0;
fragptr.p->tabRef = tableId; fragptr.p->tabRef = tableId;
fragptr.p->fragId = fragId; fragptr.p->fragId = fragId;
fragptr.p->srStatus = Fragrecord::SS_IDLE; fragptr.p->srStatus = Fragrecord::SS_IDLE;
@ -18456,6 +18340,7 @@ void Dblqh::sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus stat)
void Dblqh::startExecSr(Signal* signal) void Dblqh::startExecSr(Signal* signal)
{ {
cnoFragmentsExecSr = 0; cnoFragmentsExecSr = 0;
cnoOutstandingExecFragReq = 0;
c_lcp_complete_fragments.first(fragptr); c_lcp_complete_fragments.first(fragptr);
signal->theData[0] = fragptr.i; signal->theData[0] = fragptr.i;
sendSignal(cownref, GSN_START_EXEC_SR, signal, 1, JBB); sendSignal(cownref, GSN_START_EXEC_SR, signal, 1, JBB);

View file

@ -734,6 +734,7 @@ public:
// Index op return context // Index op return context
UintR indexOp; UintR indexOp;
UintR clientData; UintR clientData;
Uint32 errorData;
UintR attrInfoLen; UintR attrInfoLen;
UintR accumulatingIndexOp; UintR accumulatingIndexOp;

View file

@ -310,9 +310,11 @@ void Dbtc::execINCL_NODEREQ(Signal* signal)
hostptr.i = signal->theData[1]; hostptr.i = signal->theData[1];
ptrCheckGuard(hostptr, chostFilesize, hostRecord); ptrCheckGuard(hostptr, chostFilesize, hostRecord);
hostptr.p->hostStatus = HS_ALIVE; hostptr.p->hostStatus = HS_ALIVE;
signal->theData[0] = cownref;
c_alive_nodes.set(hostptr.i); c_alive_nodes.set(hostptr.i);
signal->theData[0] = hostptr.i;
signal->theData[1] = cownref;
if (ERROR_INSERTED(8039)) if (ERROR_INSERTED(8039))
{ {
CLEAR_ERROR_INSERT_VALUE; CLEAR_ERROR_INSERT_VALUE;
@ -321,11 +323,11 @@ void Dbtc::execINCL_NODEREQ(Signal* signal)
sendSignal(numberToRef(CMVMI, hostptr.i), sendSignal(numberToRef(CMVMI, hostptr.i),
GSN_NDB_TAMPER, signal, 1, JBB); GSN_NDB_TAMPER, signal, 1, JBB);
signal->theData[0] = save; signal->theData[0] = save;
sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 1); sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 2);
return; return;
} }
sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB); sendSignal(tblockref, GSN_INCL_NODECONF, signal, 2, JBB);
} }
void Dbtc::execREAD_NODESREF(Signal* signal) void Dbtc::execREAD_NODESREF(Signal* signal)
@ -5117,6 +5119,7 @@ void Dbtc::releaseDirtyWrite(Signal* signal)
void Dbtc::execLQHKEYREF(Signal* signal) void Dbtc::execLQHKEYREF(Signal* signal)
{ {
const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr(); const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr();
Uint32 indexId = 0;
jamEntry(); jamEntry();
UintR compare_transid1, compare_transid2; UintR compare_transid1, compare_transid2;
@ -5168,6 +5171,9 @@ void Dbtc::execLQHKEYREF(Signal* signal)
ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord); ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
// The operation executed an index trigger // The operation executed an index trigger
TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
indexId = indexData->indexId;
regApiPtr->errorData = indexId;
const Uint32 opType = regTcPtr->operation; const Uint32 opType = regTcPtr->operation;
if (errCode == ZALREADYEXIST) if (errCode == ZALREADYEXIST)
errCode = terrorCode = ZNOTUNIQUE; errCode = terrorCode = ZNOTUNIQUE;
@ -5180,7 +5186,6 @@ void Dbtc::execLQHKEYREF(Signal* signal)
} else { } else {
jam(); jam();
/** ZDELETE && NOT_FOUND */ /** ZDELETE && NOT_FOUND */
TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){ if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){
jam(); jam();
/** /**
@ -5265,12 +5270,14 @@ void Dbtc::execLQHKEYREF(Signal* signal)
jam(); jam();
regApiPtr->lqhkeyreqrec--; // Compensate for extra during read regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
tcKeyRef->connectPtr = indexOp; tcKeyRef->connectPtr = indexOp;
tcKeyRef->errorData = indexId;
EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength); EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
apiConnectptr.i = save; apiConnectptr.i = save;
apiConnectptr.p = regApiPtr; apiConnectptr.p = regApiPtr;
} else { } else {
jam(); jam();
tcKeyRef->connectPtr = clientData; tcKeyRef->connectPtr = clientData;
tcKeyRef->errorData = indexId;
sendSignal(regApiPtr->ndbapiBlockref, sendSignal(regApiPtr->ndbapiBlockref,
GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB); GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
}//if }//if
@ -10571,6 +10578,7 @@ void Dbtc::releaseAbortResources(Signal* signal)
tcRollbackRep->transId[0] = apiConnectptr.p->transid[0]; tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
tcRollbackRep->transId[1] = apiConnectptr.p->transid[1]; tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
tcRollbackRep->returnCode = apiConnectptr.p->returncode; tcRollbackRep->returnCode = apiConnectptr.p->returncode;
tcRollbackRep->errorData = apiConnectptr.p->errorData;
sendSignal(blockRef, GSN_TCROLLBACKREP, signal, sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
TcRollbackRep::SignalLength, JBB); TcRollbackRep::SignalLength, JBB);
} }
@ -11995,6 +12003,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349; tcIndxRef->errorCode = 4349;
tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
return; return;
@ -12014,6 +12023,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349; tcIndxRef->errorCode = 4349;
tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
return; return;
@ -12097,6 +12107,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
tcIndxRef->transId[0] = tcKeyRef->transId[0]; tcIndxRef->transId[0] = tcKeyRef->transId[0];
tcIndxRef->transId[1] = tcKeyRef->transId[1]; tcIndxRef->transId[1] = tcKeyRef->transId[1];
tcIndxRef->errorCode = tcKeyRef->errorCode; tcIndxRef->errorCode = tcKeyRef->errorCode;
tcIndxRef->errorData = 0;
releaseIndexOperation(regApiPtr, indexOp); releaseIndexOperation(regApiPtr, indexOp);
@ -12174,6 +12185,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000; tcIndxRef->errorCode = 4000;
tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
return; return;
@ -12189,6 +12201,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349; tcIndxRef->errorCode = 4349;
tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
return; return;
@ -12217,6 +12230,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349; tcIndxRef->errorCode = 4349;
tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
*/ */
@ -12242,6 +12256,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349; tcIndxRef->errorCode = 4349;
tcIndxRef->errorData = regApiPtr->errorData;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
return; return;
@ -12295,6 +12310,7 @@ void Dbtc::readIndexTable(Signal* signal,
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000; tcIndxRef->errorCode = 4000;
// tcIndxRef->errorData = ??; Where to find indexId
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
return; return;
@ -12441,6 +12457,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349; tcIndxRef->errorCode = 4349;
tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB); TcKeyRef::SignalLength, JBB);
return; return;

View file

@ -265,6 +265,8 @@ private:
void execALLOC_NODEID_CONF(Signal *); void execALLOC_NODEID_CONF(Signal *);
void execALLOC_NODEID_REF(Signal *); void execALLOC_NODEID_REF(Signal *);
void completeAllocNodeIdReq(Signal *); void completeAllocNodeIdReq(Signal *);
void execSTART_ORD(Signal*);
// Arbitration signals // Arbitration signals
void execARBIT_CFG(Signal* signal); void execARBIT_CFG(Signal* signal);
@ -281,6 +283,7 @@ private:
void check_readnodes_reply(Signal* signal, Uint32 nodeId, Uint32 gsn); void check_readnodes_reply(Signal* signal, Uint32 nodeId, Uint32 gsn);
Uint32 check_startup(Signal* signal); Uint32 check_startup(Signal* signal);
void api_failed(Signal* signal, Uint32 aFailedNode);
void node_failed(Signal* signal, Uint16 aFailedNode); void node_failed(Signal* signal, Uint16 aFailedNode);
void checkStartInterface(Signal* signal); void checkStartInterface(Signal* signal);
void failReport(Signal* signal, void failReport(Signal* signal,

View file

@ -31,10 +31,6 @@ void Qmgr::initData()
cnoCommitFailedNodes = 0; cnoCommitFailedNodes = 0;
c_maxDynamicId = 0; c_maxDynamicId = 0;
c_clusterNodes.clear(); c_clusterNodes.clear();
Uint32 hbDBAPI = 500;
setHbApiDelay(hbDBAPI);
c_connectedNodes.set(getOwnNodeId());
c_stopReq.senderRef = 0; c_stopReq.senderRef = 0;
/** /**
@ -43,6 +39,27 @@ void Qmgr::initData()
ndbrequire((Uint32)NodeInfo::DB == 0); ndbrequire((Uint32)NodeInfo::DB == 0);
ndbrequire((Uint32)NodeInfo::API == 1); ndbrequire((Uint32)NodeInfo::API == 1);
ndbrequire((Uint32)NodeInfo::MGM == 2); ndbrequire((Uint32)NodeInfo::MGM == 2);
NodeRecPtr nodePtr;
nodePtr.i = getOwnNodeId();
ptrAss(nodePtr, nodeRec);
nodePtr.p->blockRef = reference();
c_connectedNodes.set(getOwnNodeId());
setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION;
/**
* Timeouts
*/
const ndb_mgm_configuration_iterator * p =
m_ctx.m_config.getOwnConfigIterator();
ndbrequire(p != 0);
Uint32 hbDBAPI = 1500;
ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI);
setHbApiDelay(hbDBAPI);
}//Qmgr::initData() }//Qmgr::initData()
void Qmgr::initRecords() void Qmgr::initRecords()
@ -113,6 +130,7 @@ Qmgr::Qmgr(Block_context& ctx)
addRecSignal(GSN_DIH_RESTARTREF, &Qmgr::execDIH_RESTARTREF); addRecSignal(GSN_DIH_RESTARTREF, &Qmgr::execDIH_RESTARTREF);
addRecSignal(GSN_DIH_RESTARTCONF, &Qmgr::execDIH_RESTARTCONF); addRecSignal(GSN_DIH_RESTARTCONF, &Qmgr::execDIH_RESTARTCONF);
addRecSignal(GSN_NODE_VERSION_REP, &Qmgr::execNODE_VERSION_REP); addRecSignal(GSN_NODE_VERSION_REP, &Qmgr::execNODE_VERSION_REP);
addRecSignal(GSN_START_ORD, &Qmgr::execSTART_ORD);
initData(); initData();
}//Qmgr::Qmgr() }//Qmgr::Qmgr()

View file

@ -238,6 +238,38 @@ Qmgr::execREAD_CONFIG_REQ(Signal* signal)
ReadConfigConf::SignalLength, JBB); ReadConfigConf::SignalLength, JBB);
} }
void
Qmgr::execSTART_ORD(Signal* signal)
{
/**
* Start timer handling
*/
signal->theData[0] = ZTIMER_HANDLING;
sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 1, JBB);
NodeRecPtr nodePtr;
for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++)
{
ptrAss(nodePtr, nodeRec);
nodePtr.p->ndynamicId = 0;
if(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB)
{
nodePtr.p->phase = ZINIT;
c_definedNodes.set(nodePtr.i);
} else {
nodePtr.p->phase = ZAPI_INACTIVE;
}
setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
nodePtr.p->failState = NORMAL;
nodePtr.p->rcv[0] = 0;
nodePtr.p->rcv[1] = 0;
}//for
}
/* /*
4.2 ADD NODE MODULE*/ 4.2 ADD NODE MODULE*/
/*##########################################################################*/ /*##########################################################################*/
@ -298,8 +330,6 @@ void Qmgr::startphase1(Signal* signal)
nodePtr.i = getOwnNodeId(); nodePtr.i = getOwnNodeId();
ptrAss(nodePtr, nodeRec); ptrAss(nodePtr, nodeRec);
nodePtr.p->phase = ZSTARTING; nodePtr.p->phase = ZSTARTING;
nodePtr.p->blockRef = reference();
c_connectedNodes.set(nodePtr.i);
signal->theData[0] = reference(); signal->theData[0] = reference();
sendSignal(DBDIH_REF, GSN_DIH_RESTARTREQ, signal, 1, JBB); sendSignal(DBDIH_REF, GSN_DIH_RESTARTREQ, signal, 1, JBB);
@ -371,11 +401,14 @@ void Qmgr::execCONNECT_REP(Signal* signal)
case ZFAIL_CLOSING: case ZFAIL_CLOSING:
jam(); jam();
return; return;
case ZINIT:
ndbrequire(false);
case ZAPI_ACTIVE: case ZAPI_ACTIVE:
case ZAPI_INACTIVE: case ZAPI_INACTIVE:
return; return;
case ZINIT:
ndbrequire(getNodeInfo(nodeId).m_type == NodeInfo::MGM);
break;
default:
ndbrequire(false);
} }
if (getNodeInfo(nodeId).getType() != NodeInfo::DB) if (getNodeInfo(nodeId).getType() != NodeInfo::DB)
@ -1212,12 +1245,6 @@ void Qmgr::execCM_REGREF(Signal* signal)
{ {
jam(); jam();
electionWon(signal); electionWon(signal);
/**
* Start timer handling
*/
signal->theData[0] = ZTIMER_HANDLING;
sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB);
} }
return; return;
@ -1855,12 +1882,6 @@ Qmgr::joinedCluster(Signal* signal, NodeRecPtr nodePtr){
sendSttorryLab(signal); sendSttorryLab(signal);
/**
* Start timer handling
*/
signal->theData[0] = ZTIMER_HANDLING;
sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB);
sendCmAckAdd(signal, getOwnNodeId(), CmAdd::CommitNew); sendCmAckAdd(signal, getOwnNodeId(), CmAdd::CommitNew);
} }
@ -2094,25 +2115,6 @@ void Qmgr::findNeighbours(Signal* signal)
/*---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/
void Qmgr::initData(Signal* signal) void Qmgr::initData(Signal* signal)
{ {
NodeRecPtr nodePtr;
for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) {
ptrAss(nodePtr, nodeRec);
nodePtr.p->ndynamicId = 0;
if(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB){
nodePtr.p->phase = ZINIT;
c_definedNodes.set(nodePtr.i);
} else {
nodePtr.p->phase = ZAPI_INACTIVE;
}
setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
nodePtr.p->failState = NORMAL;
nodePtr.p->rcv[0] = 0;
nodePtr.p->rcv[1] = 0;
}//for
cfailureNr = 1; cfailureNr = 1;
ccommitFailureNr = 1; ccommitFailureNr = 1;
cprepareFailureNr = 1; cprepareFailureNr = 1;
@ -2146,13 +2148,11 @@ void Qmgr::initData(Signal* signal)
ndbrequire(p != 0); ndbrequire(p != 0);
Uint32 hbDBDB = 1500; Uint32 hbDBDB = 1500;
Uint32 hbDBAPI = 1500;
Uint32 arbitTimeout = 1000; Uint32 arbitTimeout = 1000;
c_restartPartialTimeout = 30000; c_restartPartialTimeout = 30000;
c_restartPartionedTimeout = 60000; c_restartPartionedTimeout = 60000;
c_restartFailureTimeout = ~0; c_restartFailureTimeout = ~0;
ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &hbDBDB); ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &hbDBDB);
ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI);
ndb_mgm_get_int_parameter(p, CFG_DB_ARBIT_TIMEOUT, &arbitTimeout); ndb_mgm_get_int_parameter(p, CFG_DB_ARBIT_TIMEOUT, &arbitTimeout);
ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT, ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT,
&c_restartPartialTimeout); &c_restartPartialTimeout);
@ -2177,7 +2177,6 @@ void Qmgr::initData(Signal* signal)
} }
setHbDelay(hbDBDB); setHbDelay(hbDBDB);
setHbApiDelay(hbDBAPI);
setArbitTimeout(arbitTimeout); setArbitTimeout(arbitTimeout);
arbitRec.state = ARBIT_NULL; // start state for all nodes arbitRec.state = ARBIT_NULL; // start state for all nodes
@ -2204,7 +2203,6 @@ void Qmgr::initData(Signal* signal)
execARBIT_CFG(signal); execARBIT_CFG(signal);
} }
setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION;
}//Qmgr::initData() }//Qmgr::initData()
@ -2237,20 +2235,22 @@ void Qmgr::timerHandlingLab(Signal* signal)
hb_check_timer.reset(); hb_check_timer.reset();
} }
} }
if (interface_check_timer.check(TcurrentTime)) { if (interface_check_timer.check(TcurrentTime)) {
jam(); jam();
interface_check_timer.reset(); interface_check_timer.reset();
checkStartInterface(signal); checkStartInterface(signal);
} }
if (hb_api_timer.check(TcurrentTime))
{
jam();
hb_api_timer.reset();
apiHbHandlingLab(signal);
}
if (cactivateApiCheck != 0) { if (cactivateApiCheck != 0) {
jam(); jam();
if (hb_api_timer.check(TcurrentTime)) {
jam();
hb_api_timer.reset();
apiHbHandlingLab(signal);
}//if
if (clatestTransactionCheck == 0) { if (clatestTransactionCheck == 0) {
//------------------------------------------------------------- //-------------------------------------------------------------
// Initialise the Transaction check timer. // Initialise the Transaction check timer.
@ -2367,18 +2367,21 @@ void Qmgr::apiHbHandlingLab(Signal* signal)
if(type == NodeInfo::INVALID) if(type == NodeInfo::INVALID)
continue; continue;
if (TnodePtr.p->phase == ZAPI_ACTIVE){ if (c_connectedNodes.get(nodeId))
{
jam(); jam();
setNodeInfo(TnodePtr.i).m_heartbeat_cnt++; setNodeInfo(TnodePtr.i).m_heartbeat_cnt++;
if(getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 2){ if(getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 2)
{
signal->theData[0] = NDB_LE_MissedHeartbeat; signal->theData[0] = NDB_LE_MissedHeartbeat;
signal->theData[1] = nodeId; signal->theData[1] = nodeId;
signal->theData[2] = getNodeInfo(TnodePtr.i).m_heartbeat_cnt - 1; signal->theData[2] = getNodeInfo(TnodePtr.i).m_heartbeat_cnt - 1;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
} }
if (getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 4) { if (getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 4)
{
jam(); jam();
/*------------------------------------------------------------------*/ /*------------------------------------------------------------------*/
/* THE API NODE HAS NOT SENT ANY HEARTBEAT FOR THREE SECONDS. /* THE API NODE HAS NOT SENT ANY HEARTBEAT FOR THREE SECONDS.
@ -2390,8 +2393,8 @@ void Qmgr::apiHbHandlingLab(Signal* signal)
signal->theData[0] = NDB_LE_DeadDueToHeartbeat; signal->theData[0] = NDB_LE_DeadDueToHeartbeat;
signal->theData[1] = nodeId; signal->theData[1] = nodeId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
node_failed(signal, nodeId); api_failed(signal, nodeId);
}//if }//if
}//if }//if
}//for }//for
@ -2480,26 +2483,6 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
/**-------------------------------------------------------------------------
* THE OTHER NODE WAS AN API NODE. THE COMMUNICATION LINK IS ALREADY
* BROKEN AND THUS NO ACTION IS NEEDED TO BREAK THE CONNECTION.
* WE ONLY NEED TO SET PARAMETERS TO ENABLE A NEW CONNECTION IN A FEW
* SECONDS.
*-------------------------------------------------------------------------*/
setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
setNodeInfo(failedNodePtr.i).m_version = 0;
recompute_version_info(getNodeInfo(failedNodePtr.i).m_type);
CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
closeCom->xxxBlockRef = reference();
closeCom->failNo = 0;
closeCom->noOfNodes = 1;
NodeBitmask::clear(closeCom->theNodes);
NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
CloseComReqConf::SignalLength, JBA);
}//Qmgr::sendApiFailReq() }//Qmgr::sendApiFailReq()
void Qmgr::execAPI_FAILREQ(Signal* signal) void Qmgr::execAPI_FAILREQ(Signal* signal)
@ -2512,20 +2495,7 @@ void Qmgr::execAPI_FAILREQ(Signal* signal)
ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB); ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB);
// ignore if api not active api_failed(signal, signal->theData[0]);
if (failedNodePtr.p->phase != ZAPI_ACTIVE)
{
jam();
// But send to SUMA anyway...
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
return;
}
signal->theData[0] = NDB_LE_Disconnected;
signal->theData[1] = failedNodePtr.i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
node_failed(signal, failedNodePtr.i);
} }
void Qmgr::execAPI_FAILCONF(Signal* signal) void Qmgr::execAPI_FAILCONF(Signal* signal)
@ -2649,6 +2619,13 @@ void Qmgr::execDISCONNECT_REP(Signal* signal)
ndbrequire(false); ndbrequire(false);
} }
if (getNodeInfo(nodeId).getType() != NodeInfo::DB)
{
jam();
api_failed(signal, nodeId);
return;
}
switch(nodePtr.p->phase){ switch(nodePtr.p->phase){
case ZRUNNING: case ZRUNNING:
jam(); jam();
@ -2685,66 +2662,109 @@ void Qmgr::node_failed(Signal* signal, Uint16 aFailedNode)
failedNodePtr.i = aFailedNode; failedNodePtr.i = aFailedNode;
ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec); ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
if (getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB){ ndbrequire(getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB);
/**---------------------------------------------------------------------
* THE OTHER NODE IS AN NDB NODE, WE HANDLE IT AS IF A HEARTBEAT
* FAILURE WAS DISCOVERED.
*---------------------------------------------------------------------*/
switch(failedNodePtr.p->phase){
case ZRUNNING:
jam(); jam();
/**--------------------------------------------------------------------- failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE);
* THE OTHER NODE IS AN NDB NODE, WE HANDLE IT AS IF A HEARTBEAT return;
* FAILURE WAS DISCOVERED. case ZFAIL_CLOSING:
*---------------------------------------------------------------------*/ jam();
switch(failedNodePtr.p->phase){ return;
case ZRUNNING: case ZSTARTING:
jam(); c_start.reset();
failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE); // Fall-through
return; default:
case ZFAIL_CLOSING: jam();
jam(); /*---------------------------------------------------------------------*/
return; // The other node is still not in the cluster but disconnected.
case ZSTARTING: // We must restart communication in three seconds.
c_start.reset(); /*---------------------------------------------------------------------*/
// Fall-through failedNodePtr.p->failState = NORMAL;
default: failedNodePtr.p->phase = ZFAIL_CLOSING;
jam(); setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
/*---------------------------------------------------------------------*/
// The other node is still not in the cluster but disconnected.
// We must restart communication in three seconds.
/*---------------------------------------------------------------------*/
failedNodePtr.p->failState = NORMAL;
failedNodePtr.p->phase = ZFAIL_CLOSING;
setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
CloseComReqConf * const closeCom = CloseComReqConf * const closeCom =
(CloseComReqConf *)&signal->theData[0]; (CloseComReqConf *)&signal->theData[0];
closeCom->xxxBlockRef = reference(); closeCom->xxxBlockRef = reference();
closeCom->failNo = 0; closeCom->failNo = 0;
closeCom->noOfNodes = 1; closeCom->noOfNodes = 1;
NodeBitmask::clear(closeCom->theNodes); NodeBitmask::clear(closeCom->theNodes);
NodeBitmask::set(closeCom->theNodes, failedNodePtr.i); NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal, sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
CloseComReqConf::SignalLength, JBA); CloseComReqConf::SignalLength, JBA);
}//if }//if
return;
}
void
Qmgr::api_failed(Signal* signal, Uint32 nodeId)
{
NodeRecPtr failedNodePtr;
/**------------------------------------------------------------------------
* A COMMUNICATION LINK HAS BEEN DISCONNECTED. WE MUST TAKE SOME ACTION
* DUE TO THIS.
*-----------------------------------------------------------------------*/
failedNodePtr.i = nodeId;
ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
if (failedNodePtr.p->phase == ZFAIL_CLOSING)
{
/**
* Failure handling already in progress
*/
jam();
return; return;
} }
/** if (failedNodePtr.p->phase == ZAPI_ACTIVE)
* API code {
*/
jam();
if (failedNodePtr.p->phase != ZFAIL_CLOSING){
jam(); jam();
//------------------------------------------------------------------------- sendApiFailReq(signal, nodeId);
// The API was active and has now failed. We need to initiate API failure
// handling. If the API had already failed then we can ignore this
// discovery.
//-------------------------------------------------------------------------
failedNodePtr.p->phase = ZFAIL_CLOSING;
sendApiFailReq(signal, aFailedNode);
arbitRec.code = ArbitCode::ApiFail; arbitRec.code = ArbitCode::ApiFail;
handleArbitApiFail(signal, aFailedNode); handleArbitApiFail(signal, nodeId);
}//if }
return; else
}//Qmgr::node_failed() {
/**
* Always inform SUMA
*/
jam();
signal->theData[0] = nodeId;
signal->theData[1] = QMGR_REF;
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
failedNodePtr.p->failState = NORMAL;
}
failedNodePtr.p->phase = ZFAIL_CLOSING;
setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
setNodeInfo(failedNodePtr.i).m_version = 0;
recompute_version_info(getNodeInfo(failedNodePtr.i).m_type);
CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
closeCom->xxxBlockRef = reference();
closeCom->failNo = 0;
closeCom->noOfNodes = 1;
NodeBitmask::clear(closeCom->theNodes);
NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
CloseComReqConf::SignalLength, JBA);
if (getNodeInfo(failedNodePtr.i).getType() == NodeInfo::MGM)
{
/**
* Allow MGM do reconnect "directly"
*/
jam();
setNodeInfo(failedNodePtr.i).m_heartbeat_cnt = 3;
}
}
/**-------------------------------------------------------------------------- /**--------------------------------------------------------------------------
* AN API NODE IS REGISTERING. IF FOR THE FIRST TIME WE WILL ENABLE * AN API NODE IS REGISTERING. IF FOR THE FIRST TIME WE WILL ENABLE
@ -4963,43 +4983,39 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal)
c_start.m_president_candidate_gci); c_start.m_president_candidate_gci);
infoEvent("ctoStatus = %d\n", ctoStatus); infoEvent("ctoStatus = %d\n", ctoStatus);
for(Uint32 i = 1; i<MAX_NDB_NODES; i++){ for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
if(getNodeInfo(i).getType() == NodeInfo::DB){ NodeRecPtr nodePtr;
NodeRecPtr nodePtr; nodePtr.i = i;
nodePtr.i = i; ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec); char buf[100];
char buf[100]; switch(nodePtr.p->phase){
switch(nodePtr.p->phase){ case ZINIT:
case ZINIT: sprintf(buf, "Node %d: ZINIT(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: ZINIT(%d)", i, nodePtr.p->phase); break;
break; case ZSTARTING:
case ZSTARTING: sprintf(buf, "Node %d: ZSTARTING(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: ZSTARTING(%d)", i, nodePtr.p->phase); break;
break; case ZRUNNING:
case ZRUNNING: sprintf(buf, "Node %d: ZRUNNING(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: ZRUNNING(%d)", i, nodePtr.p->phase); break;
break; case ZPREPARE_FAIL:
case ZPREPARE_FAIL: sprintf(buf, "Node %d: ZPREPARE_FAIL(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: ZPREPARE_FAIL(%d)", i, nodePtr.p->phase); break;
break; case ZFAIL_CLOSING:
case ZFAIL_CLOSING: sprintf(buf, "Node %d: ZFAIL_CLOSING(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: ZFAIL_CLOSING(%d)", i, nodePtr.p->phase); break;
break; case ZAPI_INACTIVE:
case ZAPI_INACTIVE: sprintf(buf, "Node %d: ZAPI_INACTIVE(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: ZAPI_INACTIVE(%d)", i, nodePtr.p->phase); break;
break; case ZAPI_ACTIVE:
case ZAPI_ACTIVE: sprintf(buf, "Node %d: ZAPI_ACTIVE(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: ZAPI_ACTIVE(%d)", i, nodePtr.p->phase); break;
break; default:
default: sprintf(buf, "Node %d: <UNKNOWN>(%d)", i, nodePtr.p->phase);
sprintf(buf, "Node %d: <UNKNOWN>(%d)", i, nodePtr.p->phase); break;
break;
}
infoEvent(buf);
} }
infoEvent(buf);
} }
default: }
;
}//switch
#ifdef ERROR_INSERT #ifdef ERROR_INSERT
if (signal->theData[0] == 935 && signal->getLength() == 2) if (signal->theData[0] == 935 && signal->getLength() == 2)

View file

@ -821,8 +821,9 @@ Suma::execINCL_NODEREQ(Signal* signal){
ndbrequire(!c_alive_nodes.get(nodeId)); ndbrequire(!c_alive_nodes.get(nodeId));
c_alive_nodes.set(nodeId); c_alive_nodes.set(nodeId);
signal->theData[0] = reference(); signal->theData[0] = nodeId;
sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB); signal->theData[1] = reference();
sendSignal(senderRef, GSN_INCL_NODECONF, signal, 2, JBB);
} }
void void
@ -974,6 +975,54 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
} }
return; return;
} }
if (tCase == 8011)
{
jam();
Uint32 bucket = signal->theData[1];
KeyTable<Table>::Iterator it;
if (signal->getLength() == 1)
{
jam();
bucket = 0;
infoEvent("-- Starting dump of subscribers --");
}
c_tables.next(bucket, it);
const Uint32 RT_BREAK = 16;
for(Uint32 i = 0; i<RT_BREAK || it.bucket == bucket; i++)
{
jam();
if(it.curr.i == RNIL)
{
jam();
infoEvent("-- Ending dump of subscribers --");
return;
}
infoEvent("Table: %u ver: %u #n: %u (ref,data,subscritopn)",
it.curr.p->m_tableId,
it.curr.p->m_schemaVersion,
it.curr.p->n_subscribers);
Ptr<Subscriber> ptr;
LocalDLList<Subscriber> list(c_subscriberPool, it.curr.p->c_subscribers);
for (list.first(ptr); !ptr.isNull(); list.next(ptr), i++)
{
jam();
infoEvent(" [ %x %u %u ]",
ptr.p->m_senderRef,
ptr.p->m_senderData,
ptr.p->m_subPtrI);
}
c_tables.next(it);
}
signal->theData[0] = tCase;
signal->theData[1] = it.bucket;
sendSignalWithDelay(reference(), GSN_DUMP_STATE_ORD, signal, 100, 2);
return;
}
} }
/************************************************************* /*************************************************************
@ -2402,6 +2451,7 @@ Suma::execSUB_START_REQ(Signal* signal){
{ {
jam(); jam();
c_subscriberPool.release(subbPtr);
sendSubStartRef(signal, SubStartRef::PartiallyConnected); sendSubStartRef(signal, SubStartRef::PartiallyConnected);
return; return;
} }

View file

@ -119,7 +119,11 @@ operator<<(NdbOut& out, const LogLevel & ll)
void void
MgmtSrvr::logLevelThreadRun() MgmtSrvr::logLevelThreadRun()
{ {
while (!_isStopThread) { while (!_isStopThread)
{
Vector<NodeId> failed_started_nodes;
Vector<EventSubscribeReq> failed_log_level_requests;
/** /**
* Handle started nodes * Handle started nodes
*/ */
@ -144,14 +148,15 @@ MgmtSrvr::logLevelThreadRun()
m_started_nodes.unlock(); m_started_nodes.unlock();
if (setEventReportingLevelImpl(node, req)) if (setEventReportingLevelImpl(node, req))
{ {
ndbout_c("setEventReportingLevelImpl(%d): failed", node); failed_started_nodes.push_back(node);
} }
else
SetLogLevelOrd ord; {
ord = m_nodeLogLevel[node]; SetLogLevelOrd ord;
setNodeLogLevelImpl(node, ord); ord = m_nodeLogLevel[node];
setNodeLogLevelImpl(node, ord);
}
m_started_nodes.lock(); m_started_nodes.lock();
} }
} }
@ -166,17 +171,20 @@ MgmtSrvr::logLevelThreadRun()
if(req.blockRef == 0) if(req.blockRef == 0)
{ {
req.blockRef = _ownReference; req.blockRef = _ownReference;
if (setEventReportingLevelImpl(0, req)) if (setEventReportingLevelImpl(0, req))
{ {
ndbout_c("setEventReportingLevelImpl: failed 2!"); failed_log_level_requests.push_back(req);
} }
} }
else else
{ {
SetLogLevelOrd ord; SetLogLevelOrd ord;
ord = req; ord = req;
setNodeLogLevelImpl(req.blockRef, ord); if (setNodeLogLevelImpl(req.blockRef, ord))
{
failed_log_level_requests.push_back(req);
}
} }
m_log_level_requests.lock(); m_log_level_requests.lock();
} }
@ -185,7 +193,28 @@ MgmtSrvr::logLevelThreadRun()
if(!ERROR_INSERTED(10000)) if(!ERROR_INSERTED(10000))
m_event_listner.check_listeners(); m_event_listner.check_listeners();
NdbSleep_MilliSleep(_logLevelThreadSleep); Uint32 sleeptime = _logLevelThreadSleep;
if (failed_started_nodes.size())
{
m_started_nodes.lock();
for (Uint32 i = 0; i<failed_started_nodes.size(); i++)
m_started_nodes.push_back(failed_started_nodes[i], false);
m_started_nodes.unlock();
failed_started_nodes.clear();
sleeptime = 100;
}
if (failed_log_level_requests.size())
{
m_log_level_requests.lock();
for (Uint32 i = 0; i<failed_log_level_requests.size(); i++)
m_log_level_requests.push_back(failed_log_level_requests[i], false);
m_log_level_requests.unlock();
failed_log_level_requests.clear();
sleeptime = 100;
}
NdbSleep_MilliSleep(sleeptime);
} }
} }
@ -1535,7 +1564,6 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
{ {
SignalSender ss(theFacade); SignalSender ss(theFacade);
NdbNodeBitmask nodes; NdbNodeBitmask nodes;
int retries = 30;
nodes.clear(); nodes.clear();
while (1) while (1)
{ {
@ -1572,19 +1600,9 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
continue; continue;
} }
// api_reg_conf not recevied yet, need to retry // api_reg_conf not recevied yet, need to retry
break; return SEND_OR_RECEIVE_FAILED;
} }
} }
if (nodeId <= max)
{
if (--retries)
{
ss.unlock();
NdbSleep_MilliSleep(100);
continue;
}
return SEND_OR_RECEIVE_FAILED;
}
if (nodeId_arg == 0) if (nodeId_arg == 0)
{ {
@ -1607,6 +1625,10 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
continue; // node is not connected, skip continue; // node is not connected, skip
if (ss.sendSignal(nodeId, &ssig) == SEND_OK) if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
nodes.set(nodeId); nodes.set(nodeId);
else if (max == nodeId)
{
return SEND_OR_RECEIVE_FAILED;
}
} }
break; break;
} }
@ -2988,8 +3010,7 @@ int MgmtSrvr::connect_to_self(void)
return 0; return 0;
} }
template class MutexVector<unsigned short>; template class MutexVector<unsigned short>;
template class MutexVector<Ndb_mgmd_event_service::Event_listener>; template class MutexVector<Ndb_mgmd_event_service::Event_listener>;
template class Vector<EventSubscribeReq>;
template class MutexVector<EventSubscribeReq>; template class MutexVector<EventSubscribeReq>;

View file

@ -792,6 +792,18 @@ NdbEventOperationImpl::receive_event()
p = p->next(); p = p->next();
} }
} }
// change the blobHandle's to refer to the new table object.
NdbBlob *p = theBlobList;
while (p)
{
int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("blob_handle: 0x%lx "
"switching column impl 0x%lx -> 0x%lx",
(long) p, (long) p->theColumn, (long) tAttrInfo));
p->theColumn = tAttrInfo;
p = p->next();
}
if (tmp_table_impl) if (tmp_table_impl)
delete tmp_table_impl; delete tmp_table_impl;
} }

View file

@ -24,6 +24,7 @@
#include "Interpreter.hpp" #include "Interpreter.hpp"
#include <AttributeHeader.hpp> #include <AttributeHeader.hpp>
#include <signaldata/TcKeyReq.hpp> #include <signaldata/TcKeyReq.hpp>
#include <signaldata/TcKeyRef.hpp>
#include <signaldata/KeyInfo.hpp> #include <signaldata/KeyInfo.hpp>
#include <signaldata/AttrInfo.hpp> #include <signaldata/AttrInfo.hpp>
#include <signaldata/ScanTab.hpp> #include <signaldata/ScanTab.hpp>
@ -545,6 +546,12 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
}//if }//if
setErrorCode(aSignal->readData(4)); setErrorCode(aSignal->readData(4));
if (aSignal->getLength() == TcKeyRef::SignalLength)
{
// Signal may contain additional error data
theError.details = (char *) aSignal->readData(5);
}
theStatus = Finished; theStatus = Finished;
theReceiver.m_received_result_length = ~0; theReceiver.m_received_result_length = ~0;

View file

@ -30,6 +30,7 @@
#include <signaldata/TcCommit.hpp> #include <signaldata/TcCommit.hpp>
#include <signaldata/TcKeyFailConf.hpp> #include <signaldata/TcKeyFailConf.hpp>
#include <signaldata/TcHbRep.hpp> #include <signaldata/TcHbRep.hpp>
#include <signaldata/TcRollbackRep.hpp>
/***************************************************************************** /*****************************************************************************
NdbTransaction( Ndb* aNdb ); NdbTransaction( Ndb* aNdb );
@ -1729,6 +1730,8 @@ Remark: Handles the reception of the ROLLBACKREP signal.
int int
NdbTransaction::receiveTCROLLBACKREP( NdbApiSignal* aSignal) NdbTransaction::receiveTCROLLBACKREP( NdbApiSignal* aSignal)
{ {
DBUG_ENTER("NdbTransaction::receiveTCROLLBACKREP");
/**************************************************************************** /****************************************************************************
Check that we are expecting signals from this transaction and that it doesn't Check that we are expecting signals from this transaction and that it doesn't
belong to a transaction already completed. Simply ignore messages from other belong to a transaction already completed. Simply ignore messages from other
@ -1736,6 +1739,11 @@ transactions.
****************************************************************************/ ****************************************************************************/
if(checkState_TransId(aSignal->getDataPtr() + 1)){ if(checkState_TransId(aSignal->getDataPtr() + 1)){
theError.code = aSignal->readData(4);// Override any previous errors theError.code = aSignal->readData(4);// Override any previous errors
if (aSignal->getLength() == TcRollbackRep::SignalLength)
{
// Signal may contain additional error data
theError.details = (char *) aSignal->readData(5);
}
/**********************************************************************/ /**********************************************************************/
/* A serious error has occured. This could be due to deadlock or */ /* A serious error has occured. This could be due to deadlock or */
@ -1747,14 +1755,14 @@ transactions.
theCompletionStatus = CompletedFailure; theCompletionStatus = CompletedFailure;
theCommitStatus = Aborted; theCommitStatus = Aborted;
theReturnStatus = ReturnFailure; theReturnStatus = ReturnFailure;
return 0; DBUG_RETURN(0);
} else { } else {
#ifdef NDB_NO_DROPPED_SIGNAL #ifdef NDB_NO_DROPPED_SIGNAL
abort(); abort();
#endif #endif
} }
return -1; DBUG_RETURN(-1);
}//NdbTransaction::receiveTCROLLBACKREP() }//NdbTransaction::receiveTCROLLBACKREP()
/******************************************************************************* /*******************************************************************************

View file

@ -1501,9 +1501,9 @@ void PollGuard::unlock_and_signal()
if (t_signal_cond_waiter) if (t_signal_cond_waiter)
t_signal_cond_waiter->set_poll_owner(true); t_signal_cond_waiter->set_poll_owner(true);
} }
m_tp->unlock_mutex();
if (t_signal_cond_waiter) if (t_signal_cond_waiter)
t_signal_cond_waiter->cond_signal(); t_signal_cond_waiter->cond_signal();
m_tp->unlock_mutex();
m_locked=false; m_locked=false;
} }

View file

@ -169,7 +169,7 @@ ErrorBundle ErrorCodes[] = {
{ 219, DMEC, TR, "219" }, { 219, DMEC, TR, "219" },
{ 233, DMEC, TR, { 233, DMEC, TR,
"Out of operation records in transaction coordinator (increase MaxNoOfConcurrentOperations)" }, "Out of operation records in transaction coordinator (increase MaxNoOfConcurrentOperations)" },
{ 275, DMEC, TR, "275" }, { 275, DMEC, TR, "Out of transaction records for complete phase (increase MaxNoOfConcurrentTransactions)" },
{ 279, DMEC, TR, "Out of transaction markers in transaction coordinator" }, { 279, DMEC, TR, "Out of transaction markers in transaction coordinator" },
{ 414, DMEC, TR, "414" }, { 414, DMEC, TR, "414" },
{ 418, DMEC, TR, "Out of transaction buffers in LQH" }, { 418, DMEC, TR, "Out of transaction buffers in LQH" },
@ -766,8 +766,6 @@ ndberror_update(ndberror_struct * error){
if(!found){ if(!found){
error->status = ST_U; error->status = ST_U;
} }
error->details = 0;
} }
#if CHECK_ERRORCODES #if CHECK_ERRORCODES

View file

@ -2776,9 +2776,13 @@ runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){
case NdbDictionary::Object::UserTable: case NdbDictionary::Object::UserTable:
tableFound = list.elements[i].name; tableFound = list.elements[i].name;
if(tableFound != 0){ if(tableFound != 0){
if(pDict->dropTable(tableFound) != 0){ if(strcmp(tableFound, "ndb_apply_status") != 0 &&
g_err << "Failed to drop table: " << pDict->getNdbError() << endl; strcmp(tableFound, "NDB$BLOB_2_3") != 0 &&
return NDBT_FAILED; strcmp(tableFound, "ndb_schema") != 0){
if(pDict->dropTable(tableFound) != 0){
g_err << "Failed to drop table: " << tableFound << pDict->getNdbError() << endl;
return NDBT_FAILED;
}
} }
} }
tableFound = 0; tableFound = 0;

View file

@ -1590,6 +1590,8 @@ runBug27466(NDBT_Context* ctx, NDBT_Step* step)
node2 = res.getDbNodeId(rand() % res.getNumDbNodes()); node2 = res.getDbNodeId(rand() % res.getNumDbNodes());
} }
ndbout_c("nodes %u %u", node1, node2);
if (res.restartOneDbNode(node1, false, true, true)) if (res.restartOneDbNode(node1, false, true, true))
return NDBT_FAILED; return NDBT_FAILED;
@ -1830,6 +1832,51 @@ runBug31525(NDBT_Context* ctx, NDBT_Step* step)
if (res.restartOneDbNode(nodes[1], false, false, true)) if (res.restartOneDbNode(nodes[1], false, false, true))
return NDBT_FAILED; return NDBT_FAILED;
if (res.waitClusterStarted())
return NDBT_FAILED;
return NDBT_OK;
}
int
runBug32160(NDBT_Context* ctx, NDBT_Step* step)
{
int result = NDBT_OK;
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
Ndb* pNdb = GETNDB(step);
NdbRestarter res;
if (res.getNumDbNodes() < 2)
{
return NDBT_OK;
}
int master = res.getMasterNodeId();
int next = res.getNextMasterNodeId(master);
if (res.insertErrorInNode(next, 7194))
{
return NDBT_FAILED;
}
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
if (res.dumpStateOneNode(master, val2, 2))
return NDBT_FAILED;
if (res.insertErrorInNode(master, 7193))
return NDBT_FAILED;
int val3[] = { 7099 };
if (res.dumpStateOneNode(master, val3, 1))
return NDBT_FAILED;
if (res.waitNodesNoStart(&master, 1))
return NDBT_FAILED;
if (res.startNodes(&master, 1))
return NDBT_FAILED;
if (res.waitClusterStarted()) if (res.waitClusterStarted())
return NDBT_FAILED; return NDBT_FAILED;
@ -2205,6 +2252,9 @@ TESTCASE("Bug28717", ""){
TESTCASE("Bug29364", ""){ TESTCASE("Bug29364", ""){
INITIALIZER(runBug29364); INITIALIZER(runBug29364);
} }
TESTCASE("Bug32160", ""){
INITIALIZER(runBug32160);
}
NDBT_TESTSUITE_END(testNodeRestart); NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){ int main(int argc, const char** argv){

View file

@ -1501,6 +1501,38 @@ int runSR_DD_2(NDBT_Context* ctx, NDBT_Step* step)
return result; return result;
} }
int runBug22696(NDBT_Context* ctx, NDBT_Step* step)
{
Ndb* pNdb = GETNDB(step);
int result = NDBT_OK;
Uint32 loops = ctx->getNumLoops();
Uint32 rows = ctx->getNumRecords();
NdbRestarter restarter;
HugoTransactions hugoTrans(*ctx->getTab());
Uint32 i = 0;
while(i<=loops && result != NDBT_FAILED)
{
for (Uint32 j = 0; j<10 && result != NDBT_FAILED; j++)
CHECK(hugoTrans.scanUpdateRecords(pNdb, rows) == 0);
CHECK(restarter.restartAll(false, true, i > 0 ? true : false) == 0);
CHECK(restarter.waitClusterNoStart() == 0);
CHECK(restarter.insertErrorInAllNodes(7072) == 0);
CHECK(restarter.startAll() == 0);
CHECK(restarter.waitClusterStarted() == 0);
i++;
if (i < loops)
{
NdbSleep_SecSleep(5); // Wait for a few gcp
}
}
ctx->stopTest();
return result;
}
int int
runBug27434(NDBT_Context* ctx, NDBT_Step* step) runBug27434(NDBT_Context* ctx, NDBT_Step* step)
{ {
@ -1813,8 +1845,13 @@ TESTCASE("Bug28770",
STEP(runBug28770); STEP(runBug28770);
FINALIZER(runClearTable); FINALIZER(runClearTable);
} }
TESTCASE("Bug22696", "")
{
INITIALIZER(runWaitStarted);
INITIALIZER(runLoadTable);
INITIALIZER(runBug22696);
FINALIZER(runClearTable);
}
NDBT_TESTSUITE_END(testSystemRestart); NDBT_TESTSUITE_END(testSystemRestart);
int main(int argc, const char** argv){ int main(int argc, const char** argv){

View file

@ -581,6 +581,10 @@ max-time: 1000
cmd: testNodeRestart cmd: testNodeRestart
args: -n Bug29364 T1 args: -n Bug29364 T1
max-time: 300
cmd: testNodeRestart
args: -n Bug32160 T1
# #
# DICT TESTS # DICT TESTS
max-time: 500 max-time: 500
@ -1038,4 +1042,7 @@ max-time: 300
cmd: test_event cmd: test_event
args: -n Bug31701 T1 args: -n Bug31701 T1
max-time: 300
cmd: testSystemRestart
args: -n Bug22696 T1

View file

@ -13,7 +13,7 @@
# along with this program; if not, write to the Free Software # along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event eventlog rep_latency ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event eventlog rep_latency ndb_connect
# transproxy # transproxy
@ -35,6 +35,7 @@ ndb_cpcc_SOURCES = cpcc.cpp
listen_event_SOURCES = listen.cpp listen_event_SOURCES = listen.cpp
eventlog_SOURCES = log_listner.cpp eventlog_SOURCES = log_listner.cpp
rep_latency_SOURCES = rep_latency.cpp rep_latency_SOURCES = rep_latency.cpp
ndb_connect_SOURCES = connect.cpp
include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am

View file

@ -0,0 +1,152 @@
/* Copyright (C) 2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
#include <ndb_opts.h>
#include <NDBT.hpp>
#include <NdbApi.hpp>
#include <NdbSleep.h>
NDB_STD_OPTS_VARS;
static int _loop = 25;
static int _sleep = 25;
static int _drop = 1;
typedef uchar* gptr;
static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "loop", 'l', "loops",
(gptr*) &_loop, (gptr*) &_loop, 0,
GET_INT, REQUIRED_ARG, _loop, 0, 0, 0, 0, 0 },
{ "sleep", 's', "Sleep (ms) between connection attempt",
(gptr*) &_sleep, (gptr*) &_sleep, 0,
GET_INT, REQUIRED_ARG, _sleep, 0, 0, 0, 0, 0 },
{ "drop", 'd',
"Drop event operations before disconnect (0 = no, 1 = yes, else rand",
(gptr*) &_drop, (gptr*) &_drop, 0,
GET_INT, REQUIRED_ARG, _drop, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
static void usage()
{
char desc[] = "This program connects to ndbd, and then disconnects\n";
ndb_std_print_version();
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
#ifndef DBUG_OFF
opt_debug= "d:t:O,/tmp/ndb_desc.trace";
#endif
if ((ho_error=handle_options(&argc, &argv, my_long_options,
ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
for (int i = 0; i<_loop; i++)
{
Ndb_cluster_connection con(opt_connect_str);
if(con.connect(12, 5, 1) != 0)
{
ndbout << "Unable to connect to management server." << endl;
return NDBT_ProgramExit(NDBT_FAILED);
}
if (con.wait_until_ready(30,30) != 0)
{
ndbout << "Cluster nodes not ready in 30 seconds." << endl;
return NDBT_ProgramExit(NDBT_FAILED);
}
Ndb MyNdb(&con, "TEST_DB");
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
}
Vector<NdbEventOperation*> ops;
const NdbDictionary::Dictionary * dict= MyNdb.getDictionary();
for (int j = 0; j < argc; j++)
{
const NdbDictionary::Table * pTab = dict->getTable(argv[j]);
if (pTab == 0)
{
ndbout_c("Failed to retreive table: \"%s\"", argv[j]);
}
BaseString tmp;
tmp.appfmt("EV-%s", argv[j]);
NdbEventOperation* pOp = MyNdb.createEventOperation(tmp.c_str());
if ( pOp == NULL )
{
ndbout << "Event operation creation failed: " <<
MyNdb.getNdbError() << endl;
return NDBT_ProgramExit(NDBT_FAILED);
}
for (int a = 0; a < pTab->getNoOfColumns(); a++)
{
pOp->getValue(pTab->getColumn(a)->getName());
pOp->getPreValue(pTab->getColumn(a)->getName());
}
if (pOp->execute())
{
ndbout << "operation execution failed: " << pOp->getNdbError()
<< endl;
return NDBT_ProgramExit(NDBT_FAILED);
}
ops.push_back(pOp);
}
if (_sleep)
{
NdbSleep_MilliSleep(10 + rand() % _sleep);
}
for (Uint32 i = 0; i<ops.size(); i++)
{
switch(_drop){
case 0:
break;
do_drop:
case 1:
if (MyNdb.dropEventOperation(ops[i]))
{
ndbout << "drop event operation failed "
<< MyNdb.getNdbError() << endl;
return NDBT_ProgramExit(NDBT_FAILED);
}
break;
default:
if ((rand() % 100) > 50)
goto do_drop;
}
}
}
return NDBT_ProgramExit(NDBT_OK);
}
template class Vector<NdbEventOperation*>;

View file

@ -534,6 +534,88 @@ TupleS::prepareRecord(TableS & tab){
return true; return true;
} }
/**
 * Decode the variable-sized attribute section of one backup tuple.
 *
 * The region [ptr, buf_ptr + dataLength) holds a sequence of entries, each a
 * two-word BackupFormat::DataFile::VariableData header (Sz, Id — both stored
 * in network byte order) followed by the attribute payload.  For every entry
 * the matching AttributeData slot in m_tuple is filled in (null flag, pointer
 * into the buffer, size), with byte-order fix-ups applied when the backup was
 * written on a host of the opposite endianness.
 *
 * @param buf_ptr     start of the tuple buffer (also the null-bitmap base)
 * @param ptr         cursor at the first variable-data entry (local copy;
 *                    the caller's pointer is not advanced)
 * @param dataLength  tuple length in 32-bit words, measured from buf_ptr
 * @return 0 on success, -1 if Twiddle() fails on an attribute
 */
int
RestoreDataIterator::readTupleData(Uint32 *buf_ptr, Uint32 *ptr,
Uint32 dataLength)
{
while (ptr + 2 < buf_ptr + dataLength)
{
typedef BackupFormat::DataFile::VariableData VarData;
VarData * data = (VarData *)ptr;
Uint32 sz = ntohl(data->Sz);
Uint32 attrId = ntohl(data->Id); // column_no
AttributeData * attr_data = m_tuple.getData(attrId);
const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
// just a reminder - remove when backwards compat implemented
// Pre-5.1.3 backups: nullness of nullable columns is carried in a
// bitmap at buf_ptr rather than in the entry itself.
if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
attr_desc->m_column->getNullable())
{
const Uint32 ind = attr_desc->m_nullBitIndex;
if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
buf_ptr,ind))
{
attr_data->null = true;
attr_data->void_value = NULL;
/* NOTE(review): this continue skips the "ptr += ..." advance at the
 * bottom of the loop, so the cursor does not move for a null
 * attribute — if a pre-5.1.3 backup actually emits an entry here
 * this loops forever.  Confirm against the old on-disk format. */
continue;
}
}
// Pre-5.1.3 backups stored Sz in words, not bytes.
if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
{
sz *= 4;
}
attr_data->null = false;
attr_data->void_value = &data->Data[0];
attr_data->size = sz;
//if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
/**
 * Compute array size
 */
// attr_desc->size is the element size in bits; arraySize is only used
// for the sanity assert below.
const Uint32 arraySize = sz / (attr_desc->size / 8);
assert(arraySize <= attr_desc->arraySize);

//convert the length of blob(v1) and text(v1)
// Fixed-array (v1) Blob/Text payloads start with a 64-bit length word
// that must be byte-swapped when host and backup endianness differ.
if(!m_hostByteOrder
&& (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
|| attr_desc->m_column->getType() == NdbDictionary::Column::Text)
&& attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
{
char* p = (char*)&attr_data->u_int64_value[0];
Uint64 x;
memcpy(&x, p, sizeof(Uint64));
x = Twiddle64(x);
memcpy(p, &x, sizeof(Uint64));
}

//convert datetime type
// Datetime is stored as a single 64-bit value; same cross-endian swap.
if(!m_hostByteOrder
&& attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
{
char* p = (char*)&attr_data->u_int64_value[0];
Uint64 x;
memcpy(&x, p, sizeof(Uint64));
x = Twiddle64(x);
memcpy(p, &x, sizeof(Uint64));
}

// Generic per-element endianness conversion for the attribute.
if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
{
return -1;
}
// Advance in 32-bit words: payload rounded up to a word + 2-word header.
ptr += ((sz + 3) >> 2) + 2;
}

// The entries must exactly fill the declared data length.
assert(ptr == buf_ptr + dataLength);

return 0;
}
const TupleS * const TupleS *
RestoreDataIterator::getNextTuple(int & res) RestoreDataIterator::getNextTuple(int & res)
{ {
@ -630,78 +712,8 @@ RestoreDataIterator::getNextTuple(int & res)
attr_data->void_value = NULL; attr_data->void_value = NULL;
} }
while (ptr + 2 < buf_ptr + dataLength) { if ((res = readTupleData(buf_ptr, ptr, dataLength)))
typedef BackupFormat::DataFile::VariableData VarData; return NULL;
VarData * data = (VarData *)ptr;
Uint32 sz = ntohl(data->Sz);
Uint32 attrId = ntohl(data->Id); // column_no
AttributeData * attr_data = m_tuple.getData(attrId);
const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
// just a reminder - remove when backwards compat implemented
if(m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
attr_desc->m_column->getNullable()){
const Uint32 ind = attr_desc->m_nullBitIndex;
if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
buf_ptr,ind)){
attr_data->null = true;
attr_data->void_value = NULL;
continue;
}
}
if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
{
sz *= 4;
}
attr_data->null = false;
attr_data->void_value = &data->Data[0];
attr_data->size = sz;
//if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
/**
* Compute array size
*/
const Uint32 arraySize = sz / (attr_desc->size / 8);
assert(arraySize <= attr_desc->arraySize);
//convert the length of blob(v1) and text(v1)
if(!m_hostByteOrder
&& (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
|| attr_desc->m_column->getType() == NdbDictionary::Column::Text)
&& attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
{
char* p = (char*)&attr_data->u_int64_value[0];
Uint64 x;
memcpy(&x, p, sizeof(Uint64));
x = Twiddle64(x);
memcpy(p, &x, sizeof(Uint64));
}
//convert datetime type
if(!m_hostByteOrder
&& attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
{
char* p = (char*)&attr_data->u_int64_value[0];
Uint64 x;
memcpy(&x, p, sizeof(Uint64));
x = Twiddle64(x);
memcpy(p, &x, sizeof(Uint64));
}
if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
{
res = -1;
return NULL;
}
ptr += ((sz + 3) >> 2) + 2;
}
assert(ptr == buf_ptr + dataLength);
m_count ++; m_count ++;
res = 0; res = 0;

View file

@ -355,6 +355,10 @@ public:
bool validateFragmentFooter(); bool validateFragmentFooter();
const TupleS *getNextTuple(int & res); const TupleS *getNextTuple(int & res);
private:
int readTupleData(Uint32 *buf_ptr, Uint32 *ptr, Uint32 dataLength);
}; };
class LogEntry { class LogEntry {