Mirror of https://github.com/MariaDB/server.git

commit 6fae8d4133
Merge perch.ndb.mysql.com:/home/jonas/src/mysql-5.1
into perch.ndb.mysql.com:/home/jonas/src/mysql-5.1-new-ndb

59 changed files with 1573 additions and 267 deletions
@@ -419,6 +419,27 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`a1`),
KEY `a3_i` (`a3`)
) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
TRUNCATE TABLE test.t1;
SHOW CREATE TABLE test.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a1` int(11) NOT NULL,
`a2` float DEFAULT NULL,
`a3` double DEFAULT NULL,
`a4` bit(1) DEFAULT NULL,
`a5` tinyint(4) DEFAULT NULL,
`a6` bigint(20) DEFAULT NULL,
`a7` date DEFAULT NULL,
`a8` time DEFAULT NULL,
`a9` datetime DEFAULT NULL,
`a10` tinytext,
`a11` mediumtext,
`a12` longtext,
`a13` text,
`a14` blob,
PRIMARY KEY (`a1`),
KEY `a3_i` (`a3`)
) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
ALTER TABLE test.t1 DROP a14;
ALTER TABLE test.t1 DROP a13;
ALTER TABLE test.t1 DROP a12;
@@ -835,3 +835,20 @@ a
3
delete from t1;
drop table t1;
create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT,
CountryCode char(3) NOT NULL,
DishTitle varchar(64) NOT NULL,
calories smallint(5) unsigned DEFAULT NULL,
PRIMARY KEY (DishID),
INDEX i USING HASH (countrycode,calories)
) ENGINE=ndbcluster;
ERROR HY000: Can't create table 'test.nationaldish' (errno: 138)
create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT,
CountryCode char(3) NOT NULL,
DishTitle varchar(64) NOT NULL,
calories smallint(5) unsigned DEFAULT NULL,
PRIMARY KEY (DishID)
) ENGINE=ndbcluster;
create index i on nationaldish(countrycode,calories) using hash;
ERROR 42000: Table 'nationaldish' uses an extension that doesn't exist in this MySQL version
drop table nationaldish;
@@ -203,3 +203,51 @@ NODEGROUP PARTITION_NAME
0 p0
0 p1
DROP TABLE t1;
CREATE TABLE t1 (
a tinyint unsigned NOT NULL,
b bigint(20) unsigned NOT NULL,
c char(12),
PRIMARY KEY (a,b)
) ENGINE ndb DEFAULT CHARSET=latin1 PARTITION BY KEY (a);
insert into t1 values(1,1,'1'), (2,2,'2'), (3,3,'3'), (4,4,'4'), (5,5,'5');
select * from t1 where a = 1;
a b c
1 1 1
select * from t1 where a = 2;
a b c
2 2 2
select * from t1 where a = 3;
a b c
3 3 3
select * from t1 where a = 4;
a b c
4 4 4
select * from t1 where a = 5;
a b c
5 5 5
delete from t1 where a = 1;
select * from t1 order by 1;
a b c
2 2 2
3 3 3
4 4 4
5 5 5
delete from t1 where a = 2;
select * from t1 order by 1;
a b c
3 3 3
4 4 4
5 5 5
delete from t1 where a = 3;
select * from t1 order by 1;
a b c
4 4 4
5 5 5
delete from t1 where a = 4;
select * from t1 order by 1;
a b c
5 5 5
delete from t1 where a = 5;
select * from t1 order by 1;
a b c
drop table t1;
mysql-test/r/rpl_ndb_do_db.result (new file, 58 lines)
@@ -0,0 +1,58 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
DROP DATABASE IF EXISTS replica;
CREATE DATABASE replica;
CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
USE replica;
CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
USE test;
INSERT INTO t1 VALUES(1, repeat('abc',10));
INSERT INTO t2 VALUES(1, repeat('abc',10));
SHOW TABLES;
Tables_in_test
t1
t2
SELECT COUNT(*) FROM t1;
COUNT(*)
1
SELECT COUNT(*) FROM t2;
COUNT(*)
1
USE replica;
INSERT INTO replica.t1 VALUES(2, repeat('def',200));
INSERT INTO replica.t2 VALUES(2, repeat('def',200));
SHOW TABLES;
Tables_in_replica
t1
t2
SELECT COUNT(*) FROM t1;
COUNT(*)
1
SELECT COUNT(*) FROM t2;
COUNT(*)
1
SHOW TABLES;
Tables_in_test
USE replica;
SHOW TABLES;
Tables_in_replica
t1
t2
SELECT COUNT(*) FROM t1;
COUNT(*)
1
SELECT COUNT(*) FROM t2;
COUNT(*)
1
USE test;
SHOW TABLES;
Tables_in_test
USE test;
DROP TABLE t1, t2;
DROP DATABASE IF EXISTS replica;
mysql-test/r/rpl_ndb_do_table.result (new file, 22 lines)
@@ -0,0 +1,22 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
INSERT INTO t1 VALUES(1, repeat('abc',10));
INSERT INTO t1 VALUES(2, repeat('def',200));
INSERT INTO t1 VALUES(3, repeat('ghi',3000));
INSERT INTO t2 VALUES(1, repeat('abc',10));
INSERT INTO t2 VALUES(2, repeat('def',200));
INSERT INTO t2 VALUES(3, repeat('ghi',3000));
SHOW TABLES;
Tables_in_test
t1
SELECT COUNT(*) FROM t1;
COUNT(*)
3
DROP TABLE IF EXISTS t1, t2;
mysql-test/r/rpl_ndb_rep_ignore.result (new file, 54 lines)
@@ -0,0 +1,54 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
DROP DATABASE IF EXISTS replica;
CREATE DATABASE replica;
CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
USE replica;
CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
USE test;
INSERT INTO t1 VALUES(1, repeat('abc',10));
INSERT INTO t2 VALUES(1, repeat('abc',10));
SHOW TABLES;
Tables_in_test
t1
t2
SELECT COUNT(*) FROM t1;
COUNT(*)
1
SELECT COUNT(*) FROM t2;
COUNT(*)
1
USE replica;
INSERT INTO replica.t1 VALUES(2, repeat('def',200));
INSERT INTO replica.t2 VALUES(2, repeat('def',200));
SHOW TABLES;
Tables_in_replica
t1
t2
SELECT COUNT(*) FROM t1;
COUNT(*)
1
SELECT COUNT(*) FROM t2;
COUNT(*)
1
SHOW TABLES;
Tables_in_test
USE replica;
SHOW TABLES;
Tables_in_replica
t2
SELECT COUNT(*) FROM t2;
COUNT(*)
1
USE test;
SHOW TABLES;
Tables_in_test
USE test;
DROP TABLE t1, t2;
DROP DATABASE IF EXISTS replica;
@@ -221,6 +221,9 @@ ALTER TABLE test.t1 DROP INDEX a2_i;

SHOW CREATE TABLE test.t1;

TRUNCATE TABLE test.t1;

SHOW CREATE TABLE test.t1;

#### Try to ALTER DD Tables and drop columns

@@ -455,3 +455,26 @@ rollback;
select * from t1 order by a;
delete from t1;
drop table t1;

# bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index

--error ER_CANT_CREATE_TABLE
create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT,
CountryCode char(3) NOT NULL,
DishTitle varchar(64) NOT NULL,
calories smallint(5) unsigned DEFAULT NULL,
PRIMARY KEY (DishID),
INDEX i USING HASH (countrycode,calories)
) ENGINE=ndbcluster;

create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT,
CountryCode char(3) NOT NULL,
DishTitle varchar(64) NOT NULL,
calories smallint(5) unsigned DEFAULT NULL,
PRIMARY KEY (DishID)
) ENGINE=ndbcluster;

--error ER_UNSUPPORTED_EXTENSION
create index i on nationaldish(countrycode,calories) using hash;

drop table nationaldish;
@@ -199,3 +199,31 @@ ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1;
SELECT NODEGROUP,PARTITION_NAME FROM information_schema.partitions WHERE
table_name = "t1";
DROP TABLE t1;

# bug#25587

CREATE TABLE t1 (
a tinyint unsigned NOT NULL,
b bigint(20) unsigned NOT NULL,
c char(12),
PRIMARY KEY (a,b)
) ENGINE ndb DEFAULT CHARSET=latin1 PARTITION BY KEY (a);

insert into t1 values(1,1,'1'), (2,2,'2'), (3,3,'3'), (4,4,'4'), (5,5,'5');
select * from t1 where a = 1;
select * from t1 where a = 2;
select * from t1 where a = 3;
select * from t1 where a = 4;
select * from t1 where a = 5;
delete from t1 where a = 1;
select * from t1 order by 1;
delete from t1 where a = 2;
select * from t1 order by 1;
delete from t1 where a = 3;
select * from t1 order by 1;
delete from t1 where a = 4;
select * from t1 order by 1;
delete from t1 where a = 5;
select * from t1 order by 1;

drop table t1;
mysql-test/t/rpl_ndb_do_db-slave.opt (new file, 1 line)
@@ -0,0 +1 @@
--replicate-do-db=replica
mysql-test/t/rpl_ndb_do_db.test (new file, 57 lines)
@@ -0,0 +1,57 @@
###########################################################
# Author: Jeb
# Date: 14-12-2006
# Purpose: To test --replicate-do-database=db_name
#          using cluster. Only replica should replicate.
##########################################################

--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

--disable_warnings
DROP DATABASE IF EXISTS replica;
--enable_warnings

# Create database and tables for the test.
CREATE DATABASE replica;
CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
USE replica;
CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;

# Insert data into db that should not be picked up by slave
USE test;
INSERT INTO t1 VALUES(1, repeat('abc',10));
INSERT INTO t2 VALUES(1, repeat('abc',10));
SHOW TABLES;
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t2;

# Insert data into db that should be replicated
USE replica;
INSERT INTO replica.t1 VALUES(2, repeat('def',200));
INSERT INTO replica.t2 VALUES(2, repeat('def',200));
SHOW TABLES;
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t2;

# Check results on slave
--sync_slave_with_master
SHOW TABLES;
USE replica;
SHOW TABLES;
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t2;
USE test;
SHOW TABLES;

# Cleanup from testing
connection master;
USE test;
DROP TABLE t1, t2;
DROP DATABASE IF EXISTS replica;
--sync_slave_with_master

# End 5.1 test case
mysql-test/t/rpl_ndb_do_table-slave.opt (new file, 1 line)
@@ -0,0 +1 @@
--replicate-do-table=test.t1
mysql-test/t/rpl_ndb_do_table.test (new file, 32 lines)
@@ -0,0 +1,32 @@
###########################################################
# Author: Jeb
# Date: 14-12-2006
# Purpose: To test --replicate-do-table=db_name.tbl_name
#          using cluster. Only t1 should replicate.
##########################################################

--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

--disable_warnings
DROP TABLE IF EXISTS t1, t2;
--enable_warnings

CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;

INSERT INTO t1 VALUES(1, repeat('abc',10));
INSERT INTO t1 VALUES(2, repeat('def',200));
INSERT INTO t1 VALUES(3, repeat('ghi',3000));
INSERT INTO t2 VALUES(1, repeat('abc',10));
INSERT INTO t2 VALUES(2, repeat('def',200));
INSERT INTO t2 VALUES(3, repeat('ghi',3000));

--sync_slave_with_master
SHOW TABLES;
SELECT COUNT(*) FROM t1;

connection master;
DROP TABLE IF EXISTS t1, t2;
--sync_slave_with_master
mysql-test/t/rpl_ndb_rep_ignore-slave.opt (new file, 1 line)
@@ -0,0 +1 @@
--replicate-ignore-db=test --replicate-ignore-table=replica.t1
mysql-test/t/rpl_ndb_rep_ignore.test (new file, 58 lines)
@@ -0,0 +1,58 @@
###########################################################
# Author: Jeb
# Date: 15-12-2006
# Purpose: To test --replicate-ignore-table=db_name.tbl_name
#          and --replicate-ignore-db=db_name
#          using cluster. Only replica should replicate.
##########################################################

--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

--disable_warnings
DROP DATABASE IF EXISTS replica;
--enable_warnings

# Create database and tables for the test.
CREATE DATABASE replica;
CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
USE replica;
CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;
CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB;

# Insert data into db that should not be picked up by slave
USE test;
INSERT INTO t1 VALUES(1, repeat('abc',10));
INSERT INTO t2 VALUES(1, repeat('abc',10));
SHOW TABLES;
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t2;

# Insert data into db that should be replicated
USE replica;
INSERT INTO replica.t1 VALUES(2, repeat('def',200));
INSERT INTO replica.t2 VALUES(2, repeat('def',200));
SHOW TABLES;
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t2;

# Check results on slave
--sync_slave_with_master
SHOW TABLES;
USE replica;
SHOW TABLES;
#SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t2;
USE test;
SHOW TABLES;

# Cleanup from testing
connection master;
USE test;
DROP TABLE t1, t2;
DROP DATABASE IF EXISTS replica;
--sync_slave_with_master

# End 5.1 test case
@@ -4734,6 +4734,7 @@ int ha_ndbcluster::create(const char *name,
const void *data, *pack_data;
bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE);
char tablespace[FN_LEN];

DBUG_ENTER("ha_ndbcluster::create");
DBUG_PRINT("enter", ("name: %s", name));
@@ -4742,8 +4743,22 @@ int ha_ndbcluster::create(const char *name,
set_dbname(name);
set_tabname(name);

if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);

Ndb *ndb= get_ndb();
NDBDICT *dict= ndb->getDictionary();

if (is_truncate)
{
{
Ndb_table_guard ndbtab_g(dict, m_tabname);
if (!(m_table= ndbtab_g.get_table()))
ERR_RETURN(dict->getNdbError());
if ((get_tablespace_name(thd, tablespace, FN_LEN)))
info->tablespace= tablespace;
m_table= NULL;
}
DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE"));
if ((my_errno= delete_table(name)))
DBUG_RETURN(my_errno);
@@ -4806,7 +4821,7 @@ int ha_ndbcluster::create(const char *name,
if ((my_errno= create_ndb_column(col, field, info)))
DBUG_RETURN(my_errno);

if (info->storage_media == HA_SM_DISK || getenv("NDB_DEFAULT_DISK"))
if (info->storage_media == HA_SM_DISK)
col.setStorageType(NdbDictionary::Column::StorageTypeDisk);
else
col.setStorageType(NdbDictionary::Column::StorageTypeMemory);
@@ -4829,9 +4844,9 @@ int ha_ndbcluster::create(const char *name,
if (info->storage_media == HA_SM_DISK)
{
if (info->tablespace)
tab.setTablespace(info->tablespace);
tab.setTablespaceName(info->tablespace);
else
tab.setTablespace("DEFAULT-TS");
tab.setTablespaceName("DEFAULT-TS");
}
else if (info->tablespace)
{
@@ -4845,7 +4860,7 @@ int ha_ndbcluster::create(const char *name,
"STORAGE DISK");
DBUG_RETURN(HA_ERR_UNSUPPORTED);
}
tab.setTablespace(info->tablespace);
tab.setTablespaceName(info->tablespace);
info->storage_media = HA_SM_DISK; //if use tablespace, that also means store on disk
}

@@ -4903,12 +4918,7 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}

if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);

// Create the table in NDB
Ndb *ndb= get_ndb();
NDBDICT *dict= ndb->getDictionary();
if (dict->createTable(tab) != 0)
{
const NdbError err= dict->getNdbError();
@@ -5143,6 +5153,17 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info,
error= create_unique_index(unique_name, key_info);
break;
case ORDERED_INDEX:
if (key_info->algorithm == HA_KEY_ALG_HASH)
{
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
"Ndb does not support non-unique "
"hash based indexes");
error= HA_ERR_UNSUPPORTED;
break;
}
error= create_ordered_index(name, key_info);
break;
default:
@@ -5237,7 +5258,7 @@ int ha_ndbcluster::add_index(TABLE *table_arg,
KEY *key= key_info + idx;
KEY_PART_INFO *key_part= key->key_part;
KEY_PART_INFO *end= key_part + key->key_parts;
NDB_INDEX_TYPE idx_type= get_index_type_from_key(idx, key, false);
NDB_INDEX_TYPE idx_type= get_index_type_from_key(idx, key_info, false);
DBUG_PRINT("info", ("Adding index: '%s'", key_info[idx].name));
// Add fields to key_part struct
for (; key_part != end; key_part++)
@@ -7806,7 +7827,7 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const
retry:
if(report_error)
{
if (file)
if (file && pTrans)
{
reterr= file->ndb_err(pTrans);
}
@@ -10007,6 +10028,7 @@ char* ha_ndbcluster::get_tablespace_name(THD *thd, char* name, uint name_len)
ndberr= ndbdict->getNdbError();
if(ndberr.classification != NdbError::NoError)
goto err;
DBUG_PRINT("info", ("Found tablespace '%s'", ts.getName()));
if (name)
{
strxnmov(name, name_len, ts.getName(), NullS);
@@ -4729,8 +4729,8 @@ enum options_mysqld
OPT_LOG_BIN_TRUST_FUNCTION_CREATORS,
OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG,
OPT_INNODB, OPT_ISAM,
OPT_ENGINE_CONDITION_PUSHDOWN,
OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT,
OPT_ENGINE_CONDITION_PUSHDOWN, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING,
OPT_NDB_USE_EXACT_COUNT, OPT_NDB_USE_TRANSACTIONS,
OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
OPT_NDB_MGMD, OPT_NDB_NODEID,
@@ -5416,6 +5416,17 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &global_system_variables.ndb_use_exact_count,
(gptr*) &global_system_variables.ndb_use_exact_count,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb-use-transactions", OPT_NDB_USE_TRANSACTIONS,
"Use transactions for large inserts, if enabled then large "
"inserts will be split into several smaller transactions",
(gptr*) &global_system_variables.ndb_use_transactions,
(gptr*) &global_system_variables.ndb_use_transactions,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb_use_transactions", OPT_NDB_USE_TRANSACTIONS,
"same as --ndb-use-transactions.",
(gptr*) &global_system_variables.ndb_use_transactions,
(gptr*) &global_system_variables.ndb_use_transactions,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb-shm", OPT_NDB_SHM,
"Use shared memory connections when available.",
(gptr*) &opt_ndb_shm,
@@ -327,9 +327,6 @@ void THD::init(void)
variables.date_format);
variables.datetime_format= date_time_format_copy((THD*) 0,
variables.datetime_format);
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
variables.ndb_use_transactions= 1;
#endif
pthread_mutex_unlock(&LOCK_global_system_variables);
server_status= SERVER_STATUS_AUTOCOMMIT;
if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
@@ -182,7 +182,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_CNTR_START_REP 119
/* 120 not unused */
#define GSN_ROUTE_ORD 121
/* 122 unused */
#define GSN_NODE_VERSION_REP 122
/* 123 unused */
/* 124 unused */
#define GSN_CHECK_LCP_STOP 125
@@ -89,4 +89,14 @@ operator<<(NdbOut& ndbout, const NodeInfo & info){
return ndbout;
}

struct NodeVersionInfo
{
STATIC_CONST( DataLength = 6 );
struct
{
Uint32 m_min_version;
Uint32 m_max_version;
} m_type [3]; // Indexed as NodeInfo::Type
};

#endif
@@ -79,12 +79,13 @@ class ApiRegConf {
friend class ClusterMgr;

public:
STATIC_CONST( SignalLength = 3 + NodeState::DataLength );
STATIC_CONST( SignalLength = 4 + NodeState::DataLength );
private:

Uint32 qmgrRef;
Uint32 version; // Version of NDB node
Uint32 apiHeartbeatFrequency;
Uint32 minDbVersion;
NodeState nodeState;
};

@@ -67,6 +67,7 @@ public:
// 100-105 TUP and ACC
// 200-240 UTIL
// 300-305 TRIX
QmgrErr935 = 935,
NdbfsDumpFileStat = 400,
NdbfsDumpAllFiles = 401,
NdbfsDumpOpenFiles = 402,
@@ -71,5 +71,7 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];

#define NDBD_QMGR_SINGLEUSER_VERSION_5 MAKE_VERSION(5,0,25)

#define NDBD_NODE_VERSION_REP MAKE_VERSION(6,1,1)

#endif

@@ -809,9 +809,9 @@ public:
*/
void setMaxLoadFactor(int);

void setTablespace(const char * name);
void setTablespaceName(const char * name);
const char * getTablespaceName() const;
void setTablespace(const class Tablespace &);
const char * getTablespace() const;
bool getTablespace(Uint32 *id= 0, Uint32 *version= 0) const;

/**
@@ -65,7 +65,7 @@ void NdbMem_Free(void* ptr);
* NdbMem_MemLockAll
* Locks virtual memory in main memory
*/
int NdbMem_MemLockAll(void);
int NdbMem_MemLockAll(int);

/**
* NdbMem_MemUnlockAll
@@ -636,5 +636,6 @@ const GsnName SignalNames [] = {
,{ GSN_DICT_COMMIT_REQ, "DICT_COMMIT_REQ"}

,{ GSN_ROUTE_ORD, "ROUTE_ORD" }
,{ GSN_NODE_VERSION_REP, "NODE_VERSION_REP" }
};
const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName);
@@ -56,7 +56,15 @@ void NdbMem_Free(void* ptr)
}


int NdbMem_MemLockAll(){
int NdbMem_MemLockAll(int i){
if (i == 1)
{
#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) && defined (MCL_FUTURE)
return mlockall(MCL_CURRENT | MCL_FUTURE);
#else
return -1;
#endif
}
#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT)
return mlockall(MCL_CURRENT);
#else
@@ -48,7 +48,7 @@ static const char Magic[] = { 'N', 'D', 'B', 'C', 'O', 'N', 'F', 'V' };

//#define DEBUG_CV
#ifdef DEBUG_CV
#define DEBUG
#define DEBUG if(getenv("CV_DEBUG"))
#else
#define DEBUG if(0)
#endif
@@ -216,62 +216,60 @@ ConfigValues::Iterator::set(Uint32 key, const char * value){
static
bool
findKey(const Uint32 * values, Uint32 sz, Uint32 key, Uint32 * _pos){
Uint32 pos = hash(key, sz);
Uint32 count = 0;
while((values[pos] & KP_MASK) != key && count < sz){
pos = nextHash(key, sz, pos, ++count);
Uint32 lo = 0;
Uint32 hi = sz;
Uint32 pos = (hi + lo) >> 1;

DEBUG printf("findKey(H'%.8x %d)", key, sz);

if (sz == 0)
{
DEBUG ndbout_c(" -> false, 0");
* _pos = 0;
return false;
}

if((values[pos] & KP_MASK)== key){
*_pos = pos;
Uint32 val = 0;
Uint32 oldpos = pos + 1;
while (pos != oldpos)
{
DEBUG printf(" [ %d %d %d ] ", lo, pos, hi);
assert(pos < hi);
assert(pos >= lo);
val = values[2*pos] & KP_MASK;
if (key > val)
{
lo = pos;
}
else if (key < val)
{
hi = pos;
}
else
{
* _pos = 2*pos;
DEBUG ndbout_c(" -> true, %d", pos);
return true;
}
oldpos = pos;
pos = (hi + lo) >> 1;
}

DEBUG printf(" pos: %d (key %.8x val: %.8x values[pos]: %x) key>val: %d ",
pos, key, val, values[2*pos] & KP_MASK,
key > val);

pos += (key > val) ? 1 : 0;

* _pos = 2*pos;
DEBUG ndbout_c(" -> false, %d", pos);
return false;
}

static
Uint32
hash(Uint32 key, Uint32 size){
Uint32 tmp = (key >> 16) ^ (key & 0xFFFF);
return (((tmp << 16) | tmp) % size) << 1;
}

static
Uint32
nextHash(Uint32 key, Uint32 size, Uint32 pos, Uint32 count){
Uint32 p = (pos >> 1);
if((key % size) != 0)
p += key;
else
p += 1;
return (p % size) << 1;
}

static
Uint32
directory(Uint32 sz){
const Uint32 _input = sz;
if((sz & 1) == 0)
sz ++;

bool prime = false;
while(!prime){
prime = true;
for(Uint32 n = 3; n*n <= sz; n += 2){
if((sz % n) == 0){
prime = false;
sz += 2;
break;
}
}
}
DEBUG printf("directory %d -> %d\n", _input, sz);
return sz;
}

ConfigValuesFactory::ConfigValuesFactory(Uint32 keys, Uint32 data){
m_sectionCounter = (1 << KP_SECTION_SHIFT);
m_freeKeys = directory(keys);
m_freeKeys = keys;
m_freeData = (data + 7) & ~7;
m_currentSection = 0;
m_cfg = create(m_freeKeys, m_freeData);
@@ -330,11 +328,14 @@ ConfigValuesFactory::expand(Uint32 fk, Uint32 fs){
return ;
}

DEBUG printf("[ fk fd ] : [ %d %d ]", m_freeKeys, m_freeData);

m_freeKeys = (m_freeKeys >= fk ? m_cfg->m_size : fk + m_cfg->m_size);
m_freeData = (m_freeData >= fs ? m_cfg->m_dataSize : fs + m_cfg->m_dataSize);
m_freeKeys = directory(m_freeKeys);
m_freeData = (m_freeData + 7) & ~7;

DEBUG ndbout_c(" [ %d %d ]", m_freeKeys, m_freeData);

ConfigValues * m_tmp = m_cfg;
m_cfg = create(m_freeKeys, m_freeData);
put(* m_tmp);
@@ -350,7 +351,6 @@ ConfigValuesFactory::shrink(){

m_freeKeys = m_cfg->m_size - m_freeKeys;
m_freeData = m_cfg->m_dataSize - m_freeData;
m_freeKeys = directory(m_freeKeys);
m_freeData = (m_freeData + 7) & ~7;

ConfigValues * m_tmp = m_cfg;
@@ -429,52 +429,58 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){
}

const Uint32 tmp = entry.m_key | m_currentSection;
const Uint32 sz = m_cfg->m_size;
Uint32 pos = hash(tmp, sz);
Uint32 count = 0;
Uint32 val = m_cfg->m_values[pos];
const Uint32 sz = m_cfg->m_size - m_freeKeys;

while((val & KP_MASK) != tmp && val != CFV_KEY_FREE && count < sz){
pos = nextHash(tmp, sz, pos, ++count);
val = m_cfg->m_values[pos];
}

if((val & KP_MASK) == tmp){
Uint32 pos;
if (findKey(m_cfg->m_values, sz, tmp, &pos))
{
DEBUG ndbout_c("key %x already found at pos: %d", tmp, pos);
return false;
}

if(count >= sz){
pos = hash(tmp, sz);
count = 0;
Uint32 val = m_cfg->m_values[pos];

printf("key: %d, (key %% size): %d\n", entry.m_key, (entry.m_key % sz));
printf("pos: %d", pos);
while((val & KP_MASK) != tmp && val != CFV_KEY_FREE && count < sz){
pos = nextHash(tmp, sz, pos, ++count);
val = m_cfg->m_values[pos];
printf(" %d", pos);
DEBUG {
printf("H'before ");
Uint32 prev = 0;
for (Uint32 i = 0; i<sz; i++)
{
Uint32 val = m_cfg->m_values[2*i] & KP_MASK;
ndbout_c("%.8x", val);
assert(val >= prev);
prev = val;
}
printf("\n");

abort();
printf("Full\n");
return false;
}

assert(pos < (sz << 1));
if (pos != 2*sz)
{
DEBUG ndbout_c("pos: %d sz: %d", pos, sz);
memmove(m_cfg->m_values + pos + 2, m_cfg->m_values + pos,
4 * (2*sz - pos));
}


Uint32 key = tmp;
key |= (entry.m_type << KP_TYPE_SHIFT);
m_cfg->m_values[pos] = key;

DEBUG {
printf("H'after ");
Uint32 prev = 0;
for (Uint32 i = 0; i<=sz; i++)
{
Uint32 val = m_cfg->m_values[2*i] & KP_MASK;
ndbout_c("%.8x", val);
assert(val >= prev);
prev = val;
}
}

switch(entry.m_type){
case ConfigValues::IntType:
case ConfigValues::SectionType:
m_cfg->m_values[pos+1] = entry.m_int;
m_freeKeys--;
DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value: %d\n",
pos, sz, count,
pos, sz, 0,
(key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK,
entry.m_int);
return true;
@@ -486,7 +492,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){
m_freeKeys--;
m_freeData -= sizeof(char *);
DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value(%d): %s\n",
pos, sz, count,
pos, sz, 0,
(key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK,
index,
entry.m_string);
@@ -499,7 +505,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){
m_freeKeys--;
m_freeData -= 8;
DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value64(%d): %lld\n",
pos, sz, count,
pos, sz, 0,
(key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK,
index,
entry.m_int64);
@@ -662,6 +668,8 @@ ConfigValuesFactory::unpack(const void * _src, Uint32 len){
}

const char * src = (const char *)_src;
const char * end = src + len - 4;
src += sizeof(Magic);

{
Uint32 len32 = (len >> 2);
@@ -677,9 +685,37 @@ ConfigValuesFactory::unpack(const void * _src, Uint32 len){
}
}

const char * end = src + len - 4;
src += sizeof(Magic);
const char * save = src;

{
Uint32 keys = 0;
Uint32 data = 0;
while(end - src > 4){
Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4;
keys++;
switch(::getTypeOf(tmp)){
case ConfigValues::IntType:
case ConfigValues::SectionType:
src += 4;
break;
case ConfigValues::Int64Type:
src += 8;
data += 8;
break;
case ConfigValues::StringType:{
Uint32 s_len = ntohl(* (const Uint32 *)src);
src += 4 + mod4(s_len);
data += sizeof(char*);
break;
}
default:
break;
}
}
expand(keys, data);
}

src = save;
ConfigValues::Entry entry;
while(end - src > 4){
Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4;
@@ -23,6 +23,8 @@ Crash president when he starts to run in ArbitState 1-9.

934 : Crash president in ALLOC_NODE_ID_REQ

935 : Crash master on node failure (delayed)
and skip sending GSN_COMMIT_FAILREQ to specified node

ERROR CODES FOR TESTING NODE FAILURE, GLOBAL CHECKPOINT HANDLING:
-----------------------------------------------------------------
@@ -368,9 +368,9 @@ void Cmvmi::execSTTOR(Signal* signal)
if (theStartPhase == 1){
jam();

if(m_ctx.m_config.lockPagesInMainMemory())
if(m_ctx.m_config.lockPagesInMainMemory() == 1)
{
int res = NdbMem_MemLockAll();
int res = NdbMem_MemLockAll(0);
if(res != 0){
g_eventLogger.warning("Failed to memlock pages");
warningEvent("Failed to memlock pages");
@@ -812,6 +812,21 @@ Cmvmi::execSTART_ORD(Signal* signal) {

if(globalData.theStartLevel == NodeState::SL_CMVMI){
jam();

if(m_ctx.m_config.lockPagesInMainMemory() == 2)
{
int res = NdbMem_MemLockAll(1);
if(res != 0)
{
g_eventLogger.warning("Failed to memlock pages");
warningEvent("Failed to memlock pages");
}
else
{
g_eventLogger.info("Locked future allocations");
}
}

globalData.theStartLevel = NodeState::SL_STARTING;
globalData.theRestartFlag = system_started;
/**
@@ -10049,9 +10049,20 @@ void Dbdict::execSUB_START_REQ(Signal* signal)
}
OpSubEventPtr subbPtr;
Uint32 errCode = 0;

DictLockPtr loopPtr;
if (c_dictLockQueue.first(loopPtr) &&
loopPtr.p->lt->lockType == DictLockReq::NodeRestartLock)
{
jam();
errCode = 1405;
goto busy;
}

if (!c_opSubEvent.seize(subbPtr)) {
errCode = SubStartRef::Busy;
busy:
jam();
SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();

{ // fix
@@ -10150,6 +10161,7 @@ void Dbdict::execSUB_START_REF(Signal* signal)
SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend();
ref->senderRef = reference();
ref->senderData = subbPtr.p->m_senderData;
ref->errorCode = err;
sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
signal, SubStartRef::SignalLength2, JBB);
c_opSubEvent.release(subbPtr);
@@ -10212,6 +10224,7 @@ void Dbdict::execSUB_START_CONF(Signal* signal)
#ifdef EVENT_PH3_DEBUG
ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_CONF = (%d)", subbPtr.i);
#endif
subbPtr.p->m_sub_start_conf = *conf;
subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
completeSubStartReq(signal,subbPtr.i,0);
}
@@ -10251,6 +10264,9 @@ void Dbdict::completeSubStartReq(Signal* signal,
#ifdef EVENT_DEBUG
ndbout_c("SUB_START_CONF");
#endif

SubStartConf* conf = (SubStartConf*)signal->getDataPtrSend();
* conf = subbPtr.p->m_sub_start_conf;
sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF,
signal, SubStartConf::SignalLength, JBB);
c_opSubEvent.release(subbPtr);
@@ -10372,6 +10388,7 @@ void Dbdict::execSUB_STOP_REF(Signal* signal)
SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend();
ref->senderRef = reference();
ref->senderData = subbPtr.p->m_senderData;
ref->errorCode = err;
sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
signal, SubStopRef::SignalLength, JBB);
c_opSubEvent.release(subbPtr);
@@ -10424,6 +10441,7 @@ void Dbdict::execSUB_STOP_CONF(Signal* signal)
* Coordinator
*/
ndbrequire(refToBlock(senderRef) == DBDICT);
subbPtr.p->m_sub_stop_conf = *conf;
subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
completeSubStopReq(signal,subbPtr.i,0);
}
@@ -10464,6 +10482,8 @@ void Dbdict::completeSubStopReq(Signal* signal,
#ifdef EVENT_DEBUG
ndbout_c("SUB_STOP_CONF");
#endif
SubStopConf* conf = (SubStopConf*)signal->getDataPtrSend();
* conf = subbPtr.p->m_sub_stop_conf;
sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF,
signal, SubStopConf::SignalLength, JBB);
c_opSubEvent.release(subbPtr);
@@ -10712,6 +10732,7 @@ Dbdict::execSUB_REMOVE_REF(Signal* signal)
SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtrSend();
ref->senderRef = reference();
ref->senderData = subbPtr.p->m_senderData;
ref->errorCode = err;
sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_REF,
signal, SubRemoveRef::SignalLength, JBB);
}
@@ -51,6 +51,7 @@
#include <signaldata/DropTrig.hpp>
#include <signaldata/AlterTrig.hpp>
#include <signaldata/DictLock.hpp>
#include <signaldata/SumaImpl.hpp>
#include "SchemaFile.hpp"
#include <blocks/mutexes.hpp>
#include <SafeCounter.hpp>
@@ -1631,6 +1632,10 @@ private:
Uint32 m_senderRef;
Uint32 m_senderData;
Uint32 m_errorCode;
union {
SubStartConf m_sub_start_conf;
SubStopConf m_sub_stop_conf;
};
RequestTracker m_reqTracker;
};
typedef Ptr<OpSubEvent> OpSubEventPtr;
@@ -636,6 +636,7 @@ private:
void execTCGETOPSIZECONF(Signal *);
void execTC_CLOPSIZECONF(Signal *);

int handle_invalid_lcp_no(const class LcpFragRep*, ReplicaRecordPtr);
void execLCP_FRAG_REP(Signal *);
void execLCP_COMPLETE_REP(Signal *);
void execSTART_LCP_REQ(Signal *);
@@ -2831,7 +2831,9 @@ Dbdih::nr_start_fragments(Signal* signal,
return;
}//if
ptrAss(tabPtr, tabRecord);
if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE ||
tabPtr.p->tabStorage != TabRecord::ST_NORMAL)
{
jam();
takeOverPtr.p->toCurrentFragid = 0;
takeOverPtr.p->toCurrentTabref++;
@@ -2915,6 +2917,17 @@ Dbdih::nr_start_fragment(Signal* signal,
takeOverPtr.p->toCurrentTabref,
takeOverPtr.p->toCurrentFragid);
replicaPtr.p->lcpIdStarted = 0;
BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toStartingNode);
StartFragReq *req = (StartFragReq *)signal->getDataPtrSend();
req->userPtr = 0;
req->userRef = reference();
req->lcpNo = ZNIL;
req->lcpId = 0;
req->tableId = takeOverPtr.p->toCurrentTabref;
req->fragId = takeOverPtr.p->toCurrentFragid;
req->noOfLogNodes = 0;
sendSignal(ref, GSN_START_FRAGREQ, signal,
StartFragReq::SignalLength, JBB);
}
else
{
@@ -3740,7 +3753,6 @@ void Dbdih::endTakeOver(Uint32 takeOverPtrI)
takeOverPtr.i = takeOverPtrI;
ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);

releaseTakeOver(takeOverPtrI);
if ((takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) &&
(takeOverPtr.p->toMasterStatus != TakeOverRecord::TO_WAIT_START_TAKE_OVER)) {
jam();
@@ -3754,6 +3766,7 @@ void Dbdih::endTakeOver(Uint32 takeOverPtrI)
}//if
setAllowNodeStart(takeOverPtr.p->toStartingNode, true);
initTakeOver(takeOverPtr);
releaseTakeOver(takeOverPtrI);
}//Dbdih::endTakeOver()

void Dbdih::releaseTakeOver(Uint32 takeOverPtrI)
@@ -4045,6 +4058,11 @@ void Dbdih::execNODE_FAILREP(Signal* signal)
Uint32 newMasterId = nodeFail->masterNodeId;
const Uint32 noOfFailedNodes = nodeFail->noOfNodes;

if (ERROR_INSERTED(7179))
{
CLEAR_ERROR_INSERT_VALUE;
}

/*-------------------------------------------------------------------------*/
// The first step is to convert from a bit mask to an array of failed nodes.
/*-------------------------------------------------------------------------*/
@@ -4908,6 +4926,7 @@ void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI)
break;
}
ndbrequire(ok);
endTakeOver(takeOverPtr.i);
}//if
}//Dbdih::handleTakeOverNewMaster()

@@ -10256,6 +10275,36 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)

jamEntry();

if (ERROR_INSERTED(7178) && nodeId != getOwnNodeId())
{
jam();
Uint32 owng =Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups);
Uint32 nodeg = Sysfile::getNodeGroup(nodeId, SYSFILE->nodeGroups);
if (owng == nodeg)
{
jam();
ndbout_c("throwing away LCP_FRAG_REP from (and killing) %d", nodeId);
SET_ERROR_INSERT_VALUE(7179);
signal->theData[0] = 9999;
sendSignal(numberToRef(CMVMI, nodeId),
GSN_NDB_TAMPER, signal, 1, JBA);
return;
}
}

if (ERROR_INSERTED(7179) && nodeId != getOwnNodeId())
{
jam();
Uint32 owng =Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups);
Uint32 nodeg = Sysfile::getNodeGroup(nodeId, SYSFILE->nodeGroups);
if (owng == nodeg)
{
jam();
ndbout_c("throwing away LCP_FRAG_REP from %d", nodeId);
return;
}
}

CRASH_INSERTION2(7025, isMaster());
CRASH_INSERTION2(7016, !isMaster());

@@ -10462,6 +10511,37 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
ndbrequire(false);
}//Dbdih::findReplica()


int
Dbdih::handle_invalid_lcp_no(const LcpFragRep* rep,
ReplicaRecordPtr replicaPtr)
{
ndbrequire(!isMaster());
Uint32 lcpNo = rep->lcpNo;
Uint32 lcpId = rep->lcpId;
Uint32 replicaLcpNo = replicaPtr.p->nextLcp;
Uint32 prevReplicaLcpNo = prevLcpNo(replicaLcpNo);

warningEvent("Detected previous node failure of %d during lcp",
rep->nodeId);
replicaPtr.p->nextLcp = lcpNo;
replicaPtr.p->lcpId[lcpNo] = 0;
replicaPtr.p->lcpStatus[lcpNo] = ZINVALID;

for (Uint32 i = lcpNo; i != lcpNo; i = nextLcpNo(i))
{
jam();
if (replicaPtr.p->lcpStatus[i] == ZVALID &&
replicaPtr.p->lcpId[i] >= lcpId)
{
ndbout_c("i: %d lcpId: %d", i, replicaPtr.p->lcpId[i]);
ndbrequire(false);
}
}

return 0;
}

/**
* Return true if table is all fragment replicas have been checkpointed
* to disk (in all LQHs)
@@ -10490,10 +10570,13 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)

ndbrequire(replicaPtr.p->lcpOngoingFlag == true);
if(lcpNo != replicaPtr.p->nextLcp){
if (handle_invalid_lcp_no(lcpReport, replicaPtr))
{
ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d",
lcpNo, replicaPtr.p->nextLcp);
ndbrequire(false);
}
}
ndbrequire(lcpNo == replicaPtr.p->nextLcp);
ndbrequire(lcpNo < MAX_LCP_STORED);
ndbrequire(replicaPtr.p->lcpId[lcpNo] != lcpId);
@@ -13835,6 +13835,7 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal)
Uint32 lcpNo = startFragReq->lcpNo;
Uint32 noOfLogNodes = startFragReq->noOfLogNodes;
Uint32 lcpId = startFragReq->lcpId;

ndbrequire(noOfLogNodes <= 4);
fragptr.p->fragStatus = Fragrecord::CRASH_RECOVERING;
fragptr.p->srBlockref = startFragReq->userRef;
@@ -13890,6 +13891,15 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal)
sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
}

if (getNodeState().getNodeRestartInProgress())
{
jam();
fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
}

c_tup->disk_restart_mark_no_lcp(tabptr.i, fragId);
jamEntry();

return;
}//if

@@ -16712,8 +16722,8 @@ void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data,
ccurrentGcprec = RNIL;
caddNodeState = ZFALSE;
cstartRecReq = ZFALSE;
cnewestGci = ~0;
cnewestCompletedGci = ~0;
cnewestGci = 0;
cnewestCompletedGci = 0;
crestartOldestGci = 0;
crestartNewestGci = 0;
csrPhaseStarted = ZSR_NO_PHASE_STARTED;
@@ -2711,6 +2711,8 @@ public:
Local_key m_key;
};

void disk_restart_mark_no_lcp(Uint32 table, Uint32 frag);

private:
void disk_restart_undo_next(Signal*);
void disk_restart_undo_lcp(Uint32, Uint32, Uint32 flag);
@@ -1421,6 +1421,13 @@ Dbtup::disk_restart_undo_next(Signal* signal)
sendSignal(LGMAN_REF, GSN_CONTINUEB, signal, 1, JBB);
}

void
Dbtup::disk_restart_mark_no_lcp(Uint32 tableId, Uint32 fragId)
{
jamEntry();
disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE);
}

void
Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag)
{
@@ -1257,6 +1257,8 @@ Pgman::process_lcp(Signal* signal)
void
Pgman::process_lcp_locked(Signal* signal, Ptr<Page_entry> ptr)
{
CRASH_INSERTION(11006);

ptr.p->m_last_lcp = m_last_lcp;
if (ptr.p->m_state & Page_entry::DIRTY)
{
@@ -2350,6 +2352,11 @@ Pgman::execDUMP_STATE_ORD(Signal* signal)
{
g_dbg_lcp = ~g_dbg_lcp;
}

if (signal->theData[0] == 11006)
{
SET_ERROR_INSERT_VALUE(11006);
}
}

// page cache client
@@ -445,6 +445,21 @@ private:

StopReq c_stopReq;
bool check_multi_node_shutdown(Signal* signal);

#ifdef ERROR_INSERT
Uint32 c_error_insert_extra;
#endif

void recompute_version_info(Uint32 type);
void recompute_version_info(Uint32 type, Uint32 version);
void execNODE_VERSION_REP(Signal* signal);
void sendApiVersionRep(Signal* signal, NodeRecPtr nodePtr);
void sendVersionedDb(NodeReceiverGroup rg,
GlobalSignalNumber gsn,
Signal* signal,
Uint32 length,
JobBufferLevel jbuf,
Uint32 minversion);
};

#endif
@@ -36,6 +36,13 @@ void Qmgr::initData()
setHbApiDelay(hbDBAPI);
c_connectedNodes.set(getOwnNodeId());
c_stopReq.senderRef = 0;

/**
* Check sanity for NodeVersion
*/
ndbrequire((Uint32)NodeInfo::DB == 0);
ndbrequire((Uint32)NodeInfo::API == 1);
ndbrequire((Uint32)NodeInfo::MGM == 2);
}//Qmgr::initData()

void Qmgr::initRecords()
@@ -106,6 +113,7 @@ Qmgr::Qmgr(Block_context& ctx)

addRecSignal(GSN_DIH_RESTARTREF, &Qmgr::execDIH_RESTARTREF);
addRecSignal(GSN_DIH_RESTARTCONF, &Qmgr::execDIH_RESTARTCONF);
addRecSignal(GSN_NODE_VERSION_REP, &Qmgr::execNODE_VERSION_REP);

initData();
}//Qmgr::Qmgr()
@ -259,6 +259,9 @@ void Qmgr::execSTTOR(Signal* signal)
|
|||
case 1:
|
||||
initData(signal);
|
||||
startphase1(signal);
|
||||
recompute_version_info(NodeInfo::DB);
|
||||
recompute_version_info(NodeInfo::API);
|
||||
recompute_version_info(NodeInfo::MGM);
|
||||
return;
|
||||
case 7:
|
||||
cactivateApiCheck = 1;
|
||||
|
@ -764,6 +767,7 @@ void Qmgr::execCM_REGREQ(Signal* signal)
|
|||
*/
|
||||
UintR TdynId = ++c_maxDynamicId;
|
||||
setNodeInfo(addNodePtr.i).m_version = startingVersion;
|
||||
recompute_version_info(NodeInfo::DB, startingVersion);
|
||||
addNodePtr.p->ndynamicId = TdynId;
|
||||
|
||||
/**
|
||||
|
@ -1502,6 +1506,7 @@ void Qmgr::execCM_NODEINFOCONF(Signal* signal)
|
|||
replyNodePtr.p->ndynamicId = dynamicId;
|
||||
replyNodePtr.p->blockRef = signal->getSendersBlockRef();
|
||||
setNodeInfo(replyNodePtr.i).m_version = version;
|
||||
recompute_version_info(NodeInfo::DB, version);
|
||||
|
||||
if(!c_start.m_nodes.done()){
|
||||
jam();
|
||||
|
@ -1601,6 +1606,7 @@ Qmgr::cmAddPrepare(Signal* signal, NodeRecPtr nodePtr, const NodeRec * self){
|
|||
}
|
||||
|
||||
sendCmAckAdd(signal, nodePtr.i, CmAdd::Prepare);
|
||||
sendApiVersionRep(signal, nodePtr);
|
||||
|
||||
/* President have prepared us */
|
||||
CmNodeInfoConf * conf = (CmNodeInfoConf*)signal->getDataPtrSend();
|
||||
|
@ -1612,6 +1618,29 @@ Qmgr::cmAddPrepare(Signal* signal, NodeRecPtr nodePtr, const NodeRec * self){
|
|||
DEBUG_START(GSN_CM_NODEINFOCONF, refToNode(nodePtr.p->blockRef), "");
|
||||
}
|
||||
|
||||
void
|
||||
Qmgr::sendApiVersionRep(Signal* signal, NodeRecPtr nodePtr)
|
||||
{
|
||||
if (getNodeInfo(nodePtr.i).m_version >= NDBD_NODE_VERSION_REP)
|
||||
{
|
||||
jam();
|
||||
Uint32 ref = calcQmgrBlockRef(nodePtr.i);
|
||||
for(Uint32 i = 1; i<MAX_NODES; i++)
|
||||
{
|
||||
jam();
|
||||
Uint32 version = getNodeInfo(i).m_version;
|
||||
Uint32 type = getNodeInfo(i).m_type;
|
||||
if (type != NodeInfo::DB && version)
|
||||
{
|
||||
jam();
|
||||
signal->theData[0] = i;
|
||||
signal->theData[1] = version;
|
||||
sendSignal(ref, GSN_NODE_VERSION_REP, signal, 2, JBB);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
Qmgr::sendCmAckAdd(Signal * signal, Uint32 nodeId, CmAdd::RequestType type){
|
||||
|
||||
|
@ -2400,6 +2429,8 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
|
|||
* SECONDS.
|
||||
*-------------------------------------------------------------------------*/
|
||||
setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
|
||||
setNodeInfo(failedNodePtr.i).m_version = 0;
|
||||
recompute_version_info(getNodeInfo(failedNodePtr.i).m_type);
|
||||
|
||||
CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
|
||||
|
||||
|
@ -2424,7 +2455,12 @@ void Qmgr::execAPI_FAILREQ(Signal* signal)
|
|||
|
||||
// ignore if api not active
|
||||
if (failedNodePtr.p->phase != ZAPI_ACTIVE)
|
||||
{
|
||||
jam();
|
||||
// But send to SUMA anyway...
|
||||
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
|
||||
return;
|
||||
}
|
||||
|
||||
signal->theData[0] = NDB_LE_Disconnected;
|
||||
signal->theData[1] = failedNodePtr.i;
|
||||
|
@ -2706,7 +2742,6 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
|
|||
}
|
||||
|
||||
setNodeInfo(apiNodePtr.i).m_version = version;
|
||||
|
||||
setNodeInfo(apiNodePtr.i).m_heartbeat_cnt= 0;
|
||||
|
||||
ApiRegConf * const apiRegConf = (ApiRegConf *)&signal->theData[0];
|
||||
|
@ -2727,8 +2762,9 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
|
|||
apiRegConf->nodeState.dynamicId = -dynamicId;
|
||||
}
|
||||
}
|
||||
NodeVersionInfo info = getNodeVersionInfo();
|
||||
apiRegConf->minDbVersion = info.m_type[NodeInfo::DB].m_min_version;
|
||||
apiRegConf->nodeState.m_connected_nodes.assign(c_connectedNodes);
|
||||
|
||||
sendSignal(ref, GSN_API_REGCONF, signal, ApiRegConf::SignalLength, JBB);
|
||||
|
||||
if (apiNodePtr.p->phase == ZAPI_INACTIVE &&
|
||||
|
@ -2747,12 +2783,54 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
|
|||
signal->theData[0] = apiNodePtr.i;
|
||||
sendSignal(CMVMI_REF, GSN_ENABLE_COMORD, signal, 1, JBA);
|
||||
|
||||
recompute_version_info(type, version);
|
||||
|
||||
signal->theData[0] = apiNodePtr.i;
|
||||
signal->theData[1] = version;
|
||||
NodeReceiverGroup rg(QMGR, c_clusterNodes);
|
||||
rg.m_nodes.clear(getOwnNodeId());
|
||||
sendVersionedDb(rg, GSN_NODE_VERSION_REP, signal, 2, JBB,
|
||||
NDBD_NODE_VERSION_REP);
|
||||
|
||||
signal->theData[0] = apiNodePtr.i;
|
||||
EXECUTE_DIRECT(NDBCNTR, GSN_API_START_REP, signal, 1);
|
||||
}
|
||||
return;
|
||||
}//Qmgr::execAPI_REGREQ()
|
||||
|
||||
void
|
||||
Qmgr::sendVersionedDb(NodeReceiverGroup rg,
|
||||
GlobalSignalNumber gsn,
|
||||
Signal* signal,
|
||||
Uint32 length,
|
||||
JobBufferLevel jbuf,
|
||||
Uint32 minversion)
|
||||
{
|
||||
jam();
|
||||
NodeVersionInfo info = getNodeVersionInfo();
|
||||
if (info.m_type[NodeInfo::DB].m_min_version >= minversion)
|
||||
{
|
||||
jam();
|
||||
sendSignal(rg, gsn, signal, length, jbuf);
|
||||
}
|
||||
else
|
||||
{
|
||||
jam();
|
||||
Uint32 i = 0, cnt = 0;
|
||||
while((i = rg.m_nodes.find(i + 1)) != NodeBitmask::NotFound)
|
||||
{
|
||||
jam();
|
||||
if (getNodeInfo(i).m_version >= minversion)
|
||||
{
|
||||
jam();
|
||||
cnt++;
|
||||
sendSignal(numberToRef(rg.m_block, i), gsn, signal, length, jbuf);
|
||||
}
|
||||
}
|
||||
ndbassert((cnt == 0 && rg.m_nodes.count() == 0) ||
|
||||
(cnt < rg.m_nodes.count()));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
Qmgr::execAPI_VERSION_REQ(Signal * signal) {
|
||||
|
@ -2782,6 +2860,76 @@ Qmgr::execAPI_VERSION_REQ(Signal * signal) {
|
|||
ApiVersionConf::SignalLength, JBB);
|
||||
}
|
||||
|
||||
void
|
||||
Qmgr::execNODE_VERSION_REP(Signal* signal)
|
||||
{
|
||||
jamEntry();
|
||||
Uint32 nodeId = signal->theData[0];
|
||||
Uint32 version = signal->theData[1];
|
||||
|
||||
if (nodeId < MAX_NODES)
|
||||
{
|
||||
jam();
|
||||
Uint32 type = getNodeInfo(nodeId).m_type;
|
||||
setNodeInfo(nodeId).m_version = version;
|
||||
recompute_version_info(type, version);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
Qmgr::recompute_version_info(Uint32 type, Uint32 version)
|
||||
{
|
||||
NodeVersionInfo& info = setNodeVersionInfo();
|
||||
switch(type){
|
||||
case NodeInfo::DB:
|
||||
case NodeInfo::API:
|
||||
case NodeInfo::MGM:
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
if (info.m_type[type].m_min_version == 0 ||
|
||||
version < info.m_type[type].m_min_version)
|
||||
info.m_type[type].m_min_version = version;
|
||||
if (version > info.m_type[type].m_max_version)
|
||||
info.m_type[type].m_max_version = version;
|
||||
}
|
||||
|
||||
void
|
||||
Qmgr::recompute_version_info(Uint32 type)
|
||||
{
|
||||
switch(type){
|
||||
case NodeInfo::DB:
|
||||
case NodeInfo::API:
|
||||
case NodeInfo::MGM:
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
Uint32 min = ~0, max = 0;
|
||||
Uint32 cnt = type == NodeInfo::DB ? MAX_NDB_NODES : MAX_NODES;
|
||||
for (Uint32 i = 1; i<cnt; i++)
|
||||
{
|
||||
if (getNodeInfo(i).m_type == type)
|
||||
{
|
||||
Uint32 version = getNodeInfo(i).m_version;
|
||||
|
||||
if (version)
|
||||
{
|
||||
if (version < min)
|
||||
min = version;
|
||||
if (version > max)
|
||||
max = version;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NodeVersionInfo& info = setNodeVersionInfo();
|
||||
info.m_type[type].m_min_version = min == ~(Uint32)0 ? 0 : min;
|
||||
info.m_type[type].m_max_version = max;
|
||||
}
|
||||
|
||||
#if 0
|
||||
bool
|
||||
|
@@ -2921,6 +3069,17 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode,
    systemErrorLab(signal, __LINE__);
    return;
  }//if

  if (getNodeState().startLevel < NodeState::SL_STARTED)
  {
    jam();
    CRASH_INSERTION(932);
    char buf[100];
    BaseString::snprintf(buf, 100, "Node failure during restart");
    progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf);
    ndbrequire(false);
  }

  TnoFailedNodes = cnoFailedNodes;
  failReport(signal, failedNodePtr.i, (UintR)ZTRUE, aFailCause);
  if (cpresident == getOwnNodeId()) {

@@ -3007,6 +3166,16 @@ void Qmgr::execPREP_FAILREQ(Signal* signal)
    return;
  }//if

  if (getNodeState().startLevel < NodeState::SL_STARTED)
  {
    jam();
    CRASH_INSERTION(932);
    char buf[100];
    BaseString::snprintf(buf, 100, "Node failure during restart");
    progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf);
    ndbrequire(false);
  }

  guard0 = cnoPrepFailedNodes - 1;
  arrGuard(guard0, MAX_NDB_NODES);
  for (Tindex = 0; Tindex <= guard0; Tindex++) {
@@ -3184,6 +3353,18 @@ Qmgr::sendCommitFailReq(Signal* signal)
  for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
    jam();
    ptrAss(nodePtr, nodeRec);

#ifdef ERROR_INSERT
    if (ERROR_INSERTED(935) && nodePtr.i == c_error_insert_extra)
    {
      ndbout_c("skipping node %d", c_error_insert_extra);
      CLEAR_ERROR_INSERT_VALUE;
      signal->theData[0] = 9999;
      sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 1000, 1);
      continue;
    }
#endif

    if (nodePtr.p->phase == ZRUNNING) {
      jam();
      nodePtr.p->sendCommitFailReqStatus = Q_ACTIVE;

@@ -3254,6 +3435,33 @@ void Qmgr::execPREP_FAILREF(Signal* signal)
  return;
}//Qmgr::execPREP_FAILREF()

static
Uint32
clear_nodes(Uint32 dstcnt, Uint16 dst[], Uint32 srccnt, const Uint16 src[])
{
  if (srccnt == 0)
    return dstcnt;

  Uint32 pos = 0;
  for (Uint32 i = 0; i<dstcnt; i++)
  {
    Uint32 node = dst[i];
    for (Uint32 j = 0; j<srccnt; j++)
    {
      if (node == dst[j])
      {
        node = RNIL;
        break;
      }
    }
    if (node != RNIL)
    {
      dst[pos++] = node;
    }
  }
  return pos;
}

/*---------------------------------------------------------------------------*/
/* THE PRESIDENT IS NOW COMMITTING THE PREVIOUSLY PREPARED NODE FAILURE. */
/*---------------------------------------------------------------------------*/
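The clear_nodes() helper above is used a little further down to strip the just-committed nodes out of the failed/prepared node arrays. A standalone sketch of that intent ("remove committed nodes from failed/prepared", filtering one node-id array by another and compacting in place) might look like the following; this is illustrative only, the names and node ids are made up, and it is not the block's own helper:

#include <cstdint>
#include <cstdio>

typedef uint16_t Uint16;
typedef uint32_t Uint32;

// Illustrative only: remove every id present in src[] from dst[],
// compacting dst[] in place and returning the new element count.
static Uint32 filter_nodes(Uint32 dstcnt, Uint16 dst[],
                           Uint32 srccnt, const Uint16 src[])
{
  Uint32 pos = 0;
  for (Uint32 i = 0; i < dstcnt; i++)
  {
    bool committed = false;
    for (Uint32 j = 0; j < srccnt; j++)
      if (dst[i] == src[j]) { committed = true; break; }
    if (!committed)
      dst[pos++] = dst[i];
  }
  return pos;
}

int main()
{
  Uint16 failed[]    = { 3, 5, 7, 9 };   // hypothetical failed nodes
  Uint16 committed[] = { 5, 9 };         // failures already committed
  Uint32 cnt = filter_nodes(4, failed, 2, committed);
  for (Uint32 i = 0; i < cnt; i++)
    printf("still pending: node %u\n", (unsigned)failed[i]);  // prints 3 and 7
  return 0;
}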
@@ -3341,19 +3549,18 @@ void Qmgr::execCOMMIT_FAILREQ(Signal* signal)
                 NodeFailRep::SignalLength, JBB);
      }//if
    }//for
    if (cpresident != getOwnNodeId()) {
      jam();
      cnoFailedNodes = cnoCommitFailedNodes - cnoFailedNodes;
      if (cnoFailedNodes > 0) {
        jam();
        guard0 = cnoFailedNodes - 1;
        arrGuard(guard0 + cnoCommitFailedNodes, MAX_NDB_NODES);
        for (Tj = 0; Tj <= guard0; Tj++) {
          jam();
          cfailedNodes[Tj] = cfailedNodes[Tj + cnoCommitFailedNodes];
        }//for
      }//if
    }//if

    /**
     * Remove committed nodes from failed/prepared
     */
    cnoFailedNodes = clear_nodes(cnoFailedNodes,
                                 cfailedNodes,
                                 cnoCommitFailedNodes,
                                 ccommitFailedNodes);
    cnoPrepFailedNodes = clear_nodes(cnoPrepFailedNodes,
                                     cprepFailedNodes,
                                     cnoCommitFailedNodes,
                                     ccommitFailedNodes);
    cnoCommitFailedNodes = 0;
  }//if
  /**-----------------------------------------------------------------------

@@ -4732,6 +4939,14 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal)
  default:
    ;
  }//switch

#ifdef ERROR_INSERT
  if (signal->theData[0] == 935 && signal->getLength() == 2)
  {
    SET_ERROR_INSERT_VALUE(935);
    c_error_insert_extra = signal->theData[1];
  }
#endif
}//Qmgr::execDUMP_STATE_ORD()

void Qmgr::execSET_VAR_REQ(Signal* signal)

@ -229,7 +229,6 @@ Suma::execREAD_CONFIG_REQ(Signal* signal)
|
|||
|
||||
c_startup.m_wait_handover= false;
|
||||
c_failedApiNodes.clear();
|
||||
c_startup.m_restart_server_node_id = 0; // Server for my NR
|
||||
|
||||
ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
|
||||
conf->senderRef = reference();
|
||||
|
@ -260,6 +259,14 @@ Suma::execSTTOR(Signal* signal) {
|
|||
|
||||
if(startphase == 5)
|
||||
{
|
||||
if (ERROR_INSERTED(13029)) /* Hold startphase 5 */
|
||||
{
|
||||
sendSignalWithDelay(SUMA_REF, GSN_STTOR, signal,
|
||||
30, signal->getLength());
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
c_startup.m_restart_server_node_id = 0;
|
||||
getNodeGroupMembers(signal);
|
||||
if (typeOfStart == NodeState::ST_NODE_RESTART ||
|
||||
typeOfStart == NodeState::ST_INITIAL_NODE_RESTART)
|
||||
|
@ -311,6 +318,12 @@ Suma::execSTTOR(Signal* signal) {
|
|||
createSequence(signal);
|
||||
DBUG_VOID_RETURN;
|
||||
}//if
|
||||
|
||||
if (ERROR_INSERTED(13030))
|
||||
{
|
||||
ndbout_c("Dont start handover");
|
||||
return;
|
||||
}
|
||||
}//if
|
||||
|
||||
if(startphase == 100)
|
||||
|
@ -372,6 +385,8 @@ Suma::execSUMA_START_ME_REF(Signal* signal)
|
|||
|
||||
infoEvent("Suma: node %d refused %d",
|
||||
c_startup.m_restart_server_node_id, ref->errorCode);
|
||||
|
||||
c_startup.m_restart_server_node_id++;
|
||||
send_start_me_req(signal);
|
||||
}
|
||||
|
||||
|
@ -554,6 +569,15 @@ void Suma::execAPI_FAILREQ(Signal* signal)
|
|||
Uint32 failedApiNode = signal->theData[0];
|
||||
//BlockReference retRef = signal->theData[1];
|
||||
|
||||
if (c_startup.m_restart_server_node_id &&
|
||||
c_startup.m_restart_server_node_id != RNIL)
|
||||
{
|
||||
jam();
|
||||
sendSignalWithDelay(reference(), GSN_API_FAILREQ, signal,
|
||||
200, signal->getLength());
|
||||
return;
|
||||
}
|
||||
|
||||
c_failedApiNodes.set(failedApiNode);
|
||||
c_connected_nodes.clear(failedApiNode);
|
||||
bool found = removeSubscribersOnNode(signal, failedApiNode);
|
||||
|
@ -886,6 +910,36 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
|
|||
ptr->m_buffer_head.m_page_id);
|
||||
}
|
||||
}
|
||||
|
||||
if (tCase == 8006)
|
||||
{
|
||||
SET_ERROR_INSERT_VALUE(13029);
|
||||
}
|
||||
|
||||
if (tCase == 8007)
|
||||
{
|
||||
c_startup.m_restart_server_node_id = MAX_NDB_NODES + 1;
|
||||
SET_ERROR_INSERT_VALUE(13029);
|
||||
}
|
||||
|
||||
if (tCase == 8008)
|
||||
{
|
||||
CLEAR_ERROR_INSERT_VALUE;
|
||||
}
|
||||
|
||||
if (tCase == 8009)
|
||||
{
|
||||
if (ERROR_INSERTED(13030))
|
||||
{
|
||||
CLEAR_ERROR_INSERT_VALUE;
|
||||
sendSTTORRY(signal);
|
||||
}
|
||||
else
|
||||
{
|
||||
SET_ERROR_INSERT_VALUE(13030);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*************************************************************
|
||||
|
@ -1091,14 +1145,14 @@ Suma::execSUB_CREATE_REQ(Signal* signal)
|
|||
}
|
||||
} else {
|
||||
if (c_startup.m_restart_server_node_id &&
|
||||
refToNode(subRef) != c_startup.m_restart_server_node_id)
|
||||
subRef != calcSumaBlockRef(c_startup.m_restart_server_node_id))
|
||||
{
|
||||
/**
|
||||
* only allow "restart_server" Suma's to come through
|
||||
* for restart purposes
|
||||
*/
|
||||
jam();
|
||||
sendSubStartRef(signal, 1405);
|
||||
sendSubCreateRef(signal, 1415);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
// Check that id/key is unique
|
||||
|
@ -1376,17 +1430,26 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr,
|
|||
|
||||
if (r)
|
||||
{
|
||||
jam();
|
||||
// we have to wait getting tab info
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
if (tabPtr.p->setupTrigger(signal, *this))
|
||||
{
|
||||
jam();
|
||||
// we have to wait for triggers to be setup
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
completeOneSubscriber(signal, tabPtr, subbPtr);
|
||||
int ret = completeOneSubscriber(signal, tabPtr, subbPtr);
|
||||
if (ret == -1)
|
||||
{
|
||||
jam();
|
||||
LocalDLList<Subscriber> subscribers(c_subscriberPool,
|
||||
tabPtr.p->c_subscribers);
|
||||
subscribers.release(subbPtr);
|
||||
}
|
||||
completeInitTable(signal, tabPtr);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
@ -1462,6 +1525,20 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
|
|||
req->tableId = tableId;
|
||||
|
||||
DBUG_PRINT("info",("GET_TABINFOREQ id %d", req->tableId));
|
||||
|
||||
if (ERROR_INSERTED(13031))
|
||||
{
|
||||
jam();
|
||||
CLEAR_ERROR_INSERT_VALUE;
|
||||
GetTabInfoRef* ref = (GetTabInfoRef*)signal->getDataPtrSend();
|
||||
ref->tableId = tableId;
|
||||
ref->senderData = tabPtr.i;
|
||||
ref->errorCode = GetTabInfoRef::TableNotDefined;
|
||||
sendSignal(reference(), GSN_GET_TABINFOREF, signal,
|
||||
GetTabInfoRef::SignalLength, JBB);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
|
||||
GetTabInfoReq::SignalLength, JBB);
|
||||
DBUG_RETURN(1);
|
||||
|
@ -1475,7 +1552,7 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
|
|||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
void
|
||||
int
|
||||
Suma::completeOneSubscriber(Signal *signal, TablePtr tabPtr, SubscriberPtr subbPtr)
|
||||
{
|
||||
jam();
|
||||
|
@ -1485,19 +1562,22 @@ Suma::completeOneSubscriber(Signal *signal, TablePtr tabPtr, SubscriberPtr subbP
|
|||
(c_startup.m_restart_server_node_id == 0 ||
|
||||
tabPtr.p->m_state != Table::DROPPED))
|
||||
{
|
||||
jam();
|
||||
sendSubStartRef(signal,subbPtr,tabPtr.p->m_error,
|
||||
SubscriptionData::TableData);
|
||||
tabPtr.p->n_subscribers--;
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
else
|
||||
{
|
||||
jam();
|
||||
SubscriptionPtr subPtr;
|
||||
c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
|
||||
subPtr.p->m_table_ptrI= tabPtr.i;
|
||||
sendSubStartComplete(signal,subbPtr, m_last_complete_gci + 3,
|
||||
SubscriptionData::TableData);
|
||||
}
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -1510,11 +1590,17 @@ Suma::completeAllSubscribers(Signal *signal, TablePtr tabPtr)
|
|||
LocalDLList<Subscriber> subscribers(c_subscriberPool,
|
||||
tabPtr.p->c_subscribers);
|
||||
SubscriberPtr subbPtr;
|
||||
for(subscribers.first(subbPtr);
|
||||
!subbPtr.isNull();
|
||||
subscribers.next(subbPtr))
|
||||
for(subscribers.first(subbPtr); !subbPtr.isNull();)
|
||||
{
|
||||
completeOneSubscriber(signal, tabPtr, subbPtr);
|
||||
jam();
|
||||
Ptr<Subscriber> tmp = subbPtr;
|
||||
subscribers.next(subbPtr);
|
||||
int ret = completeOneSubscriber(signal, tabPtr, tmp);
|
||||
if (ret == -1)
|
||||
{
|
||||
jam();
|
||||
subscribers.release(tmp);
|
||||
}
|
||||
}
|
||||
}
|
||||
DBUG_VOID_RETURN;
|
||||
|
@ -2231,14 +2317,17 @@ Suma::execSUB_START_REQ(Signal* signal){
|
|||
key.m_subscriptionKey = req->subscriptionKey;
|
||||
|
||||
if (c_startup.m_restart_server_node_id &&
|
||||
refToNode(senderRef) != c_startup.m_restart_server_node_id)
|
||||
senderRef != calcSumaBlockRef(c_startup.m_restart_server_node_id))
|
||||
{
|
||||
/**
|
||||
* only allow "restart_server" Suma's to come through
|
||||
* for restart purposes
|
||||
*/
|
||||
jam();
|
||||
sendSubStartRef(signal, 1405);
|
||||
Uint32 err = c_startup.m_restart_server_node_id != RNIL ? 1405 :
|
||||
SubStartRef::NF_FakeErrorREF;
|
||||
|
||||
sendSubStartRef(signal, err);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
@ -2453,22 +2542,25 @@ Suma::execSUB_STOP_REQ(Signal* signal){
|
|||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
if(!c_subscriptions.find(subPtr, key)){
|
||||
jam();
|
||||
DBUG_PRINT("error", ("not found"));
|
||||
sendSubStopRef(signal, 1407);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
if (c_startup.m_restart_server_node_id &&
|
||||
refToNode(senderRef) != c_startup.m_restart_server_node_id)
|
||||
senderRef != calcSumaBlockRef(c_startup.m_restart_server_node_id))
|
||||
{
|
||||
/**
|
||||
* only allow "restart_server" Suma's to come through
|
||||
* for restart purposes
|
||||
*/
|
||||
jam();
|
||||
sendSubStopRef(signal, 1405);
|
||||
Uint32 err = c_startup.m_restart_server_node_id != RNIL ? 1405 :
|
||||
SubStopRef::NF_FakeErrorREF;
|
||||
|
||||
sendSubStopRef(signal, err);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
if(!c_subscriptions.find(subPtr, key)){
|
||||
jam();
|
||||
DBUG_PRINT("error", ("not found"));
|
||||
sendSubStopRef(signal, 1407);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
|
|
@@ -250,7 +250,7 @@ public:
                                 SubscriberPtr subbPtr);
  int initTable(Signal *signal,Uint32 tableId, TablePtr &tabPtr);

  void completeOneSubscriber(Signal* signal, TablePtr tabPtr, SubscriberPtr subbPtr);
  int completeOneSubscriber(Signal* signal, TablePtr tabPtr, SubscriberPtr subbPtr);
  void completeAllSubscribers(Signal* signal, TablePtr tabPtr);
  void completeInitTable(Signal* signal, TablePtr tabPtr);

@@ -121,6 +121,8 @@ Suma::Suma(Block_context& ctx) :

  addRecSignal(GSN_SUB_GCP_COMPLETE_REP,
               &Suma::execSUB_GCP_COMPLETE_REP);

  c_startup.m_restart_server_node_id = RNIL; // Server for my NR
}

Suma::~Suma()

@@ -479,7 +479,7 @@ Configuration::setupConfiguration(){
  DBUG_VOID_RETURN;
}

bool
Uint32
Configuration::lockPagesInMainMemory() const {
  return _lockPagesInMainMemory;
}

@@ -36,7 +36,7 @@ public:
  void setupConfiguration();
  void closeConfiguration(bool end_session= true);

  bool lockPagesInMainMemory() const;
  Uint32 lockPagesInMainMemory() const;

  int timeBetweenWatchDogCheck() const ;
  void timeBetweenWatchDogCheck(int value);

@ -35,6 +35,7 @@ enum restartStates {initial_state,
|
|||
|
||||
struct GlobalData {
|
||||
Uint32 m_restart_seq; //
|
||||
NodeVersionInfo m_versionInfo;
|
||||
NodeInfo m_nodeInfo[MAX_NODES];
|
||||
Signal VMSignals[1]; // Owned by FastScheduler::
|
||||
|
||||
|
|
|
@ -402,6 +402,9 @@ protected:
|
|||
const NodeInfo & getNodeInfo(NodeId nodeId) const;
|
||||
NodeInfo & setNodeInfo(NodeId);
|
||||
|
||||
const NodeVersionInfo& getNodeVersionInfo() const;
|
||||
NodeVersionInfo& setNodeVersionInfo();
|
||||
|
||||
/**********************
|
||||
* Xfrm stuff
|
||||
*/
|
||||
|
@ -708,6 +711,18 @@ SimulatedBlock::getNodeInfo(NodeId nodeId) const {
|
|||
return globalData.m_nodeInfo[nodeId];
|
||||
}
|
||||
|
||||
inline
|
||||
const NodeVersionInfo &
|
||||
SimulatedBlock::getNodeVersionInfo() const {
|
||||
return globalData.m_versionInfo;
|
||||
}
|
||||
|
||||
inline
|
||||
NodeVersionInfo &
|
||||
SimulatedBlock::setNodeVersionInfo() {
|
||||
return globalData.m_versionInfo;
|
||||
}
|
||||
|
||||
inline
|
||||
void
|
||||
SimulatedBlock::EXECUTE_DIRECT(Uint32 block,
|
||||
|
|
|
@ -554,10 +554,10 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
|
|||
"If set to yes, then NDB Cluster data will not be swapped out to disk",
|
||||
ConfigInfo::CI_USED,
|
||||
true,
|
||||
ConfigInfo::CI_BOOL,
|
||||
"false",
|
||||
"false",
|
||||
"true" },
|
||||
ConfigInfo::CI_INT,
|
||||
"0",
|
||||
"1",
|
||||
"2" },
|
||||
|
||||
{
|
||||
CFG_DB_WATCHDOG_INTERVAL,
|
||||
|
|
|
@ -663,8 +663,14 @@ NdbDictionary::Table::getTablespace(Uint32 *id, Uint32 *version) const
|
|||
return true;
|
||||
}
|
||||
|
||||
const char *
|
||||
NdbDictionary::Table::getTablespaceName() const
|
||||
{
|
||||
return m_impl.m_tablespace_name.c_str();
|
||||
}
|
||||
|
||||
void
|
||||
NdbDictionary::Table::setTablespace(const char * name){
|
||||
NdbDictionary::Table::setTablespaceName(const char * name){
|
||||
m_impl.m_tablespace_id = ~0;
|
||||
m_impl.m_tablespace_version = ~0;
|
||||
m_impl.m_tablespace_name.assign(name);
|
||||
|
|
|
@ -215,8 +215,6 @@ NdbEventOperationImpl::~NdbEventOperationImpl()
|
|||
DBUG_VOID_RETURN;
|
||||
|
||||
stop();
|
||||
// m_bufferHandle->dropSubscribeEvent(m_bufferId);
|
||||
; // ToDo? We should send stop signal here
|
||||
|
||||
if (theMainOp == NULL)
|
||||
{
|
||||
|
@ -428,7 +426,7 @@ NdbEventOperationImpl::getBlobHandle(const NdbColumnImpl *tAttrInfo, int n)
|
|||
|
||||
// create blob event operation
|
||||
tBlobOp =
|
||||
m_ndb->theEventBuffer->createEventOperation(*blobEvnt, m_error);
|
||||
m_ndb->theEventBuffer->createEventOperationImpl(*blobEvnt, m_error);
|
||||
if (tBlobOp == NULL)
|
||||
DBUG_RETURN(NULL);
|
||||
|
||||
|
@ -561,6 +559,10 @@ NdbEventOperationImpl::execute_nolock()
|
|||
m_state= EO_EXECUTING;
|
||||
mi_type= m_eventImpl->mi_type;
|
||||
m_ndb->theEventBuffer->add_op();
|
||||
// add kernel reference
|
||||
// removed on TE_STOP, TE_CLUSTER_FAILURE, or error below
|
||||
m_ref_count++;
|
||||
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this));
|
||||
int r= NdbDictionaryImpl::getImpl(*myDict).executeSubscribeEvent(*this);
|
||||
if (r == 0) {
|
||||
if (theMainOp == NULL) {
|
||||
|
@ -568,19 +570,31 @@ NdbEventOperationImpl::execute_nolock()
|
|||
NdbEventOperationImpl* blob_op = theBlobOpList;
|
||||
while (blob_op != NULL) {
|
||||
r = blob_op->execute_nolock();
|
||||
if (r != 0)
|
||||
break;
|
||||
if (r != 0) {
|
||||
// since main op is running and possibly some blob ops as well
|
||||
// we can't just reset the main op. Instead return with error,
|
||||
// main op (and blob ops) will be cleaned up when user calls
|
||||
// dropEventOperation
|
||||
m_error.code= myDict->getNdbError().code;
|
||||
DBUG_RETURN(r);
|
||||
}
|
||||
// add blob reference to main op
|
||||
// removed by TE_STOP or TE_CLUSTER_FAILURE
|
||||
m_ref_count++;
|
||||
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this));
|
||||
blob_op = blob_op->m_next;
|
||||
}
|
||||
}
|
||||
if (r == 0)
|
||||
{
|
||||
m_ref_count++;
|
||||
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this));
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
}
|
||||
//Error
|
||||
// Error
|
||||
// remove kernel reference
|
||||
// added above
|
||||
m_ref_count--;
|
||||
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this));
|
||||
m_state= EO_ERROR;
|
||||
mi_type= 0;
|
||||
m_magic_number= 0;
|
||||
|
@ -1222,6 +1236,8 @@ NdbEventBuffer::nextEvent()
|
|||
EventBufData_list::Gci_ops *gci_ops = m_available_data.first_gci_ops();
|
||||
while (gci_ops && op->getGCI() > gci_ops->m_gci)
|
||||
{
|
||||
// moved to next gci, check if any references have been
|
||||
// released when completing the last gci
|
||||
deleteUsedEventOperations();
|
||||
gci_ops = m_available_data.next_gci_ops();
|
||||
}
|
||||
|
@ -1249,6 +1265,8 @@ NdbEventBuffer::nextEvent()
|
|||
#endif
|
||||
|
||||
// free all "per gci unique" collected operations
|
||||
// completed gci, check if any references have been
|
||||
// released when completing the gci
|
||||
EventBufData_list::Gci_ops *gci_ops = m_available_data.first_gci_ops();
|
||||
while (gci_ops)
|
||||
{
|
||||
|
@@ -1285,6 +1303,8 @@ NdbEventBuffer::deleteUsedEventOperations()
  {
    NdbEventOperationImpl *op = &op_f->m_impl;
    DBUG_ASSERT(op->m_ref_count > 0);
    // remove gci reference
    // added in insertDataL
    op->m_ref_count--;
    DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
    if (op->m_ref_count == 0)
@ -1582,6 +1602,33 @@ NdbEventBuffer::complete_outof_order_gcis()
|
|||
ndbout_c("complete_outof_order_gcis: m_latestGCI: %lld", m_latestGCI);
|
||||
}
|
||||
|
||||
void
|
||||
NdbEventBuffer::insert_event(NdbEventOperationImpl* impl,
|
||||
SubTableData &data,
|
||||
LinearSectionPtr *ptr,
|
||||
Uint32 &oid_ref)
|
||||
{
|
||||
NdbEventOperationImpl *dropped_ev_op = m_dropped_ev_op;
|
||||
do
|
||||
{
|
||||
do
|
||||
{
|
||||
oid_ref = impl->m_oid;
|
||||
insertDataL(impl, &data, ptr);
|
||||
NdbEventOperationImpl* blob_op = impl->theBlobOpList;
|
||||
while (blob_op != NULL)
|
||||
{
|
||||
oid_ref = blob_op->m_oid;
|
||||
insertDataL(blob_op, &data, ptr);
|
||||
blob_op = blob_op->m_next;
|
||||
}
|
||||
} while((impl = impl->m_next));
|
||||
impl = dropped_ev_op;
|
||||
dropped_ev_op = NULL;
|
||||
} while (impl);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
NdbEventBuffer::report_node_connected(Uint32 node_id)
|
||||
{
|
||||
|
@ -1606,21 +1653,8 @@ NdbEventBuffer::report_node_connected(Uint32 node_id)
|
|||
/**
|
||||
* Insert this event for each operation
|
||||
*/
|
||||
{
|
||||
// no need to lock()/unlock(), receive thread calls this
|
||||
NdbEventOperationImpl* impl = &op->m_impl;
|
||||
do if (!impl->m_node_bit_mask.isclear())
|
||||
{
|
||||
data.senderData = impl->m_oid;
|
||||
insertDataL(impl, &data, ptr);
|
||||
} while((impl = impl->m_next));
|
||||
for (impl = m_dropped_ev_op; impl; impl = impl->m_next)
|
||||
if (!impl->m_node_bit_mask.isclear())
|
||||
{
|
||||
data.senderData = impl->m_oid;
|
||||
insertDataL(impl, &data, ptr);
|
||||
}
|
||||
}
|
||||
insert_event(&op->m_impl, data, ptr, data.senderData);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
@ -1648,21 +1682,8 @@ NdbEventBuffer::report_node_failure(Uint32 node_id)
|
|||
/**
|
||||
* Insert this event for each operation
|
||||
*/
|
||||
{
|
||||
// no need to lock()/unlock(), receive thread calls this
|
||||
NdbEventOperationImpl* impl = &op->m_impl;
|
||||
do if (!impl->m_node_bit_mask.isclear())
|
||||
{
|
||||
data.senderData = impl->m_oid;
|
||||
insertDataL(impl, &data, ptr);
|
||||
} while((impl = impl->m_next));
|
||||
for (impl = m_dropped_ev_op; impl; impl = impl->m_next)
|
||||
if (!impl->m_node_bit_mask.isclear())
|
||||
{
|
||||
data.senderData = impl->m_oid;
|
||||
insertDataL(impl, &data, ptr);
|
||||
}
|
||||
}
|
||||
insert_event(&op->m_impl, data, ptr, data.senderData);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
@ -1693,21 +1714,8 @@ NdbEventBuffer::completeClusterFailed()
|
|||
/**
|
||||
* Insert this event for each operation
|
||||
*/
|
||||
{
|
||||
// no need to lock()/unlock(), receive thread calls this
|
||||
NdbEventOperationImpl* impl = &op->m_impl;
|
||||
do if (!impl->m_node_bit_mask.isclear())
|
||||
{
|
||||
data.senderData = impl->m_oid;
|
||||
insertDataL(impl, &data, ptr);
|
||||
} while((impl = impl->m_next));
|
||||
for (impl = m_dropped_ev_op; impl; impl = impl->m_next)
|
||||
if (!impl->m_node_bit_mask.isclear())
|
||||
{
|
||||
data.senderData = impl->m_oid;
|
||||
insertDataL(impl, &data, ptr);
|
||||
}
|
||||
}
|
||||
insert_event(&op->m_impl, data, ptr, data.senderData);
|
||||
|
||||
/**
|
||||
* Release all GCI's with m_gci > gci
|
||||
|
@@ -1797,25 +1805,60 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
  {
  case NdbDictionary::Event::_TE_NODE_FAILURE:
    op->m_node_bit_mask.clear(SubTableData::getNdbdNodeId(ri));
    DBUG_PRINT("info",
               ("_TE_NODE_FAILURE: m_ref_count: %u for op: %p id: %u",
                op->m_ref_count, op, SubTableData::getNdbdNodeId(ri)));
    break;
  case NdbDictionary::Event::_TE_ACTIVE:
    op->m_node_bit_mask.set(SubTableData::getNdbdNodeId(ri));
    // internal event, do not relay to user
    DBUG_PRINT("info",
               ("_TE_ACTIVE: m_ref_count: %u for op: %p id: %u",
                op->m_ref_count, op, SubTableData::getNdbdNodeId(ri)));
    DBUG_RETURN_EVENT(0);
    break;
  case NdbDictionary::Event::_TE_CLUSTER_FAILURE:
    if (!op->m_node_bit_mask.isclear())
    {
      op->m_node_bit_mask.clear();
      DBUG_ASSERT(op->m_ref_count > 0);
      // remove kernel reference
      // added in execute_nolock
      op->m_ref_count--;
      DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
      DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p",
                          op->m_ref_count, op));
      if (op->theMainOp)
      {
        DBUG_ASSERT(op->m_ref_count == 0);
        DBUG_ASSERT(op->theMainOp->m_ref_count > 0);
        // remove blob reference in main op
        // added in execute_nolock
        op->theMainOp->m_ref_count--;
        DBUG_PRINT("info", ("m_ref_count: %u for op: %p",
                            op->theMainOp->m_ref_count, op->theMainOp));
      }
    }
    break;
  case NdbDictionary::Event::_TE_STOP:
    op->m_node_bit_mask.clear(SubTableData::getNdbdNodeId(ri));
    if (op->m_node_bit_mask.isclear())
    {
      DBUG_ASSERT(op->m_ref_count > 0);
      // remove kernel reference
      // added in execute_nolock
      op->m_ref_count--;
      DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
      DBUG_PRINT("info", ("_TE_STOP: m_ref_count: %u for op: %p",
                          op->m_ref_count, op));
      if (op->theMainOp)
      {
        DBUG_ASSERT(op->m_ref_count == 0);
        DBUG_ASSERT(op->theMainOp->m_ref_count > 0);
        // remove blob reference in main op
        // added in execute_nolock
        op->theMainOp->m_ref_count--;
        DBUG_PRINT("info", ("m_ref_count: %u for op: %p",
                            op->theMainOp->m_ref_count, op->theMainOp));
      }
    }
    break;
  default:
@ -2564,6 +2607,8 @@ EventBufData_list::add_gci_op(Gci_op g)
|
|||
#ifndef DBUG_OFF
|
||||
i = m_gci_op_count;
|
||||
#endif
|
||||
// add gci reference
|
||||
// removed in deleteUsedOperations
|
||||
g.op->m_ref_count++;
|
||||
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", g.op->m_ref_count, g.op));
|
||||
m_gci_op_list[m_gci_op_count++] = g;
|
||||
|
@ -2632,6 +2677,8 @@ NdbEventBuffer::createEventOperation(const char* eventName,
|
|||
delete tOp;
|
||||
DBUG_RETURN(NULL);
|
||||
}
|
||||
// add user reference
|
||||
// removed in dropEventOperation
|
||||
getEventOperationImpl(tOp)->m_ref_count = 1;
|
||||
DBUG_PRINT("info", ("m_ref_count: %u for op: %p",
|
||||
getEventOperationImpl(tOp)->m_ref_count, getEventOperationImpl(tOp)));
|
||||
|
@ -2639,10 +2686,10 @@ NdbEventBuffer::createEventOperation(const char* eventName,
|
|||
}
|
||||
|
||||
NdbEventOperationImpl*
|
||||
NdbEventBuffer::createEventOperation(NdbEventImpl& evnt,
|
||||
NdbEventBuffer::createEventOperationImpl(NdbEventImpl& evnt,
|
||||
NdbError &theError)
|
||||
{
|
||||
DBUG_ENTER("NdbEventBuffer::createEventOperation [evnt]");
|
||||
DBUG_ENTER("NdbEventBuffer::createEventOperationImpl");
|
||||
NdbEventOperationImpl* tOp= new NdbEventOperationImpl(m_ndb, evnt);
|
||||
if (tOp == 0)
|
||||
{
|
||||
|
@ -2684,6 +2731,9 @@ NdbEventBuffer::dropEventOperation(NdbEventOperation* tOp)
|
|||
}
|
||||
|
||||
DBUG_ASSERT(op->m_ref_count > 0);
|
||||
// remove user reference
|
||||
// added in createEventOperation
|
||||
// user error to use reference after this
|
||||
op->m_ref_count--;
|
||||
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
|
||||
if (op->m_ref_count == 0)
|
||||
|
|
|
@@ -400,7 +400,59 @@ public:
  Uint32 m_eventId;
  Uint32 m_oid;

  /*
    m_node_bit_mask keeps track of which ndb nodes have a reference to
    an event op

    - add    - TE_ACTIVE
    - remove - TE_STOP, TE_NODE_FAILURE, TE_CLUSTER_FAILURE

    TE_NODE_FAILURE and TE_CLUSTER_FAILURE are created as events
    and added to all event ops listed as active or pending delete
    in m_dropped_ev_op using insertDataL, including the blob
    event ops referenced by a regular event op.
    - NdbEventBuffer::report_node_failure
    - NdbEventBuffer::completeClusterFailed

    TE_ACTIVE is sent from the kernel on initial execute/start of the
    event op, but is also internally generated on node connect like
    TE_NODE_FAILURE and TE_CLUSTER_FAILURE
    - NdbEventBuffer::report_node_connected

    when m_node_bit_mask becomes clear, the kernel reference is
    removed from m_ref_count
  */

  Bitmask<(unsigned int)_NDB_NODE_BITMASK_SIZE> m_node_bit_mask;

  /*
    m_ref_count keeps track of outstanding references to an event
    operation impl object, to make sure that the object is not
    deleted too early.

    If on dropEventOperation there are still references to an
    object it is queued for delete in NdbEventBuffer::m_dropped_ev_op

    the following references exist for a _non_ blob event op:
    * user reference
      - add    - NdbEventBuffer::createEventOperation
      - remove - NdbEventBuffer::dropEventOperation
    * kernel reference
      - add    - execute_nolock
      - remove - TE_STOP, TE_CLUSTER_FAILURE
    * blob reference
      - add    - execute_nolock on blob event
      - remove - TE_STOP, TE_CLUSTER_FAILURE on blob event
    * gci reference
      - add    - insertDataL/add_gci_op
      - remove - NdbEventBuffer::deleteUsedEventOperations

    the following references exist for a blob event op:
    * kernel reference
      - add    - execute_nolock
      - remove - TE_STOP, TE_CLUSTER_FAILURE
  */

  int m_ref_count;
  bool m_mergeEvents;

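The comment block above describes a manual reference-counting scheme (user, kernel, blob and gci references) combined with deferred deletion through m_dropped_ev_op. A compact, purely hypothetical sketch of that lifecycle, independent of the NDB types, could look like this:

#include <cassert>
#include <vector>
#include <algorithm>

// Hypothetical stand-in for the reference counting described above.
struct EventOp {
  int ref_count = 0;     // user + kernel + blob + gci references
  bool dropped = false;  // user has called dropEventOperation()
};

struct EventBuffer {
  std::vector<EventOp*> dropped_ops;   // analogue of m_dropped_ev_op

  EventOp* create()             { EventOp* op = new EventOp; op->ref_count = 1; return op; } // user ref
  void     execute(EventOp* op) { op->ref_count++; }                                         // kernel ref

  // User drops the operation: release the user reference; if other
  // references remain, park the object instead of deleting it.
  void drop(EventOp* op) {
    op->dropped = true;
    if (--op->ref_count == 0) { delete op; return; }
    dropped_ops.push_back(op);
  }

  // Kernel side (TE_STOP / TE_CLUSTER_FAILURE) releases its reference;
  // parked objects are reaped once their count reaches zero.
  void kernel_stopped(EventOp* op) {
    if (--op->ref_count == 0) {
      dropped_ops.erase(std::remove(dropped_ops.begin(), dropped_ops.end(), op),
                        dropped_ops.end());
      delete op;
    }
  }
};

int main() {
  EventBuffer buf;
  EventOp* op = buf.create();   // ref_count == 1 (user)
  buf.execute(op);              // ref_count == 2 (user + kernel)
  buf.drop(op);                 // object survives, parked on dropped_ops
  buf.kernel_stopped(op);       // last reference gone, object deleted
  assert(buf.dropped_ops.empty());
  return 0;
}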
@ -436,7 +488,7 @@ public:
|
|||
Vector<Gci_container_pod> m_active_gci;
|
||||
NdbEventOperation *createEventOperation(const char* eventName,
|
||||
NdbError &);
|
||||
NdbEventOperationImpl *createEventOperation(NdbEventImpl& evnt,
|
||||
NdbEventOperationImpl *createEventOperationImpl(NdbEventImpl& evnt,
|
||||
NdbError &);
|
||||
void dropEventOperation(NdbEventOperation *);
|
||||
static NdbEventOperationImpl* getEventOperationImpl(NdbEventOperation* tOp);
|
||||
|
@ -541,6 +593,11 @@ public:
|
|||
#endif
|
||||
|
||||
private:
|
||||
void insert_event(NdbEventOperationImpl* impl,
|
||||
SubTableData &data,
|
||||
LinearSectionPtr *ptr,
|
||||
Uint32 &oid_ref);
|
||||
|
||||
int expand(unsigned sz);
|
||||
|
||||
// all allocated data
|
||||
|
@@ -552,8 +609,14 @@ private:
  Vector<EventBufData_chunk *> m_allocated_data;
  unsigned m_sz;

  // dropped event operations that have not yet
  // been deleted
  /*
    dropped event operations (dropEventOperation) that have not yet
    been deleted because of outstanding m_ref_count

    check for delete is done on occasions when the ref_count may have
    changed by calling deleteUsedEventOperations:
    - nextEvent - each time the user has completed processing a gci
  */
  NdbEventOperationImpl *m_dropped_ev_op;

  Uint32 m_active_op_count;

@ -1187,25 +1187,31 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
|
|||
const bool nobytes = (len & 0x3) == 0;
|
||||
const Uint32 totalLen = 2 + sizeInWords;
|
||||
Uint32 tupKeyLen = theTupKeyLen;
|
||||
union {
|
||||
Uint32 tempData[2000];
|
||||
Uint64 __align;
|
||||
};
|
||||
Uint64 *valPtr;
|
||||
if(remaining > totalLen && aligned && nobytes){
|
||||
Uint32 * dst = theKEYINFOptr + currLen;
|
||||
* dst ++ = type;
|
||||
* dst ++ = ahValue;
|
||||
memcpy(dst, aValue, 4 * sizeInWords);
|
||||
theTotalNrOfKeyWordInSignal = currLen + totalLen;
|
||||
valPtr = (Uint64*)aValue;
|
||||
} else {
|
||||
if(!aligned || !nobytes){
|
||||
Uint32 tempData[2000];
|
||||
tempData[0] = type;
|
||||
tempData[1] = ahValue;
|
||||
tempData[2 + (len >> 2)] = 0;
|
||||
memcpy(tempData+2, aValue, len);
|
||||
|
||||
insertBOUNDS(tempData, 2+sizeInWords);
|
||||
valPtr = (Uint64*)(tempData+2);
|
||||
} else {
|
||||
Uint32 buf[2] = { type, ahValue };
|
||||
insertBOUNDS(buf, 2);
|
||||
insertBOUNDS((Uint32*)aValue, sizeInWords);
|
||||
valPtr = (Uint64*)aValue;
|
||||
}
|
||||
}
|
||||
theTupKeyLen = tupKeyLen + totalLen;
|
||||
|
@ -1222,7 +1228,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
|
|||
if(type == BoundEQ && tDistrKey)
|
||||
{
|
||||
theNoOfTupKeyLeft--;
|
||||
return handle_distribution_key((Uint64*)aValue, sizeInWords);
|
||||
return handle_distribution_key(valPtr, sizeInWords);
|
||||
}
|
||||
return 0;
|
||||
} else {
|
||||
|
|
|
@ -163,7 +163,7 @@ int Bank::createTable(const char* tabName, bool disk){
|
|||
return NDBT_FAILED;
|
||||
}
|
||||
NdbDictionary::Table copy(* pTab);
|
||||
copy.setTablespace("DEFAULT-TS");
|
||||
copy.setTablespaceName("DEFAULT-TS");
|
||||
for (Uint32 i = 0; i<copy.getNoOfColumns(); i++)
|
||||
copy.getColumn(i)->setStorageType(NdbDictionary::Column::StorageTypeDisk);
|
||||
if(m_ndb.getDictionary()->createTable(copy) == -1){
|
||||
|
|
|
@ -37,7 +37,7 @@ g_create_hook(Ndb* ndb, NdbDictionary::Table& tab, int when, void* arg)
|
|||
}
|
||||
}
|
||||
if (g_tsname != NULL) {
|
||||
tab.setTablespace(g_tsname);
|
||||
tab.setTablespaceName(g_tsname);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -1643,7 +1643,7 @@ runCreateDiskTable(NDBT_Context* ctx, NDBT_Step* step){
|
|||
Ndb* pNdb = GETNDB(step);
|
||||
|
||||
NdbDictionary::Table tab = *ctx->getTab();
|
||||
tab.setTablespace("DEFAULT-TS");
|
||||
tab.setTablespaceName("DEFAULT-TS");
|
||||
|
||||
for(Uint32 i = 0; i<tab.getNoOfColumns(); i++)
|
||||
if(!tab.getColumn(i)->getPrimaryKey())
|
||||
|
|
|
@ -931,6 +931,81 @@ retry:
|
|||
return NDBT_OK;
|
||||
}
|
||||
|
||||
int runBug24717(NDBT_Context* ctx, NDBT_Step* step){
|
||||
int result = NDBT_OK;
|
||||
int loops = ctx->getNumLoops();
|
||||
int records = ctx->getNumRecords();
|
||||
NdbRestarter restarter;
|
||||
Ndb* pNdb = GETNDB(step);
|
||||
|
||||
HugoTransactions hugoTrans(*ctx->getTab());
|
||||
|
||||
int dump[] = { 9002, 0 } ;
|
||||
Uint32 ownNode = refToNode(pNdb->getReference());
|
||||
dump[1] = ownNode;
|
||||
|
||||
for (; loops; loops --)
|
||||
{
|
||||
int nodeId = restarter.getRandomNotMasterNodeId(rand());
|
||||
restarter.restartOneDbNode(nodeId, false, true, true);
|
||||
restarter.waitNodesNoStart(&nodeId, 1);
|
||||
|
||||
if (restarter.dumpStateOneNode(nodeId, dump, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
restarter.startNodes(&nodeId, 1);
|
||||
|
||||
for (Uint32 i = 0; i < 100; i++)
|
||||
{
|
||||
hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
|
||||
}
|
||||
|
||||
restarter.waitClusterStarted();
|
||||
}
|
||||
|
||||
return NDBT_OK;
|
||||
}
|
||||
|
||||
int runBug25364(NDBT_Context* ctx, NDBT_Step* step){
|
||||
int result = NDBT_OK;
|
||||
NdbRestarter restarter;
|
||||
Ndb* pNdb = GETNDB(step);
|
||||
int loops = ctx->getNumLoops();
|
||||
|
||||
if (restarter.getNumDbNodes() < 4)
|
||||
return NDBT_OK;
|
||||
|
||||
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
|
||||
|
||||
for (; loops; loops --)
|
||||
{
|
||||
int master = restarter.getMasterNodeId();
|
||||
int victim = restarter.getRandomNodeOtherNodeGroup(master, rand());
|
||||
int second = restarter.getRandomNodeSameNodeGroup(victim, rand());
|
||||
|
||||
int dump[] = { 935, victim } ;
|
||||
if (restarter.dumpStateOneNode(master, dump, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.dumpStateOneNode(master, val2, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.restartOneDbNode(second, false, true, true))
|
||||
return NDBT_FAILED;
|
||||
|
||||
int nodes[2] = { master, second };
|
||||
if (restarter.waitNodesNoStart(nodes, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
restarter.startNodes(nodes, 2);
|
||||
|
||||
if (restarter.waitNodesStarted(nodes, 2))
|
||||
return NDBT_FAILED;
|
||||
}
|
||||
|
||||
return NDBT_OK;
|
||||
}
|
||||
|
||||
int
|
||||
runBug21271(NDBT_Context* ctx, NDBT_Step* step){
|
||||
int result = NDBT_OK;
|
||||
|
@ -995,38 +1070,109 @@ runBug24543(NDBT_Context* ctx, NDBT_Step* step){
|
|||
}
|
||||
return NDBT_OK;
|
||||
}
|
||||
int runBug24717(NDBT_Context* ctx, NDBT_Step* step){
|
||||
|
||||
int runBug25468(NDBT_Context* ctx, NDBT_Step* step){
|
||||
|
||||
int result = NDBT_OK;
|
||||
int loops = ctx->getNumLoops();
|
||||
int records = ctx->getNumRecords();
|
||||
NdbRestarter restarter;
|
||||
Ndb* pNdb = GETNDB(step);
|
||||
|
||||
HugoTransactions hugoTrans(*ctx->getTab());
|
||||
|
||||
int dump[] = { 9000, 0 } ;
|
||||
Uint32 ownNode = refToNode(pNdb->getReference());
|
||||
dump[1] = ownNode;
|
||||
|
||||
for (; loops; loops --)
|
||||
for (int i = 0; i<loops; i++)
|
||||
{
|
||||
int nodeId = restarter.getRandomNotMasterNodeId(rand());
|
||||
restarter.restartOneDbNode(nodeId, false, true, true);
|
||||
restarter.waitNodesNoStart(&nodeId, 1);
|
||||
|
||||
if (restarter.dumpStateOneNode(nodeId, dump, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
restarter.startNodes(&nodeId, 1);
|
||||
|
||||
for (Uint32 i = 0; i < 100; i++)
|
||||
{
|
||||
hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
|
||||
int master = restarter.getMasterNodeId();
|
||||
int node1, node2;
|
||||
switch(i % 5){
|
||||
case 0:
|
||||
node1 = master;
|
||||
node2 = restarter.getRandomNodeSameNodeGroup(master, rand());
|
||||
break;
|
||||
case 1:
|
||||
node1 = restarter.getRandomNodeSameNodeGroup(master, rand());
|
||||
node2 = master;
|
||||
break;
|
||||
case 2:
|
||||
case 3:
|
||||
case 4:
|
||||
node1 = restarter.getRandomNodeOtherNodeGroup(master, rand());
|
||||
if (node1 == -1)
|
||||
node1 = master;
|
||||
node2 = restarter.getRandomNodeSameNodeGroup(node1, rand());
|
||||
break;
|
||||
}
|
||||
|
||||
int reset[2] = { 9001, 0 };
|
||||
restarter.dumpStateOneNode(nodeId, reset, 2);
|
||||
restarter.waitClusterStarted();
|
||||
ndbout_c("node1: %d node2: %d master: %d", node1, node2, master);
|
||||
|
||||
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
|
||||
|
||||
if (restarter.dumpStateOneNode(node2, val2, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.insertErrorInNode(node1, 7178))
|
||||
return NDBT_FAILED;
|
||||
|
||||
int val1 = 7099;
|
||||
if (restarter.dumpStateOneNode(master, &val1, 1))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.waitNodesNoStart(&node2, 1))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.startAll())
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.waitClusterStarted())
|
||||
return NDBT_FAILED;
|
||||
}
|
||||
|
||||
return NDBT_OK;
|
||||
}
|
||||
|
||||
int runBug25554(NDBT_Context* ctx, NDBT_Step* step){
|
||||
|
||||
int result = NDBT_OK;
|
||||
int loops = ctx->getNumLoops();
|
||||
int records = ctx->getNumRecords();
|
||||
NdbRestarter restarter;
|
||||
|
||||
if (restarter.getNumDbNodes() < 4)
|
||||
return NDBT_OK;
|
||||
|
||||
for (int i = 0; i<loops; i++)
|
||||
{
|
||||
int master = restarter.getMasterNodeId();
|
||||
int node1 = restarter.getRandomNodeOtherNodeGroup(master, rand());
|
||||
restarter.restartOneDbNode(node1, false, true, true);
|
||||
|
||||
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
|
||||
|
||||
if (restarter.dumpStateOneNode(master, val2, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.insertErrorInNode(master, 7141))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.waitNodesNoStart(&node1, 1))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.dumpStateOneNode(node1, val2, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.insertErrorInNode(node1, 932))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.startNodes(&node1, 1))
|
||||
return NDBT_FAILED;
|
||||
|
||||
int nodes[] = { master, node1 };
|
||||
if (restarter.waitNodesNoStart(nodes, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.startNodes(nodes, 2))
|
||||
return NDBT_FAILED;
|
||||
|
||||
if (restarter.waitClusterStarted())
|
||||
return NDBT_FAILED;
|
||||
}
|
||||
|
||||
return NDBT_OK;
|
||||
|
@ -1359,6 +1505,15 @@ TESTCASE("Bug21271",
|
|||
TESTCASE("Bug24717", ""){
|
||||
INITIALIZER(runBug24717);
|
||||
}
|
||||
TESTCASE("Bug25364", ""){
|
||||
INITIALIZER(runBug25364);
|
||||
}
|
||||
TESTCASE("Bug25468", ""){
|
||||
INITIALIZER(runBug25468);
|
||||
}
|
||||
TESTCASE("Bug25554", ""){
|
||||
INITIALIZER(runBug25554);
|
||||
}
|
||||
NDBT_TESTSUITE_END(testNodeRestart);
|
||||
|
||||
int main(int argc, const char** argv){
|
||||
|
|
|
@ -513,6 +513,14 @@ max-time: 1000
|
|||
cmd: testNodeRestart
|
||||
args: -n Bug24717 T1
|
||||
|
||||
max-time: 1000
|
||||
cmd: testNodeRestart
|
||||
args: -n Bug25364 T1
|
||||
|
||||
max-time: 1000
|
||||
cmd: testNodeRestart
|
||||
args: -n Bug25554 T1
|
||||
|
||||
#
|
||||
# DICT TESTS
|
||||
max-time: 1500
|
||||
|
@ -764,6 +772,10 @@ max-time: 1500
|
|||
cmd: testSystemRestart
|
||||
args: -n Bug24664
|
||||
|
||||
max-time: 1000
|
||||
cmd: testNodeRestart
|
||||
args: -n Bug25468 T1
|
||||
|
||||
# OLD FLEX
|
||||
max-time: 500
|
||||
cmd: flexBench
|
||||
|
|
|
@@ -969,7 +969,7 @@ NDBT_TestSuite::createHook(Ndb* ndb, NdbDictionary::Table& tab, int when)
    }

    if (tsname != NULL) {
      tab.setTablespace(tsname);
      tab.setTablespaceName(tsname);
    }
  }
  return 0;