Merge bk-internal:/home/bk/mysql-5.1-new

into  neptunus.(none):/home/msvensson/mysql/mysql-5.1
This commit is contained in:
unknown 2006-03-24 13:50:41 +01:00
commit 9501550d5a
59 changed files with 1492 additions and 518 deletions

View file

@ -577,6 +577,25 @@ pk1 b c
2 2 17
4 4 3
6 6 3
DELETE FROM t1;
CREATE UNIQUE INDEX bi ON t1(b);
INSERT INTO t1 VALUES
(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
INSERT INTO t1 VALUES(0,1,0),(21,21,21) ON DUPLICATE KEY UPDATE pk1=b+10,b=b+10;
select * from t1 order by pk1;
pk1 b c
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
8 8 8
9 9 9
10 10 10
11 11 1
21 21 21
DROP TABLE t1;
CREATE TABLE t1(a INT) ENGINE=ndb;
INSERT IGNORE INTO t1 VALUES (1);
@ -586,7 +605,7 @@ INSERT IGNORE INTO t1 SELECT a FROM t1;
INSERT IGNORE INTO t1 SELECT a FROM t1;
INSERT IGNORE INTO t1 VALUES (1);
INSERT IGNORE INTO t1 VALUES (1);
SELECT * FROM t1;
SELECT * FROM t1 ORDER BY a;
a
1
1
@ -606,4 +625,11 @@ a
1
1
1
DELETE FROM t1;
CREATE UNIQUE INDEX ai ON t1(a);
INSERT IGNORE INTO t1 VALUES (1);
INSERT IGNORE INTO t1 VALUES (1);
SELECT * FROM t1 ORDER BY a;
a
1
DROP TABLE t1;

View file

@ -19,3 +19,15 @@ gesuchnr benutzer_id
2 1
3 2
drop table t1;
CREATE TABLE t1(i INT PRIMARY KEY AUTO_INCREMENT,
j INT,
k INT,
UNIQUE INDEX(j)
) ENGINE = ndb;
INSERT INTO t1 VALUES (1,1,23),(2,2,24);
REPLACE INTO t1 (j,k) VALUES (1,42);
REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
i j k
3 1 42
17 2 24

View file

@ -30,16 +30,16 @@ a b
2 2
3 3
4 4
5 5
6 6
7 5
10 6
SELECT * FROM t1 ORDER BY a;
a b
1 1
2 2
3 3
4 4
5 5
6 6
7 5
10 6
drop table t1;
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,

View file

@ -32,7 +32,6 @@ rpl_ndb_auto_inc : Bug#17086
rpl_ndb_ddl : master hangs
#rpl_ndb_delete_nowhere : Bug#17400: delete & update of rows in table without pk fails
rpl_ndb_innodb2ndb : Bug#18261: Cluster Replication: tests rpl_ndb_xxx2ndb fails
rpl_ndb_insert_ignore : Bugs: #17431: INSERT IGNORE INTO returns failed: 1296
rpl_ndb_myisam2ndb : Bug#18261: Cluster Replication: tests rpl_ndb_xxx2ndb fails
rpl_ndb_log : result not deterministic, TBD if should remove
rpl_ndb_relay_space : Bug#16993

View file

@ -591,14 +591,14 @@ DELETE FROM t1 WHERE pk1 = 2 OR pk1 = 4 OR pk1 = 6;
INSERT INTO t1 VALUES(1,1,1),(2,2,17),(3,4,5) ON DUPLICATE KEY UPDATE pk1=b;
select * from t1 where pk1 = b and b != c order by pk1;
# The following test case currently does not work
#DELETE FROM t1;
#CREATE UNIQUE INDEX bi ON t1(b);
#INSERT INTO t1 VALUES
#(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
#(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
#INSERT INTO t1 VALUES(0,1,0),(21,21,21) ON DUPLICATE KEY UPDATE pk1=b+10,c=b+10;
#select * from t1 order by pk1;
# Test handling of duplicate unique
DELETE FROM t1;
CREATE UNIQUE INDEX bi ON t1(b);
INSERT INTO t1 VALUES
(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
INSERT INTO t1 VALUES(0,1,0),(21,21,21) ON DUPLICATE KEY UPDATE pk1=b+10,b=b+10;
select * from t1 order by pk1;
DROP TABLE t1;
@ -614,7 +614,12 @@ INSERT IGNORE INTO t1 SELECT a FROM t1;
INSERT IGNORE INTO t1 SELECT a FROM t1;
INSERT IGNORE INTO t1 VALUES (1);
INSERT IGNORE INTO t1 VALUES (1);
SELECT * FROM t1;
SELECT * FROM t1 ORDER BY a;
DELETE FROM t1;
CREATE UNIQUE INDEX ai ON t1(a);
INSERT IGNORE INTO t1 VALUES (1);
INSERT IGNORE INTO t1 VALUES (1);
SELECT * FROM t1 ORDER BY a;
DROP TABLE t1;
# End of 4.1 tests

View file

@ -27,4 +27,15 @@ replace into t1 (gesuchnr,benutzer_id) values (1,1);
select * from t1 order by gesuchnr;
drop table t1;
# bug#17431
CREATE TABLE t1(i INT PRIMARY KEY AUTO_INCREMENT,
j INT,
k INT,
UNIQUE INDEX(j)
) ENGINE = ndb;
INSERT INTO t1 VALUES (1,1,23),(2,2,24);
REPLACE INTO t1 (j,k) VALUES (1,42);
REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
# End of 4.1 tests

View file

@ -1240,6 +1240,7 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
{
char unique_index_name[FN_LEN];
static const char* unique_suffix= "$unique";
m_has_unique_index= TRUE;
strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS);
DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name));
const NDBINDEX *index= dict->getIndex(unique_index_name, m_tabname);
@ -1266,7 +1267,7 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
KEY* key_info= tab->key_info;
const char **key_name= tab->s->keynames.type_names;
DBUG_ENTER("ha_ndbcluster::open_indexes");
m_has_unique_index= FALSE;
for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
{
if ((error= add_index_handle(thd, dict, key_info, *key_name, i)))
@ -1568,6 +1569,25 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *rec
DBUG_RETURN(0);
}
int ha_ndbcluster::set_index_key_from_record(NdbOperation *op,
const byte *record, uint keyno)
{
KEY* key_info= table->key_info + keyno;
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
uint i;
DBUG_ENTER("set_index_key_from_record");
for (i= 0; key_part != end; key_part++, i++)
{
Field* field= key_part->field;
if (set_ndb_key(op, field, m_index[keyno].unique_index_attrid_map[i],
record+key_part->offset))
ERR_RETURN(m_active_trans->getNdbError());
}
DBUG_RETURN(0);
}
int
ha_ndbcluster::set_index_key(NdbOperation *op,
const KEY *key_info,
@ -1776,46 +1796,154 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
}
/*
Peek to check if a particular row already exists
* Check that all operations between first and last
* have received the given errcode
* If checking for HA_ERR_KEY_NOT_FOUND then update m_dupkey
* for all succeeding operations
*/
bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans,
const NdbOperation *first,
const NdbOperation *last,
uint errcode)
{
const NdbOperation *op= first;
DBUG_ENTER("ha_ndbcluster::check_all_operations_for_error");
while(op)
{
NdbError err= op->getNdbError();
if (err.status != NdbError::Success)
{
if (ndb_to_mysql_error(&err) != (int) errcode)
DBUG_RETURN(false);
if (op == last) break;
op= trans->getNextCompletedOperation(op);
}
else
{
// We found a duplicate
if (op->getType() == NdbOperation::UniqueIndexAccess)
{
if (errcode == HA_ERR_KEY_NOT_FOUND)
{
NdbIndexOperation *iop= (NdbIndexOperation *) op;
const NDBINDEX *index= iop->getIndex();
// Find the key_no of the index
for(uint i= 0; i<table->s->keys; i++)
{
if (m_index[i].unique_index == index)
{
m_dupkey= i;
break;
}
}
}
}
else
{
// Must have been primary key access
DBUG_ASSERT(op->getType() == NdbOperation::PrimaryKeyAccess);
if (errcode == HA_ERR_KEY_NOT_FOUND)
m_dupkey= table->s->primary_key;
}
DBUG_RETURN(false);
}
}
DBUG_RETURN(true);
}
/*
* Peek to check if any rows already exist with conflicting
* primary key or unique index values
*/
int ha_ndbcluster::peek_row(const byte *record)
int ha_ndbcluster::peek_indexed_rows(const byte *record)
{
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
DBUG_ENTER("peek_row");
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
const NdbOperation *first, *last;
uint i;
int res;
if ((res= set_primary_key_from_record(op, record)))
ERR_RETURN(trans->getNdbError());
DBUG_ENTER("peek_indexed_rows");
if (m_use_partition_function)
NdbOperation::LockMode lm= NdbOperation::LM_Read;
first= NULL;
if (table->s->primary_key != MAX_KEY)
{
uint32 part_id;
int error;
longlong func_value;
if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value)))
{
DBUG_RETURN(error);
}
op->setPartitionId(part_id);
}
/*
* Fetch any row with colliding primary key
*/
if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
first= op;
if ((res= set_primary_key_from_record(op, record)))
ERR_RETURN(trans->getNdbError());
if (execute_no_commit_ie(this,trans) != 0)
if (m_use_partition_function)
{
uint32 part_id;
int error;
longlong func_value;
if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value)))
{
DBUG_RETURN(error);
}
op->setPartitionId(part_id);
}
}
/*
* Fetch any rows with colliding unique indexes
*/
KEY* key_info;
KEY_PART_INFO *key_part, *end;
for (i= 0, key_info= table->key_info; i < table->s->keys; i++, key_info++)
{
if (i != table->s->primary_key &&
key_info->flags & HA_NOSAME)
{
// A unique index is defined on table
NdbIndexOperation *iop;
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
key_part= key_info->key_part;
end= key_part + key_info->key_parts;
if (!(iop= trans->getNdbIndexOperation(unique_index,
(const NDBTAB *) m_table)) ||
iop->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
if (!first)
first= iop;
if ((res= set_index_key_from_record(iop, record, i)))
ERR_RETURN(trans->getNdbError());
}
}
last= trans->getLastDefinedOperation();
if (first)
res= execute_no_commit_ie(this,trans);
else
{
// Table has no keys
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
}
if (check_all_operations_for_error(trans, first, last,
HA_ERR_KEY_NOT_FOUND))
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
}
else
{
DBUG_PRINT("info", ("m_dupkey %d", m_dupkey));
}
DBUG_RETURN(0);
}
/*
Read one record from NDB using unique secondary index
*/
@ -2310,13 +2438,33 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_ENTER("ha_ndbcluster::write_row");
m_write_op= TRUE;
if (!m_use_write && m_ignore_dup_key && table_share->primary_key != MAX_KEY)
has_auto_increment= (table->next_number_field && record == table->record[0]);
if (table_share->primary_key != MAX_KEY)
{
int peek_res= peek_row(record);
/*
* Increase any auto_incremented primary key
*/
if (has_auto_increment)
{
THD *thd= table->in_use;
m_skip_auto_increment= FALSE;
update_auto_increment();
/* Ensure that handler is always called for auto_increment values */
thd->next_insert_id= 0;
m_skip_auto_increment= !auto_increment_column_changed;
}
}
/*
* If IGNORE the ignore constraint violations on primary and unique keys
*/
if (!m_use_write && m_ignore_dup_key)
{
int peek_res= peek_indexed_rows(record);
if (!peek_res)
{
m_dupkey= table_share->primary_key;
DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
}
if (peek_res != HA_ERR_KEY_NOT_FOUND)
@ -2326,7 +2474,6 @@ int ha_ndbcluster::write_row(byte *record)
statistic_increment(thd->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
has_auto_increment= (table->next_number_field && record == table->record[0]);
if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)))
ERR_RETURN(trans->getNdbError());
@ -2367,17 +2514,6 @@ int ha_ndbcluster::write_row(byte *record)
{
int res;
if (has_auto_increment)
{
THD *thd= table->in_use;
m_skip_auto_increment= FALSE;
update_auto_increment();
/* Ensure that handler is always called for auto_increment values */
thd->next_insert_id= 0;
m_skip_auto_increment= !auto_increment_column_changed;
}
if ((res= set_primary_key_from_record(op, record)))
return res;
}
@ -3463,7 +3599,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
if (current_thd->lex->sql_command == SQLCOM_REPLACE)
if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)
{
DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
m_use_write= TRUE;
@ -5137,6 +5273,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
m_sorted(FALSE),
m_use_write(FALSE),
m_ignore_dup_key(FALSE),
m_has_unique_index(FALSE),
m_primary_key_update(FALSE),
m_ignore_no_key(FALSE),
m_rows_to_insert((ha_rows) 1),

View file

@ -738,7 +738,11 @@ private:
part_id_range *part_spec);
int full_table_scan(byte * buf);
int peek_row(const byte *record);
bool check_all_operations_for_error(NdbTransaction *trans,
const NdbOperation *first,
const NdbOperation *last,
uint errcode);
int peek_indexed_rows(const byte *record);
int unique_index_read(const byte *key, uint key_len,
byte *buf);
int fetch_next(NdbScanOperation* op);
@ -766,6 +770,8 @@ private:
int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
int set_primary_key(NdbOperation *op, const byte *key);
int set_primary_key_from_record(NdbOperation *op, const byte *record);
int set_index_key_from_record(NdbOperation *op, const byte *record,
uint keyno);
int set_bounds(NdbIndexScanOperation*, uint inx, bool rir,
const key_range *keys[2], uint= 0);
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
@ -832,6 +838,7 @@ private:
bool m_sorted;
bool m_use_write;
bool m_ignore_dup_key;
bool m_has_unique_index;
bool m_primary_key_update;
bool m_write_op;
bool m_ignore_no_key;

View file

@ -128,7 +128,11 @@ public:
DihAllAllowNodeStart = 7016,
DihMinTimeBetweenLCP = 7017,
DihMaxTimeBetweenLCP = 7018,
// 7019
// 7020
// 7021
EnableUndoDelayDataWrite = 7080, // DIH+ACC+TUP
DihSetTimeBetweenGcp = 7090,
DihStartLcpImmediately = 7099,
// 8000 Suma
// 12000 Tux

View file

@ -64,5 +64,11 @@ private:
Uint32 startingNodeId;
Uint32 errorCode;
enum ErrorCode
{
ZNODE_ALREADY_STARTING_ERROR = 305,
InitialStartRequired = 320
};
};
#endif

View file

@ -44,7 +44,8 @@ private:
CHECK_WAIT_DROP_TAB_FAILED_LQH = 16,
TRIGGER_PENDING = 17,
DelayTCKEYCONF = 18
DelayTCKEYCONF = 18,
ZNF_CHECK_TRANSACTIONS = 19
};
};

View file

@ -74,9 +74,7 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification;
#define NDBD_EXIT_SR_OTHERNODEFAILED 2308
#define NDBD_EXIT_NODE_NOT_DEAD 2309
#define NDBD_EXIT_SR_REDOLOG 2310
/*
#define NDBD_EXIT_SR_RESTARTCONFLICT 2311
*/
#define NDBD_EXIT_NO_MORE_UNDOLOG 2312
#define NDBD_EXIT_SR_UNDOLOG 2313
#define NDBD_EXIT_MEMALLOC 2327

View file

@ -129,6 +129,11 @@ public:
*/
int deleteTuple();
/**
* Get index object for this operation
*/
const NdbDictionary::Index * getIndex() const;
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Define the NdbIndexOperation to be a standard operation of type

View file

@ -55,10 +55,33 @@ public:
* @{
*/
/**
* Different access types (supported by sub-classes of NdbOperation)
*/
enum Type {
PrimaryKeyAccess ///< Read, insert, update, or delete using pk
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= 0 // NdbOperation
#endif
,UniqueIndexAccess ///< Read, update, or delete using unique index
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= 1 // NdbIndexOperation
#endif
,TableScan ///< Full table scan
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= 2 // NdbScanOperation
#endif
,OrderedIndexScan ///< Ordered index scan
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= 3 // NdbIndexScanOperation
#endif
};
/**
* Lock when performing read
*/
enum LockMode {
LM_Read ///< Read with shared lock
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
@ -720,6 +743,11 @@ public:
*/
const NdbDictionary::Table * getTable() const;
/**
* Get the type of access for this operation
*/
const Type getType() const;
/** @} *********************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
@ -773,7 +801,7 @@ protected:
int init(const class NdbTableImpl*, NdbTransaction* aCon);
void initInterpreter();
NdbOperation(Ndb* aNdb);
NdbOperation(Ndb* aNdb, Type aType = PrimaryKeyAccess);
virtual ~NdbOperation();
void next(NdbOperation*); // Set next pointer
NdbOperation* next(); // Get next pointer
@ -887,6 +915,8 @@ protected:
* These are the private variables that are defined in the operation objects.
*****************************************************************************/
Type m_type;
NdbReceiver theReceiver;
NdbError theError; // Errorcode
@ -1050,6 +1080,19 @@ NdbOperation::getFirstRecAttr() const
return theReceiver.theFirstRecAttr;
}
/******************************************************************************
Type getType()
Return Value Return the Type.
Remark: Gets type of access.
******************************************************************************/
inline
const NdbOperation::Type
NdbOperation::getType() const
{
return m_type;
}
/******************************************************************************
OperationStatus Status();

View file

@ -178,7 +178,8 @@ public:
int restart(bool forceSend = false);
protected:
NdbScanOperation(Ndb* aNdb);
NdbScanOperation(Ndb* aNdb,
NdbOperation::Type aType = NdbOperation::TableScan);
virtual ~NdbScanOperation();
int nextResultImpl(bool fetchAllowed = true, bool forceSend = false);

View file

@ -25,6 +25,7 @@
*/
class InputStream {
public:
virtual ~InputStream() {}
virtual char* gets(char * buf, int bufLen) = 0;
};
@ -32,6 +33,7 @@ class FileInputStream : public InputStream {
FILE * f;
public:
FileInputStream(FILE * file = stdin);
virtual ~FileInputStream() {}
char* gets(char * buf, int bufLen);
};
@ -42,6 +44,7 @@ class SocketInputStream : public InputStream {
unsigned m_timeout;
public:
SocketInputStream(NDB_SOCKET_TYPE socket, unsigned readTimeout = 1000);
virtual ~SocketInputStream() {}
char* gets(char * buf, int bufLen);
};

View file

@ -25,6 +25,7 @@
*/
class OutputStream {
public:
virtual ~OutputStream() {}
virtual int print(const char * fmt, ...) = 0;
virtual int println(const char * fmt, ...) = 0;
virtual void flush() {};
@ -34,7 +35,8 @@ class FileOutputStream : public OutputStream {
FILE * f;
public:
FileOutputStream(FILE * file = stdout);
virtual ~FileOutputStream() {}
int print(const char * fmt, ...);
int println(const char * fmt, ...);
void flush() { fflush(f); }
@ -45,7 +47,8 @@ class SocketOutputStream : public OutputStream {
unsigned m_timeout;
public:
SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned writeTimeout = 1000);
virtual ~SocketOutputStream() {}
int print(const char * fmt, ...);
int println(const char * fmt, ...);
};
@ -53,13 +56,15 @@ public:
class SoftOseOutputStream : public OutputStream {
public:
SoftOseOutputStream();
virtual ~SoftOseOutputStream() {}
int print(const char * fmt, ...);
int println(const char * fmt, ...);
};
class NullOutputStream : public OutputStream {
public:
virtual ~NullOutputStream() {}
int print(const char * /* unused */, ...) { return 1;}
int println(const char * /* unused */, ...) { return 1;}
};

View file

@ -92,6 +92,8 @@ public:
*/
class Reader {
public:
virtual ~Reader() {}
/**
* Move to first element
* Return true if element exist
@ -164,6 +166,8 @@ public:
*/
class Writer {
public:
virtual ~Writer() {}
bool first();
bool add(Uint16 key, Uint32 value);
bool add(Uint16 key, const char * value);
@ -183,6 +187,7 @@ public:
class SimplePropertiesLinearReader : public SimpleProperties::Reader {
public:
SimplePropertiesLinearReader(const Uint32 * src, Uint32 len);
virtual ~SimplePropertiesLinearReader() {}
virtual void reset();
virtual bool step(Uint32 len);
@ -201,7 +206,8 @@ private:
class LinearWriter : public SimpleProperties::Writer {
public:
LinearWriter(Uint32 * src, Uint32 len);
virtual ~LinearWriter() {}
virtual bool reset();
virtual bool putWord(Uint32 val);
virtual bool putWords(const Uint32 * src, Uint32 len);
@ -218,6 +224,7 @@ private:
class UtilBufferWriter : public SimpleProperties::Writer {
public:
UtilBufferWriter(class UtilBuffer & buf);
virtual ~UtilBufferWriter() {}
virtual bool reset();
virtual bool putWord(Uint32 val);
@ -237,7 +244,9 @@ class SimplePropertiesSectionReader : public SimpleProperties::Reader {
public:
SimplePropertiesSectionReader(struct SegmentedSectionPtr &,
class SectionSegmentPool &);
virtual ~SimplePropertiesSectionReader() {}
virtual void reset();
virtual bool step(Uint32 len);
virtual bool getWord(Uint32 * dst);
@ -269,6 +278,7 @@ Uint32 SimplePropertiesSectionReader::getSize() const
class SimplePropertiesSectionWriter : public SimpleProperties::Writer {
public:
SimplePropertiesSectionWriter(class SectionSegmentPool &);
virtual ~SimplePropertiesSectionWriter() {}
virtual bool reset();
virtual bool putWord(Uint32 val);

View file

@ -228,6 +228,8 @@ Delay execution of COMPLETECONF signal 2 seconds to generate time-out.
8045: (ABORTCONF only as part of take-over)
Delay execution of ABORTCONF signal 2 seconds to generate time-out.
8050: Send ZABORT_TIMEOUT_BREAK delayed
ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
-------------------------------------------------
@ -305,6 +307,8 @@ Test Crashes in handling node restarts
7131: Crash when receiving START_COPYREQ in master node
7132: Crash when receiving START_COPYCONF in starting node
7170: Crash when receiving START_PERMREF (InitialStartRequired)
DICT:
6000 Crash during NR when receiving DICTSTARTREQ
6001 Crash during NR when receiving SCHEMA_INFO
@ -314,7 +318,7 @@ LQH:
5026 Crash when receiving COPY_ACTIVEREQ
5027 Crash when receiving STAT_RECREQ
5042 Crash starting node, when scan is finished on primary replica
5043 Crash starting node, when scan is finished on primary replica
Test Crashes in handling take over
----------------------------------

View file

@ -75,7 +75,6 @@
#define ZWRONG_FAILURE_NUMBER_ERROR 302
#define ZWRONG_START_NODE_ERROR 303
#define ZNO_REPLICA_FOUND_ERROR 304
#define ZNODE_ALREADY_STARTING_ERROR 305
#define ZNODE_START_DISALLOWED_ERROR 309
// --------------------------------------
@ -1040,7 +1039,8 @@ private:
void prepareReplicas(FragmentstorePtr regFragptr);
void removeNodeFromStored(Uint32 nodeId,
FragmentstorePtr regFragptr,
ReplicaRecordPtr replicaPtr);
ReplicaRecordPtr replicaPtr,
bool temporary);
void removeOldStoredReplica(FragmentstorePtr regFragptr,
ReplicaRecordPtr replicaPtr);
void removeStoredReplica(FragmentstorePtr regFragptr,

View file

@ -1439,6 +1439,33 @@ void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref)
return;
}
NodeRecordPtr nodePtr;
Uint32 gci = SYSFILE->lastCompletedGCI[getOwnNodeId()];
for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++)
{
jam();
ptrAss(nodePtr, nodeRecord);
if (SYSFILE->lastCompletedGCI[nodePtr.i] > gci)
{
jam();
/**
* Since we're starting (as master) and there
* are other nodes with a higher GCI,
* their GCIs must be invalidated
* and they _must_ do an initial start;
* indicate this by setting lastCompletedGCI = 0
*/
SYSFILE->lastCompletedGCI[nodePtr.i] = 0;
ndbrequire(nodePtr.p->nodeStatus != NodeRecord::ALIVE);
warningEvent("Making filesystem for node %d unusable",
nodePtr.i);
}
}
/**
* This set which GCI we will try to restart to
*/
SYSFILE->newestRestorableGCI = gci;
ndbrequire(isMaster());
copyGciLab(signal, CopyGCIReq::RESTART); // We have already read the file!
}//Dbdih::ndbStartReqLab()
@ -1574,7 +1601,7 @@ void Dbdih::execSTART_PERMREF(Signal* signal)
{
jamEntry();
Uint32 errorCode = signal->theData[1];
if (errorCode == ZNODE_ALREADY_STARTING_ERROR) {
if (errorCode == StartPermRef::ZNODE_ALREADY_STARTING_ERROR) {
jam();
/*-----------------------------------------------------------------------*/
// The master was busy adding another node. We will wait for a second and
@ -1584,6 +1611,20 @@ void Dbdih::execSTART_PERMREF(Signal* signal)
sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 3000, 1);
return;
}//if
if (errorCode == StartPermRef::InitialStartRequired)
{
CRASH_INSERTION(7170);
char buf[255];
BaseString::snprintf(buf, sizeof(buf),
"Cluster requires this node to be started "
" with --initial as partial start has been performed"
" and this filesystem is unusable");
progError(__LINE__,
NDBD_EXIT_SR_RESTARTCONFLICT,
buf);
ndbrequire(false);
}
/*------------------------------------------------------------------------*/
// Some node process in another node involving our node was still active. We
// will recover from this by crashing here.
@ -1677,7 +1718,7 @@ void Dbdih::execSTART_PERMREQ(Signal* signal)
(c_nodeStartMaster.wait != ZFALSE)) {
jam();
signal->theData[0] = nodeId;
signal->theData[1] = ZNODE_ALREADY_STARTING_ERROR;
signal->theData[1] = StartPermRef::ZNODE_ALREADY_STARTING_ERROR;
sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
return;
}//if
@ -1687,6 +1728,16 @@ void Dbdih::execSTART_PERMREQ(Signal* signal)
ndbrequire(false);
}//if
if (SYSFILE->lastCompletedGCI[nodeId] == 0 &&
typeStart != NodeState::ST_INITIAL_NODE_RESTART)
{
jam();
signal->theData[0] = nodeId;
signal->theData[1] = StartPermRef::InitialStartRequired;
sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
return;
}
/*----------------------------------------------------------------------
* WE START THE INCLUSION PROCEDURE
* ---------------------------------------------------------------------*/
@ -3685,24 +3736,12 @@ void Dbdih::closingGcpLab(Signal* signal, FileRecordPtr filePtr)
/* ------------------------------------------------------------------------- */
void Dbdih::selectMasterCandidateAndSend(Signal* signal)
{
Uint32 gci = 0;
Uint32 masterCandidateId = 0;
NodeRecordPtr nodePtr;
for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
jam();
ptrAss(nodePtr, nodeRecord);
if (SYSFILE->lastCompletedGCI[nodePtr.i] > gci) {
jam();
masterCandidateId = nodePtr.i;
gci = SYSFILE->lastCompletedGCI[nodePtr.i];
}//if
}//for
ndbrequire(masterCandidateId != 0);
setNodeGroups();
signal->theData[0] = masterCandidateId;
signal->theData[1] = gci;
signal->theData[0] = getOwnNodeId();
signal->theData[1] = SYSFILE->lastCompletedGCI[getOwnNodeId()];
sendSignal(cntrlblockref, GSN_DIH_RESTARTCONF, signal, 2, JBB);
NodeRecordPtr nodePtr;
Uint32 node_groups[MAX_NDB_NODES];
memset(node_groups, 0, sizeof(node_groups));
for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
@ -3720,10 +3759,10 @@ void Dbdih::selectMasterCandidateAndSend(Signal* signal)
if(count != 0 && count != cnoReplicas){
char buf[255];
BaseString::snprintf(buf, sizeof(buf),
"Illegal configuration change."
" Initial start needs to be performed "
" when changing no of replicas (%d != %d)",
node_groups[nodePtr.i], cnoReplicas);
"Illegal configuration change."
" Initial start needs to be performed "
" when changing no of replicas (%d != %d)",
node_groups[nodePtr.i], cnoReplicas);
progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
}
}
@ -5396,6 +5435,7 @@ void Dbdih::removeNodeFromTable(Signal* signal,
//const Uint32 lcpId = SYSFILE->latestLCP_ID;
const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE);
const bool temporary = !tabPtr.p->storedTable;
FragmentstorePtr fragPtr;
for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){
@ -5416,7 +5456,7 @@ void Dbdih::removeNodeFromTable(Signal* signal,
jam();
found = true;
noOfRemovedReplicas++;
removeNodeFromStored(nodeId, fragPtr, replicaPtr);
removeNodeFromStored(nodeId, fragPtr, replicaPtr, temporary);
if(replicaPtr.p->lcpOngoingFlag){
jam();
/**
@ -6121,9 +6161,6 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
signal->theData[0] = 7012;
execDUMP_STATE_ORD(signal);
signal->theData[0] = 7015;
execDUMP_STATE_ORD(signal);
c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
checkLocalNodefailComplete(signal, failedNodePtr.i, NF_LCP_TAKE_OVER);
@ -12502,9 +12539,18 @@ void Dbdih::removeDeadNode(NodeRecordPtr removeNodePtr)
/*---------------------------------------------------------------*/
void Dbdih::removeNodeFromStored(Uint32 nodeId,
FragmentstorePtr fragPtr,
ReplicaRecordPtr replicatePtr)
ReplicaRecordPtr replicatePtr,
bool temporary)
{
newCrashedReplica(nodeId, replicatePtr);
if (!temporary)
{
jam();
newCrashedReplica(nodeId, replicatePtr);
}
else
{
jam();
}
removeStoredReplica(fragPtr, replicatePtr);
linkOldStoredReplica(fragPtr, replicatePtr);
ndbrequire(fragPtr.p->storedReplicas != RNIL);
@ -13439,7 +13485,8 @@ void
Dbdih::execDUMP_STATE_ORD(Signal* signal)
{
DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0];
if (dumpState->args[0] == DumpStateOrd::DihDumpNodeRestartInfo) {
Uint32 arg = dumpState->args[0];
if (arg == DumpStateOrd::DihDumpNodeRestartInfo) {
infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d, c_nodeStartMaster.wait = %d",
c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp, c_nodeStartMaster.wait);
infoEvent("cstartGcpNow = %d, cgcpStatus = %d",
@ -13449,7 +13496,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
infoEvent("cgcpOrderBlocked = %d, cgcpStartCounter = %d",
cgcpOrderBlocked, cgcpStartCounter);
}//if
if (dumpState->args[0] == DumpStateOrd::DihDumpNodeStatusInfo) {
if (arg == DumpStateOrd::DihDumpNodeStatusInfo) {
NodeRecordPtr localNodePtr;
infoEvent("Printing nodeStatus of all nodes");
for (localNodePtr.i = 1; localNodePtr.i < MAX_NDB_NODES; localNodePtr.i++) {
@ -13461,7 +13508,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}//for
}//if
if (dumpState->args[0] == DumpStateOrd::DihPrintFragmentation){
if (arg == DumpStateOrd::DihPrintFragmentation){
infoEvent("Printing fragmentation of all tables --");
for(Uint32 i = 0; i<ctabFileSize; i++){
TabRecordPtr tabPtr;
@ -13636,7 +13683,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}
}
if(dumpState->args[0] == 7019 && signal->getLength() == 2)
if(arg == 7019 && signal->getLength() == 2)
{
char buf2[8+1];
NodeRecordPtr nodePtr;
@ -13654,7 +13701,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
nodePtr.p->m_nodefailSteps.getText(buf2));
}
if(dumpState->args[0] == 7020 && signal->getLength() > 3)
if(arg == 7020 && signal->getLength() > 3)
{
Uint32 gsn= signal->theData[1];
Uint32 block= signal->theData[2];
@ -13678,7 +13725,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
gsn, getBlockName(block, "UNKNOWN"), length, buf);
}
if(dumpState->args[0] == DumpStateOrd::DihDumpLCPState){
if(arg == DumpStateOrd::DihDumpLCPState){
infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
infoEvent("lcpStatus = %d (update place = %d) ",
c_lcpState.lcpStatus, c_lcpState.lcpStatusUpdatedPlace);
@ -13694,7 +13741,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
}
if(dumpState->args[0] == DumpStateOrd::DihDumpLCPMasterTakeOver){
if(arg == DumpStateOrd::DihDumpLCPMasterTakeOver){
infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
infoEvent
("c_lcpMasterTakeOverState.state = %d updatePlace = %d failedNodeId = %d",
@ -13709,52 +13756,25 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
}
if (signal->theData[0] == 7015){
for(Uint32 i = 0; i<ctabFileSize; i++){
TabRecordPtr tabPtr;
tabPtr.i = i;
ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
continue;
infoEvent
("Table %d: TabCopyStatus: %d TabUpdateStatus: %d TabLcpStatus: %d",
tabPtr.i,
tabPtr.p->tabCopyStatus,
tabPtr.p->tabUpdateState,
tabPtr.p->tabLcpStatus);
if (signal->theData[0] == 7015)
{
if (signal->getLength() == 1)
{
signal->theData[1] = 0;
}
FragmentstorePtr fragPtr;
for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
jam();
getFragstore(tabPtr.p, fid, fragPtr);
char buf[100], buf2[100];
BaseString::snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ",
fid, fragPtr.p->noLcpReplicas);
Uint32 num=0;
ReplicaRecordPtr replicaPtr;
replicaPtr.i = fragPtr.p->storedReplicas;
do {
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
BaseString::snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)",
buf, num,
replicaPtr.p->procNode,
replicaPtr.p->lcpIdStarted,
replicaPtr.p->lcpOngoingFlag ? "Ongoing" : "Idle");
BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
num++;
replicaPtr.i = replicaPtr.p->nextReplica;
} while (replicaPtr.i != RNIL);
infoEvent(buf);
}
Uint32 tableId = signal->theData[1];
if (tableId < ctabFileSize)
{
signal->theData[0] = 7021;
execDUMP_STATE_ORD(signal);
signal->theData[0] = 7015;
signal->theData[1] = tableId + 1;
sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
}
}
if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){
if(arg == DumpStateOrd::EnableUndoDelayDataWrite){
ndbout << "Dbdih:: delay write of datapages for table = "
<< dumpState->args[1]<< endl;
// Send this dump to ACC and TUP
@ -13784,7 +13804,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
return;
}
if(dumpState->args[0] == 7098){
if(arg == 7098){
if(signal->length() == 3){
jam();
infoEvent("startLcpRoundLoopLab(tabel=%d, fragment=%d)",
@ -13797,10 +13817,73 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}
}
if(dumpState->args[0] == DumpStateOrd::DihStartLcpImmediately){
if(arg == DumpStateOrd::DihStartLcpImmediately){
c_lcpState.ctimer += (1 << c_lcpState.clcpDelay);
return;
}
if (arg == DumpStateOrd::DihSetTimeBetweenGcp)
{
if (signal->getLength() == 1)
{
const ndb_mgm_configuration_iterator * p =
m_ctx.m_config.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_GCP_INTERVAL, &cgcpDelay);
}
else
{
cgcpDelay = signal->theData[1];
}
ndbout_c("Setting time between gcp : %d", cgcpDelay);
}
if (arg == 7021 && signal->getLength() == 2)
{
TabRecordPtr tabPtr;
tabPtr.i = signal->theData[1];
if (tabPtr.i >= ctabFileSize)
return;
ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
return;
infoEvent
("Table %d: TabCopyStatus: %d TabUpdateStatus: %d TabLcpStatus: %d",
tabPtr.i,
tabPtr.p->tabCopyStatus,
tabPtr.p->tabUpdateState,
tabPtr.p->tabLcpStatus);
FragmentstorePtr fragPtr;
for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
jam();
getFragstore(tabPtr.p, fid, fragPtr);
char buf[100], buf2[100];
BaseString::snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ",
fid, fragPtr.p->noLcpReplicas);
Uint32 num=0;
ReplicaRecordPtr replicaPtr;
replicaPtr.i = fragPtr.p->storedReplicas;
do {
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
BaseString::snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)",
buf, num,
replicaPtr.p->procNode,
replicaPtr.p->lcpIdStarted,
replicaPtr.p->lcpOngoingFlag ? "Ongoing" : "Idle");
BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
num++;
replicaPtr.i = replicaPtr.p->nextReplica;
} while (replicaPtr.i != RNIL);
infoEvent(buf);
}
}
}//Dbdih::execDUMP_STATE_ORD()
void

View file

@ -9960,7 +9960,7 @@ void Dblqh::nextScanConfCopyLab(Signal* signal)
/*---------------------------------------------------------------------------*/
scanptr.p->scanCompletedStatus = ZTRUE;
scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY;
if (ERROR_INSERTED(5042))
if (ERROR_INSERTED(5043))
{
CLEAR_ERROR_INSERT_VALUE;
tcConnectptr.p->copyCountWords = ~0;
@ -18198,6 +18198,89 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
return;
}
#ifdef ERROR_INSERT
#ifdef NDB_DEBUG_FULL
if(dumpState->args[0] == DumpStateOrd::LCPContinue){
switch(cerrorInsert){
case 5904:
CLEAR_ERROR_INSERT_VALUE;
g_trace_lcp.restore(*globalData.getBlock(BACKUP), signal);
return;
default:
return;
}
}
#endif
#endif
if(arg == 2304 || arg == 2305)
{
jam();
Uint32 i;
GcpRecordPtr gcp; gcp.i = RNIL;
for(i = 0; i<4; i++)
{
logPartPtr.i = i;
ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
ndbout_c("LP %d state: %d WW_Gci: %d gcprec: %d flq: %d currfile: %d tailFileNo: %d logTailMbyte: %d",
i,
logPartPtr.p->logPartState,
logPartPtr.p->waitWriteGciLog,
logPartPtr.p->gcprec,
logPartPtr.p->firstLogQueue,
logPartPtr.p->currentLogfile,
logPartPtr.p->logTailFileNo,
logPartPtr.p->logTailMbyte);
if(gcp.i == RNIL && logPartPtr.p->gcprec != RNIL)
gcp.i = logPartPtr.p->gcprec;
LogFileRecordPtr logFilePtr;
Uint32 first= logFilePtr.i= logPartPtr.p->firstLogfile;
do
{
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage",
logFilePtr.p->fileNo,
logFilePtr.i,
logFilePtr.p->fileChangeState,
logFilePtr.p->logFileStatus,
logFilePtr.p->currentMbyte,
logFilePtr.p->currentFilepage);
logFilePtr.i = logFilePtr.p->nextLogFile;
} while(logFilePtr.i != first);
}
if(gcp.i != RNIL)
{
ptrCheckGuard(gcp, cgcprecFileSize, gcpRecord);
for(i = 0; i<4; i++)
{
ndbout_c(" GCP %d file: %d state: %d sync: %d page: %d word: %d",
i, gcp.p->gcpFilePtr[i], gcp.p->gcpLogPartState[i],
gcp.p->gcpSyncReady[i],
gcp.p->gcpPageNo[i],
gcp.p->gcpWordNo[i]);
}
}
if(arg== 2305)
{
progError(__LINE__, NDBD_EXIT_SYSTEM_ERROR,
"Please report this as a bug. "
"Provide as much info as possible, expecially all the "
"ndb_*_out.log files, Thanks. "
"Shutting down node due to failed handling of GCP_SAVEREQ");
}
}
if (dumpState->args[0] == DumpStateOrd::LqhErrorInsert5042 && signal->getLength() == 2)
{
c_error_insert_table_id = dumpState->args[1];
SET_ERROR_INSERT_VALUE(5042);
}
TcConnectionrec *regTcConnectionrec = tcConnectionrec;
Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
if(arg == 2306)
@ -18363,88 +18446,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
ndbrequire(arg != 2308);
}
#ifdef ERROR_INSERT
#ifdef NDB_DEBUG_FULL
if(dumpState->args[0] == DumpStateOrd::LCPContinue){
switch(cerrorInsert){
case 5904:
CLEAR_ERROR_INSERT_VALUE;
g_trace_lcp.restore(*globalData.getBlock(BACKUP), signal);
return;
default:
return;
}
}
#endif
#endif
if(arg == 2304 || arg == 2305)
{
jam();
Uint32 i;
GcpRecordPtr gcp; gcp.i = RNIL;
for(i = 0; i<4; i++)
{
logPartPtr.i = i;
ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
ndbout_c("LP %d state: %d WW_Gci: %d gcprec: %d flq: %d currfile: %d tailFileNo: %d logTailMbyte: %d",
i,
logPartPtr.p->logPartState,
logPartPtr.p->waitWriteGciLog,
logPartPtr.p->gcprec,
logPartPtr.p->firstLogQueue,
logPartPtr.p->currentLogfile,
logPartPtr.p->logTailFileNo,
logPartPtr.p->logTailMbyte);
if(gcp.i == RNIL && logPartPtr.p->gcprec != RNIL)
gcp.i = logPartPtr.p->gcprec;
LogFileRecordPtr logFilePtr;
Uint32 first= logFilePtr.i= logPartPtr.p->firstLogfile;
do
{
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage",
logFilePtr.p->fileNo,
logFilePtr.i,
logFilePtr.p->fileChangeState,
logFilePtr.p->logFileStatus,
logFilePtr.p->currentMbyte,
logFilePtr.p->currentFilepage);
logFilePtr.i = logFilePtr.p->nextLogFile;
} while(logFilePtr.i != first);
}
if(gcp.i != RNIL)
{
ptrCheckGuard(gcp, cgcprecFileSize, gcpRecord);
for(i = 0; i<4; i++)
{
ndbout_c(" GCP %d file: %d state: %d sync: %d page: %d word: %d",
i, gcp.p->gcpFilePtr[i], gcp.p->gcpLogPartState[i],
gcp.p->gcpSyncReady[i],
gcp.p->gcpPageNo[i],
gcp.p->gcpWordNo[i]);
}
}
if(arg== 2305)
{
progError(__LINE__, NDBD_EXIT_SYSTEM_ERROR,
"Please report this as a bug. "
"Provide as much info as possible, expecially all the "
"ndb_*_out.log files, Thanks. "
"Shutting down node due to failed handling of GCP_SAVEREQ");
}
}
if (dumpState->args[0] == DumpStateOrd::LqhErrorInsert5042 && signal->getLength() == 2)
{
c_error_insert_table_id = dumpState->args[1];
SET_ERROR_INSERT_VALUE(5042);
}
}//Dblqh::execDUMP_STATE_ORD()
void Dblqh::execSET_VAR_REQ(Signal* signal)

View file

@ -213,14 +213,6 @@ public:
LTS_ACTIVE = 1
};
enum TakeOverState {
TOS_NOT_DEFINED = 0,
TOS_IDLE = 1,
TOS_ACTIVE = 2,
TOS_COMPLETED = 3,
TOS_NODE_FAILED = 4
};
enum FailState {
FS_IDLE = 0,
FS_LISTENING = 1,
@ -645,6 +637,7 @@ public:
ConnectionState apiConnectstate;
UintR transid[2];
UintR firstTcConnect;
NdbNodeBitmask m_transaction_nodes;
//---------------------------------------------------
// Second 16 byte cache line. Hot variables.
@ -937,7 +930,6 @@ public:
struct HostRecord {
HostState hostStatus;
LqhTransState lqhTransStatus;
TakeOverState takeOverStatus;
bool inPackedList;
UintR noOfPackedWordsLqh;
UintR packedWordsLqh[26];
@ -946,6 +938,17 @@ public:
UintR noOfWordsTCINDXCONF;
UintR packedWordsTCINDXCONF[30];
BlockReference hostLqhBlockRef;
enum NodeFailBits
{
NF_TAKEOVER = 0x1,
NF_CHECK_SCAN = 0x2,
NF_CHECK_TRANSACTION = 0x4,
NF_CHECK_DROP_TAB = 0x8,
NF_NODE_FAIL_BITS = 0xF // All bits...
};
Uint32 m_nf_bits;
NdbNodeBitmask m_lqh_trans_conf;
}; /* p2c: size = 128 bytes */
typedef Ptr<HostRecord> HostRecordPtr;
@ -1593,7 +1596,7 @@ private:
void wrongSchemaVersionErrorLab(Signal* signal);
void noFreeConnectionErrorLab(Signal* signal);
void tckeyreq050Lab(Signal* signal);
void timeOutFoundLab(Signal* signal, UintR anAdd);
void timeOutFoundLab(Signal* signal, UintR anAdd, Uint32 errCode);
void completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd);
void completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd);
void completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd);
@ -1615,6 +1618,9 @@ private:
void checkScanFragList(Signal*, Uint32 failedNodeId, ScanRecord * scanP,
LocalDLList<ScanFragRec>::Head&);
void nodeFailCheckTransactions(Signal*,Uint32 transPtrI,Uint32 failedNodeId);
void checkNodeFailComplete(Signal* signal, Uint32 failedNodeId, Uint32 bit);
// Initialisation
void initData();
void initRecords();
@ -1641,6 +1647,7 @@ private:
HostRecord *hostRecord;
HostRecordPtr hostptr;
UintR chostFilesize;
NdbNodeBitmask c_alive_nodes;
GcpRecord *gcpRecord;
GcpRecordPtr gcpPtr;

View file

@ -267,6 +267,10 @@ void Dbtc::execCONTINUEB(Signal* signal)
jam();
checkScanActiveInFailedLqh(signal, Tdata0, Tdata1);
return;
case TcContinueB::ZNF_CHECK_TRANSACTIONS:
jam();
nodeFailCheckTransactions(signal, Tdata0, Tdata1);
return;
case TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:
jam();
checkWaitDropTabFailedLqh(signal, Tdata0, Tdata1);
@ -304,8 +308,8 @@ void Dbtc::execINCL_NODEREQ(Signal* signal)
hostptr.i = signal->theData[1];
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
hostptr.p->hostStatus = HS_ALIVE;
hostptr.p->takeOverStatus = TOS_IDLE;
signal->theData[0] = cownref;
c_alive_nodes.set(hostptr.i);
sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB);
}
@ -504,6 +508,7 @@ Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId)
* Finished
*/
jam();
checkNodeFailComplete(signal, nodeId, HostRecord::NF_CHECK_DROP_TAB);
return;
}
@ -869,8 +874,6 @@ void Dbtc::execREAD_NODESCONF(Signal* signal)
hostptr.i = i;
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
hostptr.p->takeOverStatus = TOS_IDLE;
if (NodeBitmask::get(readNodes->inactiveNodes, i)) {
jam();
hostptr.p->hostStatus = HS_DEAD;
@ -878,6 +881,7 @@ void Dbtc::execREAD_NODESCONF(Signal* signal)
jam();
con_lineNodes++;
hostptr.p->hostStatus = HS_ALIVE;
c_alive_nodes.set(i);
}//if
}//if
}//for
@ -2380,6 +2384,7 @@ void Dbtc::initApiConnectRec(Signal* signal,
regApiPtr->commitAckMarker = RNIL;
regApiPtr->buddyPtr = RNIL;
regApiPtr->currSavePointId = 0;
regApiPtr->m_transaction_nodes.clear();
// Trigger data
releaseFiredTriggerData(&regApiPtr->theFiredTriggers),
// Index data
@ -2968,6 +2973,10 @@ void Dbtc::tckeyreq050Lab(Signal* signal)
signal->theData[0] = TdihConnectptr;
signal->theData[1] = Ttableref;
signal->theData[2] = TdistrHashValue;
signal->theData[3] = 0;
signal->theData[4] = 0;
signal->theData[5] = 0;
signal->theData[6] = 0;
/*-------------------------------------------------------------*/
/* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
@ -3148,6 +3157,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
ApiConnectRecord * const regApiPtr = apiConnectptr.p;
CacheRecord * const regCachePtr = cachePtr.p;
Uint32 version = getNodeInfo(refToNode(TBRef)).m_version;
UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
#ifdef ERROR_INSERT
if (ERROR_INSERTED(8002)) {
systemErrorLab(signal, __LINE__);
@ -3185,6 +3195,9 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd);
Tdata10 = 0;
sig0 = regCachePtr->opSimple;
sig1 = regTcPtr->operation;
bool simpleRead = (sig1 == ZREAD && sig0 == ZTRUE);
LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen);
LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo);
if (unlikely(version < NDBD_ROWID_VERSION))
@ -3199,8 +3212,8 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
LqhKeyReq::setApplicationAddressFlag(Tdata10, 1);
LqhKeyReq::setDirtyFlag(Tdata10, regTcPtr->dirtyOp);
LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec);
LqhKeyReq::setSimpleFlag(Tdata10, regCachePtr->opSimple);
LqhKeyReq::setOperation(Tdata10, regTcPtr->operation);
LqhKeyReq::setSimpleFlag(Tdata10, sig0);
LqhKeyReq::setOperation(Tdata10, sig1);
LqhKeyReq::setNoDiskFlag(Tdata10, regCachePtr->m_no_disk_flag);
/* -----------------------------------------------------------------------
@ -3215,18 +3228,16 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
* ----------------------------------------------------------------------- */
//LqhKeyReq::setAPIVersion(Tdata10, regCachePtr->apiVersionNo);
Uint32 commitAckMarker = regTcPtr->commitAckMarker;
const Uint32 noOfLqhs = regTcPtr->noOfNodes;
if(commitAckMarker != RNIL){
jam();
LqhKeyReq::setMarkerFlag(Tdata10, 1);
CommitAckMarker * tmp;
tmp = m_commitAckMarkerHash.getPtr(commitAckMarker);
CommitAckMarker * tmp = m_commitAckMarkerHash.getPtr(commitAckMarker);
/**
* Populate LQH array
*/
const Uint32 noOfLqhs = regTcPtr->noOfNodes;
tmp->noOfLqhs = noOfLqhs;
for(Uint32 i = 0; i<noOfLqhs; i++){
tmp->lqhNodeId[i] = regTcPtr->tcNodedata[i];
@ -3237,7 +3248,6 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
/* NO READ LENGTH SENT FROM TC. SEQUENTIAL NUMBER IS 1 AND IT */
/* IS SENT TO A PRIMARY NODE. */
/* ************************************************************> */
UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtrSend();
@ -3261,6 +3271,14 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
sig5 = regTcPtr->clientData;
sig6 = regCachePtr->scanInfo;
if (! simpleRead)
{
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[0]);
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[1]);
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[2]);
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[3]);
}
lqhKeyReq->tableSchemaVersion = sig0;
lqhKeyReq->fragmentData = sig1;
lqhKeyReq->transId1 = sig2;
@ -4646,6 +4664,7 @@ void Dbtc::copyApi(Signal* signal)
UintR TgcpPointer = regTmpApiPtr->gcpPointer;
UintR TgcpFilesize = cgcpFilesize;
UintR TcommitAckMarker = regTmpApiPtr->commitAckMarker;
NdbNodeBitmask Tnodes = regTmpApiPtr->m_transaction_nodes;
GcpRecord *localGcpRecord = gcpRecord;
regApiPtr->ndbapiBlockref = regTmpApiPtr->ndbapiBlockref;
@ -4656,6 +4675,7 @@ void Dbtc::copyApi(Signal* signal)
regApiPtr->transid[1] = Ttransid2;
regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec;
regApiPtr->commitAckMarker = TcommitAckMarker;
regApiPtr->m_transaction_nodes = Tnodes;
gcpPtr.i = TgcpPointer;
ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord);
@ -4666,6 +4686,7 @@ void Dbtc::copyApi(Signal* signal)
regTmpApiPtr->commitAckMarker = RNIL;
regTmpApiPtr->firstTcConnect = RNIL;
regTmpApiPtr->lastTcConnect = RNIL;
regTmpApiPtr->m_transaction_nodes.clear();
releaseAllSeizedIndexOperations(regTmpApiPtr);
}//Dbtc::copyApi()
@ -4925,7 +4946,7 @@ void Dbtc::releaseTransResources(Signal* signal)
TcConnectRecordPtr localTcConnectptr;
UintR TtcConnectFilesize = ctcConnectFilesize;
TcConnectRecord *localTcConnectRecord = tcConnectRecord;
apiConnectptr.p->m_transaction_nodes.clear();
localTcConnectptr.i = apiConnectptr.p->firstTcConnect;
do {
jam();
@ -5343,7 +5364,8 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
break;
case CS_ABORTING:
jam();
errorCode = ZABORTINPROGRESS;
errorCode = regApiPtr->returncode ?
regApiPtr->returncode : ZABORTINPROGRESS;
break;
case CS_START_SCAN:
jam();
@ -5882,9 +5904,9 @@ void Dbtc::abort010Lab(Signal* signal)
if (transP->firstTcConnect == RNIL) {
jam();
/*-----------------------------------------------------------------------*/
/* WE HAVE NO PARTICIPANTS IN THE TRANSACTION. */
/*-----------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
/* WE HAVE NO PARTICIPANTS IN THE TRANSACTION. */
/*--------------------------------------------------------------------*/
releaseAbortResources(signal);
return;
}//if
@ -6161,10 +6183,12 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
if (api_timer != 0) {
time_out_value= time_out_param + (api_con_ptr & mask_value);
time_passed= tc_timer - api_timer;
if (time_passed > time_out_value) {
if (time_passed > time_out_value)
{
jam();
timeOutFoundLab(signal, api_con_ptr);
return;
timeOutFoundLab(signal, api_con_ptr, ZTIME_OUT_ERROR);
api_con_ptr++;
break;
}
}
}
@ -6184,10 +6208,8 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
return;
}//Dbtc::timeOutLoopStartLab()
void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode)
{
sendContinueTimeOutControl(signal, TapiConPtr + 1);
apiConnectptr.i = TapiConPtr;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
/*------------------------------------------------------------------*/
@ -6200,7 +6222,8 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
<< "Time-out in state = " << apiConnectptr.p->apiConnectstate
<< " apiConnectptr.i = " << apiConnectptr.i
<< " - exec: " << apiConnectptr.p->m_exec_flag
<< " - place: " << c_apiConTimer_line[apiConnectptr.i]);
<< " - place: " << c_apiConTimer_line[apiConnectptr.i]
<< " code: " << errCode);
switch (apiConnectptr.p->apiConnectstate) {
case CS_STARTED:
if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
@ -6217,7 +6240,7 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
}//if
}
apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
apiConnectptr.p->returncode = ZTIME_OUT_ERROR;
apiConnectptr.p->returncode = errCode;
abort010Lab(signal);
return;
case CS_RECEIVING:
@ -6230,7 +6253,7 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
/* START ABORTING THE TRANSACTION. ALSO START CHECKING THE */
/* REMAINING TRANSACTIONS. */
/*------------------------------------------------------------------*/
terrorCode = ZTIME_OUT_ERROR;
terrorCode = errCode;
abortErrorLab(signal);
return;
case CS_COMMITTING:
@ -6437,6 +6460,7 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
return;
}
bool found = false;
OperationState tmp[16];
Uint32 TloopCount = 0;
@ -6444,7 +6468,31 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
jam();
if (tcConnectptr.i == RNIL) {
jam();
if (Tcheck == 0) {
#ifdef VM_TRACE
ndbout_c("found: %d Tcheck: %d apiConnectptr.p->counter: %d",
found, Tcheck, apiConnectptr.p->counter);
#endif
if (found || apiConnectptr.p->counter)
{
jam();
/**
* We sent atleast one ABORT/ABORTED
* or ZABORT_TIMEOUT_BREAK is in job buffer
* wait for reception...
*/
return;
}
if (Tcheck == 1)
{
jam();
releaseAbortResources(signal);
return;
}
if (Tcheck == 0)
{
jam();
/*------------------------------------------------------------------
* All nodes had already reported ABORTED for all tcConnect records.
@ -6453,9 +6501,11 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
*------------------------------------------------------------------*/
char buf[96]; buf[0] = 0;
char buf2[96];
BaseString::snprintf(buf, sizeof(buf), "TC %d: %d ops:",
__LINE__, apiConnectptr.i);
for(Uint32 i = 0; i<TloopCount; i++){
BaseString::snprintf(buf, sizeof(buf), "TC %d: %d counter: %d ops:",
__LINE__, apiConnectptr.i,
apiConnectptr.p->counter);
for(Uint32 i = 0; i<TloopCount; i++)
{
BaseString::snprintf(buf2, sizeof(buf2), "%s %d", buf, tmp[i]);
BaseString::snprintf(buf, sizeof(buf), buf2);
}
@ -6463,7 +6513,9 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
ndbout_c(buf);
ndbrequire(false);
releaseAbortResources(signal);
return;
}
return;
}//if
TloopCount++;
@ -6478,7 +6530,16 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
signal->theData[0] = TcContinueB::ZABORT_TIMEOUT_BREAK;
signal->theData[1] = tcConnectptr.i;
signal->theData[2] = apiConnectptr.i;
sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
if (ERROR_INSERTED(8050))
{
ndbout_c("sending ZABORT_TIMEOUT_BREAK delayed (%d %d)",
Tcheck, apiConnectptr.p->counter);
sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 2000, 3);
}
else
{
sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
}
return;
}//if
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
@ -6501,7 +6562,7 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
jam();
if (tcConnectptr.p->tcNodedata[Ti] != 0) {
TloopCount += 31;
Tcheck = 1;
found = true;
hostptr.i = tcConnectptr.p->tcNodedata[Ti];
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
if (hostptr.p->hostStatus == HS_ALIVE) {
@ -6874,58 +6935,44 @@ void Dbtc::execNODE_FAILREP(Signal* signal)
const Uint32 tnewMasterId = nodeFail->masterNodeId;
arrGuard(tnoOfNodes, MAX_NDB_NODES);
Uint32 i;
int index = 0;
for (unsigned i = 1; i< MAX_NDB_NODES; i++) {
if(NodeBitmask::get(nodeFail->theNodes, i)){
for (i = 1; i< MAX_NDB_NODES; i++)
{
if(NodeBitmask::get(nodeFail->theNodes, i))
{
cdata[index] = i;
index++;
}//if
}//for
cmasterNodeId = tnewMasterId;
tcNodeFailptr.i = 0;
ptrAss(tcNodeFailptr, tcFailRecord);
Uint32 tindex;
for (tindex = 0; tindex < tnoOfNodes; tindex++) {
for (i = 0; i < tnoOfNodes; i++)
{
jam();
hostptr.i = cdata[tindex];
hostptr.i = cdata[i];
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
/*------------------------------------------------------------*/
/* SET STATUS OF THE FAILED NODE TO DEAD SINCE IT HAS */
/* FAILED. */
/*------------------------------------------------------------*/
hostptr.p->hostStatus = HS_DEAD;
hostptr.p->m_nf_bits = HostRecord::NF_NODE_FAIL_BITS;
c_alive_nodes.clear(hostptr.i);
if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
jam();
/*------------------------------------------------------------*/
/* A VERY UNUSUAL SITUATION. THE TAKE OVER WAS COMPLETED*/
/* EVEN BEFORE WE HEARD ABOUT THE NODE FAILURE REPORT. */
/* HOWEVER UNUSUAL THIS SITUATION IS POSSIBLE. */
/*------------------------------------------------------------*/
/* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
/* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
/* USED THEM IS COMPLETED. */
/*------------------------------------------------------------*/
{
NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
nfRep->blockNo = DBTC;
nfRep->nodeId = cownNodeid;
nfRep->failedNodeId = hostptr.i;
}
sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
NFCompleteRep::SignalLength, JBB);
} else {
ndbrequire(hostptr.p->takeOverStatus == TOS_IDLE);
hostptr.p->takeOverStatus = TOS_NODE_FAILED;
}//if
if (tcNodeFailptr.p->failStatus == FS_LISTENING) {
if (tcNodeFailptr.p->failStatus == FS_LISTENING)
{
jam();
/*------------------------------------------------------------*/
/* THE CURRENT TAKE OVER CAN BE AFFECTED BY THIS NODE */
/* FAILURE. */
/*------------------------------------------------------------*/
if (hostptr.p->lqhTransStatus == LTS_ACTIVE) {
if (hostptr.p->lqhTransStatus == LTS_ACTIVE)
{
jam();
/*------------------------------------------------------------*/
/* WE WERE WAITING FOR THE FAILED NODE IN THE TAKE OVER */
@ -6937,86 +6984,46 @@ void Dbtc::execNODE_FAILREP(Signal* signal)
}//if
}//if
}//for
const bool masterFailed = (cmasterNodeId != tnewMasterId);
cmasterNodeId = tnewMasterId;
if(getOwnNodeId() == cmasterNodeId && masterFailed){
/**
* Master has failed and I'm the new master
*/
jam();
for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
if (getOwnNodeId() != tnewMasterId)
{
jam();
ptrAss(hostptr, hostRecord);
if (hostptr.p->hostStatus != HS_ALIVE) {
jam();
if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
jam();
/*------------------------------------------------------------*/
/* SEND TAKE OVER CONFIRMATION TO ALL ALIVE NODES IF */
/* TAKE OVER IS COMPLETED. THIS IS PERFORMED TO ENSURE */
/* THAT ALL NODES AGREE ON THE IDLE STATE OF THE TAKE */
/* OVER. THIS MIGHT BE MISSED IN AN ERROR SITUATION IF */
/* MASTER FAILS AFTER SENDING CONFIRMATION TO NEW */
/* MASTER BUT FAILING BEFORE SENDING TO ANOTHER NODE */
/* WHICH WAS NOT MASTER. IF THIS NODE LATER BECOMES */
/* MASTER IT MIGHT START A NEW TAKE OVER EVEN AFTER THE */
/* CRASHED NODE HAVE ALREADY RECOVERED. */
/*------------------------------------------------------------*/
for(tmpHostptr.i = 1; tmpHostptr.i < MAX_NDB_NODES;tmpHostptr.i++) {
jam();
ptrAss(tmpHostptr, hostRecord);
if (tmpHostptr.p->hostStatus == HS_ALIVE) {
jam();
tblockref = calcTcBlockRef(tmpHostptr.i);
signal->theData[0] = hostptr.i;
sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
}//if
}//for
}//if
}//if
}//for
}
if(getOwnNodeId() == cmasterNodeId){
jam();
for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
/**
* Only master does takeover currently
*/
hostptr.p->m_nf_bits &= ~HostRecord::NF_TAKEOVER;
}
else
{
jam();
ptrAss(hostptr, hostRecord);
if (hostptr.p->hostStatus != HS_ALIVE) {
jam();
if (hostptr.p->takeOverStatus == TOS_NODE_FAILED) {
jam();
/*------------------------------------------------------------*/
/* CONCLUDE ALL ACTIVITIES THE FAILED TC DID CONTROL */
/* SINCE WE ARE THE MASTER. THIS COULD HAVE BEEN STARTED*/
/* BY A PREVIOUS MASTER BUT HAVE NOT BEEN CONCLUDED YET.*/
/*------------------------------------------------------------*/
hostptr.p->takeOverStatus = TOS_ACTIVE;
signal->theData[0] = hostptr.i;
sendSignal(cownref, GSN_TAKE_OVERTCREQ, signal, 1, JBB);
}//if
}//if
}//for
}//if
for (tindex = 0; tindex < tnoOfNodes; tindex++) {
jam();
hostptr.i = cdata[tindex];
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
/*------------------------------------------------------------*/
/* LOOP THROUGH AND ABORT ALL SCANS THAT WHERE */
/* CONTROLLED BY THIS TC AND ACTIVE IN THE FAILED */
/* NODE'S LQH */
/*------------------------------------------------------------*/
signal->theData[0] = hostptr.i;
sendSignal(cownref, GSN_TAKE_OVERTCREQ, signal, 1, JBB);
}
checkScanActiveInFailedLqh(signal, 0, hostptr.i);
checkWaitDropTabFailedLqh(signal, hostptr.i, 0); // nodeid, tableid
}//for
nodeFailCheckTransactions(signal, 0, hostptr.i);
}
}//Dbtc::execNODE_FAILREP()
void
Dbtc::checkNodeFailComplete(Signal* signal,
Uint32 failedNodeId,
Uint32 bit)
{
hostptr.i = failedNodeId;
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
hostptr.p->m_nf_bits &= ~bit;
if (hostptr.p->m_nf_bits == 0)
{
NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
nfRep->blockNo = DBTC;
nfRep->nodeId = cownNodeid;
nfRep->failedNodeId = hostptr.i;
sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
NFCompleteRep::SignalLength, JBB);
}
}
void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
Uint32 scanPtrI,
Uint32 failedNodeId){
@ -7058,8 +7065,44 @@ void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
return;
}//for
checkNodeFailComplete(signal, failedNodeId, HostRecord::NF_CHECK_SCAN);
}
void
Dbtc::nodeFailCheckTransactions(Signal* signal,
Uint32 transPtrI,
Uint32 failedNodeId)
{
jam();
Ptr<ApiConnectRecord> transPtr;
for (transPtr.i = transPtrI; transPtr.i < capiConnectFilesize; transPtr.i++)
{
ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord);
if (transPtr.p->m_transaction_nodes.get(failedNodeId))
{
jam();
// Force timeout regardless of state
Uint32 save = c_appl_timeout_value;
c_appl_timeout_value = 1;
setApiConTimer(transPtr.i, 0, __LINE__);
timeOutFoundLab(signal, transPtr.i, ZNODEFAIL_BEFORE_COMMIT);
c_appl_timeout_value = save;
}
// Send CONTINUEB to continue later
signal->theData[0] = TcContinueB::ZNF_CHECK_TRANSACTIONS;
signal->theData[1] = transPtr.i + 1; // Check next
signal->theData[2] = failedNodeId;
sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
return;
}
checkNodeFailComplete(signal, failedNodeId,
HostRecord::NF_CHECK_TRANSACTION);
}
void
Dbtc::checkScanFragList(Signal* signal,
Uint32 failedNodeId,
@ -7075,54 +7118,14 @@ void Dbtc::execTAKE_OVERTCCONF(Signal* signal)
tfailedNodeId = signal->theData[0];
hostptr.i = tfailedNodeId;
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
switch (hostptr.p->takeOverStatus) {
case TOS_IDLE:
if (signal->getSendersBlockRef() != reference())
{
jam();
/*------------------------------------------------------------*/
/* THIS MESSAGE ARRIVED EVEN BEFORE THE NODE_FAILREP */
/* MESSAGE. THIS IS POSSIBLE IN EXTREME SITUATIONS. */
/* WE SET THE STATE TO TAKE_OVER_COMPLETED AND WAIT */
/* FOR THE NODE_FAILREP MESSAGE. */
/*------------------------------------------------------------*/
hostptr.p->takeOverStatus = TOS_COMPLETED;
break;
case TOS_NODE_FAILED:
case TOS_ACTIVE:
jam();
/*------------------------------------------------------------*/
/* WE ARE NOT MASTER AND THE TAKE OVER IS ACTIVE OR WE */
/* ARE MASTER AND THE TAKE OVER IS ACTIVE. IN BOTH */
/* WE SET THE STATE TO TAKE_OVER_COMPLETED. */
/*------------------------------------------------------------*/
/* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
/* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
/* USED THEM IS COMPLETED. */
/*------------------------------------------------------------*/
hostptr.p->takeOverStatus = TOS_COMPLETED;
{
NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
nfRep->blockNo = DBTC;
nfRep->nodeId = cownNodeid;
nfRep->failedNodeId = hostptr.i;
}
sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
NFCompleteRep::SignalLength, JBB);
break;
case TOS_COMPLETED:
jam();
/*------------------------------------------------------------*/
/* WE HAVE ALREADY RECEIVED THE CONF SIGNAL. IT IS MOST */
/* LIKELY SENT FROM A NEW MASTER WHICH WASN'T SURE IF */
/* THIS NODE HEARD THE CONF SIGNAL FROM THE OLD MASTER. */
/* WE SIMPLY IGNORE THE MESSAGE. */
/*------------------------------------------------------------*/
/*empty*/;
break;
default:
jam();
systemErrorLab(signal, __LINE__);
return;
}//switch
}
checkNodeFailComplete(signal, hostptr.i, HostRecord::NF_TAKEOVER);
}//Dbtc::execTAKE_OVERTCCONF()
void Dbtc::execTAKE_OVERTCREQ(Signal* signal)
@ -7362,16 +7365,10 @@ void Dbtc::completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd)
/* TO REPORT THE COMPLETION OF THE TAKE OVER TO ALL */
/* NODES THAT ARE ALIVE. */
/*------------------------------------------------------------*/
for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
jam();
ptrAss(hostptr, hostRecord);
if (hostptr.p->hostStatus == HS_ALIVE) {
jam();
tblockref = calcTcBlockRef(hostptr.i);
signal->theData[0] = tcNodeFailptr.p->takeOverNode;
sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
}//if
}//for
NodeReceiverGroup rg(DBTC, c_alive_nodes);
signal->theData[0] = tcNodeFailptr.p->takeOverNode;
sendSignal(rg, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
if (tcNodeFailptr.p->queueIndex > 0) {
jam();
/*------------------------------------------------------------*/
@ -8053,6 +8050,7 @@ void Dbtc::initApiConnectFail(Signal* signal)
apiConnectptr.p->ndbapiBlockref = 0;
apiConnectptr.p->ndbapiConnect = 0;
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->m_transaction_nodes.clear();
setApiConTimer(apiConnectptr.i, 0, __LINE__);
switch(ttransStatus){
case LqhTransConf::Committed:
@ -9886,6 +9884,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->executingIndexOp = RNIL;
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->currSavePointId = 0;
apiConnectptr.p->m_transaction_nodes.clear();
}//for
apiConnectptr.i = tiacTmp - 1;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@ -9913,6 +9912,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->executingIndexOp = RNIL;
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->currSavePointId = 0;
apiConnectptr.p->m_transaction_nodes.clear();
}//for
apiConnectptr.i = (2 * tiacTmp) - 1;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@ -9940,6 +9940,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->executingIndexOp = RNIL;
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->currSavePointId = 0;
apiConnectptr.p->m_transaction_nodes.clear();
}//for
apiConnectptr.i = (3 * tiacTmp) - 1;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@ -10000,13 +10001,13 @@ void Dbtc::inithost(Signal* signal)
ptrAss(hostptr, hostRecord);
hostptr.p->hostStatus = HS_DEAD;
hostptr.p->inPackedList = false;
hostptr.p->takeOverStatus = TOS_NOT_DEFINED;
hostptr.p->lqhTransStatus = LTS_IDLE;
hostptr.p->noOfWordsTCKEYCONF = 0;
hostptr.p->noOfWordsTCINDXCONF = 0;
hostptr.p->noOfPackedWordsLqh = 0;
hostptr.p->hostLqhBlockRef = calcLqhBlockRef(hostptr.i);
}//for
c_alive_nodes.clear();
}//Dbtc::inithost()
void Dbtc::initialiseRecordsLab(Signal* signal, UintR Tdata0,
@ -10260,6 +10261,7 @@ void Dbtc::releaseAbortResources(Signal* signal)
}//while
apiConnectptr.p->firstTcConnect = RNIL;
apiConnectptr.p->lastTcConnect = RNIL;
apiConnectptr.p->m_transaction_nodes.clear();
// MASV let state be CS_ABORTING until all
// signals in the "air" have been received. Reset to CS_CONNECTED
@ -10333,6 +10335,7 @@ void Dbtc::releaseApiCon(Signal* signal, UintR TapiConnectPtr)
cfirstfreeApiConnect = TlocalApiConnectptr.i;
setApiConTimer(TlocalApiConnectptr.i, 0, __LINE__);
TlocalApiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
ndbassert(TlocalApiConnectptr.p->m_transaction_nodes.isclear());
ndbassert(TlocalApiConnectptr.p->apiScanRec == RNIL);
TlocalApiConnectptr.p->ndbapiBlockref = 0;
}//Dbtc::releaseApiCon()
@ -10868,6 +10871,34 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
c_theIndexOperationPool.getSize(),
c_theIndexOperationPool.getNoOfFree());
}
if (dumpState->args[0] == 2514)
{
if (signal->getLength() == 2)
{
dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
execDUMP_STATE_ORD(signal);
}
NodeReceiverGroup rg(CMVMI, c_alive_nodes);
dumpState->args[0] = 15;
sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBB);
signal->theData[0] = 2515;
sendSignalWithDelay(cownref, GSN_DUMP_STATE_ORD, signal, 1000, 1);
return;
}
if (dumpState->args[0] == 2515)
{
NdbNodeBitmask mask = c_alive_nodes;
mask.clear(getOwnNodeId());
NodeReceiverGroup rg(NDBCNTR, mask);
sendSignal(rg, GSN_SYSTEM_ERROR, signal, 1, JBB);
sendSignalWithDelay(cownref, GSN_SYSTEM_ERROR, signal, 300, 1);
return;
}
}//Dbtc::execDUMP_STATE_ORD()
void Dbtc::execSET_VAR_REQ(Signal* signal)

View file

@ -278,6 +278,7 @@ void Qmgr::setArbitTimeout(UintR aArbitTimeout)
void Qmgr::execCONNECT_REP(Signal* signal)
{
jamEntry();
const Uint32 nodeId = signal->theData[0];
c_connectedNodes.set(nodeId);
NodeRecPtr nodePtr;
@ -285,9 +286,13 @@ void Qmgr::execCONNECT_REP(Signal* signal)
ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
switch(nodePtr.p->phase){
case ZSTARTING:
jam();
break;
case ZRUNNING:
jam();
if(!c_start.m_nodes.isWaitingFor(nodeId)){
jam();
return;
}
break;
case ZPREPARE_FAIL:
case ZFAIL_CLOSING:
jam();
@ -298,21 +303,28 @@ void Qmgr::execCONNECT_REP(Signal* signal)
case ZAPI_INACTIVE:
return;
}
if(!c_start.m_nodes.isWaitingFor(nodeId)){
jam();
return;
}
switch(c_start.m_gsn){
case GSN_CM_REGREQ:
jam();
sendCmRegReq(signal, nodeId);
return;
case GSN_CM_NODEINFOREQ:{
case GSN_CM_NODEINFOREQ:
jam();
sendCmNodeInfoReq(signal, nodeId, nodePtr.p);
return;
case GSN_CM_ADD:{
jam();
ndbrequire(getOwnNodeId() != cpresident);
c_start.m_nodes.clearWaitingFor(nodeId);
c_start.m_gsn = RNIL;
NodeRecPtr addNodePtr;
addNodePtr.i = nodeId;
ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
cmAddPrepare(signal, addNodePtr, nodePtr.p);
return;
}
default:
return;
@ -945,15 +957,27 @@ Qmgr::cmAddPrepare(Signal* signal, NodeRecPtr nodePtr, const NodeRec * self){
return;
case ZFAIL_CLOSING:
jam();
#ifdef VM_TRACE
ndbout_c("Enabling communication to CM_ADD node state=%d",
nodePtr.p->phase);
#endif
#if 1
warningEvent("Recieved request to incorperate node %u, "
"while error handling has not yet completed",
nodePtr.i);
ndbrequire(getOwnNodeId() != cpresident);
ndbrequire(signal->header.theVerId_signalNumber == GSN_CM_ADD);
c_start.m_nodes.clearWaitingFor();
c_start.m_nodes.setWaitingFor(nodePtr.i);
c_start.m_gsn = GSN_CM_ADD;
#else
warningEvent("Enabling communication to CM_ADD node %u state=%d",
nodePtr.i,
nodePtr.p->phase);
nodePtr.p->phase = ZSTARTING;
nodePtr.p->failState = NORMAL;
signal->theData[0] = 0;
signal->theData[1] = nodePtr.i;
sendSignal(CMVMI_REF, GSN_OPEN_COMREQ, signal, 2, JBA);
#endif
return;
case ZSTARTING:
break;
@ -1788,11 +1812,27 @@ void Qmgr::execNDB_FAILCONF(Signal* signal)
jamEntry();
failedNodePtr.i = signal->theData[0];
if (ERROR_INSERTED(930))
{
CLEAR_ERROR_INSERT_VALUE;
infoEvent("Discarding NDB_FAILCONF for %u", failedNodePtr.i);
return;
}
ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
if (failedNodePtr.p->failState == WAITING_FOR_NDB_FAILCONF){
failedNodePtr.p->failState = NORMAL;
} else {
jam();
char buf[100];
BaseString::snprintf(buf, 100,
"Received NDB_FAILCONF for node %u with state: %d %d",
failedNodePtr.i,
failedNodePtr.p->phase,
failedNodePtr.p->failState);
progError(__LINE__, 0, buf);
systemErrorLab(signal, __LINE__);
}//if
if (cpresident == getOwnNodeId()) {
@ -2115,10 +2155,42 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode,
ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
if (failedNodePtr.i == getOwnNodeId()) {
jam();
systemErrorLab(signal, __LINE__);
const char * msg = 0;
switch(aFailCause){
case FailRep::ZOWN_FAILURE:
msg = "Own failure";
break;
case FailRep::ZOTHER_NODE_WHEN_WE_START:
case FailRep::ZOTHERNODE_FAILED_DURING_START:
msg = "Other node died during start";
break;
case FailRep::ZIN_PREP_FAIL_REQ:
msg = "Prep fail";
break;
case FailRep::ZSTART_IN_REGREQ:
msg = "Start timeout";
break;
case FailRep::ZHEARTBEAT_FAILURE:
msg = "Hearbeat failure";
break;
case FailRep::ZLINK_FAILURE:
msg = "Connection failure";
break;
}
char buf[100];
BaseString::snprintf(buf, 100,
"We(%u) have been declared dead by %u reason: %s(%u)",
getOwnNodeId(),
refToNode(signal->getSendersBlockRef()),
aFailCause,
msg ? msg : "<Unknown>");
progError(__LINE__, 0, buf);
return;
}//if
myNodePtr.i = getOwnNodeId();
ptrCheckGuard(myNodePtr, MAX_NDB_NODES, nodeRec);
if (myNodePtr.p->phase != ZRUNNING) {
@ -2829,6 +2901,7 @@ void Qmgr::failReport(Signal* signal,
cfailureNr = cprepareFailureNr;
ctoFailureNr = 0;
ctoStatus = Q_ACTIVE;
c_start.reset(); // Don't take over nodes being started
if (cnoCommitFailedNodes > 0) {
jam();
/**-----------------------------------------------------------------

View file

@ -85,6 +85,9 @@ static const ErrStruct errArray[] =
*/
{NDBD_EXIT_OS_SIGNAL_RECEIVED, XIE, "Error OS signal received"},
{NDBD_EXIT_SR_RESTARTCONFLICT, XRE,
"Partial system restart causing conflicting file systems"},
/* VM */
{NDBD_EXIT_OUT_OF_LONG_SIGNAL_MEMORY, XCR,
"Signal lost, out of long signal memory, please increase LongMessageBuffer"},

View file

@ -312,15 +312,16 @@ inline
void
DLFifoListImpl<P,T,U>::release()
{
  /**
   * Release every element of the fifo list back to the pool.
   *
   * Fix: the stripped-diff rendering left BOTH the pre-patch body (which
   * read the element's next-link and called release(p) on the list itself)
   * and the post-patch body interleaved. Only the corrected variant is
   * kept: it caches the successor BEFORE returning the element to the
   * pool, so the link of a released (potentially reused) record is never
   * dereferenced.
   */
  Ptr<T> ptr;
  Uint32 curr = head.firstItem;
  while (curr != RNIL)
  {
    thePool.getPtr(ptr, curr);
    curr = ptr.p->U::nextList;   // read link before releasing
    thePool.release(ptr);
  }
  head.firstItem = RNIL;
  head.lastItem = RNIL;
}
template <typename P, typename T, typename U>

View file

@ -332,13 +332,15 @@ void
DLListImpl<P,T,U>::release()
{
  /**
   * Release every element of the list back to the pool.
   *
   * Fix: stripped-diff artifact removed — only the corrected loop is
   * kept. The successor index is cached before the element is released,
   * so a released record's link is never read, and the list head is
   * reset exactly once after the loop.
   */
  Ptr<T> ptr;
  Uint32 curr = head.firstItem;
  while (curr != RNIL)
  {
    thePool.getPtr(ptr, curr);
    curr = ptr.p->U::nextList;   // read link before releasing
    thePool.release(ptr);
  }
  head.firstItem = RNIL;
}
template <typename P, typename T, typename U>
inline

View file

@ -197,7 +197,7 @@ public:
/**
* Return an object to pool
*/
void release(Ptr<T> &);
void release(Ptr<T>);
private:
P m_pool;
};
@ -213,10 +213,15 @@ inline
void
RecordPool<T, P>::init(Uint32 type_id, const Pool_context& pc)
{
T tmp;
const char * off_base = (char*)&tmp;
const char * off_next = (char*)&tmp.nextPool;
const char * off_magic = (char*)&tmp.m_magic;
Record_info ri;
ri.m_size = sizeof(T);
ri.m_offset_next_pool = offsetof(T, nextPool);
ri.m_offset_magic = offsetof(T, m_magic);
ri.m_offset_next_pool = Uint32(off_next - off_base);
ri.m_offset_magic = Uint32(off_magic - off_base);
ri.m_type_id = type_id;
m_pool.init(ri, pc);
}
@ -226,10 +231,14 @@ inline
void
RecordPool<T, P>::wo_pool_init(Uint32 type_id, const Pool_context& pc)
{
T tmp;
const char * off_base = (char*)&tmp;
const char * off_magic = (char*)&tmp.m_magic;
Record_info ri;
ri.m_size = sizeof(T);
ri.m_offset_next_pool = 0;
ri.m_offset_magic = offsetof(T, m_magic);
ri.m_offset_magic = Uint32(off_magic - off_base);
ri.m_type_id = type_id;
m_pool.init(ri, pc);
}
@ -313,7 +322,7 @@ RecordPool<T, P>::release(Uint32 i)
template <typename T, typename P>
inline
void
RecordPool<T, P>::release(Ptr<T> ptr)
{
  // Return one record to the underlying pool.
  // Fix: the stripped diff left both the old by-reference signature
  // (Ptr<T> &) and the new by-value one; keep only the by-value form,
  // which matches the in-class declaration `void release(Ptr<T>);`.
  m_pool.release(*(Ptr<void>*)&ptr);
}

View file

@ -302,13 +302,15 @@ void
SLListImpl<P, T, U>::release()
{
  /**
   * Release every element of the singly-linked list back to the pool.
   *
   * Fix: stripped-diff artifact removed — only the corrected loop is
   * kept. Each element's next-link is read before the element itself is
   * released, and the head is reset after the loop.
   */
  Ptr<T> ptr;
  Uint32 curr = head.firstItem;
  while (curr != RNIL)
  {
    thePool.getPtr(ptr, curr);
    curr = ptr.p->U::nextList;   // read link before releasing
    thePool.release(ptr);
  }
  head.firstItem = RNIL;
}
template <typename P, typename T, typename U>
inline

View file

@ -55,7 +55,9 @@ static const ConnectionError connectionError[] =
const char *lookupConnectionError(Uint32 err)
{
  /**
   * Map a connection error code to its message text by linear scan of
   * the connectionError table. The entry whose err field is -1 acts as
   * the terminator, so the loop always stops there when no code matches.
   *
   * Fix: removed the leftover pre-patch statement
   *   `while (cond);`
   * whose empty body never advanced i — a busy-loop whenever the first
   * table entry did not already match. Only the corrected loop remains.
   */
  int i = 0;
  while ((Uint32)connectionError[i].err != err &&
         (Uint32)connectionError[i].err != -1)
    i++;
  return connectionError[i].text;
}

View file

@ -26,7 +26,7 @@
#include <signaldata/IndxAttrInfo.hpp>
NdbIndexOperation::NdbIndexOperation(Ndb* aNdb) :
NdbOperation(aNdb),
NdbOperation(aNdb, NdbOperation::UniqueIndexAccess),
m_theIndex(NULL)
{
m_tcReqGSN = GSN_TCINDXREQ;
@ -164,6 +164,12 @@ int NdbIndexOperation::interpretedDeleteTuple()
return NdbOperation::interpretedDeleteTuple();
}
const NdbDictionary::Index*
NdbIndexOperation::getIndex() const
{
  // Accessor for the unique index this operation is defined against.
  // Initialised to NULL in the constructor, so may be NULL before the
  // operation has been bound to an index.
  return m_theIndex;
}
int
NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
{

View file

@ -37,7 +37,8 @@
* aTable: Pointers to the Table object
* Remark: Creat an object of NdbOperation.
****************************************************************************/
NdbOperation::NdbOperation(Ndb* aNdb) :
NdbOperation::NdbOperation(Ndb* aNdb, NdbOperation::Type aType) :
m_type(aType),
theReceiver(aNdb),
theErrorLine(0),
theNdb(aNdb),

View file

@ -37,8 +37,8 @@
#define DEBUG_NEXT_RESULT 0
NdbScanOperation::NdbScanOperation(Ndb* aNdb) :
NdbOperation(aNdb),
NdbScanOperation::NdbScanOperation(Ndb* aNdb, NdbOperation::Type aType) :
NdbOperation(aNdb, aType),
m_transConnection(NULL)
{
theParallelism = 0;
@ -1032,7 +1032,7 @@ NdbScanOperation::getBlobHandle(Uint32 anAttrId)
}
NdbIndexScanOperation::NdbIndexScanOperation(Ndb* aNdb)
: NdbScanOperation(aNdb)
: NdbScanOperation(aNdb, NdbOperation::OrderedIndexScan)
{
}

View file

@ -434,12 +434,12 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
//------------------------------------------------------------------------
Ndb* tNdb = theNdb;
Uint32 timeout = theNdb->theImpl->m_transporter_facade->m_waitfor_timeout;
m_waitForReply = false;
executeAsynchPrepare(aTypeOfExec, NULL, NULL, abortOption);
if (m_waitForReply){
while (1) {
int noOfComp = tNdb->sendPollNdb((3 * WAITFOR_RESPONSE_TIMEOUT),
1, forceSend);
int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
if (noOfComp == 0) {
/**
* This timeout situation can occur if NDB crashes.
@ -1193,6 +1193,8 @@ NdbTransaction::getNdbIndexScanOperation(const NdbIndexImpl* index,
{
tOp->m_currentTable = table;
}
// Mark that this really an NdbIndexScanOperation
tOp->m_type = NdbOperation::OrderedIndexScan;
return tOp;
} else {
setOperationErrorCodeAbort(4271);
@ -1254,6 +1256,8 @@ NdbTransaction::getNdbScanOperation(const NdbTableImpl * tab)
if (tOp->init(tab, this) != -1) {
define_scan_op(tOp);
// Mark that this NdbIndexScanOperation is used as NdbScanOperation
tOp->m_type = NdbOperation::TableScan;
return tOp;
} else {
theNdb->releaseScanOperation(tOp);

View file

@ -1055,6 +1055,7 @@ Ndb::pollCompleted(NdbTransaction** aCopyArray)
void
Ndb::check_send_timeout()
{
Uint32 timeout = theImpl->m_transporter_facade->m_waitfor_timeout;
NDB_TICKS current_time = NdbTick_CurrentMillisecond();
assert(current_time >= the_last_check_time);
if (current_time - the_last_check_time > 1000) {
@ -1062,17 +1063,18 @@ Ndb::check_send_timeout()
Uint32 no_of_sent = theNoOfSentTransactions;
for (Uint32 i = 0; i < no_of_sent; i++) {
NdbTransaction* a_con = theSentTransactionsArray[i];
if ((current_time - a_con->theStartTransTime) >
WAITFOR_RESPONSE_TIMEOUT) {
if ((current_time - a_con->theStartTransTime) > timeout)
{
#ifdef VM_TRACE
a_con->printState();
Uint32 t1 = a_con->theTransactionId;
Uint32 t2 = a_con->theTransactionId >> 32;
ndbout_c("[%.8x %.8x]", t1, t2);
abort();
ndbout_c("4012 [%.8x %.8x]", t1, t2);
//abort();
#endif
a_con->theReleaseOnClose = true;
a_con->setOperationErrorCodeAbort(4012);
a_con->theCommitStatus = NdbTransaction::Aborted;
a_con->theCommitStatus = NdbTransaction::NeedAbort;
a_con->theCompletionStatus = NdbTransaction::CompletedFailure;
a_con->handleExecuteCompletion();
remove_sent_list(i);

View file

@ -721,6 +721,19 @@ TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props)
m_batch_size= batch_size;
}
Uint32 timeout = 120000;
iter.first();
for (iter.first(); iter.valid(); iter.next())
{
Uint32 tmp1 = 0, tmp2 = 0;
iter.get(CFG_DB_TRANSACTION_CHECK_INTERVAL, &tmp1);
iter.get(CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, &tmp2);
tmp1 += tmp2;
if (tmp1 > timeout)
timeout = tmp1;
}
m_waitfor_timeout = timeout;
if (!theTransporterRegistry->start_service(m_socket_server)){
ndbout_c("Unable to start theTransporterRegistry->start_service");
DBUG_RETURN(false);

View file

@ -121,6 +121,7 @@ public:
Uint32 get_scan_batch_size();
Uint32 get_batch_byte_size();
Uint32 get_batch_size();
Uint32 m_waitfor_timeout; // in milli seconds...
TransporterRegistry* get_registry() { return theTransporterRegistry;};

View file

@ -44,6 +44,8 @@ public:
static const NdbDictionary::Table* getTable(int _num);
static int getNumTables();
static int create_default_tablespace(Ndb* pNdb);
private:
static const NdbDictionary::Table* tableWithPkSize(const char* _nam, Uint32 pkSize);
};

View file

@ -62,6 +62,7 @@ public:
int dumpStateAllNodes(int * _args, int _num_args);
int getMasterNodeId();
int getRandomNodeSameNodeGroup(int nodeId, int randomNumber);
int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber);
int getRandomNotMasterNodeId(int randomNumber);

View file

@ -19,7 +19,7 @@
#include <NdbSleep.h>
#include <UtilTransactions.hpp>
Bank::Bank(Ndb_cluster_connection& con, bool _init, char * dbase):
Bank::Bank(Ndb_cluster_connection& con, bool _init, const char * dbase):
m_ndb(&con, dbase),
m_maxAccount(-1),
m_initialized(false)

View file

@ -27,9 +27,9 @@
class Bank {
public:
Bank(Ndb_cluster_connection&, bool init = true, char *dbase="BANK");
Bank(Ndb_cluster_connection&, bool init = true, const char *dbase="BANK");
int createAndLoadBank(bool overWrite, int num_accounts=10);
int createAndLoadBank(bool overWrite, bool disk= false, int num_accounts=10);
int dropBank();
int performTransactions(int maxSleepBetweenTrans = 20, int yield=0);
@ -121,8 +121,8 @@ private:
int prepareReadSystemValueOp(NdbConnection*, SystemValueId sysValId, Uint64 &time);
int prepareGetCurrTimeOp(NdbConnection*, Uint64 &time);
int createTables();
int createTable(const char* tabName);
int createTables(bool disk);
int createTable(const char* tabName, bool disk);
int dropTables();
int dropTable(const char* tabName);

View file

@ -53,7 +53,7 @@ int Bank::getNumAccountTypes(){
return accountTypesSize;
}
int Bank::createAndLoadBank(bool ovrWrt, int num_accounts){
int Bank::createAndLoadBank(bool ovrWrt, bool disk, int num_accounts){
m_ndb.init();
if (m_ndb.waitUntilReady() != 0)
@ -69,7 +69,7 @@ int Bank::createAndLoadBank(bool ovrWrt, int num_accounts){
}
}
if (createTables() != NDBT_OK)
if (createTables(disk) != NDBT_OK)
return NDBT_FAILED;
if (clearTables() != NDBT_OK)
@ -104,9 +104,9 @@ int Bank::dropBank(){
}
int Bank::createTables(){
int Bank::createTables(bool disk){
for (int i = 0; i < tableNamesSize; i++){
if (createTable(tableNames[i]) != NDBT_OK)
if (createTable(tableNames[i], disk) != NDBT_OK)
return NDBT_FAILED;
}
return NDBT_OK;
@ -136,7 +136,7 @@ int Bank::clearTable(const char* tabName){
return NDBT_OK;
}
int Bank::createTable(const char* tabName){
int Bank::createTable(const char* tabName, bool disk){
ndbout << "createTable " << tabName << endl;
const NdbDictionary::Table* pTab = NDBT_Tables::getTable(tabName);
@ -146,7 +146,8 @@ int Bank::createTable(const char* tabName){
const NdbDictionary::Table* org =
m_ndb.getDictionary()->getTable(tabName);
if (org != 0 && pTab->equal(* org)){
if (org != 0 && (disk || pTab->equal(* org)))
{
return NDBT_OK;
}
@ -154,11 +155,31 @@ int Bank::createTable(const char* tabName){
ndbout << "Different table with same name exists" << endl;
return NDBT_FAILED;
}
if(m_ndb.getDictionary()->createTable(* pTab) == -1){
ndbout << "Failed to create table: " <<
m_ndb.getNdbError() << endl;
return NDBT_FAILED;
if (disk)
{
if (NDBT_Tables::create_default_tablespace(&m_ndb))
{
ndbout << "Failed to create tablespaces" << endl;
return NDBT_FAILED;
}
NdbDictionary::Table copy(* pTab);
copy.setTablespace("DEFAULT-TS");
for (Uint32 i = 0; i<copy.getNoOfColumns(); i++)
copy.getColumn(i)->setStorageType(NdbDictionary::Column::StorageTypeDisk);
if(m_ndb.getDictionary()->createTable(copy) == -1){
ndbout << "Failed to create table: " <<
m_ndb.getNdbError() << endl;
return NDBT_FAILED;
}
}
else
{
if(m_ndb.getDictionary()->createTable(* pTab) == -1){
ndbout << "Failed to create table: " <<
m_ndb.getNdbError() << endl;
return NDBT_FAILED;
}
}
return NDBT_OK;

View file

@ -30,9 +30,11 @@ int main(int argc, const char** argv){
ndb_init();
int _help = 0;
char * _database = "BANK";
int disk = 0;
struct getargs args[] = {
{ "database", 'd', arg_string, &_database, "Database name", ""},
{ "disk", 0, arg_flag, &disk, "Use disk tables", "" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@ -53,7 +55,7 @@ int main(int argc, const char** argv){
Bank bank(con,_database);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK)
if (bank.createAndLoadBank(overWriteExisting, disk) != NDBT_OK)
return NDBT_ProgramExit(NDBT_FAILED);
return NDBT_ProgramExit(NDBT_OK);

View file

@ -31,8 +31,9 @@
#include "Bank.hpp"
const char* _database = "BANK";
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
char * _database = "BANK";
Bank bank(ctx->m_cluster_connection, _database);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK)
@ -41,7 +42,6 @@ int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
char * _database = "BANK";
Bank bank(ctx->m_cluster_connection, _database);
int wait = 30; // Max seconds between each "day"
int yield = 1; // Loops before bank returns
@ -53,7 +53,6 @@ int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
char * _database = "BANK";
Bank bank(ctx->m_cluster_connection, _database);
int wait = 10; // Max ms between each transaction
int yield = 100; // Loops before bank returns
@ -65,7 +64,6 @@ int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
char * _database = "BANK";
Bank bank(ctx->m_cluster_connection, _database);
int yield = 20; // Loops before bank returns
int result = NDBT_OK;
@ -80,7 +78,6 @@ int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
char * _database = "BANK";
Bank bank(ctx->m_cluster_connection, _database);
int wait = 2000; // Max ms between each sum of accounts
int yield = 1; // Loops before bank returns
@ -96,7 +93,6 @@ int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
}
int runDropBank(NDBT_Context* ctx, NDBT_Step* step){
char * _database = "BANK";
Bank bank(ctx->m_cluster_connection, _database);
if (bank.dropBank() != NDBT_OK)
return NDBT_FAILED;

View file

@ -439,6 +439,14 @@ int runBug15587(NDBT_Context* ctx, NDBT_Step* step){
if (restarter.startNodes(&nodeId, 1))
return NDBT_FAILED;
restarter.waitNodesStartPhase(&nodeId, 1, 3);
if (restarter.waitNodesNoStart(&nodeId, 1))
return NDBT_FAILED;
if (restarter.startNodes(&nodeId, 1))
return NDBT_FAILED;
if (restarter.waitNodesStarted(&nodeId, 1))
return NDBT_FAILED;
@ -535,6 +543,119 @@ err:
return NDBT_FAILED;
}
int
runBug16772(NDBT_Context* ctx, NDBT_Step* step){
  /**
   * Regression test for bug#16772: restarting a node before node-failure
   * handling has completed. Requires at least two data nodes; otherwise
   * the test is skipped with NDBT_OK.
   */
  NdbRestarter restarter;
  if (restarter.getNumDbNodes() < 2)
  {
    ctx->stopTest();
    return NDBT_OK;
  }

  // Observer node (a non-master) and a distinct victim node.
  int aliveNodeId = restarter.getRandomNotMasterNodeId(rand());
  int deadNodeId = aliveNodeId;
  while (deadNodeId == aliveNodeId)
    deadNodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());

  // Error insert 930 makes the alive node discard the NDB_FAILCONF for
  // the victim (see Qmgr::execNDB_FAILCONF), so failure handling for the
  // victim never completes.
  if (restarter.insertErrorInNode(aliveNodeId, 930))
    return NDBT_FAILED;

  if (restarter.restartOneDbNode(deadNodeId,
                                 /** initial */ false,
                                 /** nostart */ true,
                                 /** abort   */ true))
    return NDBT_FAILED;

  if (restarter.waitNodesNoStart(&deadNodeId, 1))
    return NDBT_FAILED;

  if (restarter.startNodes(&deadNodeId, 1))
    return NDBT_FAILED;

  // It should now be hanging since we throw away NDB_FAILCONF
  int ret = restarter.waitNodesStartPhase(&deadNodeId, 1, 3, 10);
  // So this should fail...i.e it should not reach startphase 3

  // Now send a NDB_FAILCONF for deadNo
  int dump[] = { 7020, 323, 252, 0 };
  dump[3] = deadNodeId;
  if (restarter.dumpStateOneNode(aliveNodeId, dump, 4))
    return NDBT_FAILED;

  if (restarter.waitNodesStarted(&deadNodeId, 1))
    return NDBT_FAILED;

  // The wait for start-phase 3 must have failed (ret != 0) for the bug
  // scenario to have been exercised; reaching phase 3 means no hang.
  return ret ? NDBT_OK : NDBT_FAILED;
}
int
runBug18414(NDBT_Context* ctx, NDBT_Step* step){
  /**
   * Regression test for bug#18414: node failure while a transaction is
   * being rolled back. Up to 5 iterations:
   *   - start a transaction updating 128 rows, no commit;
   *   - insert error 5003 in a node of the coordinator's node group
   *     (every other loop also error 8050 in the coordinator itself);
   *   - roll back, wait for the killed node, clear error inserts,
   *     restart the node and verify the table is still updatable.
   *
   * Needs at least two data nodes; otherwise the test is skipped.
   */
  NdbRestarter restarter;
  if (restarter.getNumDbNodes() < 2)
  {
    ctx->stopTest();
    return NDBT_OK;
  }

  Ndb* pNdb = GETNDB(step);
  HugoOperations hugoOps(*ctx->getTab());
  HugoTransactions hugoTrans(*ctx->getTab());
  int loop = 0;
  do
  {
    if(hugoOps.startTransaction(pNdb) != 0)
      goto err;

    if(hugoOps.pkUpdateRecord(pNdb, 0, 128, rand()) != 0)
      goto err;

    if(hugoOps.execute_NoCommit(pNdb) != 0)
      goto err;

    // node1 = the transaction coordinator; node2 = a different node in
    // the same node group (the one whose failure we provoke).
    int node1 = hugoOps.getTransaction()->getConnectedNodeId();
    int node2 = restarter.getRandomNodeSameNodeGroup(node1, rand());

    if (node1 == -1 || node2 == -1)
      break;

    if (loop & 1)
    {
      if (restarter.insertErrorInNode(node1, 8050))
        goto err;
    }

    if (restarter.insertErrorInNode(node2, 5003))
      goto err;

    // The rollback races with the induced node failure; its outcome is
    // intentionally ignored. (Fix: the result was previously stored in
    // an unused local `res`.)
    (void)hugoOps.execute_Rollback(pNdb);

    if (restarter.waitNodesNoStart(&node2, 1) != 0)
      goto err;

    if (restarter.insertErrorInAllNodes(0))
      goto err;

    if (restarter.startNodes(&node2, 1) != 0)
      goto err;

    if (restarter.waitClusterStarted() != 0)
      goto err;

    if (hugoTrans.scanUpdateRecords(pNdb, 128) != 0)
      goto err;

    hugoOps.closeTransaction(pNdb);

  } while(++loop < 5);

  return NDBT_OK;

err:
  hugoOps.closeTransaction(pNdb);
  return NDBT_FAILED;
}
NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
@ -820,6 +941,16 @@ TESTCASE("Bug15685",
STEP(runBug15685);
FINALIZER(runClearTable);
}
TESTCASE("Bug16772",
"Test bug with restarting before NF handling is complete"){
STEP(runBug16772);
}
TESTCASE("Bug18414",
"Test bug with NF during NR"){
INITIALIZER(runLoadTable);
STEP(runBug18414);
FINALIZER(runClearTable);
}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){

View file

@ -22,10 +22,12 @@
#include "bank/Bank.hpp"
bool disk = false;
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
  /**
   * (Re)create and populate the bank schema, honouring the file-level
   * `disk` flag (set from --disk in main).
   *
   * Fix: the stripped-diff rendering left the pre-patch call
   * `createAndLoadBank(overWriteExisting, 10)` in place, which nested
   * the two `if` statements and invoked the bank load twice with the
   * first result ignored. Only the post-patch call is kept.
   */
  Bank bank(ctx->m_cluster_connection);
  int overWriteExisting = true;
  if (bank.createAndLoadBank(overWriteExisting, disk, 10) != NDBT_OK)
    return NDBT_FAILED;
  return NDBT_OK;
}
@ -406,8 +408,20 @@ TESTCASE("Mix",
}
NDBT_TESTSUITE_END(testSRBank);
int main(int argc, const char** argv){
int
main(int argc, const char** argv){
ndb_init();
for (int i = 0; i<argc; i++)
{
if (strcmp(argv[i], "--disk") == 0)
{
argc--;
disk = true;
for (; i<argc; i++)
argv[i] = argv[i+1];
break;
}
}
return testSRBank.execute(argc, argv);
}

View file

@ -1051,6 +1051,52 @@ int runSystemRestart9(NDBT_Context* ctx, NDBT_Step* step){
return result;
}
int runBug18385(NDBT_Context* ctx, NDBT_Step* step){
  /**
   * Regression test for bug#18385: partial system restart where the
   * excluded node later rejoins nodes that have a higher GCI. Requires
   * at least two data nodes. CHECK is a macro defined elsewhere in this
   * file — presumably it records a failure in `result` and aborts the
   * do-block; confirm against its definition.
   */
  NdbRestarter restarter;
  const Uint32 nodeCount = restarter.getNumDbNodes();
  if(nodeCount < 2){
    g_info << "Bug18385 - Needs atleast 2 nodes to test" << endl;
    return NDBT_OK;
  }

  // node1 = a random node; node2 = another node in node1's node group.
  int node1 = restarter.getDbNodeId(rand() % nodeCount);
  int node2 = restarter.getRandomNodeSameNodeGroup(node1, rand());

  if (node1 == -1 || node2 == -1)
    return NDBT_OK;

  // DUMP DihSetTimeBetweenGcp 300: shorten the GCP interval for the test.
  int dump[] = { DumpStateOrd::DihSetTimeBetweenGcp, 300 };

  int result = NDBT_OK;
  do {
    CHECK(restarter.dumpStateAllNodes(dump, 2) == 0);
    CHECK(restarter.restartOneDbNode(node1, false, true, false) == 0);
    NdbSleep_SecSleep(3);
    CHECK(restarter.restartAll(false, true, false) == 0);

    // Collect every node id except node2, then perform a system restart
    // without it.
    Uint32 cnt = 0;
    int nodes[128];
    for(Uint32 i = 0; i<nodeCount; i++)
      if ((nodes[cnt] = restarter.getDbNodeId(i)) != node2)
        cnt++;

    assert(cnt == nodeCount - 1);

    CHECK(restarter.startNodes(nodes, cnt) == 0);
    CHECK(restarter.waitNodesStarted(nodes, cnt, 300) == 0);

    // Error insert 7170 on node2 — presumably forces it to stop during
    // its start (TODO confirm the code's meaning) — then restart it
    // initial+abort so it must rejoin the higher-GCI cluster.
    CHECK(restarter.insertErrorInNode(node2, 7170) == 0);
    CHECK(restarter.waitNodesNoStart(&node2, 1) == 0);
    CHECK(restarter.restartOneDbNode(node2, true, false, true) == 0);
    CHECK(restarter.waitNodesStarted(&node2, 1) == 0);
  } while(0);

  g_info << "Bug18385 finished" << endl;

  return result;
}
int runWaitStarted(NDBT_Context* ctx, NDBT_Step* step){
NdbRestarter restarter;
@ -1234,6 +1280,13 @@ TESTCASE("SR9",
STEP(runSystemRestart9);
FINALIZER(runClearTable);
}
TESTCASE("Bug18385",
"Perform partition system restart with other nodes with higher GCI"){
INITIALIZER(runWaitStarted);
INITIALIZER(runClearTable);
STEP(runBug18385);
FINALIZER(runClearTable);
}
NDBT_TESTSUITE_END(testSystemRestart);
int main(int argc, const char** argv){

View file

@ -24,6 +24,7 @@
#define TIMEOUT (Uint32)3000
Uint32 g_org_timeout = 3000;
Uint32 g_org_deadlock = 3000;
int
setTransactionTimeout(NDBT_Context* ctx, NDBT_Step* step){
@ -59,6 +60,60 @@ resetTransactionTimeout(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
int
setDeadlockTimeout(NDBT_Context* ctx, NDBT_Step* step){
  /**
   * Save the configured deadlock-detection timeout into g_org_deadlock
   * (so resetDeadlockTimeout() can restore it later), then push the
   * test's "TransactionDeadlockTimeout" property to all data nodes.
   *
   * NOTE(review): the saved config key is
   * CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT while the value is applied via
   * DumpStateOrd::TcSetTransactionTimeout — confirm both refer to the
   * same TC timer.
   *
   * Returns NDBT_OK, or NDBT_FAILED if the config cannot be read or the
   * dump command cannot be sent.
   */
  NdbRestarter restarter;
  int timeout = ctx->getProperty("TransactionDeadlockTimeout", TIMEOUT);

  // Read the original value from the master node's [DB] configuration.
  NdbConfig conf(GETNDB(step)->getNodeId()+1);
  unsigned int nodeId = conf.getMasterNodeId();
  if (!conf.getProperty(nodeId,
                        NODE_TYPE_DB,
                        CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
                        &g_org_deadlock))
    return NDBT_FAILED;

  g_err << "Setting timeout: " << timeout << endl;
  int val[] = { DumpStateOrd::TcSetTransactionTimeout, timeout };
  if(restarter.dumpStateAllNodes(val, 2) != 0){
    return NDBT_FAILED;
  }

  return NDBT_OK;
}
int
getDeadlockTimeout(NDBT_Context* ctx, NDBT_Step* step){
  /**
   * Read the configured TransactionDeadlockDetectionTimeout from the
   * master node and publish 4 * max(value, 120000) ms as the test
   * property "TransactionDeadlockTimeout".
   *
   * Fix: removed the unused local `NdbRestarter restarter;` — this
   * function only reads configuration, it never talks to the restarter.
   *
   * Returns NDBT_OK, or NDBT_FAILED if the config value cannot be read.
   */
  Uint32 val = 0;

  NdbConfig conf(GETNDB(step)->getNodeId()+1);
  unsigned int nodeId = conf.getMasterNodeId();
  if (!conf.getProperty(nodeId,
                        NODE_TYPE_DB,
                        CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
                        &val))
    return NDBT_FAILED;

  // Clamp to at least 120 s so the derived test timeout is never
  // unrealistically small.
  if (val < 120000)
    val = 120000;
  ctx->setProperty("TransactionDeadlockTimeout", 4*val);

  return NDBT_OK;
}
int
resetDeadlockTimeout(NDBT_Context* ctx, NDBT_Step* step){
  // Restore the transaction timeout that setDeadlockTimeout() saved in
  // g_org_deadlock, pushing it to every data node via DUMP.
  NdbRestarter restarter;
  int args[2];
  args[0] = DumpStateOrd::TcSetTransactionTimeout;
  args[1] = g_org_deadlock;
  return (restarter.dumpStateAllNodes(args, 2) == 0) ? NDBT_OK : NDBT_FAILED;
}
int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){
@ -333,6 +388,43 @@ int runBuddyTransNoTimeout(NDBT_Context* ctx, NDBT_Step* step){
return result;
}
int
runError4012(NDBT_Context* ctx, NDBT_Step* step){
  /**
   * Exercise error 4012 (transaction timeout): update one row without
   * committing, sleep for the configured deadlock timeout, then commit.
   * Either the execute succeeds and the later commit must succeed, or
   * the execute fails and the error must be exactly 4012.
   *
   * Fix: removed the unused locals `loops` and `stepNo`.
   * `result` is expected to be set by the CHECK macro on failure.
   */
  int result = NDBT_OK;
  int timeout = ctx->getProperty("TransactionDeadlockTimeout", TIMEOUT);

  HugoOperations hugoOps(*ctx->getTab());
  Ndb* pNdb = GETNDB(step);

  do{
    // Commit transaction
    CHECK(hugoOps.startTransaction(pNdb) == 0);
    CHECK(hugoOps.pkUpdateRecord(pNdb, 0) == 0);
    int ret = hugoOps.execute_NoCommit(pNdb);
    if (ret == 0)
    {
      int sleep = timeout;
      ndbout << "Sleeping for " << sleep << " milliseconds" << endl;
      NdbSleep_MilliSleep(sleep);

      // Expect that transaction has NOT timed-out
      CHECK(hugoOps.execute_Commit(pNdb) == 0);
    }
    else
    {
      // If the execute failed, it must be the timeout error itself.
      CHECK(ret == 4012);
    }
  } while(false);

  hugoOps.closeTransaction(pNdb);

  return result;
}
NDBT_TESTSUITE(testTimeout);
TESTCASE("DontTimeoutTransaction",
"Test that the transaction does not timeout "\
@ -403,6 +495,15 @@ TESTCASE("BuddyTransNoTimeout5",
FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
TESTCASE("Error4012", ""){
TC_PROPERTY("TransactionDeadlockTimeout", 120000);
INITIALIZER(runLoadTable);
INITIALIZER(getDeadlockTimeout);
INITIALIZER(setDeadlockTimeout);
STEPS(runError4012, 2);
FINALIZER(runClearTable);
}
NDBT_TESTSUITE_END(testTimeout);
int main(int argc, const char** argv){

View file

@ -7,14 +7,13 @@ include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
test_PROGRAMS = atrt
test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
conf-daily-basic-ndbmaster.txt \
conf-daily-basic-shark.txt \
conf-daily-devel-ndbmaster.txt \
conf-daily-sql-ndbmaster.txt \
conf-daily-basic-dl145a.txt \
conf-daily-basic-ndb08.txt \
conf-daily-devel-ndb08.txt \
conf-daily-sql-ndb08.txt
conf-daily-sql-ndb08.txt \
conf-ndbmaster.txt \
conf-shark.txt \
conf-dl145a.txt
test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh

View file

@ -17,3 +17,6 @@ FileSystemPath: CHOOSE_dir/run
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M

View file

@ -17,3 +17,6 @@ FileSystemPath: /home/ndbdev/autotest/run
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M

View file

@ -17,3 +17,6 @@ FileSystemPath: CHOOSE_dir/run
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M

View file

@ -17,3 +17,6 @@ FileSystemPath: /space/autotest/run
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M

View file

@ -445,6 +445,18 @@ max-time: 500
cmd: testNodeRestart
args: -n Bug15685 T1
max-time: 500
cmd: testNodeRestart
args: -n Bug16772 T1
#max-time: 500
#cmd: testSystemRestart
#args: -n Bug18385 T1
#
max-time: 500
cmd: testNodeRestart
args: -n Bug18414 T1
#
# DICT TESTS
max-time: 1500

View file

@ -13,7 +13,7 @@ save_args=$*
VERSION="ndb-autotest.sh version 1.04"
DATE=`date '+%Y-%m-%d'`
HOST=`hostname`
HOST=`hostname -s`
export DATE HOST
set -e
@ -67,7 +67,7 @@ done
if [ -f $conf ]
then
. ./$conf
. $conf
else
echo "Can't find config file: $conf"
exit
@ -299,9 +299,12 @@ choose_conf(){
elif [ -f $test_dir/conf-$1.txt ]
then
echo "$test_dir/conf-$1.txt"
elif [ -f $test_dir/conf-$HOST.txt ]
echo "$test_dir/conf-$HOST.txt"
else
echo "Unable to find conf file looked for" 1>&2
echo "$test_dir/conf-$1-$HOST.txt and" 1>&2
echo "$test_dir/conf-$HOST.txt" 1>&2
echo "$test_dir/conf-$1.txt" 1>&2
exit
fi

View file

@ -848,9 +848,8 @@ NDBT_Tables::createAllTables(Ndb* pNdb){
return createAllTables(pNdb, false);
}
static
int
create_default_tablespace(Ndb* pNdb)
NDBT_Tables::create_default_tablespace(Ndb* pNdb)
{
NdbDictionary::Dictionary* pDict = pNdb->getDictionary();

View file

@ -174,6 +174,39 @@ NdbRestarter::getRandomNodeOtherNodeGroup(int nodeId, int rand){
return -1;
}
int
NdbRestarter::getRandomNodeSameNodeGroup(int nodeId, int rand){
if (!isConnected())
return -1;
if (getStatus() != 0)
return -1;
int node_group = -1;
for(size_t i = 0; i < ndbNodes.size(); i++){
if(ndbNodes[i].node_id == nodeId){
node_group = ndbNodes[i].node_group;
break;
}
}
if(node_group == -1){
return -1;
}
Uint32 counter = 0;
rand = rand % ndbNodes.size();
while(counter++ < ndbNodes.size() &&
(ndbNodes[rand].node_id == nodeId ||
ndbNodes[rand].node_group != node_group))
rand = (rand + 1) % ndbNodes.size();
if(ndbNodes[rand].node_group == node_group &&
ndbNodes[rand].node_id != nodeId)
return ndbNodes[rand].node_id;
return -1;
}
int
NdbRestarter::waitClusterStarted(unsigned int _timeout){
return waitClusterState(NDB_MGM_NODE_STATUS_STARTED, _timeout);

View file

@ -30,6 +30,7 @@ NDB_STD_OPTS_VARS;
static const char* _dbname = "TEST_DB";
static int _unqualified = 0;
static int _partinfo = 0;
static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
@ -39,6 +40,9 @@ static struct my_option my_long_options[] =
{ "unqualified", 'u', "Use unqualified table names",
(gptr*) &_unqualified, (gptr*) &_unqualified, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "extra-partition-info", 'p', "Print more info per partition",
(gptr*) &_partinfo, (gptr*) &_partinfo, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
static void usage()
@ -52,6 +56,8 @@ static void usage()
my_print_variables(my_long_options);
}
static void print_part_info(Ndb* pNdb, NDBT_Table* pTab);
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
@ -260,5 +266,76 @@ int desc_table(Ndb *myndb, char* name)
}
ndbout << endl;
if (_partinfo)
print_part_info(myndb, pTab);
return 1;
}
struct InfoInfo
{
  // One column of the per-partition report printed by print_part_info():
  const char * m_title;                        // header text for the column
  NdbRecAttr* m_rec_attr;                      // receiver filled in by the scan
  const NdbDictionary::Column* m_column;       // pseudo-column the value is read from
};
static
void print_part_info(Ndb* pNdb, NDBT_Table* pTab)
{
  /**
   * Print one line per table partition with partition id, row count,
   * commit count and fragment memory, obtained from the corresponding
   * pseudo-columns via a committed-read scan restricted to the last row
   * of each fragment. Any error silently abandons the report: every
   * failure path breaks out of the do/while(0) straight to the
   * transaction close.
   */
  InfoInfo g_part_info[] = {
    { "Partition", 0, NdbDictionary::Column::FRAGMENT },
    { "Row count", 0, NdbDictionary::Column::ROW_COUNT },
    { "Commit count", 0, NdbDictionary::Column::COMMIT_COUNT },
    { "Frag memory", 0, NdbDictionary::Column::FRAGMENT_MEMORY },
    { 0, 0, 0 }
  };

  ndbout << "-- Per partition info -- " << endl;

  NdbConnection* pTrans = pNdb->startTransaction();
  if (pTrans == 0)
    return;

  do
  {
    NdbScanOperation* pOp= pTrans->getNdbScanOperation(pTab->getName());
    if (pOp == NULL)
      break;

    // Committed read — informational scan, no locks held.
    int rs = pOp->readTuples(NdbOperation::LM_CommittedRead);
    if (rs != 0)
      break;

    // Deliver only the last row of each fragment, so each partition
    // contributes exactly one result row.
    if (pOp->interpret_exit_last_row() != 0)
      break;

    // Request every pseudo-column listed in the table above.
    Uint32 i = 0;
    for(i = 0; g_part_info[i].m_title != 0; i++)
    {
      if ((g_part_info[i].m_rec_attr = pOp->getValue(g_part_info[i].m_column)) == 0)
        break;
    }
    // Early exit from the loop above means getValue failed for a column.
    if (g_part_info[i].m_title != 0)
      break;

    if (pTrans->execute(NoCommit) != 0)
      break;

    // Header line.
    for (i = 0; g_part_info[i].m_title != 0; i++)
      ndbout << g_part_info[i].m_title << "\t";
    ndbout << endl;

    // One output line per partition.
    while(pOp->nextResult() == 0)
    {
      for(i = 0; g_part_info[i].m_title != 0; i++)
      {
        ndbout << *g_part_info[i].m_rec_attr << "\t";
      }
      ndbout << endl;
    }
  } while(0);

  pTrans->close();
}