Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new-ndb

into  poseidon.mysql.com:/home/tomas/mysql-5.1-new-ndb


sql/ha_ndbcluster.cc:
  Auto merged
storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp:
  Auto merged
storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:
  Auto merged
storage/ndb/tools/restore/Restore.cpp:
  Auto merged
storage/ndb/tools/restore/consumer_restore.cpp:
  Auto merged
unknown 2007-03-21 16:34:23 +01:00
commit 0ca0887b24
21 changed files with 151 additions and 48 deletions


@@ -35,8 +35,8 @@ if [ -d ../sql ] ; then
exec_mgmtsrvr=$ndbtop/src/mgmsrv/ndb_mgmd
exec_waiter=$ndbtop/tools/ndb_waiter
exec_test=$ndbtop/tools/ndb_test_platform
exec_test_ndberror=
exec_test_ndberror=$ndbtop/src/ndbapi/ndberror_check
exec_mgmtclient=$ndbtop/src/mgmclient/ndb_mgm
else
BINARY_DIST=1
if test -x "$BASEDIR/libexec/ndbd"


@@ -60,11 +60,11 @@ hex(c2) hex(c3) c1
0 1 BCDEF
1 0 CD
0 0 DEFGHIJKL
SELECT @the_epoch:=MAX(epoch) FROM mysql.apply_status;
SELECT @the_epoch:=MAX(epoch) FROM mysql.ndb_apply_status;
@the_epoch:=MAX(epoch)
<the_epoch>
SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM mysql.binlog_index WHERE epoch > <the_epoch> ORDER BY epoch ASC LIMIT 1;
FROM mysql.ndb_binlog_index WHERE epoch > <the_epoch> ORDER BY epoch ASC LIMIT 1;
@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
<the_pos> master-bin.000001
CHANGE MASTER TO
@@ -89,8 +89,8 @@ hex(c2) hex(c3) c1
DROP DATABASE ndbsynctest;
STOP SLAVE;
reset master;
select * from mysql.binlog_index;
select * from mysql.ndb_binlog_index;
Position File epoch inserts updates deletes schemaops
reset slave;
select * from mysql.apply_status;
select * from mysql.ndb_apply_status;
server_id epoch


@@ -1398,18 +1398,19 @@ id val
1 test1
2 test2
INSERT INTO t1 VALUES (2,'test2') ON DUPLICATE KEY UPDATE val=VALUES(val);
INSERT INTO t1 VALUES (3,'test3') ON DUPLICATE KEY UPDATE val=VALUES(val);
INSERT INTO t1 VALUES (2,'test3') ON DUPLICATE KEY UPDATE val=VALUES(val);
INSERT INTO t1 VALUES (3,'test4') ON DUPLICATE KEY UPDATE val=VALUES(val);
SELECT * FROM t1;
id val
1 test1
2 test2
3 test3
2 test3
3 test4
SELECT * FROM t2;
id val
1 test1
2 test2
3 test2
4 test3
3 test3
4 test4
DROP TRIGGER trg27006_a_insert;
DROP TRIGGER trg27006_a_update;
drop table t1,t2;


@@ -16,8 +16,6 @@ concurrent_innodb : BUG#21579 2006-08-11 mleich innodb_concurrent random
ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed
ndb_restore_partition : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone
rpl_ndb_sync : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated
@@ -37,5 +35,4 @@ synchronization : Bug#24529 Test 'synchronization' fails on Mac pushb
plugin : Bug#25659 memory leak via "plugins" test
rpl_ndb_dd_advance : Bug#25913 rpl_ndb_dd_advance fails randomly
ndb_alter_table : Bug##25774 ndb_alter_table.test fails in DBUG_ASSERT() on Linux x64
ndb_single_user : Bug#27021 Error codes in mysqld in single user mode varies


@@ -1700,8 +1700,7 @@ DROP PROCEDURE bug22580_proc_1;
DROP PROCEDURE bug22580_proc_2;
#
# Bug#27006: AFTER UPDATE triggers not fired with INSERT ... ON DUPLICATE KEY
# UPDATE if the row wasn't actually changed.
# Bug#27006: AFTER UPDATE triggers not fired with INSERT ... ON DUPLICATE
#
--disable_warnings
DROP TRIGGER IF EXISTS trg27006_a_update;
@@ -1730,7 +1729,8 @@ INSERT INTO t1(val) VALUES ('test1'),('test2');
SELECT * FROM t1;
SELECT * FROM t2;
INSERT INTO t1 VALUES (2,'test2') ON DUPLICATE KEY UPDATE val=VALUES(val);
INSERT INTO t1 VALUES (3,'test3') ON DUPLICATE KEY UPDATE val=VALUES(val);
INSERT INTO t1 VALUES (2,'test3') ON DUPLICATE KEY UPDATE val=VALUES(val);
INSERT INTO t1 VALUES (3,'test4') ON DUPLICATE KEY UPDATE val=VALUES(val);
SELECT * FROM t1;
SELECT * FROM t2;
DROP TRIGGER trg27006_a_insert;


@@ -1008,7 +1008,7 @@ int ha_ndbcluster::get_metadata(const char *path)
DBUG_ASSERT(m_table == NULL);
DBUG_ASSERT(m_table_info == NULL);
const void *data, *pack_data;
const void *data= NULL, *pack_data= NULL;
uint length, pack_length;
/*
@@ -4732,7 +4732,7 @@ int ha_ndbcluster::create(const char *name,
NDBTAB tab;
NDBCOL col;
uint pack_length, length, i, pk_length= 0;
const void *data, *pack_data;
const void *data= NULL, *pack_data= NULL;
bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE);
char tablespace[FN_LEN];
@@ -5065,7 +5065,7 @@ int ha_ndbcluster::create_handler_files(const char *file,
{
Ndb* ndb;
const NDBTAB *tab;
const void *data, *pack_data;
const void *data= NULL, *pack_data= NULL;
uint length, pack_length;
int error= 0;
@@ -6108,7 +6108,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db,
int error= 0;
NdbError ndb_error;
uint len;
const void* data;
const void* data= NULL;
Ndb* ndb;
char key[FN_REFLEN];
DBUG_ENTER("ndbcluster_discover");
@@ -6187,6 +6187,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db,
DBUG_RETURN(0);
err:
my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
if (share)
{
/* ndb_share reference temporary free */
@@ -7608,7 +7609,9 @@ int handle_trailing_share(NDB_SHARE *share)
/*
Ndb share has not been released as it should
*/
#ifdef NOT_YET
DBUG_ASSERT(FALSE);
#endif
/*
This is probably an error. We can however save the situation


@@ -15,7 +15,9 @@
*/
#define NDB_REP_DB "mysql"
#define OLD_NDB_REP_DB "cluster"
#define NDB_REP_TABLE "ndb_binlog_index"
#define NDB_APPLY_TABLE "ndb_apply_status"
#define OLD_NDB_APPLY_TABLE "apply_status"
#define NDB_SCHEMA_TABLE "ndb_schema"
#define OLD_NDB_SCHEMA_TABLE "schema"
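These defines keep the pre-rename names (the "cluster" database with apply_status and schema tables) alongside the new mysql.ndb_* names, which is what lets ndb_restore recognize system tables coming from backups taken with older versions (see the Restore.cpp hunk further down). A hedged sketch of how such a check can be composed from the macros; is_ndb_system_table here is an illustrative helper, not part of the tools:

    #include <cstring>
    #include <cstdio>

    /* Constants copied from the header above so the sketch builds standalone. */
    #define NDB_REP_DB           "mysql"
    #define OLD_NDB_REP_DB       "cluster"
    #define NDB_APPLY_TABLE      "ndb_apply_status"
    #define OLD_NDB_APPLY_TABLE  "apply_status"
    #define NDB_SCHEMA_TABLE     "ndb_schema"
    #define OLD_NDB_SCHEMA_TABLE "schema"

    /* Illustrative helper: accept the current names plus the legacy
       spellings an old backup may still contain. */
    static bool is_ndb_system_table(const char *name)
    {
      return
        std::strcmp(name, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0         ||
        std::strcmp(name, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE) == 0        ||
        std::strcmp(name, OLD_NDB_REP_DB "/def/" OLD_NDB_APPLY_TABLE) == 0 ||
        std::strcmp(name, OLD_NDB_REP_DB "/def/" OLD_NDB_SCHEMA_TABLE) == 0;
    }

    int main()
    {
      const char *names[] = { "mysql/def/ndb_apply_status",
                              "cluster/def/apply_status",
                              "mysql/def/t1" };
      for (const char *n : names)
        std::printf("%-28s system=%d\n", n, (int)is_ndb_system_table(n));
      return 0;
    }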


@@ -1260,22 +1260,23 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
compare_record(table))
{
info->updated++;
/*
If ON DUP KEY UPDATE updates a row instead of inserting one, it's
like a regular UPDATE statement: it should not affect the value of a
next SELECT LAST_INSERT_ID() or mysql_insert_id().
Except if LAST_INSERT_ID(#) was in the INSERT query, which is
handled separately by THD::arg_of_last_insert_id_function.
*/
insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0;
if (table->next_number_field)
table->file->adjust_next_insert_id_after_explicit_value(
table->next_number_field->val_int());
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE));
info->copied++;
}
/*
If ON DUP KEY UPDATE updates a row instead of inserting one, it's
like a regular UPDATE statement: it should not affect the value of a
next SELECT LAST_INSERT_ID() or mysql_insert_id().
Except if LAST_INSERT_ID(#) was in the INSERT query, which is
handled separately by THD::arg_of_last_insert_id_function.
*/
insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0;
if (table->next_number_field)
table->file->adjust_next_insert_id_after_explicit_value(
table->next_number_field->val_int());
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE));
goto ok_or_after_trg_err;
}
else /* DUP_REPLACE */


@@ -744,9 +744,9 @@ void getTextEventBufferStatus(QQQQ) {
"Event buffer status: used=%d%s(%d%) alloc=%d%s(%d%) "
"max=%d%s apply_gci=%lld latest_gci=%lld",
used, used_unit,
theData[2] ? (theData[1]*100)/theData[2] : 0,
theData[2] ? (Uint32)((((Uint64)theData[1])*100)/theData[2]) : 0,
alloc, alloc_unit,
theData[3] ? (theData[2]*100)/theData[3] : 0,
theData[3] ? (Uint32)((((Uint64)theData[2])*100)/theData[3]) : 0,
max_, max_unit,
theData[4]+(((Uint64)theData[5])<<32),
theData[6]+(((Uint64)theData[7])<<32));
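The two changed lines above are a 32-bit overflow fix: theData[1] and theData[2] are byte counts, and multiplying them by 100 in Uint32 arithmetic wraps once the buffer exceeds roughly 42 MB (2^32 / 100 bytes), so the reported percentages become nonsense. A small sketch of the difference, using plain stdint types rather than the NDB Uint32/Uint64 typedefs:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      uint32_t used  = 60u * 1024 * 1024;    /* 60 MB of event buffer in use */
      uint32_t alloc = 100u * 1024 * 1024;   /* 100 MB currently allocated   */

      /* Old expression: the product used*100 wraps around 2^32. */
      uint32_t pct_narrow = alloc ? (used * 100u) / alloc : 0;

      /* Fixed expression: widen to 64 bits before multiplying. */
      uint32_t pct_wide = alloc ? (uint32_t)(((uint64_t)used * 100) / alloc) : 0;

      std::printf("32-bit intermediate: %u%%  (wrong)\n", pct_narrow);  /* 19%% */
      std::printf("64-bit intermediate: %u%%  (right)\n", pct_wide);    /* 60%% */
      return 0;
    }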


@@ -5,7 +5,7 @@ Next DBACC 3002
Next DBTUP 4029
Next DBLQH 5045
Next DBDICT 6007
Next DBDIH 7181
Next DBDIH 7183
Next DBTC 8039
Next CMVMI 9000
Next BACKUP 10038


@@ -1645,6 +1645,8 @@ private:
// NR
Uint32 c_dictLockSlavePtrI_nodeRestart; // userPtr for NR
void recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret);
Uint32 c_error_7181_ref;
};
#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32)


@@ -5096,6 +5096,16 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal)
} else {
ndbrequire(failedNodePtr.p->nodeStatus == NodeRecord::DYING);
}//if
if (ERROR_INSERTED(7181))
{
ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ");
CLEAR_ERROR_INSERT_VALUE;
signal->theData[0] = c_error_7181_ref;
signal->theData[1] = coldgcp;
execGCP_TCFINISHED(signal);
}
MasterGCPConf::State gcpState;
switch (cgcpParticipantState) {
case GCP_PARTICIPANT_READY:
@@ -5162,6 +5172,15 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal)
masterGCPConf->lcpActive[i] = SYSFILE->lcpActive[i];
sendSignal(newMasterBlockref, GSN_MASTER_GCPCONF, signal,
MasterGCPConf::SignalLength, JBB);
if (ERROR_INSERTED(7182))
{
ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ");
CLEAR_ERROR_INSERT_VALUE;
signal->theData[0] = c_error_7181_ref;
signal->theData[1] = coldgcp;
execGCP_TCFINISHED(signal);
}
}//Dbdih::execMASTER_GCPREQ()
void Dbdih::execMASTER_GCPCONF(Signal* signal)
@@ -7923,10 +7942,10 @@ void Dbdih::execGCP_NODEFINISH(Signal* signal)
} else if (cmasterState == MASTER_TAKE_OVER_GCP) {
jam();
//-------------------------------------------------------------
// We are currently taking over as master. We will delay the
// signal until we have completed the take over gcp handling.
// We are currently taking over as master. Ignore
// signal in this case since we will discover it in reception of
// MASTER_GCPCONF.
//-------------------------------------------------------------
sendSignalWithDelay(reference(), GSN_GCP_NODEFINISH, signal, 20, 3);
return;
} else {
ndbrequire(cmasterState == MASTER_ACTIVE);
@@ -8061,6 +8080,7 @@ void Dbdih::execGCP_COMMIT(Signal* signal)
cgckptflag = false;
emptyverificbuffer(signal, true);
cgcpParticipantState = GCP_PARTICIPANT_COMMIT_RECEIVED;
signal->theData[0] = calcDihBlockRef(masterNodeId);
signal->theData[1] = coldgcp;
sendSignal(clocaltcblockref, GSN_GCP_NOMORETRANS, signal, 2, JBB);
return;
@@ -8070,14 +8090,25 @@ void Dbdih::execGCP_TCFINISHED(Signal* signal)
{
jamEntry();
CRASH_INSERTION(7007);
Uint32 retRef = signal->theData[0];
Uint32 gci = signal->theData[1];
ndbrequire(gci == coldgcp);
if (ERROR_INSERTED(7181) || ERROR_INSERTED(7182))
{
c_error_7181_ref = retRef; // Save ref
ndbout_c("killing %d", refToNode(cmasterdihref));
signal->theData[0] = 9999;
sendSignal(numberToRef(CMVMI, refToNode(cmasterdihref)),
GSN_NDB_TAMPER, signal, 1, JBB);
return;
}
cgcpParticipantState = GCP_PARTICIPANT_TC_FINISHED;
signal->theData[0] = cownNodeId;
signal->theData[1] = coldgcp;
signal->theData[2] = cfailurenr;
sendSignal(cmasterdihref, GSN_GCP_NODEFINISH, signal, 3, JBB);
sendSignal(retRef, GSN_GCP_NODEFINISH, signal, 3, JBB);
}//Dbdih::execGCP_TCFINISHED()
/*****************************************************************************/
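The ERROR_INSERTED(7181)/(7182) blocks above are test hooks, armed from the new runBug27283 test further down: with the error number set, the node defers its normal GCP_TCFINISHED handling, kills the current master via NDB_TAMPER 9999, and then replays the signal when the new master sends MASTER_GCPREQ, which exercises the retRef handling added in this commit. A minimal sketch of the arm-once, fire-once error-insert pattern itself (plain functions, not the ndbd ERROR_INSERTED/CLEAR_ERROR_INSERT_VALUE macros):

    #include <cstdio>

    static unsigned g_error_insert = 0;                      /* 0 = nothing armed */

    static bool error_inserted(unsigned code) { return g_error_insert == code; }
    static void clear_error_insert()          { g_error_insert = 0; }

    /* Stand-in for a code path like execMASTER_GCPREQ(): regular handling,
       plus an optional one-shot fault when the matching error is armed. */
    static void exec_master_gcpreq()
    {
      /* ... regular master-takeover handling would run here ... */
      if (error_inserted(7181))
      {
        clear_error_insert();      /* one-shot: fire only on the first pass */
        std::printf("injected: simulate GCP_TCFINISHED during MASTER_GCPREQ\n");
        return;
      }
      std::printf("normal path\n");
    }

    int main()
    {
      g_error_insert = 7181;   /* what insertErrorInNode(next, 7181) arranges */
      exec_master_gcpreq();    /* takes the injected path */
      exec_master_gcpreq();    /* error cleared, back to the normal path */
      return 0;
    }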


@@ -1960,5 +1960,8 @@ private:
// those variables should be removed and exchanged for stack
// variable communication.
/**************************************************************************/
Uint32 c_gcp_ref;
};
#endif


@@ -6970,6 +6970,7 @@ next:
void Dbtc::execGCP_NOMORETRANS(Signal* signal)
{
jamEntry();
c_gcp_ref = signal->theData[0];
tcheckGcpId = signal->theData[1];
if (cfirstgcp != RNIL) {
jam();
@@ -10026,6 +10027,7 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
void Dbtc::gcpTcfinished(Signal* signal)
{
signal->theData[0] = c_gcp_ref;
signal->theData[1] = tcheckGcpId;
sendSignal(cdihblockref, GSN_GCP_TCFINISHED, signal, 2, JBB);
}//Dbtc::gcpTcfinished()
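Reading the DIH and TC hunks together: DIH now carries the master DIH block reference in word 0 of GCP_NOMORETRANS (theData[0] = calcDihBlockRef(masterNodeId)), DBTC saves it in the new c_gcp_ref member and echoes it back in GCP_TCFINISHED, and Dbdih::execGCP_TCFINISHED forwards its GCP_NODEFINISH to that echoed reference (retRef) rather than the cached cmasterdihref, so the confirmation reaches whichever node is master for that GCP even across a takeover. A toy sketch of the round trip; Signal, Tc and the numeric references are illustrative, not the NDB kernel classes:

    #include <cstdint>
    #include <cstdio>

    struct Signal { uint32_t theData[2]; };

    struct Tc
    {
      uint32_t c_gcp_ref;     /* saved requester reference (new Dbtc member) */
      uint32_t tcheckGcpId;

      void execGCP_NOMORETRANS(const Signal &req)
      {
        c_gcp_ref   = req.theData[0];   /* remember who asked */
        tcheckGcpId = req.theData[1];
      }
      void gcpTcfinished(Signal &conf) const
      {
        conf.theData[0] = c_gcp_ref;    /* echo the reference back */
        conf.theData[1] = tcheckGcpId;
      }
    };

    int main()
    {
      const uint32_t master_dih_ref   = 0x00F60002;  /* pretend block reference */
      const uint32_t stale_cached_ref = 0x00F60001;  /* old cached master ref   */
      const uint32_t gci = 42;

      Signal req = {{master_dih_ref, gci}};          /* DIH -> TC */
      Tc tc = {};
      tc.execGCP_NOMORETRANS(req);

      Signal conf = {{0, 0}};                        /* TC -> DIH */
      tc.gcpTcfinished(conf);

      /* The receiving DIH routes its GCP_NODEFINISH to the echoed reference,
         not to a cached value that may be stale after a master takeover. */
      std::printf("reply goes to 0x%X (cached ref was 0x%X), gci=%u\n",
                  (unsigned)conf.theData[0], (unsigned)stale_cached_ref,
                  (unsigned)conf.theData[1]);
      return 0;
    }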


@@ -556,7 +556,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
true,
ConfigInfo::CI_INT,
"0",
"1",
"0",
"2" },
{


@@ -2136,6 +2136,8 @@ MgmtSrvr::alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type)
{
do_send = 1;
nodeId = refToNode(ref->masterRef);
if (!theFacade->get_node_alive(nodeId))
nodeId = 0;
continue;
}
return ref->errorCode;
@@ -2626,6 +2628,8 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
ndbout_c("I'm not master resending to %d", nodeId);
#endif
do_send = 1; // try again
if (!theFacade->get_node_alive(nodeId))
m_master_node = nodeId = 0;
continue;
}
event.Event = BackupEvent::BackupFailedToStart;
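The two MgmtSrvr hunks share a pattern: on a "not the master" reply the server re-sends to the node named in the reply, and the fix clears that target back to 0 (meaning any node; startBackup also resets m_master_node) when the named node is no longer alive, instead of retrying a dead node indefinitely. A toy sketch of that retry loop; Reply, node_alive() and send_request() are illustrative stand-ins, not the management server API:

    #include <cstdio>

    struct Reply { bool not_master; unsigned master_node; };

    static bool node_alive(unsigned node) { return node != 2; }  /* node 2 is down */

    static Reply send_request(unsigned node)
    {
      if (node == 1) return {true, 2};   /* node 1: "not master, try node 2" */
      if (node == 2) return {true, 2};   /* node 2 would never answer anyway */
      return {false, 0};                 /* node 0 = "any alive node": accepted */
    }

    int main()
    {
      unsigned nodeId = 1;
      for (int attempt = 1; attempt <= 10; ++attempt)
      {
        Reply r = send_request(nodeId);
        if (!r.not_master)
        {
          std::printf("accepted on attempt %d\n", attempt);
          return 0;
        }
        nodeId = r.master_node;      /* follow the "real master" hint */
        if (!node_alive(nodeId))     /* the fix: do not keep chasing a dead node */
          nodeId = 0;                /* 0 lets the facade pick any alive node */
      }
      std::printf("gave up\n");
      return 1;
    }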


@@ -2101,15 +2101,17 @@ NdbEventBuffer::alloc_mem(EventBufData* data,
NdbMem_Free((char*)data->memory);
assert(m_total_alloc >= data->sz);
m_total_alloc -= data->sz;
data->memory = 0;
data->sz = 0;
data->memory = (Uint32*)NdbMem_Allocate(alloc_size);
if (data->memory == 0)
{
m_total_alloc -= data->sz;
DBUG_RETURN(-1);
}
data->sz = alloc_size;
m_total_alloc += data->sz;
m_total_alloc += add_sz;
if (change_sz != NULL)
*change_sz += add_sz;
@@ -2781,7 +2783,7 @@ NdbEventBuffer::reportStatus()
else
apply_gci= latest_gci;
if (100*m_free_data_sz < m_min_free_thresh*m_total_alloc &&
if (100*(Uint64)m_free_data_sz < m_min_free_thresh*(Uint64)m_total_alloc &&
m_total_alloc > 1024*1024)
{
/* report less free buffer than m_free_thresh,
@@ -2792,7 +2794,7 @@ NdbEventBuffer::reportStatus()
goto send_report;
}
if (100*m_free_data_sz > m_max_free_thresh*m_total_alloc &&
if (100*(Uint64)m_free_data_sz > m_max_free_thresh*(Uint64)m_total_alloc &&
m_total_alloc > 1024*1024)
{
/* report more free than 2 * m_free_thresh


@@ -1475,6 +1475,49 @@ runBug27003(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
int
runBug27283(NDBT_Context* ctx, NDBT_Step* step)
{
int result = NDBT_OK;
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
NdbRestarter res;
if (res.getNumDbNodes() < 2)
{
return NDBT_OK;
}
static const int errnos[] = { 7181, 7182, 0 };
Uint32 pos = 0;
for (Uint32 i = 0; i<loops; i++)
{
while (errnos[pos] != 0)
{
int master = res.getMasterNodeId();
int next = res.getNextMasterNodeId(master);
int next2 = res.getNextMasterNodeId(next);
int node = (i & 1) ? next : next2;
ndbout_c("Tesing err: %d", errnos[pos]);
if (res.insertErrorInNode(next, errnos[pos]))
return NDBT_FAILED;
NdbSleep_SecSleep(3);
if (res.waitClusterStarted())
return NDBT_FAILED;
pos++;
}
pos = 0;
}
return NDBT_OK;
}
NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
"Test that one node at a time can be stopped and then restarted "\
@@ -1826,6 +1869,9 @@ TESTCASE("Bug26450", ""){
TESTCASE("Bug27003", ""){
INITIALIZER(runBug27003);
}
TESTCASE("Bug27283", ""){
INITIALIZER(runBug27283);
}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){


@@ -473,6 +473,10 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug27003 T1
max-time: 1000
cmd: testNodeRestart
args: -n Bug27283 T1
max-time: 500
cmd: testNodeRestart
args: -n Bug15587 T1


@@ -312,7 +312,8 @@ RestoreMetaData::markSysTables()
"cluster_replication" -> "cluster" -> "mysql"
*/
strcmp(tableName, "cluster_replication/def/" OLD_NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, "cluster/def/" OLD_NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, OLD_NDB_REP_DB "/def/" OLD_NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, OLD_NDB_REP_DB "/def/" OLD_NDB_SCHEMA_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 )


@@ -703,7 +703,11 @@ err:
}
bool
BackupRestore::table_equal(const TableS &tableS){
BackupRestore::table_equal(const TableS &tableS)
{
if (!m_restore)
return true;
const char *tablename = tableS.getTableName();
if(tableS.m_dictTable == NULL){