Merge whalegate.ndb.mysql.com:/home/tomas/cge-5.1
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb-merge
commit 369b4848a8
18 changed files with 408 additions and 31 deletions
@@ -2715,12 +2715,12 @@ View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select (year(`t1`.`test_date`) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75) latin1 latin1_swedish_ci
SELECT (year(test_date)-year(DOB)) AS Age FROM t1 HAVING Age < 75;
Age
43
39
44
40
SELECT * FROM v1;
Age
43
39
44
40
DROP VIEW v1;
DROP TABLE t1;
CREATE TABLE t1 (id int NOT NULL PRIMARY KEY, a char(6) DEFAULT 'xxx');
mysql-test/suite/ndb/r/ndb_autoinc.result (new file, 37 lines)
@@ -0,0 +1,37 @@
+DROP TABLE IF EXISTS t1,t2,t3;
+USE test;
+CREATE TABLE t1 (
+id INT AUTO_INCREMENT,
+PRIMARY KEY(id)
+) ENGINE=NDBCLUSTER;
+CREATE TABLE t2 (
+id INT AUTO_INCREMENT,
+KEY(id)
+) ENGINE=NDBCLUSTER;
+ERROR HY000: Can't create table 'test.t2' (errno: 4335)
+SHOW TABLES;
+Tables_in_test
+t1
+CREATE TABLE t3 (
+id INT AUTO_INCREMENT,
+KEY(id)
+) ENGINE=MYISAM;
+ALTER TABLE t3
+ENGINE NDBCLUSTER;
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  KEY `id` (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t3
+ADD PRIMARY KEY (id);
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  PRIMARY KEY (`id`),
+  KEY `id` (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1, t3;
+End of 5.1 tests
mysql-test/suite/ndb/t/ndb_autoinc.test (new file, 45 lines)
@@ -0,0 +1,45 @@
+-- source include/have_ndb.inc
+-- source include/not_embedded.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2,t3;
+--enable_warnings
+
+USE test;
+
+CREATE TABLE t1 (
+id INT AUTO_INCREMENT,
+PRIMARY KEY(id)
+) ENGINE=NDBCLUSTER;
+
+# Test For bug#30417
+--error 1005
+
+CREATE TABLE t2 (
+id INT AUTO_INCREMENT,
+KEY(id)
+) ENGINE=NDBCLUSTER;
+
+SHOW TABLES;
+
+CREATE TABLE t3 (
+id INT AUTO_INCREMENT,
+KEY(id)
+) ENGINE=MYISAM;
+
+--disable_result_log
+--error 1005
+ALTER TABLE t3
+ENGINE NDBCLUSTER;
+--enable_result_log
+
+SHOW CREATE TABLE t3;
+
+ALTER TABLE t3
+ADD PRIMARY KEY (id);
+
+SHOW CREATE TABLE t3;
+
+DROP TABLE t1, t3;
+
+--echo End of 5.1 tests
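The restriction this test exercises (error 4335) is enforced by the NdbDictionaryImpl::createTable() change further down: a table without an explicit primary key gets a hidden auto-increment key from NDB, so a user AUTO_INCREMENT column that is not part of the primary key amounts to a second auto-increment column and is rejected. A minimal NDB API sketch of the same failure (hypothetical standalone program; the table name is ours, and a running cluster reachable over the default connectstring is assumed):

    #include <NdbApi.hpp>
    #include <iostream>

    int main()
    {
      ndb_init();
      Ndb_cluster_connection conn;            // default connectstring
      if (conn.connect(4, 5, 1) != 0)         // retries, delay, verbose
        return 1;
      Ndb ndb(&conn, "test");
      ndb.init();

      NdbDictionary::Table tab("t2_api");     // hypothetical table name
      NdbDictionary::Column col("id");
      col.setType(NdbDictionary::Column::Unsigned);
      col.setNullable(false);
      col.setPrimaryKey(false);               // not part of the primary key...
      col.setAutoIncrement(true);             // ...but auto-increment
      tab.addColumn(col);

      // With no explicit PK, NDB adds a hidden auto-increment key, so the
      // user column is a second one: createTable() should fail with 4335.
      if (ndb.getDictionary()->createTable(tab) == -1)
        std::cout << "error " << ndb.getDictionary()->getNdbError().code << std::endl;
      return 0;
    }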
@ -10,6 +10,8 @@
|
|||
|
||||
# Storage engine to be used in CREATE TABLE
|
||||
--source include/have_ndb.inc
|
||||
connection default;
|
||||
|
||||
let $engine_type= NDB;
|
||||
|
||||
|
||||
|
|
|
@@ -28,6 +28,17 @@
#define MAX_NODES 64
#define UNDEF_NODEGROUP 0xFFFF

+/**************************************************************************
+ * IT SHOULD BE (MAX_NDB_NODES - 1).
+ * WHEN MAX_NDB_NODE IS CHANGED, IT SHOULD BE CHANGED ALSO
+ **************************************************************************/
+#define MAX_DATA_NODE_ID 48
+/**************************************************************************
+ * IT SHOULD BE (MAX_NODES - 1).
+ * WHEN MAX_NODES IS CHANGED, IT SHOULD BE CHANGED ALSO
+ **************************************************************************/
+#define MAX_NODES_ID 63

/**
 * MAX_API_NODES = MAX_NODES - No of NDB Nodes in use
 */
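The two new macros encode the highest legal node IDs, one less than the corresponding node counts; the ConfigInfo.cpp hunks below switch the NodeId upper bounds to them. A compile-time restatement of the invariant the comments describe (standalone sketch, not part of the commit; MAX_NDB_NODES is 49 in this tree, as the value 48 implies):

    // Values copied from this tree; if either count changes,
    // the matching ID macro must be changed with it.
    #define MAX_NDB_NODES 49
    #define MAX_NODES 64
    #define MAX_DATA_NODE_ID 48
    #define MAX_NODES_ID 63

    static_assert(MAX_DATA_NODE_ID == MAX_NDB_NODES - 1, "keep in sync");
    static_assert(MAX_NODES_ID == MAX_NODES - 1, "keep in sync");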
@@ -163,7 +163,8 @@ struct CreateFileImplRef {
    InvalidFileMetadata = 1510,
    OutOfMemory = 1511,
    FileReadError = 1512,
-   FilegroupNotOnline = 1513
+   FilegroupNotOnline = 1513,
+   FileSizeTooLarge = 1515
  };

  Uint32 senderData;
@@ -553,7 +553,13 @@ extern "C" {
    /** Log event specific data for for corresponding NDB_LE_ log event */
    struct {
      int gth;
-     unsigned page_size_kb;
+     /* union is for compatibility backward.
+      * page_size_kb member variable should be removed in the future
+      */
+     union {
+       unsigned page_size_kb;
+       unsigned page_size_bytes;
+     };
      unsigned pages_used;
      unsigned pages_total;
      unsigned block;
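The anonymous union keeps both member names aliased to the same storage, so existing MGM API clients that still read page_size_kb keep compiling and see the value now reported through page_size_bytes. A toy illustration of that aliasing (not the real header, just the mechanism):

    #include <cassert>

    struct MemoryUsageEvent {          // shape loosely mirrors the hunk above
      int gth;
      union {                          // both names share the same storage
        unsigned page_size_kb;         // legacy name, kept for compatibility
        unsigned page_size_bytes;      // new name; value is now in bytes
      };
    };

    int main() {
      MemoryUsageEvent ev;
      ev.page_size_bytes = 32768;
      assert(ev.page_size_kb == 32768);  // old readers see the same value
      return 0;
    }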
@@ -11,7 +11,10 @@ Next CMVMI 9000
Next BACKUP 10038
Next DBUTIL 11002
Next DBTUX 12008
-Next SUMA 13034
+Next SUMA 13036
+Next LGMAN 15001
+Next TSMAN 16001

TESTING NODE FAILURE, ARBITRATION
---------------------------------
@@ -547,3 +550,11 @@ NDBCNTR:

1000: Crash insertion on SystemError::CopyFragRef
1001: Delay sending NODE_FAILREP (to own node), until error is cleared
+
+LGMAN:
+-----
+15000: Fail to create log file
+
+TSMAN:
+-----
+16000: Fail to create data file
@@ -967,6 +967,7 @@ Backup::checkNodeFail(Signal* signal,
      ref->backupPtr = ptr.i;
      ref->backupId = ptr.p->backupId;
      ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail;
+     ref->nodeId = getOwnNodeId();
      gsn= GSN_STOP_BACKUP_REF;
      len= StopBackupRef::SignalLength;
      pos= &ref->nodeId - signal->getDataPtr();
@@ -2081,6 +2082,15 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr)
    /**
     * Insert footers
     */
+   //if backup error, we needn't insert footers
+   if(ptr.p->checkError())
+   {
+     jam();
+     closeFiles(signal, ptr);
+     ptr.p->errorCode = 0;
+     return;
+   }
+
    {
      BackupFilePtr filePtr LINT_SET_PTR;
      ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
@@ -4187,6 +4197,37 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr)
#if 0
  ndbout << "Ptr to data = " << hex << tmp << endl;
#endif
+  BackupRecordPtr ptr LINT_SET_PTR;
+  c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
+  if (ERROR_INSERTED(10036))
+  {
+    jam();
+    filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_FILE_THREAD;
+    filePtr.p->errorCode = 2810;
+    ptr.p->setErrorCode(2810);
+
+    if(ptr.p->m_gsn == GSN_STOP_BACKUP_REQ)
+    {
+      jam();
+      closeFile(signal, ptr, filePtr);
+    }
+    return;
+  }
+
+  if(filePtr.p->errorCode != 0)
+  {
+    jam();
+    ptr.p->setErrorCode(filePtr.p->errorCode);
+
+    if(ptr.p->m_gsn == GSN_STOP_BACKUP_REQ)
+    {
+      jam();
+      closeFile(signal, ptr, filePtr);
+    }
+    return;
+  }
+
  if (!ready_to_write(ready, sz, eof, filePtr.p))
  {
    jam();
@@ -4218,8 +4259,6 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr)
  ndbrequire(flags & BackupFile::BF_OPEN);
  ndbrequire(flags & BackupFile::BF_FILE_THREAD);

-  BackupRecordPtr ptr LINT_SET_PTR;
-  c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
  closeFile(signal, ptr, filePtr);
}
|
|||
|
||||
jam();
|
||||
|
||||
//error when do insert footer or close file
|
||||
if(ptr.p->checkError())
|
||||
{
|
||||
StopBackupRef * ref = (StopBackupRef*)signal->getDataPtr();
|
||||
ref->backupPtr = ptr.i;
|
||||
ref->backupId = ptr.p->backupId;
|
||||
ref->errorCode = ptr.p->errorCode;
|
||||
ref->nodeId = getOwnNodeId();
|
||||
sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_REF, signal,
|
||||
StopBackupConf::SignalLength, JBB);
|
||||
|
||||
ptr.p->m_gsn = GSN_STOP_BACKUP_REF;
|
||||
ptr.p->slaveState.setState(CLEANING);
|
||||
return;
|
||||
}
|
||||
|
||||
StopBackupConf* conf = (StopBackupConf*)signal->getDataPtrSend();
|
||||
conf->backupId = ptr.p->backupId;
|
||||
conf->backupPtr = ptr.i;
|
||||
|
|
|
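Taken together, the Backup.cpp changes let a test simulate a full disk: error insert 10036 makes checkFile() fail the file thread with error 2810 ("No space left on the device", added to ndberror.c below), and the new closeFilesDone() path reports the failure to the master instead of confirming. A hypothetical NDBT-style fragment arming that insert (NdbRestarter is the same helper the new runBug29186 test uses; the function name is ours):

    #include <NdbRestarter.hpp>

    // Sketch only: arm error insert 10036 on all data nodes, then start a
    // backup; Backup::checkFile() will fail it with error 2810.
    int simulate_backup_disk_full()
    {
      NdbRestarter restarter;
      if (restarter.insertErrorInAllNodes(10036) != 0)
        return -1;                              // could not arm the insert
      // ... start a backup here and expect it to abort with 2810 ...
      return restarter.insertErrorInAllNodes(0);  // clear inserts afterwards
    }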
@@ -8464,6 +8464,12 @@ void Dbdih::execDIHNDBTAMPER(Signal* signal)
    } else if (tuserpointer < 15000) {
      jam();
      tuserblockref = DBDICT_REF;
+   } else if (tuserpointer < 16000) {
+     jam();
+     tuserblockref = LGMAN_REF;
+   } else if (tuserpointer < 17000) {
+     jam();
+     tuserblockref = TSMAN_REF;
    } else if (tuserpointer < 30000) {
      /*--------------------------------------------------------------------*/
      // Ignore errors in the 20000-range.
@@ -547,6 +547,22 @@ Lgman::execCREATE_FILE_REQ(Signal* signal)
    break;
  }

+  if(ERROR_INSERTED(15000) ||
+     (sizeof(void*) == 4 && req->file_size_hi & 0xFFFFFFFF))
+  {
+    jam();
+    if(signal->getNoOfSections())
+      releaseSections(signal);
+
+    CreateFileImplRef* ref= (CreateFileImplRef*)signal->getDataPtr();
+    ref->senderData = senderData;
+    ref->senderRef = reference();
+    ref->errorCode = CreateFileImplRef::FileSizeTooLarge;
+    sendSignal(senderRef, GSN_CREATE_FILE_REF, signal,
+               CreateFileImplRef::SignalLength, JBB);
+    return;
+  }
+
  new (file_ptr.p) Undofile(req, ptr.i);

  Local_undofile_list tmp(m_file_pool, ptr.p->m_meta_files);
@@ -537,6 +537,22 @@ Tsman::execCREATE_FILE_REQ(Signal* signal){
    break;
  }

+  if(ERROR_INSERTED(16000) ||
+     (sizeof(void*) == 4 && req->file_size_hi & 0xFFFFFFFF))
+  {
+    jam();
+    if(signal->getNoOfSections())
+      releaseSections(signal);
+
+    CreateFileImplRef* ref= (CreateFileImplRef*)signal->getDataPtr();
+    ref->senderData = senderData;
+    ref->senderRef = reference();
+    ref->errorCode = CreateFileImplRef::FileSizeTooLarge;
+    sendSignal(senderRef, GSN_CREATE_FILE_REF, signal,
+               CreateFileImplRef::SignalLength, JBB);
+    return;
+  }
+
  new (file_ptr.p) Datafile(req);
  Local_datafile_list tmp(m_file_pool, ptr.p->m_meta_files);
  tmp.add(file_ptr);
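In both blocks the guard refuses files of 4 GiB or more on 32-bit hosts: the requested size arrives split into two 32-bit words, and any nonzero high word means the size cannot be handled, so the request is answered with the new FileSizeTooLarge (1515) code. A worked restatement of the size arithmetic (standalone sketch; the function name is ours, not NDB's):

    #include <cstdint>
    #include <iostream>

    // On a 32-bit host (sizeof(void*) == 4), a nonzero high word means the
    // requested file size is >= 4 GiB and must be refused.
    bool file_too_large_for_32bit(uint32_t file_size_hi)
    {
      return sizeof(void*) == 4 && file_size_hi != 0;
    }

    int main()
    {
      uint64_t size = 6ULL * 1024 * 1024 * 1024;           // request 6 GiB
      uint32_t hi = uint32_t(size >> 32);                  // = 1 here
      uint32_t lo = uint32_t(size);                        // low 32 bits
      std::cout << file_too_large_for_32bit(hi) << "\n";   // 1 on a 32-bit build
      (void)lo;
      return 0;
    }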
@@ -256,7 +256,7 @@ struct Ndb_logevent_body_row ndb_logevent_body[]= {
  ROW( ReceiveBytesStatistic, "mean_received_bytes", 2, mean_received_bytes),

  ROW( MemoryUsage, "gth", 1, gth),
- ROW( MemoryUsage, "page_size_kb", 2, page_size_kb),
+ ROW( MemoryUsage, "page_size_bytes", 2, page_size_bytes),
  ROW( MemoryUsage, "pages_used", 3, pages_used),
  ROW( MemoryUsage, "pages_total", 4, pages_total),
  ROW( MemoryUsage, "block", 5, block),
@@ -397,7 +397,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
    ConfigInfo::CI_INT,
    MANDATORY,
    "1",
-   STR_VALUE(MAX_NODES) },
+   STR_VALUE(MAX_DATA_NODE_ID) },

  {
    CFG_NODE_ID,

@@ -409,7 +409,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
    ConfigInfo::CI_INT,
    MANDATORY,
    "1",
-   STR_VALUE(MAX_NODES) },
+   STR_VALUE(MAX_DATA_NODE_ID) },

  {
    KEY_INTERNAL,

@@ -1404,7 +1404,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
    ConfigInfo::CI_INT,
    MANDATORY,
    "1",
-   STR_VALUE(MAX_NODES) },
+   STR_VALUE(MAX_NODES_ID) },

  {
    CFG_NODE_ID,

@@ -1416,7 +1416,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
    ConfigInfo::CI_INT,
    MANDATORY,
    "1",
-   STR_VALUE(MAX_NODES) },
+   STR_VALUE(MAX_NODES_ID) },

  {
    KEY_INTERNAL,

@@ -1547,7 +1547,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
    ConfigInfo::CI_INT,
    MANDATORY,
    "1",
-   STR_VALUE(MAX_NODES) },
+   STR_VALUE(MAX_NODES_ID) },

  {
    CFG_NODE_ID,

@@ -1559,7 +1559,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
    ConfigInfo::CI_INT,
    MANDATORY,
    "1",
-   STR_VALUE(MAX_NODES) },
+   STR_VALUE(MAX_NODES_ID) },

  {
    CFG_LOG_DESTINATION,
@@ -2340,6 +2340,22 @@ NdbDictionaryImpl::createTable(NdbTableImpl &t)
{
  DBUG_ENTER("NdbDictionaryImpl::createTable");

+  bool autoIncrement = false;
+  Uint64 initialValue = 0;
+  for (Uint32 i = 0; i < t.m_columns.size(); i++) {
+    const NdbColumnImpl* c = t.m_columns[i];
+    assert(c != NULL);
+    if (c->m_autoIncrement) {
+      if (autoIncrement) {
+        m_error.code = 4335;
+        DBUG_RETURN(-1);
+      }
+      autoIncrement = true;
+      initialValue = c->m_autoIncrementInitialValue;
+    }
+  }
+
  // if the new name has not been set, use the copied name
  if (t.m_newExternalName.empty())
  {

@@ -2377,21 +2393,6 @@ NdbDictionaryImpl::createTable(NdbTableImpl &t)

  // auto-increment - use "t" because initial value is not in DICT
  {
-    bool autoIncrement = false;
-    Uint64 initialValue = 0;
-    for (Uint32 i = 0; i < t.m_columns.size(); i++) {
-      const NdbColumnImpl* c = t.m_columns[i];
-      assert(c != NULL);
-      if (c->m_autoIncrement) {
-        if (autoIncrement) {
-          m_error.code = 4335;
-          delete t2;
-          DBUG_RETURN(-1);
-        }
-        autoIncrement = true;
-        initialValue = c->m_autoIncrementInitialValue;
-      }
-    }
    if (autoIncrement) {
      // XXX unlikely race condition - t.m_id may no longer be same table
      // the tuple id range is not used on input
@@ -426,6 +426,7 @@ ErrorBundle ErrorCodes[] = {
  { 1512, DMEC, SE, "File read error" },
  { 1513, DMEC, IE, "Filegroup not online" },
  { 1514, DMEC, SE, "Currently there is a limit of one logfile group" },
+ { 1515, DMEC, SE, "Currently there is a 4G limit of one undo/data-file in 32-bit host" },

  { 773, DMEC, SE, "Out of string memory, please modify StringMemory config parameter" },
  { 775, DMEC, SE, "Create file is not supported when Diskless=1" },

@@ -625,6 +626,8 @@ ErrorBundle ErrorCodes[] = {
  { 4274, DMEC, IE, "Corrupted main table PK in blob operation" },
  { 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" },
  { 4294, DMEC, AE, "Scan filter is too large, discarded" },
+ { 2810, DMEC, TR, "No space left on the device" },
+ { 2815, DMEC, TR, "Error in reading files, please check file system" },

  { NO_CONTACT_WITH_PROCESS, DMEC, AE,
    "No contact with the process (dead ?)."},
@@ -2357,6 +2357,168 @@ runBug24631(NDBT_Context* ctx, NDBT_Step* step)
  return NDBT_OK;
}

+int
+runBug29186(NDBT_Context* ctx, NDBT_Step* step)
+{
+  int lgError = 15000;
+  int tsError = 16000;
+  int res;
+  char lgname[256];
+  char ufname[256];
+  char tsname[256];
+  char dfname[256];
+
+  NdbRestarter restarter;
+
+  if (restarter.getNumDbNodes() < 2){
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
+  NdbDictionary::Dictionary::List list;
+
+  if (pDict->listObjects(list) == -1)
+    return NDBT_FAILED;
+
+  // 1.create logfile group
+  const char * lgfound = 0;
+
+  for (Uint32 i = 0; i<list.count; i++)
+  {
+    switch(list.elements[i].type){
+    case NdbDictionary::Object::LogfileGroup:
+      lgfound = list.elements[i].name;
+      break;
+    default:
+      break;
+    }
+    if (lgfound)
+      break;
+  }
+
+  if (lgfound == 0)
+  {
+    BaseString::snprintf(lgname, sizeof(lgname), "LG-%u", rand());
+    NdbDictionary::LogfileGroup lg;
+
+    lg.setName(lgname);
+    lg.setUndoBufferSize(8*1024*1024);
+    if(pDict->createLogfileGroup(lg) != 0)
+    {
+      g_err << "Failed to create logfilegroup:"
+            << endl << pDict->getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+  }
+  else
+  {
+    BaseString::snprintf(lgname, sizeof(lgname), "%s", lgfound);
+  }
+
+  if(restarter.waitClusterStarted(60)){
+    g_err << "waitClusterStarted failed"<< endl;
+    return NDBT_FAILED;
+  }
+
+  if(restarter.insertErrorInAllNodes(lgError) != 0){
+    g_err << "failed to set error insert"<< endl;
+    return NDBT_FAILED;
+  }
+
+  g_info << "error inserted" << endl;
+  g_info << "waiting some before add log file" << endl;
+  g_info << "starting create log file group" << endl;
+
+  NdbDictionary::Undofile uf;
+  BaseString::snprintf(ufname, sizeof(ufname), "%s-%u", lgname, rand());
+  uf.setPath(ufname);
+  uf.setSize(2*1024*1024);
+  uf.setLogfileGroup(lgname);
+
+  if(pDict->createUndofile(uf) == 0)
+  {
+    g_err << "Create log file group should fail on error_insertion " << lgError << endl;
+    return NDBT_FAILED;
+  }
+
+  //clear lg error
+  if(restarter.insertErrorInAllNodes(15099) != 0){
+    g_err << "failed to set error insert"<< endl;
+    return NDBT_FAILED;
+  }
+  NdbSleep_SecSleep(5);
+
+  //lg error has been cleared, so we can add undo file
+  if(pDict->createUndofile(uf) != 0)
+  {
+    g_err << "Failed to create undofile:"
+          << endl << pDict->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  if(restarter.waitClusterStarted(60)){
+    g_err << "waitClusterStarted failed"<< endl;
+    return NDBT_FAILED;
+  }
+
+  if(restarter.insertErrorInAllNodes(tsError) != 0){
+    g_err << "failed to set error insert"<< endl;
+    return NDBT_FAILED;
+  }
+  g_info << "error inserted" << endl;
+  g_info << "waiting some before create table space" << endl;
+  g_info << "starting create table space" << endl;
+
+  //r = runCreateTablespace(ctx, step);
+  BaseString::snprintf(tsname, sizeof(tsname), "TS-%u", rand());
+  BaseString::snprintf(dfname, sizeof(dfname), "%s-%u-1.dat", tsname, rand());
+
+  NdbDictionary::Tablespace ts;
+  ts.setName(tsname);
+  ts.setExtentSize(1024*1024);
+  ts.setDefaultLogfileGroup(lgname);
+
+  if(pDict->createTablespace(ts) != 0)
+  {
+    g_err << "Failed to create tablespace:"
+          << endl << pDict->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  NdbDictionary::Datafile df;
+  df.setPath(dfname);
+  df.setSize(1*1024*1024);
+  df.setTablespace(tsname);
+
+  if(pDict->createDatafile(df) == 0)
+  {
+    g_err << "Create table space should fail on error_insertion " << tsError << endl;
+    return NDBT_FAILED;
+  }
+
+  //Clear the inserted error
+  if(restarter.insertErrorInAllNodes(16099) != 0){
+    g_err << "failed to set error insert"<< endl;
+    return NDBT_FAILED;
+  }
+  NdbSleep_SecSleep(5);
+
+  if (pDict->dropTablespace(pDict->getTablespace(tsname)) != 0)
+  {
+    g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  if (lgfound == 0)
+  {
+    if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lgname)) != 0)
+      return NDBT_FAILED;
+  }
+
+  return NDBT_OK;
+}
+
struct RandSchemaOp
{
  struct Obj
@@ -3123,6 +3285,10 @@ TESTCASE("Bug24631",
         ""){
  INITIALIZER(runBug24631);
}
+TESTCASE("Bug29186",
+         ""){
+  INITIALIZER(runBug29186);
+}
NDBT_TESTSUITE_END(testDict);

int main(int argc, const char** argv){
@@ -350,7 +350,8 @@ FailS_codes[] = {
  10025,
  10027,
  10033,
- 10035
+ 10035,
+ 10036
};

int