Fixed wrong number of fragment records in DIH
Fixed bug in failed create table
Fixed new test case for failed create table using new ERROR_INSERT
This commit is contained in:
parent 03511a0df1
commit eaff990883

5 changed files with 54 additions and 29 deletions

@@ -5,7 +5,7 @@ Next DBACC 3001
 Next DBTUP 4007
 Next DBLQH 5040
 Next DBDICT 6006
-Next DBDIH 7173
+Next DBDIH 7174
 Next DBTC 8035
 Next CMVMI 9000
 Next BACKUP 10022
@@ -387,6 +387,11 @@ Backup Stuff:
 
 5028: Crash when receiving LQHKEYREQ (in non-master)
 
+Failed Create Table:
+--------------------
+7173: Create table failed due to not sufficient number of fragment or
+      replica records.
+
 Drop Table/Index:
 -----------------
 4001: Crash on REL_TABMEMREQ in TUP
@@ -6425,6 +6425,10 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
   tabPtr.p->totalfragments = noFragments;
   ndbrequire(noReplicas == cnoReplicas); // Only allowed
 
+  if (ERROR_INSERTED(7173)) {
+    addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+    return;
+  }
   if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
     jam();
     addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
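
ERROR_INSERTED(7173) is NDB's fault-injection hook: when a test has set the block's error-insert value to 7173, the branch above fires and the table add is refused through the same addtabrefuseLab path as a genuine shortage of replica records. A minimal standalone sketch of that gating pattern, with hypothetical names standing in for the real kernel macros and state:

#include <cstdio>

// Stand-in for the per-block error-insert variable that the real
// ERROR_INSERTED(x) macro tests (a test tool sets it at runtime).
static unsigned cerrorInsert = 0;
#define ERROR_INSERTED(x) (cerrorInsert == (x))

// Hypothetical miniature of the add-table check above.
static bool addTable(unsigned needed, unsigned freeReplicaRec) {
  if (ERROR_INSERTED(7173))
    return false;                 // injected failure: same refusal path
  if (needed > freeReplicaRec)
    return false;                 // genuine shortage of replica records
  return true;
}

int main() {
  std::printf("normal: %d\n", addTable(8, 100));   // 1: accepted
  cerrorInsert = 7173;                             // what a test arranges
  std::printf("injected: %d\n", addTable(8, 100)); // 0: refused
}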
@@ -6736,13 +6740,15 @@ void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
 void Dbdih::releaseTable(TabRecordPtr tabPtr)
 {
   FragmentstorePtr fragPtr;
-  for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
-    jam();
-    getFragstore(tabPtr.p, fragId, fragPtr);
-    releaseReplicas(fragPtr.p->storedReplicas);
-    releaseReplicas(fragPtr.p->oldStoredReplicas);
-  }//for
-  releaseFragments(tabPtr);
+  if (tabPtr.p->noOfFragChunks > 0) {
+    for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+      jam();
+      getFragstore(tabPtr.p, fragId, fragPtr);
+      releaseReplicas(fragPtr.p->storedReplicas);
+      releaseReplicas(fragPtr.p->oldStoredReplicas);
+    }//for
+    releaseFragments(tabPtr);
+  }
   if (tabPtr.p->tabFile[0] != RNIL) {
     jam();
     releaseFile(tabPtr.p->tabFile[0]);
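
The releaseTable() change guards the per-fragment release behind noOfFragChunks > 0: a table whose creation was refused before any fragment chunks were allocated (the 7173 path above) must not walk totalfragments entries it never built and release records it does not own. A hypothetical mini-model of that allocate-before-release guard:

#include <vector>

// Hypothetical mini-model of the fix: only release fragment state
// that was actually allocated.
struct Table {
  unsigned noOfFragChunks = 0;   // stays 0 if creation was refused early
  std::vector<int> fragChunks;   // stand-in for chunked fragment records
};

static void releaseTable(Table& tab) {
  if (tab.noOfFragChunks > 0) {  // the new guard
    tab.fragChunks.clear();      // release replicas and fragments
    tab.noOfFragChunks = 0;
  }
  // files etc. are still released unconditionally, as in the real code
}

int main() {
  Table refused;                 // creation failed before allocation
  releaseTable(refused);         // no-op instead of a bogus release
}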
@@ -6875,9 +6881,6 @@ Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
   return nodeCount;
 }//Dbdih::extractNodeInfo()
 
-#define NO_OF_FRAGS_PER_CHUNK 16
-#define LOG_NO_OF_FRAGS_PER_CHUNK 4
-
 void
 Dbdih::getFragstore(TabRecord * tab,     //In parameter
                     Uint32 fragNo,       //In parameter
@@ -599,8 +599,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
   cfg.put(CFG_DIH_CONNECT,
           noOfOperations + noOfTransactions + 46);
 
+  Uint32 noFragPerTable= ((noOfDBNodes + NO_OF_FRAGS_PER_CHUNK - 1) >>
+                          LOG_NO_OF_FRAGS_PER_CHUNK) <<
+                          LOG_NO_OF_FRAGS_PER_CHUNK;
+
   cfg.put(CFG_DIH_FRAG_CONNECT,
-          NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfDBNodes);
+          noFragPerTable * noOfMetaTables);
 
   int temp;
   temp = noOfReplicas - 2;
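
This noFragPerTable computation is the core of the fragment-record fix: DIH hands out fragment records a chunk at a time, so the per-table need (one fragment per DB node here) has to be rounded up to whole chunks before being multiplied by the number of tables. A small standalone check of the shift arithmetic, assuming the chunk constants from the header change further below:

#include <cstdio>

int main() {
  // Matches the chunk constants introduced in the header change below.
  const unsigned NO_OF_FRAGS_PER_CHUNK = 8;
  const unsigned LOG_NO_OF_FRAGS_PER_CHUNK = 3;   // log2(8)

  for (unsigned noOfDBNodes = 1; noOfDBNodes <= 10; noOfDBNodes++) {
    // Round the per-table fragment count up to whole chunks via shifts.
    unsigned noFragPerTable = ((noOfDBNodes + NO_OF_FRAGS_PER_CHUNK - 1)
                               >> LOG_NO_OF_FRAGS_PER_CHUNK)
                              << LOG_NO_OF_FRAGS_PER_CHUNK;
    std::printf("%2u nodes -> %2u fragment records/table\n",
                noOfDBNodes, noFragPerTable);     // 1..8 -> 8, 9..10 -> 16
  }
}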
@@ -611,7 +615,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
   cfg.put(CFG_DIH_MORE_NODES,
           temp * NO_OF_FRAG_PER_NODE *
           noOfMetaTables * noOfDBNodes);
-  
+
   cfg.put(CFG_DIH_REPLICAS,
           NO_OF_FRAG_PER_NODE * noOfMetaTables *
           noOfDBNodes * noOfReplicas);
@@ -150,6 +150,13 @@
 #define NO_OF_FRAG_PER_NODE 1
 #define MAX_FRAG_PER_NODE 8
 
+/**
+ * DIH allocates fragments in chunk for fast find of fragment record.
+ * These parameters define chunk size and log of chunk size.
+ */
+#define NO_OF_FRAGS_PER_CHUNK 8
+#define LOG_NO_OF_FRAGS_PER_CHUNK 3
+
 /* ---------------------------------------------------------------- */
 // To avoid synching too big chunks at a time we synch after writing
 // a certain number of data/UNDO pages. (e.g. 2 MBytes).
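
With a power-of-two chunk size, locating a fragment record is one shift (which chunk) plus one mask (which slot inside it), which is presumably how getFragstore uses these constants for its "fast find". A minimal sketch of that indexing:

#include <cstdio>

#define NO_OF_FRAGS_PER_CHUNK 8
#define LOG_NO_OF_FRAGS_PER_CHUNK 3

int main() {
  for (unsigned fragNo = 0; fragNo < 20; fragNo++) {
    unsigned chunk = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK;   // which chunk
    unsigned slot  = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1);  // slot inside it
    std::printf("frag %2u -> chunk %u, slot %u\n", fragNo, chunk, slot);
  }
}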
@@ -1002,11 +1002,13 @@ int runGetPrimaryKey(NDBT_Context* ctx, NDBT_Step* step){
   return result;
 }
 
-int
+struct ErrorCodes { int error_id; bool crash;};
+ErrorCodes
 NF_codes[] = {
-  6003
-  ,6004
-  //,6005
+  {6003, true},
+  {6004, true},
+  //,6005, true,
+  {7173, false}
 };
 
 int
@@ -1042,7 +1044,9 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
   for(int i = 0; i<sz; i++){
     int rand = myRandom48(restarter.getNumDbNodes());
     int nodeId = restarter.getRandomNotMasterNodeId(rand);
-    int error = NF_codes[i];
+    struct ErrorCodes err_struct = NF_codes[i];
+    int error = err_struct.error_id;
+    bool crash = err_struct.crash;
 
     g_info << "NF1: node = " << nodeId << " error code = " << error << endl;
 
@@ -1057,31 +1061,33 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
     CHECK2(dict->createTable(* pTab) == 0,
            "failed to create table");
 
-    CHECK2(restarter.waitNodesNoStart(&nodeId, 1) == 0,
+    if (crash) {
+      CHECK2(restarter.waitNodesNoStart(&nodeId, 1) == 0,
            "waitNodesNoStart failed");
 
-    if(myRandom48(100) > 50){
-      CHECK2(restarter.startNodes(&nodeId, 1) == 0,
+      if(myRandom48(100) > 50){
+        CHECK2(restarter.startNodes(&nodeId, 1) == 0,
              "failed to start node");
 
-      CHECK2(restarter.waitClusterStarted() == 0,
+        CHECK2(restarter.waitClusterStarted() == 0,
              "waitClusterStarted failed");
 
-      CHECK2(dict->dropTable(pTab->getName()) == 0,
+        CHECK2(dict->dropTable(pTab->getName()) == 0,
              "drop table failed");
-    } else {
-      CHECK2(dict->dropTable(pTab->getName()) == 0,
+      } else {
+        CHECK2(dict->dropTable(pTab->getName()) == 0,
              "drop table failed");
 
-      CHECK2(restarter.startNodes(&nodeId, 1) == 0,
+        CHECK2(restarter.startNodes(&nodeId, 1) == 0,
              "failed to start node");
 
-      CHECK2(restarter.waitClusterStarted() == 0,
+        CHECK2(restarter.waitClusterStarted() == 0,
              "waitClusterStarted failed");
-    }
+      }
 
-    CHECK2(restarter.dumpStateOneNode(nodeId, &val, 1) == 0,
+      CHECK2(restarter.dumpStateOneNode(nodeId, &val, 1) == 0,
            "Failed to set LCP to min value");
+    }
   }
 }
 end:
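
Net effect on the test: NF_codes changes from a bare list of ints to {error_id, crash} pairs, so one loop covers both fault styles — 6003/6004 crash the victim node and need the full wait/restart/cluster-started dance, while 7173 only refuses the create and the restart handling is skipped. A condensed, hypothetical sketch of that data-driven branching:

#include <cstdio>

struct ErrorCodes { int error_id; bool crash; };

static const ErrorCodes NF_codes[] = {
  {6003, true},    // node crashes during create
  {6004, true},
  {7173, false},   // create is refused, node stays up
};

int main() {
  for (const ErrorCodes& e : NF_codes) {
    if (e.crash)
      std::printf("%d: wait node down, restart, wait cluster started\n",
                  e.error_id);
    else
      std::printf("%d: node stays up, restart handling skipped\n",
                  e.error_id);
  }
}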