diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index af575de4f62..43532a973f9 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -5,7 +5,7 @@ Next DBACC 3001
 Next DBTUP 4007
 Next DBLQH 5040
 Next DBDICT 6006
-Next DBDIH 7173
+Next DBDIH 7174
 Next DBTC 8035
 Next CMVMI 9000
 Next BACKUP 10022
@@ -387,6 +387,11 @@ Backup Stuff:
 
 5028: Crash when receiving LQHKEYREQ (in non-master)
 
+Failed Create Table:
+--------------------
+7173: Create table failed due to an insufficient number of fragment or
+      replica records.
+
 Drop Table/Index:
 -----------------
 4001: Crash on REL_TABMEMREQ in TUP
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index a49c27e7b08..9498f3f41d5 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -6425,6 +6425,10 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
   tabPtr.p->totalfragments = noFragments;
   ndbrequire(noReplicas == cnoReplicas); // Only allowed
 
+  if (ERROR_INSERTED(7173)) {
+    addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+    return;
+  }
   if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
     jam();
     addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
@@ -6736,13 +6740,15 @@ void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
 void Dbdih::releaseTable(TabRecordPtr tabPtr)
 {
   FragmentstorePtr fragPtr;
-  for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
-    jam();
-    getFragstore(tabPtr.p, fragId, fragPtr);
-    releaseReplicas(fragPtr.p->storedReplicas);
-    releaseReplicas(fragPtr.p->oldStoredReplicas);
-  }//for
-  releaseFragments(tabPtr);
+  if (tabPtr.p->noOfFragChunks > 0) {
+    for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+      jam();
+      getFragstore(tabPtr.p, fragId, fragPtr);
+      releaseReplicas(fragPtr.p->storedReplicas);
+      releaseReplicas(fragPtr.p->oldStoredReplicas);
+    }//for
+    releaseFragments(tabPtr);
+  }
   if (tabPtr.p->tabFile[0] != RNIL) {
     jam();
     releaseFile(tabPtr.p->tabFile[0]);
@@ -6875,9 +6881,6 @@ Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
   return nodeCount;
 }//Dbdih::extractNodeInfo()
 
-#define NO_OF_FRAGS_PER_CHUNK 16
-#define LOG_NO_OF_FRAGS_PER_CHUNK 4
-
 void
 Dbdih::getFragstore(TabRecord * tab,     //In parameter
                     Uint32 fragNo,       //In parameter
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp
index 9469a39a6f5..956c11c44d5 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/ndb/src/kernel/vm/Configuration.cpp
@@ -599,8 +599,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
   cfg.put(CFG_DIH_CONNECT,
          noOfOperations + noOfTransactions + 46);
 
+  Uint32 noFragPerTable= ((noOfDBNodes + NO_OF_FRAGS_PER_CHUNK - 1) >>
+                          LOG_NO_OF_FRAGS_PER_CHUNK) <<
+                          LOG_NO_OF_FRAGS_PER_CHUNK;
+
   cfg.put(CFG_DIH_FRAG_CONNECT,
-         NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfDBNodes);
+         noFragPerTable * noOfMetaTables);
 
   int temp;
   temp = noOfReplicas - 2;
@@ -611,7 +615,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
   cfg.put(CFG_DIH_MORE_NODES,
          temp * NO_OF_FRAG_PER_NODE *
          noOfMetaTables * noOfDBNodes);
-  
+
   cfg.put(CFG_DIH_REPLICAS,
          NO_OF_FRAG_PER_NODE * noOfMetaTables *
          noOfDBNodes * noOfReplicas);
diff --git a/ndb/src/kernel/vm/pc.hpp b/ndb/src/kernel/vm/pc.hpp
index 5ac95d661a4..6ef7be30f94 100644
--- a/ndb/src/kernel/vm/pc.hpp
+++ b/ndb/src/kernel/vm/pc.hpp
@@ -150,6 +150,13 @@
 #define NO_OF_FRAG_PER_NODE 1
 #define MAX_FRAG_PER_NODE 8
 
+/**
+ * DIH allocates fragments in chunks, for fast lookup of a fragment record.
+ * These parameters define the chunk size and the log2 of the chunk size.
+ */
+#define NO_OF_FRAGS_PER_CHUNK 8
+#define LOG_NO_OF_FRAGS_PER_CHUNK 3
+
 /* ---------------------------------------------------------------- */
 // To avoid synching too big chunks at a time we synch after writing
 // a certain number of data/UNDO pages. (e.g. 2 MBytes).
diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp
index e7597c26960..c527bbd655b 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/ndb/test/ndbapi/testDict.cpp
@@ -1002,11 +1002,13 @@ int runGetPrimaryKey(NDBT_Context* ctx, NDBT_Step* step){
   return result;
 }
 
-int
+struct ErrorCodes { int error_id; bool crash; };
+ErrorCodes
 NF_codes[] = {
-  6003
-  ,6004
-  //,6005
+  {6003, true},
+  {6004, true},
+  //{6005, true},
+  {7173, false}
 };
 
 int
@@ -1042,7 +1044,9 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
     for(int i = 0; i<sz; i++){
-      int error = NF_codes[i];
+      int error = NF_codes[i].error_id;
+      bool crash = NF_codes[i].crash;
@@ ... @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
-      if(myRandom48(100) > 50){
+      if (crash) {
+        if(myRandom48(100) > 50){
-      CHECK2(restarter.startNodes(&nodeId, 1) == 0,
+          CHECK2(restarter.startNodes(&nodeId, 1) == 0,
             "failed to start node");
-      CHECK2(restarter.waitClusterStarted() == 0,
+          CHECK2(restarter.waitClusterStarted() == 0,
             "waitClusterStarted failed");
-      CHECK2(dict->dropTable(pTab->getName()) == 0,
+          CHECK2(dict->dropTable(pTab->getName()) == 0,
             "drop table failed");
-      } else {
-      CHECK2(dict->dropTable(pTab->getName()) == 0,
+        } else {
+          CHECK2(dict->dropTable(pTab->getName()) == 0,
             "drop table failed");
-      CHECK2(restarter.startNodes(&nodeId, 1) == 0,
+          CHECK2(restarter.startNodes(&nodeId, 1) == 0,
             "failed to start node");
-      CHECK2(restarter.waitClusterStarted() == 0,
+          CHECK2(restarter.waitClusterStarted() == 0,
             "waitClusterStarted failed");
-      }
+        }
-      CHECK2(restarter.dumpStateOneNode(nodeId, &val, 1) == 0,
+        CHECK2(restarter.dumpStateOneNode(nodeId, &val, 1) == 0,
             "Failed to set LCP to min value");
+      }
     }
   }
 end:
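
A note on the calcSizeAlt() change above, since the shift pair is easy to misread: it rounds the per-table fragment count up to a whole number of chunks, so CFG_DIH_FRAG_CONNECT is always sized in complete chunks. A minimal standalone sketch of the same round-up, assuming only the two constants from pc.hpp (roundUpToChunk and main are illustrative names, not part of the patch):

#include <cstdio>

#define NO_OF_FRAGS_PER_CHUNK 8      // chunk size (a power of two)
#define LOG_NO_OF_FRAGS_PER_CHUNK 3  // log2(NO_OF_FRAGS_PER_CHUNK)

// Round n up to the next multiple of the chunk size, exactly as the
// noFragPerTable expression does: add (chunk - 1), then shift right and
// left again to clear the low LOG_NO_OF_FRAGS_PER_CHUNK bits.
static unsigned roundUpToChunk(unsigned n)
{
  return ((n + NO_OF_FRAGS_PER_CHUNK - 1) >> LOG_NO_OF_FRAGS_PER_CHUNK)
                                          << LOG_NO_OF_FRAGS_PER_CHUNK;
}

int main()
{
  // 2 nodes -> 8, 8 nodes -> 8, 9 nodes -> 16: whole chunks only.
  printf("%u %u %u\n", roundUpToChunk(2), roundUpToChunk(8), roundUpToChunk(9));
  return 0;
}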
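The power-of-two chunk size is what lets Dbdih::getFragstore() locate a fragment record with a shift and a mask instead of a scan, and it is why releaseTable() now tests noOfFragChunks > 0: a refused create (for example under error insert 7173) never allocated any chunks, so there is nothing to walk. A sketch of that addressing scheme under a simplified table record; TabRecordSketch, MAX_FRAG_CHUNKS, fragChunk and getFragstoreSketch are illustrative, not the DBDIH definitions:

#include <cassert>

#define NO_OF_FRAGS_PER_CHUNK 8
#define LOG_NO_OF_FRAGS_PER_CHUNK 3
#define MAX_FRAG_CHUNKS 64           // illustrative bound, not from the patch

struct Fragmentstore { /* replica lists etc. elided */ };

// Illustrative table record: one base pointer per allocated chunk.
struct TabRecordSketch {
  Fragmentstore* fragChunk[MAX_FRAG_CHUNKS];
  unsigned noOfFragChunks;
};

// Chunked lookup: the upper bits of fragNo select the chunk, the lower
// bits select the record inside it. O(1), no search over records.
static Fragmentstore* getFragstoreSketch(TabRecordSketch* tab, unsigned fragNo)
{
  unsigned chunkNo    = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK;
  unsigned chunkIndex = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1);
  assert(chunkNo < tab->noOfFragChunks);
  return tab->fragChunk[chunkNo] + chunkIndex;
}

int main()
{
  Fragmentstore chunk0[NO_OF_FRAGS_PER_CHUNK];
  TabRecordSketch tab = { { chunk0 }, 1 };
  // fragNo 5 lives in chunk 0, slot 5.
  return getFragstoreSketch(&tab, 5) == &chunk0[5] ? 0 : 1;
}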