Merge build.mysql.com:/home/bk/mysql-4.1-ndb
into build.mysql.com:/users/mronstrom/wl2056

ndb/src/common/mgmcommon/ConfigInfo.cpp: Auto merged
ndb/src/common/mgmcommon/LocalConfig.cpp: Auto merged
ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp: Auto merged
ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp: Auto merged
ndb/src/kernel/vm/Configuration.cpp: Auto merged
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted

commit 2c7754be96

28 changed files with 295 additions and 161 deletions
@@ -118,6 +118,7 @@ monty@tik.
monty@tik.mysql.fi
monty@tramp.mysql.fi
monty@work.mysql.com
mronstrom@build.mysql.com
mronstrom@mysql.com
mskold@mysql.com
msvensson@build.mysql.com
@@ -74,7 +74,7 @@ FsCloseReq::getRemoveFileFlag(const UintR & fileflag){
inline
void
FsCloseReq::setRemoveFileFlag(UintR & fileflag, bool removefile){
ASSERT_BOOL(removefile, "FsCloseReq::setRemoveFileFlag");
// ASSERT_BOOL(removefile, "FsCloseReq::setRemoveFileFlag");
if (removefile == true)
fileflag = 1;
else
@@ -77,6 +77,16 @@

#define CFG_DB_DISCLESS 148

#define CFG_DB_NO_ORDERED_INDEXES 149
#define CFG_DB_NO_UNIQUE_HASH_INDEXES 150
#define CFG_DB_NO_LOCAL_OPS 151
#define CFG_DB_NO_LOCAL_SCANS 152
#define CFG_DB_BATCH_SIZE 153

#define CFG_DB_UNDO_INDEX_BUFFER 154
#define CFG_DB_UNDO_DATA_BUFFER 155
#define CFG_DB_REDO_BUFFER 156

#define CFG_NODE_ARBIT_RANK 200
#define CFG_NODE_ARBIT_DELAY 201
@@ -409,12 +409,36 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
8,
MAX_INT_RNIL },

{
CFG_DB_NO_ORDERED_INDEXES,
"MaxNoOfOrderedIndexes",
"DB",
"Total number of ordered indexes that can be defined in the system",
ConfigInfo::USED,
false,
ConfigInfo::INT,
128,
0,
MAX_INT_RNIL },

{
CFG_DB_NO_UNIQUE_HASH_INDEXES,
"MaxNoOfUniqueHashIndexes",
"DB",
"Total number of unique hash indexes that can be defined in the system",
ConfigInfo::USED,
false,
ConfigInfo::INT,
64,
0,
MAX_INT_RNIL },

{
CFG_DB_NO_INDEXES,
"MaxNoOfIndexes",
"DB",
"Total number of indexes that can be defined in the system",
ConfigInfo::USED,
ConfigInfo::DEPRICATED,
false,
ConfigInfo::INT,
128,

@@ -534,7 +558,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
CFG_DB_NO_OPS,
"MaxNoOfConcurrentOperations",
"DB",
"Max no of op:s on DB (op:s within a transaction are concurrently executed)",
"Max number of operation records in transaction coordinator",
ConfigInfo::USED,
false,
ConfigInfo::INT,

@@ -542,6 +566,43 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
32,
MAX_INT_RNIL },

{
CFG_DB_NO_LOCAL_OPS,
"MaxNoOfLocalOperations",
"DB",
"Max number of operation records defined in the local storage node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
0, //0 means that it is calculated using MaxNoOfConcurrentOperations
32,
MAX_INT_RNIL },

{
CFG_DB_NO_LOCAL_SCANS,
"MaxNoOfLocalScans",
"DB",
"Max number of fragment scans in parallel in the local storage node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
0, //0 means that it is calculated using MaxNoOfConcurrentScans
32,
MAX_INT_RNIL },

{
CFG_DB_BATCH_SIZE,
"BatchSizePerLocalScan",
"DB",
"Used to calculate the number of lock records for scan with hold lock",
ConfigInfo::USED,
false,
ConfigInfo::INT,
32,
1,
MAX_INT_RNIL },

{
CFG_DB_NO_TRANSACTIONS,
"MaxNoOfConcurrentTransactions",

@@ -586,7 +647,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT64,
3 * 1024 * 8192,
24 * (1024 * 1024),
128 * 8192,
((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },

@@ -598,10 +659,46 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT64,
10 * 1024 * 8192,
80 * (1024 * 1024),
128 * 8192,
((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },

{
CFG_DB_UNDO_INDEX_BUFFER,
"UndoIndexBuffer",
"DB",
"Number bytes on each DB node allocated for storing data",
ConfigInfo::USED,
false,
ConfigInfo::INT,
2 * (1024 * 1024),
1 * (1024 * 1024),
MAX_INT_RNIL},

{
CFG_DB_UNDO_DATA_BUFFER,
"UndoDataBuffer",
"DB",
"Number bytes on each DB node allocated for storing data",
ConfigInfo::USED,
false,
ConfigInfo::INT,
16 * (1024 * 1024),
1 * (1024 * 1024),
MAX_INT_RNIL},

{
CFG_DB_REDO_BUFFER,
"RedoBuffer",
"DB",
"Number bytes on each DB node allocated for storing data",
ConfigInfo::USED,
false,
ConfigInfo::INT,
8 * (1024 * 1024),
1 * (1024 * 1024),
MAX_INT_RNIL},

{
CFG_DB_START_PARTIAL_TIMEOUT,
"StartPartialTimeout",
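Each m_ParamInfo entry above is positional: internal id, parameter name, config section, description, status, an updateable flag, value type, then default, minimum and maximum. A default of 0 for MaxNoOfLocalOperations and MaxNoOfLocalScans means "derive the value from another parameter", as the inline comments say. A minimal self-contained sketch of the same table idea (ParamDef and kParams are illustrative names, not the real ConfigInfo declarations):

    // Sketch of a positional parameter table mirroring the layout of the
    // entries added above. Types and names here are illustrative only.
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    struct ParamDef {
      uint32_t    id;          // e.g. CFG_DB_NO_LOCAL_OPS (151)
      const char* name;        // e.g. "MaxNoOfLocalOperations"
      const char* section;     // e.g. "DB"
      const char* description;
      bool        updateable;  // can it change without a restart?
      uint64_t    defaultVal;  // 0 is used above to mean "derive from another parameter"
      uint64_t    minVal;
      uint64_t    maxVal;
    };

    static const ParamDef kParams[] = {
      { 151, "MaxNoOfLocalOperations", "DB",
        "Max number of operation records in the local storage node",
        false, 0 /* derived from MaxNoOfConcurrentOperations */, 32,
        std::numeric_limits<uint32_t>::max() },
      { 153, "BatchSizePerLocalScan", "DB",
        "Used to calculate the number of lock records for scan with hold lock",
        false, 32, 1, std::numeric_limits<uint32_t>::max() },
    };

    int main() {
      for (const ParamDef& p : kParams)
        std::printf("%u %s default=%llu\n", (unsigned)p.id, p.name,
                    (unsigned long long)p.defaultVal);
      return 0;
    }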
@@ -18,6 +18,7 @@
#include <NdbEnv.h>
#include <NdbConfig.h>
#include <NdbAutoPtr.hpp>
#include <NdbMem.h>

LocalConfig::LocalConfig(){
error_line = 0; error_msg[0] = 0;

@@ -242,7 +243,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError)
}

int sz = 1024;
char* theString = (char*)malloc(sz);
char* theString = (char*)NdbMem_Allocate(sz);
theString[0] = 0;

fgets(theString, sz, file);

@@ -250,7 +251,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError)
line[0] = ';';
while (strlen(theString) + strlen(line) >= sz) {
sz = sz*2;
char *newString = (char*)malloc(sz);
char *newString = (char*)NdbMem_Allocate(sz);
strcpy(newString, theString);
free(theString);
theString = newString;
@@ -17,6 +17,7 @@
#include <ndb_global.h>
#include <NdbConfig.h>
#include <NdbEnv.h>
#include <NdbMem.h>

static char*
NdbConfig_AllocHomePath(int _len)

@@ -30,7 +31,7 @@ NdbConfig_AllocHomePath(int _len)
path_len= strlen(path);

len+= path_len;
buf= malloc(len);
buf= NdbMem_Allocate(len);
if (path_len > 0)
snprintf(buf, len, "%s%c", path, DIR_SEPARATOR);
else

@@ -48,7 +49,7 @@ NdbConfig_NdbCfgName(int with_ndb_home){
buf= NdbConfig_AllocHomePath(128);
len= strlen(buf);
} else
buf= malloc(128);
buf= NdbMem_Allocate(128);
snprintf(buf+len, 128, "Ndb.cfg");
return buf;
}
@@ -20,6 +20,7 @@
#include <NdbCondition.h>
#include <NdbThread.h>
#include <NdbMutex.h>
#include <NdbMem.h>

struct NdbCondition
{

@@ -34,7 +35,7 @@ NdbCondition_Create(void)
struct NdbCondition* tmpCond;
int result;

tmpCond = (struct NdbCondition*)malloc(sizeof(struct NdbCondition));
tmpCond = (struct NdbCondition*)NdbMem_Allocate(sizeof(struct NdbCondition));

if (tmpCond == NULL)
return NULL;
@@ -31,10 +31,13 @@ void NdbMem_Destroy()
return;
}

void* NdbMem_Allocate(size_t size)
{
void* mem_allocated;
assert(size > 0);
return (void*)malloc(size);
mem_allocated= (void*)malloc(size);
return mem_allocated;
}

void* NdbMem_AllocateAlign(size_t size, size_t alignment)
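The hunk above is the central piece of this patch's allocation cleanup: NdbMem_Allocate() asserts on a non-zero size and is the one place where the real malloc() happens, which is what makes it safe to redirect the malloc() call sites elsewhere in the diff to it. A rough self-contained sketch of that choke-point pattern (the byte counter is an illustrative extra, not something this commit adds):

    // Sketch of a central allocation wrapper in the spirit of NdbMem_Allocate.
    // The function names mirror the ones above; the running byte counter is an
    // assumed illustration of why a single choke point is useful.
    #include <cassert>
    #include <cstdio>
    #include <cstdlib>

    static size_t g_bytes_requested = 0;   // illustrative accounting only

    void* NdbMem_Allocate_sketch(size_t size) {
      assert(size > 0);                    // the real wrapper asserts size > 0 too
      g_bytes_requested += size;
      return std::malloc(size);
    }

    void NdbMem_Free_sketch(void* ptr) {
      std::free(ptr);
    }

    int main() {
      void* p = NdbMem_Allocate_sketch(1024);
      std::printf("requested so far: %zu bytes\n", g_bytes_requested);
      NdbMem_Free_sketch(p);
      return 0;
    }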
@@ -19,13 +19,14 @@

#include <NdbThread.h>
#include <NdbMutex.h>
#include <NdbMem.h>

NdbMutex* NdbMutex_Create(void)
{
NdbMutex* pNdbMutex;
int result;

pNdbMutex = (NdbMutex*)malloc(sizeof(NdbMutex));
pNdbMutex = (NdbMutex*)NdbMem_Allocate(sizeof(NdbMutex));

if (pNdbMutex == NULL)
return NULL;
@@ -18,6 +18,7 @@
#include <ndb_global.h>
#include <NdbThread.h>
#include <pthread.h>
#include <NdbMem.h>

#define MAX_THREAD_NAME 16

@@ -44,7 +45,7 @@ struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func,
if (p_thread_func == NULL)
return 0;

tmpThread = (struct NdbThread*)malloc(sizeof(struct NdbThread));
tmpThread = (struct NdbThread*)NdbMem_Allocate(sizeof(struct NdbThread));
if (tmpThread == NULL)
return NULL;
@@ -1,5 +1,6 @@

#include <ndb_global.h>
#include <NdbMem.h>

extern "C" {
void (* ndb_new_handler)() = 0;

@@ -9,7 +10,7 @@ extern "C" {

void *operator new (size_t sz)
{
void * p = malloc (sz ? sz : 1);
void * p = NdbMem_Allocate(sz ? sz : 1);
if(p)
return p;
if(ndb_new_handler)

@@ -19,7 +20,7 @@ void *operator new (size_t sz)

void *operator new[] (size_t sz)
{
void * p = (void *) malloc (sz ? sz : 1);
void * p = (void *) NdbMem_Allocate(sz ? sz : 1);
if(p)
return p;
if(ndb_new_handler)

@@ -30,13 +31,13 @@ void *operator new[] (size_t sz)
void operator delete (void *ptr)
{
if (ptr)
free(ptr);
NdbMem_Free(ptr);
}

void operator delete[] (void *ptr) throw ()
{
if (ptr)
free(ptr);
NdbMem_Free(ptr);
}

#endif // USE_MYSYS_NEW
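This file routes the global operator new/new[]/delete overloads through NdbMem as well, so C++ allocations end up at the same choke point as the C-level malloc() calls. A compact sketch of that override pattern, independent of the NDB headers (central_alloc/central_free are placeholder names):

    // Sketch: overriding the global allocation operators so that every 'new'
    // in the process funnels through one allocator, as new.cpp does above
    // with NdbMem_Allocate/NdbMem_Free.
    #include <cstdio>
    #include <cstdlib>
    #include <new>

    static void* central_alloc(std::size_t sz) {
      void* p = std::malloc(sz ? sz : 1);    // never request 0 bytes
      if (p == nullptr) throw std::bad_alloc();
      return p;
    }
    static void central_free(void* p) { std::free(p); }

    void* operator new(std::size_t sz)        { return central_alloc(sz); }
    void* operator new[](std::size_t sz)      { return central_alloc(sz); }
    void  operator delete(void* p) noexcept   { central_free(p); }
    void  operator delete[](void* p) noexcept { central_free(p); }

    int main() {
      int* v = new int[4];   // goes through central_alloc
      delete[] v;            // goes through central_free
      std::printf("done\n");
      return 0;
    }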
@@ -194,7 +194,6 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
#define ZTABLESIZE 16
#define ZTABMAXINDEX 3
#define ZUNDEFINED_OP 6
#define ZUNDOPAGESIZE 64
#define ZUNDOHEADSIZE 7
#define ZUNLOCKED 1
#define ZUNDOPAGE_BASE_ADD 2

@@ -894,8 +893,8 @@ struct SrVersionRec {
/* TABREC */
/* --------------------------------------------------------------------------------- */
struct Tabrec {
Uint32 fragholder[NO_OF_FRAG_PER_NODE];
Uint32 fragptrholder[NO_OF_FRAG_PER_NODE];
Uint32 fragholder[MAX_FRAG_PER_NODE];
Uint32 fragptrholder[MAX_FRAG_PER_NODE];
Uint32 tabUserPtr;
BlockReference tabUserRef;
};
@@ -32,7 +32,6 @@ void Dbacc::initData()
crootfragmentsize = ZROOTFRAGMENTSIZE;
cdirrangesize = ZDIRRANGESIZE;
coverflowrecsize = ZOVERFLOWRECSIZE;
cundopagesize = ZUNDOPAGESIZE;
cfsConnectsize = ZFS_CONNECTSIZE;
cfsOpsize = ZFS_OPSIZE;
cscanRecSize = ZSCAN_REC_SIZE;

@@ -136,8 +135,26 @@ void Dbacc::initRecords()
Dbacc::Dbacc(const class Configuration & conf):
SimulatedBlock(DBACC, conf)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbacc);

const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);

ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_INDEX_BUFFER,
&log_page_size);

/**
* Always set page size in half MBytes
*/
cundopagesize= (log_page_size / sizeof(Undopage));
Uint32 mega_byte_part= cundopagesize & 15;
if (mega_byte_part != 0) {
jam();
cundopagesize+= (16 - mega_byte_part);
}
ndbout << "ACC: No of Undo Pages = " << cundopagesize << endl;

// Transit signals
addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD);
addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG);
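The new Dbacc constructor code reads UndoIndexBuffer (CFG_DB_UNDO_INDEX_BUFFER) in bytes, converts it to undo pages and rounds the page count up to a multiple of 16, i.e. to half-megabyte granularity if a page is 32 KB; the DBLQH and DBTUP constructors below do the same for RedoBuffer and UndoDataBuffer. A small worked sketch of that arithmetic (the 32 KB page size is an assumption standing in for sizeof(Undopage)):

    // Worked sketch of the "round page count up to half-megabyte granularity"
    // step used in the Dbacc/Dblqh/Dbtup constructors above.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t page_size  = 32 * 1024;          // assumed page size in bytes
      const uint32_t configured = 2150400;            // example buffer size (~2.05 MB)

      uint32_t pages = configured / page_size;        // 65 pages
      uint32_t rem   = pages & 15;                    // pages modulo 16 (16 pages = 0.5 MB)
      if (rem != 0)
        pages += 16 - rem;                            // round up to the next half megabyte

      std::printf("%u pages (%u bytes)\n", pages, pages * page_size);  // 80 pages, 2.5 MB
      return 0;
    }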
@@ -1021,7 +1021,7 @@ void Dbacc::initialiseTableRec(Signal* signal)
for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
refresh_watch_dog();
ptrAss(tabptr, tabrec);
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
tabptr.p->fragholder[i] = RNIL;
tabptr.p->fragptrholder[i] = RNIL;
}//for

@@ -1187,7 +1187,7 @@ void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
TabrecPtr tabPtr;
tabPtr.i = tableId;
ptrCheckGuard(tabPtr, ctablesize, tabrec);
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabPtr.p->fragholder[i] != RNIL) {
jam();

@@ -1419,7 +1419,7 @@ void Dbacc::execFSREMOVEREF(Signal* signal)
/* -------------------------------------------------------------------------- */
bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid)
{
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == RNIL) {
jam();

@@ -2435,7 +2435,7 @@ void Dbacc::execACC_LOCKREQ(Signal* signal)
ptrCheckGuard(tabptr, ctablesize, tabrec);
// find fragment (TUX will know it)
if (req->fragPtrI == RNIL) {
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragptrholder[i] != RNIL) {
rootfragrecptr.i = tabptr.p->fragptrholder[i];

@@ -12184,7 +12184,7 @@ void Dbacc::takeOutReadyScanQueue(Signal* signal)

bool Dbacc::getrootfragmentrec(Signal* signal, RootfragmentrecPtr& rootPtr, Uint32 fid)
{
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == fid) {
jam();
@@ -64,25 +64,12 @@
/* CONSTANTS OF THE LOG PAGES */
/* ------------------------------------------------------------------------- */
#define ZPAGE_HEADER_SIZE 32
#if defined NDB_OSE
/**
* Set the fragment log file size to 2Mb in OSE
* This is done in order to speed up the initial start
*/
#define ZNO_MBYTES_IN_FILE 2
#define ZPAGE_SIZE 2048
#define ZPAGES_IN_MBYTE 128
#define ZTWOLOG_NO_PAGES_IN_MBYTE 7
#define ZTWOLOG_PAGE_SIZE 11
#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
#else
#define ZNO_MBYTES_IN_FILE 16
#define ZPAGE_SIZE 8192
#define ZPAGES_IN_MBYTE 32
#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
#define ZTWOLOG_PAGE_SIZE 13
#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
#endif

#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config)
#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log

@@ -1829,11 +1816,7 @@ public:
* - There is no more information needed.
* The next mbyte will always refer to the start of the next mbyte.
*/
#ifdef NDB_OSE
UintR logPageWord[2048]; // Size 8 kbytes
#else
UintR logPageWord[8192]; // Size 32 kbytes
#endif
};
typedef Ptr<LogPageRecord> LogPageRecordPtr;

@@ -1855,8 +1838,8 @@ public:
PREP_DROP_TABLE_DONE = 4
};

UintR fragrec[NO_OF_FRAG_PER_NODE];
Uint16 fragid[NO_OF_FRAG_PER_NODE];
UintR fragrec[MAX_FRAG_PER_NODE];
Uint16 fragid[MAX_FRAG_PER_NODE];
/**
* Status of the table
*/

@@ -2643,7 +2626,6 @@ private:
UintR cfirstfreeLfo;
UintR clfoFileSize;

#define ZLOG_PAGE_FILE_SIZE 256 // 8 MByte
LogPageRecord *logPageRecord;
LogPageRecordPtr logPagePtr;
UintR cfirstfreeLogPage;
@@ -33,7 +33,6 @@ void Dblqh::initData()
chostFileSize = MAX_NDB_NODES;
clcpFileSize = ZNO_CONCURRENT_LCP;
clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
clogPageFileSize = ZLOG_PAGE_FILE_SIZE;
clfoFileSize = ZLFO_FILE_SIZE;
clogFileFileSize = 0;
clogPartFileSize = ZLOG_PART_FILE_SIZE;

@@ -176,8 +175,26 @@ Dblqh::Dblqh(const class Configuration & conf):
m_commitAckMarkerHash(m_commitAckMarkerPool),
c_scanTakeOverHash(c_scanRecordPool)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dblqh);

const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);

ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
&log_page_size);

/**
* Always set page size in half MBytes
*/
clogPageFileSize= (log_page_size / sizeof(LogPageRecord));
Uint32 mega_byte_part= clogPageFileSize & 15;
if (mega_byte_part != 0) {
jam();
clogPageFileSize+= (16 - mega_byte_part);
}
ndbout << "LQH: No of REDO pages = " << clogPageFileSize << endl;

addRecSignal(GSN_PACKED_SIGNAL, &Dblqh::execPACKED_SIGNAL);
addRecSignal(GSN_DEBUG_SIG, &Dblqh::execDEBUG_SIG);
addRecSignal(GSN_ATTRINFO, &Dblqh::execATTRINFO);
@@ -991,7 +991,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
FragrecordPtr tFragPtr;
tFragPtr.i = RNIL;
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
jam();
tFragPtr.i = tTablePtr.p->fragrec[i];

@@ -1916,7 +1916,7 @@ void Dblqh::removeTable(Uint32 tableId)
tabptr.i = tableId;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);

for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] != ZNIL) {
jam();

@@ -15864,7 +15864,7 @@ void Dblqh::deleteFragrec(Uint32 fragId)
{
Uint32 indexFound= RNIL;
fragptr.i = RNIL;
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];

@@ -15972,7 +15972,7 @@ void Dblqh::getFirstInLogQueue(Signal* signal)
/* ---------------------------------------------------------------- */
bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId)
{
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (UintR)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (UintR)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];

@@ -16394,7 +16394,7 @@ void Dblqh::initialiseTabrec(Signal* signal)
ptrAss(tabptr, tablerec);
tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
tabptr.p->usageCount = 0;
for (Uint32 i = 0; i <= (NO_OF_FRAG_PER_NODE - 1); i++) {
for (Uint32 i = 0; i <= (MAX_FRAG_PER_NODE - 1); i++) {
tabptr.p->fragid[i] = ZNIL;
tabptr.p->fragrec[i] = RNIL;
}//for

@@ -16716,7 +16716,7 @@ bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId)
return false;
}//if
seizeFragmentrec(signal);
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == ZNIL) {
jam();
@@ -542,13 +542,11 @@ public:
attrInfo(abp),
expectedTransIdAI(0),
transIdAI(abp),
tcIndxReq(new TcIndxReq()),
indexReadTcConnect(RNIL)
{}

~TcIndexOperation()
{
delete tcIndxReq;
}

// Index data

@@ -561,7 +559,7 @@ public:
Uint32 expectedTransIdAI;
AttributeBuffer transIdAI; // For accumulating TransId_AI

TcIndxReq* tcIndxReq;
TcIndxReq tcIndxReq;
UintR connectionIndex;
UintR indexReadTcConnect; //
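The TcIndexOperation change above swaps a heap-allocated TcIndxReq* (new in the constructor, delete in the destructor) for an embedded TcIndxReq member; that is why every indexOp->tcIndxReq->field in DbtcMain.cpp below becomes indexOp->tcIndxReq.field, and one allocation per index operation goes away. A stripped-down sketch of the before/after shape (Req is a placeholder, not the real signal class):

    // Sketch of the refactoring pattern: embed the request as a member instead
    // of owning it through a raw pointer.
    #include <cstdint>
    #include <cstdio>

    struct Req { uint32_t senderData; uint32_t requestInfo; };

    // Before (shape only): pointer member, new/delete per operation.
    struct IndexOpBefore {
      Req* req;
      IndexOpBefore() : req(new Req()) {}
      ~IndexOpBefore() { delete req; }
    };

    // After: the request lives inside the record itself.
    struct IndexOpAfter {
      Req req;   // no separate allocation, nothing to free
    };

    int main() {
      IndexOpAfter op;
      op.req.senderData = 7;                  // was op.req->senderData before
      std::printf("%u\n", (unsigned)op.req.senderData);
      return 0;
    }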
@@ -73,6 +73,7 @@ void Dbtc::initData()

void Dbtc::initRecords()
{
void *p;
// Records with dynamic sizes
cacheRecord = (CacheRecord*)allocRecord("CacheRecord",
sizeof(CacheRecord),

@@ -83,7 +84,7 @@ void Dbtc::initRecords()
capiConnectFilesize);

for(unsigned i = 0; i<capiConnectFilesize; i++) {
void * p = &apiConnectRecord[i];
p = &apiConnectRecord[i];
new (p) ApiConnectRecord(c_theFiredTriggerPool,
c_theSeizedIndexOperationPool);
}

@@ -91,7 +92,8 @@ void Dbtc::initRecords()
DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
FiredTriggerPtr tptr;
while(triggers.seize(tptr) == true) {
new (tptr.p) TcFiredTriggerData();
p= tptr.p;
new (p) TcFiredTriggerData();
}
triggers.release();

@@ -109,7 +111,8 @@ void Dbtc::initRecords()
ArrayList<TcIndexOperation> indexOps(c_theIndexOperationPool);
TcIndexOperationPtr ioptr;
while(indexOps.seize(ioptr) == true) {
new (ioptr.p) TcIndexOperation(c_theAttributeBufferPool);
p= ioptr.p;
new (p) TcIndexOperation(c_theAttributeBufferPool);
}
indexOps.release();

@@ -179,7 +182,6 @@ Dbtc::Dbtc(const class Configuration & conf):
c_maxNumberOfIndexOperations(0),
m_commitAckMarkerHash(m_commitAckMarkerPool)
{

BLOCK_CONSTRUCTOR(Dbtc);

const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();

@@ -191,7 +193,7 @@ Dbtc::Dbtc(const class Configuration & conf):

ndb_mgm_get_int_parameter(p, CFG_DB_TRANS_BUFFER_MEM,
&transactionBufferMemory);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEXES,
ndb_mgm_get_int_parameter(p, CFG_DB_NO_UNIQUE_HASH_INDEXES,
&maxNoOfIndexes);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEX_OPS,
&maxNoOfConcurrentIndexOperations);
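In initRecords() the records seized from the pools are initialised with placement new into slots that were already allocated; the change merely hoists a single void *p instead of declaring one per loop. A minimal sketch of constructing an object into preallocated storage with placement new:

    // Sketch: running a constructor in storage that was allocated elsewhere,
    // as Dbtc::initRecords() does for pool-seized trigger/index records.
    #include <cstdio>
    #include <new>       // placement new

    struct Record {
      int state;
      Record() : state(42) {}
    };

    int main() {
      // Stand-in for a slot handed out by a record pool.
      alignas(Record) unsigned char slot[sizeof(Record)];

      void* p = slot;                 // pointer into preallocated storage
      Record* r = new (p) Record();   // construct in place, no heap allocation

      std::printf("%d\n", r->state);
      r->~Record();                   // pools destroy their records explicitly
      return 0;
    }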
@@ -11033,7 +11033,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
indexOp->indexOpId = indexOpPtr.i;

// Save original signal
*indexOp->tcIndxReq = *tcIndxReq;
indexOp->tcIndxReq = *tcIndxReq;
indexOp->connectionIndex = TapiIndex;
regApiPtr->accumulatingIndexOp = indexOp->indexOpId;

@@ -11342,7 +11342,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;

@@ -11361,7 +11361,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Double TCKEYCONF, should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;

@@ -11382,7 +11382,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Copy reply from TcKeyConf

regApiPtr->noIndexOp--; // Decrease count
regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq->senderData;
regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData;
regApiPtr->tcIndxSendArray[Ttcindxrec + 1] =
tcKeyConf->operations[0].attrInfoLen;
regApiPtr->tcindxrec = Ttcindxrec + 2;

@@ -11415,7 +11415,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
}
const UintR TconnectIndex = indexOp->connectionIndex;
ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);

switch(indexOp->indexOpState) {

@@ -11445,7 +11445,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
// Send TCINDXREF

jam();
TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

regApiPtr->noIndexOp--; // Decrease count

@@ -11523,7 +11523,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Failed to allocate space for TransIdAI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;

@@ -11538,7 +11538,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;

@@ -11566,7 +11566,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Too many TRANSID_AI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndexRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndexRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;

@@ -11591,7 +11591,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
jam();
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;

@@ -11611,7 +11611,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
indexOpPtr.p = indexOp;
tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
tcRollbackRep->connectPtr = indexOp->tcIndxReq->senderData;
tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
sendSignal(apiConnectptr.p->ndbapiBlockref,
GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB);
}

@@ -11628,23 +11628,23 @@ void Dbtc::readIndexTable(Signal* signal,
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
AttributeBuffer::DataBufferIterator keyIter;
Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo);
TcIndexData* indexData;
Uint32 transId1 = indexOp->tcIndxReq->transId1;
Uint32 transId2 = indexOp->tcIndxReq->transId2;
Uint32 transId1 = indexOp->tcIndxReq.transId1;
Uint32 transId2 = indexOp->tcIndxReq.transId2;

const Operation_t opType =
(Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);

// Find index table
if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq->indexId)) == NULL) {
if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.indexId)) == NULL) {
jam();
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;

@@ -11656,7 +11656,7 @@ void Dbtc::readIndexTable(Signal* signal,
tcKeyReq->transId2 = transId2;
tcKeyReq->tableId = indexData->indexId;
tcKeyLength += MIN(keyLength, keyBufSize);
tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq->indexSchemaVersion;
tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.indexSchemaVersion;
TcKeyReq::setOperationType(tcKeyRequestInfo,
opType == ZREAD ? opType : ZREAD_EX);
TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Allways send one AttrInfo

@@ -11705,7 +11705,7 @@ void Dbtc::readIndexTable(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();

keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = transId1;
keyInfo->transId[1] = transId2;
dataPtr = (Uint32 *) &keyInfo->keyData;

@@ -11745,7 +11745,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
Uint32 attrBufSize = 5;
Uint32 dataPos = 0;
TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;

@@ -11761,7 +11761,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();

tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;

@@ -11861,7 +11861,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();

keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = regApiPtr->transid[0];
keyInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &keyInfo->keyData;

@@ -11897,7 +11897,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
Uint32 attrInfoPos = 0;

attrInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
attrInfo->transId[0] = regApiPtr->transid[0];
attrInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &attrInfo->attrData;
@@ -85,21 +85,12 @@ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */
#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/
#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */
#define ZNO_OF_FRAGREC 64 /* SIZE OF FRAGMENT FILE. */
#define ZNO_OF_LCP_REC 10 /* NUMBER OF CONCURRENT CHECKPOINTS*/
#define ZNO_OF_OPREC 116 /* SIZE OF OPERATION RECORD FILE */
#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */
#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE
#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */
#define ZNO_OF_PARALLELL_UNDO_FILES 16 /* NUMBER OF PARALLEL UNDO FILES */
#define ZNO_OF_RESTART_INFO_REC 10 /* MAXIMUM PARALLELL RESTART INFOS */
#define ZNO_OF_TAB_DESCR_REC 484 /* SIZE OF TABLE DESCRIPTOR FILE */
#define ZNO_OF_TABLEREC 16 /* SIZE OF TABLE RECORD FILE. */
#ifdef NDB_OSE
#define ZNO_OF_UNDO_PAGE 80 // Must be multiple of 8
#else
#define ZNO_OF_UNDO_PAGE 500 // Must be multiple of 8
#endif
/* 24 SEGMENTS WITH 8 PAGES IN EACH*/
/* PLUS ONE UNDO BUFFER CACHE */
// Undo record identifiers are 32-bits with page index 13-bits

@@ -823,8 +814,8 @@ struct Tablerec {
// List of ordered indexes
ArrayList<TupTriggerData> tuxCustomTriggers;

Uint32 fragid[2 * NO_OF_FRAG_PER_NODE];
Uint32 fragrec[2 * NO_OF_FRAG_PER_NODE];
Uint32 fragid[2 * MAX_FRAG_PER_NODE];
Uint32 fragrec[2 * MAX_FRAG_PER_NODE];

struct {
Uint32 tabUserPtr;
@@ -44,16 +44,10 @@ void Dbtup::initData()
cnoOfLcpRec = ZNO_OF_LCP_REC;
cnoOfConcurrentOpenOp = ZNO_OF_CONCURRENT_OPEN_OP;
cnoOfConcurrentWriteOp = ZNO_OF_CONCURRENT_WRITE_OP;
cnoOfFragoprec = 2 * NO_OF_FRAG_PER_NODE;
cnoOfFragrec = ZNO_OF_FRAGREC;
cnoOfOprec = ZNO_OF_OPREC;
cnoOfPage = ZNO_OF_PAGE;
cnoOfFragoprec = 2 * MAX_FRAG_PER_NODE;
cnoOfPageRangeRec = ZNO_OF_PAGE_RANGE_REC;
cnoOfParallellUndoFiles = ZNO_OF_PARALLELL_UNDO_FILES;
cnoOfRestartInfoRec = ZNO_OF_RESTART_INFO_REC;
cnoOfTablerec = ZNO_OF_TABLEREC;
cnoOfTabDescrRec = ZNO_OF_TAB_DESCR_REC;
cnoOfUndoPage = ZNO_OF_UNDO_PAGE;
c_maxTriggersPerTable = ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE;
c_noOfBuildIndexRec = 32;

@@ -83,9 +77,26 @@ Dbtup::Dbtup(const class Configuration & conf)
c_storedProcPool(),
c_buildIndexList(c_buildIndexPool)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbtup);

const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);

ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_DATA_BUFFER,
&log_page_size);

/**
* Always set page size in half MBytes
*/
cnoOfUndoPage= (log_page_size / sizeof(UndoPage));
Uint32 mega_byte_part= cnoOfUndoPage & 15;
if (mega_byte_part != 0) {
jam();
cnoOfUndoPage+= (16 - mega_byte_part);
}
ndbout << "TUP: No of Undo Pages = " << cnoOfUndoPage << endl;

addRecSignal(GSN_DEBUG_SIG, &Dbtup::execDEBUG_SIG);
addRecSignal(GSN_CONTINUEB, &Dbtup::execCONTINUEB);

@@ -1049,7 +1060,7 @@ void Dbtup::initializeTablerec()
void
Dbtup::initTab(Tablerec* const regTabPtr)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
regTabPtr->fragid[i] = RNIL;
regTabPtr->fragrec[i] = RNIL;
}//for

@@ -1160,7 +1171,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
return;
}//Dbtup::execTUPSEIZEREQ()

#define printFragment(t){ for(Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE);i++){\
#define printFragment(t){ for(Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE);i++){\
ndbout_c("table = %d fragid[%d] = %d fragrec[%d] = %d", \
t.i, t.p->fragid[i], i, t.p->fragrec[i]); }}
@@ -349,14 +349,14 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
do {
// get fragment
FragrecordPtr fragPtr;
if (buildPtr.p->m_fragNo == 2 * NO_OF_FRAG_PER_NODE) {
if (buildPtr.p->m_fragNo == 2 * MAX_FRAG_PER_NODE) {
ljam();
// build ready
buildIndexReply(signal, buildPtr.p);
c_buildIndexList.release(buildPtr);
return;
}
ndbrequire(buildPtr.p->m_fragNo < 2 * NO_OF_FRAG_PER_NODE);
ndbrequire(buildPtr.p->m_fragNo < 2 * MAX_FRAG_PER_NODE);
fragPtr.i = tablePtr.p->fragrec[buildPtr.p->m_fragNo];
if (fragPtr.i == RNIL) {
ljam();
@@ -188,7 +188,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
/* -------------------------------------------------------------------- */
bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == RNIL) {
ljam();

@@ -202,7 +202,7 @@ bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIn

void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();

@@ -456,7 +456,7 @@ void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)

void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();

@@ -515,7 +515,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId)
Uint32 fragIndex = RNIL;
Uint32 fragId = RNIL;
Uint32 i = 0;
for (i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (tabPtr.p->fragid[i] != RNIL) {
ljam();
@@ -510,7 +510,7 @@ AsyncFile::extendfile(Request* request) {
DEBUG(ndbout_c("extendfile: maxOffset=%d, size=%d", maxOffset, maxSize));

// Allocate a buffer and fill it with zeros
void* pbuf = malloc(maxSize);
void* pbuf = NdbMem_Allocate(maxSize);
memset(pbuf, 0, maxSize);
for (int p = 0; p <= maxOffset; p = p + maxSize) {
int return_value;
@@ -67,7 +67,7 @@ Ndbfs::Ndbfs(const Configuration & conf) :
//ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles);

// Create idle AsyncFiles
Uint32 noIdleFiles = 16;
Uint32 noIdleFiles = 27;
for (Uint32 i = 0; i < noIdleFiles; i++){
theIdleFiles.push_back(createAsyncFile());
}
@@ -289,7 +289,7 @@ Configuration::setupConfiguration(){
if(pFileSystemPath[strlen(pFileSystemPath) - 1] == '/')
_fsPath = strdup(pFileSystemPath);
else {
_fsPath = (char *)malloc(strlen(pFileSystemPath) + 2);
_fsPath = (char *)NdbMem_Allocate(strlen(pFileSystemPath) + 2);
strcpy(_fsPath, pFileSystemPath);
strcat(_fsPath, "/");
}

@@ -385,7 +385,8 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
char buf[255];

unsigned int noOfTables = 0;
unsigned int noOfIndexes = 0;
unsigned int noOfUniqueHashIndexes = 0;
unsigned int noOfOrderedIndexes = 0;
unsigned int noOfReplicas = 0;
unsigned int noOfDBNodes = 0;
unsigned int noOfAPINodes = 0;

@@ -393,33 +394,28 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
unsigned int noOfNodes = 0;
unsigned int noOfAttributes = 0;
unsigned int noOfOperations = 0;
unsigned int noOfLocalOperations = 0;
unsigned int noOfTransactions = 0;
unsigned int noOfIndexPages = 0;
unsigned int noOfDataPages = 0;
unsigned int noOfScanRecords = 0;
unsigned int noOfLocalScanRecords = 0;
unsigned int noBatchSize = 0;
m_logLevel = new LogLevel();

/**
* {"NoOfConcurrentCheckpointsDuringRestart", &cd.ispValues[1][5] },
* {"NoOfConcurrentCheckpointsAfterRestart", &cd.ispValues[2][4] },
* {"NoOfConcurrentProcessesHandleTakeover", &cd.ispValues[1][7] },
* {"TimeToWaitAlive", &cd.ispValues[0][0] },
*/
struct AttribStorage { int paramId; Uint32 * storage; };
AttribStorage tmp[] = {
{ CFG_DB_NO_SCANS, &noOfScanRecords },
{ CFG_DB_NO_LOCAL_SCANS, &noOfLocalScanRecords },
{ CFG_DB_BATCH_SIZE, &noBatchSize },
{ CFG_DB_NO_TABLES, &noOfTables },
{ CFG_DB_NO_INDEXES, &noOfIndexes },
{ CFG_DB_NO_ORDERED_INDEXES, &noOfOrderedIndexes },
{ CFG_DB_NO_UNIQUE_HASH_INDEXES, &noOfUniqueHashIndexes },
{ CFG_DB_NO_REPLICAS, &noOfReplicas },
{ CFG_DB_NO_ATTRIBUTES, &noOfAttributes },
{ CFG_DB_NO_OPS, &noOfOperations },
{ CFG_DB_NO_LOCAL_OPS, &noOfLocalOperations },
{ CFG_DB_NO_TRANSACTIONS, &noOfTransactions }
#if 0
{ "NoOfDiskPagesToDiskDuringRestartTUP", &cd.ispValues[3][8] },
{ "NoOfDiskPagesToDiskAfterRestartTUP", &cd.ispValues[3][9] },
{ "NoOfDiskPagesToDiskDuringRestartACC", &cd.ispValues[3][10] },
{ "NoOfDiskPagesToDiskAfterRestartACC", &cd.ispValues[3][11] },
#endif
};

ndb_mgm_configuration_iterator db(*(ndb_mgm_configuration*)ownConfig, 0);

@@ -518,31 +514,32 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
*/
ConfigValuesFactory cfg(ownConfig);

noOfTables++; // Remove impact of system table
noOfTables += noOfIndexes; // Indexes are tables too
noOfAttributes += 2; // ---"----
noOfTables *= 2; // Remove impact of Dict need 2 ids for each table
noOfTables+= 2; // Add System tables
noOfAttributes += 5; // Add System table attributes

if (noOfDBNodes > 15) {
noOfDBNodes = 15;
}//if
Uint32 noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
if (noOfLocalScanRecords == 0) {
noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
}
if (noOfLocalOperations == 0) {
noOfLocalOperations= (11 * noOfOperations) / 10;
}
Uint32 noOfTCScanRecords = noOfScanRecords;

{
Uint32 noOfAccTables= noOfTables + noOfUniqueHashIndexes;
/**
* Acc Size Alt values
*/
// Can keep 65536 pages (= 0.5 GByte)
cfg.put(CFG_ACC_DIR_RANGE,
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);

cfg.put(CFG_ACC_DIR_ARRAY,
(noOfIndexPages >> 8) +
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);

cfg.put(CFG_ACC_FRAGMENT,
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);

/*-----------------------------------------------------------------------*/
// The extra operation records added are used by the scan and node

@@ -552,25 +549,27 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
// The remainder are allowed for use by the scan processes.
/*-----------------------------------------------------------------------*/
cfg.put(CFG_ACC_OP_RECS,
((11 * noOfOperations) / 10 + 50) +
(noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) +
(noOfLocalOperations + 50) +
(noOfLocalScanRecords * noBatchSize) +
NODE_RECOVERY_SCAN_OP_RECORDS);

cfg.put(CFG_ACC_OVERFLOW_RECS,
noOfIndexPages +
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);

cfg.put(CFG_ACC_PAGE8,
noOfIndexPages + 32);

cfg.put(CFG_ACC_ROOT_FRAG,
NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);

cfg.put(CFG_ACC_TABLE, noOfTables);
cfg.put(CFG_ACC_TABLE, noOfAccTables);

cfg.put(CFG_ACC_SCAN, noOfLocalScanRecords);
}

Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes +
noOfUniqueHashIndexes;
{
/**
* Dict Size Alt values

@@ -579,7 +578,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfAttributes);

cfg.put(CFG_DICT_TABLE,
noOfTables);
noOfMetaTables);
}

{

@@ -593,7 +592,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfOperations + noOfTransactions + 46);

cfg.put(CFG_DIH_FRAG_CONNECT,
NO_OF_FRAG_PER_NODE * noOfTables * noOfDBNodes);
NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfDBNodes);

int temp;
temp = noOfReplicas - 2;

@@ -603,14 +602,14 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
temp++;
cfg.put(CFG_DIH_MORE_NODES,
temp * NO_OF_FRAG_PER_NODE *
noOfTables * noOfDBNodes);
noOfMetaTables * noOfDBNodes);

cfg.put(CFG_DIH_REPLICAS,
NO_OF_FRAG_PER_NODE * noOfTables *
NO_OF_FRAG_PER_NODE * noOfMetaTables *
noOfDBNodes * noOfReplicas);

cfg.put(CFG_DIH_TABLE,
noOfTables);
noOfMetaTables);
}

{

@@ -618,13 +617,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Lqh Size Alt values
*/
cfg.put(CFG_LQH_FRAG,
NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfReplicas);

cfg.put(CFG_LQH_TABLE,
noOfTables);
noOfMetaTables);

cfg.put(CFG_LQH_TC_CONNECT,
(11 * noOfOperations) / 10 + 50);
noOfLocalOperations + 50);

cfg.put(CFG_LQH_SCAN,
noOfLocalScanRecords);

@@ -641,7 +640,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
(2 * noOfOperations) + 16 + noOfTransactions);

cfg.put(CFG_TC_TABLE,
noOfTables);
noOfMetaTables);

cfg.put(CFG_TC_LOCAL_SCAN,
noOfLocalScanRecords);

@@ -655,23 +654,23 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tup Size Alt values
*/
cfg.put(CFG_TUP_FRAG,
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);

cfg.put(CFG_TUP_OP_RECS,
(11 * noOfOperations) / 10 + 50);
noOfLocalOperations + 50);

cfg.put(CFG_TUP_PAGE,
noOfDataPages);

cfg.put(CFG_TUP_PAGE_RANGE,
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);

cfg.put(CFG_TUP_TABLE,
noOfTables);
noOfMetaTables);

cfg.put(CFG_TUP_TABLE_DESC,
4 * NO_OF_FRAG_PER_NODE * noOfAttributes* noOfReplicas +
12 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas );
12 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas );

cfg.put(CFG_TUP_STORED_PROC,
noOfLocalScanRecords);

@@ -682,13 +681,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tux Size Alt values
*/
cfg.put(CFG_TUX_INDEX,
noOfTables);
noOfOrderedIndexes);

cfg.put(CFG_TUX_FRAGMENT,
2 * NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfOrderedIndexes * noOfReplicas);

cfg.put(CFG_TUX_ATTRIBUTE,
noOfIndexes * 4);
noOfOrderedIndexes * 4);

cfg.put(CFG_TUX_SCAN_OP, noOfLocalScanRecords);
}
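With the new parameters wired in, calcSizeAlt() treats 0 as "not configured" for MaxNoOfLocalOperations and MaxNoOfLocalScans: local operation records fall back to 110% of MaxNoOfConcurrentOperations, and local scan records fall back to noOfDBNodes * noOfScanRecords + 1. A small worked sketch of those two fallbacks (the input numbers are example values, not defaults taken from this changeset):

    // Worked sketch of the derived defaults used in Configuration::calcSizeAlt
    // when MaxNoOfLocalOperations / MaxNoOfLocalScans are left at 0.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t noOfOperations       = 32768;  // MaxNoOfConcurrentOperations (example)
      uint32_t noOfScanRecords      = 256;    // MaxNoOfConcurrentScans (example)
      uint32_t noOfDBNodes          = 4;      // example cluster size

      uint32_t noOfLocalOperations  = 0;      // 0 = not set in config
      uint32_t noOfLocalScanRecords = 0;

      if (noOfLocalOperations == 0)
        noOfLocalOperations = (11 * noOfOperations) / 10;           // +10% headroom

      if (noOfLocalScanRecords == 0)
        noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;

      std::printf("local ops = %u, local scans = %u\n",
                  (unsigned)noOfLocalOperations, (unsigned)noOfLocalScanRecords);
      return 0;
    }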
@@ -147,7 +147,8 @@
// in future version since small tables want small value and large tables
// need large value.
/* ------------------------------------------------------------------------- */
#define NO_OF_FRAG_PER_NODE 8
#define NO_OF_FRAG_PER_NODE 1
#define MAX_FRAG_PER_NODE (NO_OF_FRAG_PER_NODE * MAX_REPLICAS)

/* ---------------------------------------------------------------- */
// To avoid synching too big chunks at a time we synch after writing
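The ndb_limits.h hunk is what the block changes above build on: NO_OF_FRAG_PER_NODE drops from 8 to 1, and the per-table fragment arrays are now sized with MAX_FRAG_PER_NODE = NO_OF_FRAG_PER_NODE * MAX_REPLICAS. A tiny sketch of that relationship (the MAX_REPLICAS value below is an assumed example, not taken from this commit):

    // Sketch of the new fragment-array sizing. MAX_REPLICAS is assumed to be 4
    // here purely for illustration; only the formula comes from the hunk above.
    #include <cstdio>

    #define NO_OF_FRAG_PER_NODE 1
    #define MAX_REPLICAS 4 /* assumed example value */
    #define MAX_FRAG_PER_NODE (NO_OF_FRAG_PER_NODE * MAX_REPLICAS)

    int main() {
      unsigned fragid[MAX_FRAG_PER_NODE];   // shape of Tabrec::fragholder etc. above
      std::printf("fragment slots per table per node: %u\n",
                  (unsigned)(sizeof(fragid) / sizeof(fragid[0])));
      return 0;
    }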