BitKeeper/etc/logging_ok:
  auto-union
ndb/include/mgmapi/mgmapi_config_parameters.h:
  Auto merged
ndb/src/kernel/blocks/dbacc/Dbacc.hpp:
  Auto merged
ndb/src/kernel/blocks/dbacc/DbaccInit.cpp:
  Auto merged
ndb/src/kernel/blocks/dbacc/DbaccMain.cpp:
  Auto merged
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
  Auto merged
ndb/src/kernel/blocks/dblqh/Dblqh.hpp:
  Auto merged
ndb/src/kernel/blocks/dblqh/DblqhInit.cpp:
  Auto merged
ndb/src/kernel/blocks/dblqh/DblqhMain.cpp:
  Auto merged
ndb/src/kernel/blocks/dbtc/Dbtc.hpp:
  Auto merged
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:
  Auto merged
ndb/src/kernel/blocks/dbtup/Dbtup.hpp:
  Auto merged
ndb/src/kernel/blocks/suma/Suma.cpp:
  Auto merged
Author: unknown
Date:   2004-08-25 12:17:43 +02:00
Commit: db2343e8d7
43 changed files with 370 additions and 227 deletions

View file

@ -74,7 +74,7 @@ FsCloseReq::getRemoveFileFlag(const UintR & fileflag){
inline
void
FsCloseReq::setRemoveFileFlag(UintR & fileflag, bool removefile){
ASSERT_BOOL(removefile, "FsCloseReq::setRemoveFileFlag");
// ASSERT_BOOL(removefile, "FsCloseReq::setRemoveFileFlag");
if (removefile == true)
fileflag = 1;
else

View file

@ -105,8 +105,6 @@ public:
enum ErrorCode {
NoError = 0,
InvalidRequest = 800,
NoFreeFragmentOper = 830,
NoFreeIndexFragment = 852,
NoFreeFragment = 604,
NoFreeAttributes = 827
};

View file

@ -77,6 +77,18 @@
#define CFG_DB_DISCLESS 148
#define CFG_DB_NO_ORDERED_INDEXES 149
#define CFG_DB_NO_UNIQUE_HASH_INDEXES 150
#define CFG_DB_NO_LOCAL_OPS 151
#define CFG_DB_NO_LOCAL_SCANS 152
#define CFG_DB_BATCH_SIZE 153
#define CFG_DB_UNDO_INDEX_BUFFER 154
#define CFG_DB_UNDO_DATA_BUFFER 155
#define CFG_DB_REDO_BUFFER 156
#define CFG_DB_LONG_SIGNAL_BUFFER 157
#define CFG_NODE_ARBIT_RANK 200
#define CFG_NODE_ARBIT_DELAY 201

View file

@ -416,12 +416,36 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"8",
STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_NO_ORDERED_INDEXES,
"MaxNoOfOrderedIndexes",
"DB",
"Total number of ordered indexes that can be defined in the system",
ConfigInfo::USED,
false,
ConfigInfo::INT,
"128",
"0",
MAX_INT_RNIL_STRING },
{
CFG_DB_NO_UNIQUE_HASH_INDEXES,
"MaxNoOfUniqueHashIndexes",
"DB",
"Total number of unique hash indexes that can be defined in the system",
ConfigInfo::USED,
false,
ConfigInfo::INT,
"64",
"0",
MAX_INT_RNIL_STRING },
{
CFG_DB_NO_INDEXES,
"MaxNoOfIndexes",
"DB",
"Total number of indexes that can be defined in the system",
ConfigInfo::USED,
ConfigInfo::DEPRICATED,
false,
ConfigInfo::INT,
"128",
@ -540,13 +564,49 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
CFG_DB_NO_OPS,
"MaxNoOfConcurrentOperations",
"DB",
"Max no of op:s on DB (op:s within a transaction are concurrently executed)",
"Max number of operation records in transaction coordinator",
ConfigInfo::USED,
false,
ConfigInfo::INT,
"32K",
"32k",
"32",
STR_VALUE(MAX_INT_RNIL) },
MAX_INT_RNIL_STRING },
{
CFG_DB_NO_LOCAL_OPS,
"MaxNoOfLocalOperations",
"DB",
"Max number of operation records defined in the local storage node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
UNDEFINED,
"32",
MAX_INT_RNIL_STRING },
{
CFG_DB_NO_LOCAL_SCANS,
"MaxNoOfLocalScans",
"DB",
"Max number of fragment scans in parallel in the local storage node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
UNDEFINED,
"32",
MAX_INT_RNIL_STRING },
{
CFG_DB_BATCH_SIZE,
"BatchSizePerLocalScan",
"DB",
"Used to calculate the number of lock records for scan with hold lock",
ConfigInfo::USED,
false,
ConfigInfo::INT,
STR_VALUE(DEF_BATCH_SIZE),
"1",
STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) },
{
CFG_DB_NO_TRANSACTIONS,
@ -608,6 +668,54 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"1M",
"1024G" },
{
CFG_DB_UNDO_INDEX_BUFFER,
"UndoIndexBuffer",
"DB",
"Number bytes on each DB node allocated for writing UNDO logs for index part",
ConfigInfo::USED,
false,
ConfigInfo::INT,
"2M",
"1M",
MAX_INT_RNIL_STRING},
{
CFG_DB_UNDO_DATA_BUFFER,
"UndoDataBuffer",
"DB",
"Number bytes on each DB node allocated for writing UNDO logs for data part",
ConfigInfo::USED,
false,
ConfigInfo::INT,
"16M",
"1M",
MAX_INT_RNIL_STRING},
{
CFG_DB_REDO_BUFFER,
"RedoBuffer",
"DB",
"Number bytes on each DB node allocated for writing REDO logs",
ConfigInfo::USED,
false,
ConfigInfo::INT,
"8M",
"1M",
MAX_INT_RNIL_STRING},
{
CFG_DB_LONG_SIGNAL_BUFFER,
"LongMessageBuffer",
"DB",
"Number bytes on each DB node allocated for internal long messages",
ConfigInfo::USED,
false,
ConfigInfo::INT,
"1M",
"512k",
MAX_INT_RNIL_STRING},
{
CFG_DB_START_PARTIAL_TIMEOUT,
"StartPartialTimeout",

View file

@ -18,6 +18,7 @@
#include <NdbEnv.h>
#include <NdbConfig.h>
#include <NdbAutoPtr.hpp>
#include <NdbMem.h>
LocalConfig::LocalConfig(){
error_line = 0; error_msg[0] = 0;
@ -242,7 +243,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError)
}
int sz = 1024;
char* theString = (char*)malloc(sz);
char* theString = (char*)NdbMem_Allocate(sz);
theString[0] = 0;
fgets(theString, sz, file);
@ -250,7 +251,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError)
line[0] = ';';
while (strlen(theString) + strlen(line) >= sz) {
sz = sz*2;
char *newString = (char*)malloc(sz);
char *newString = (char*)NdbMem_Allocate(sz);
strcpy(newString, theString);
free(theString);
theString = newString;

View file

@ -17,6 +17,7 @@
#include <ndb_global.h>
#include <NdbConfig.h>
#include <NdbEnv.h>
#include <NdbMem.h>
static char*
NdbConfig_AllocHomePath(int _len)
@ -30,7 +31,7 @@ NdbConfig_AllocHomePath(int _len)
path_len= strlen(path);
len+= path_len;
buf= malloc(len);
buf= NdbMem_Allocate(len);
if (path_len > 0)
snprintf(buf, len, "%s%c", path, DIR_SEPARATOR);
else
@ -48,7 +49,7 @@ NdbConfig_NdbCfgName(int with_ndb_home){
buf= NdbConfig_AllocHomePath(128);
len= strlen(buf);
} else
buf= malloc(128);
buf= NdbMem_Allocate(128);
snprintf(buf+len, 128, "Ndb.cfg");
return buf;
}

View file

@ -20,6 +20,7 @@
#include <NdbCondition.h>
#include <NdbThread.h>
#include <NdbMutex.h>
#include <NdbMem.h>
struct NdbCondition
{
@ -34,7 +35,7 @@ NdbCondition_Create(void)
struct NdbCondition* tmpCond;
int result;
tmpCond = (struct NdbCondition*)malloc(sizeof(struct NdbCondition));
tmpCond = (struct NdbCondition*)NdbMem_Allocate(sizeof(struct NdbCondition));
if (tmpCond == NULL)
return NULL;

View file

@ -31,10 +31,13 @@ void NdbMem_Destroy()
return;
}
void* NdbMem_Allocate(size_t size)
{
void* mem_allocated;
assert(size > 0);
return (void*)malloc(size);
mem_allocated= (void*)malloc(size);
return mem_allocated;
}
void* NdbMem_AllocateAlign(size_t size, size_t alignment)

View file

@ -19,13 +19,14 @@
#include <NdbThread.h>
#include <NdbMutex.h>
#include <NdbMem.h>
NdbMutex* NdbMutex_Create(void)
{
NdbMutex* pNdbMutex;
int result;
pNdbMutex = (NdbMutex*)malloc(sizeof(NdbMutex));
pNdbMutex = (NdbMutex*)NdbMem_Allocate(sizeof(NdbMutex));
if (pNdbMutex == NULL)
return NULL;

View file

@ -18,6 +18,7 @@
#include <ndb_global.h>
#include <NdbThread.h>
#include <pthread.h>
#include <NdbMem.h>
#define MAX_THREAD_NAME 16
@ -44,7 +45,7 @@ struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func,
if (p_thread_func == NULL)
return 0;
tmpThread = (struct NdbThread*)malloc(sizeof(struct NdbThread));
tmpThread = (struct NdbThread*)NdbMem_Allocate(sizeof(struct NdbThread));
if (tmpThread == NULL)
return NULL;

View file

@ -1,5 +1,6 @@
#include <ndb_global.h>
#include <NdbMem.h>
extern "C" {
void (* ndb_new_handler)() = 0;
@ -9,7 +10,7 @@ extern "C" {
void *operator new (size_t sz)
{
void * p = malloc (sz ? sz : 1);
void * p = NdbMem_Allocate(sz ? sz : 1);
if(p)
return p;
if(ndb_new_handler)
@ -19,7 +20,7 @@ void *operator new (size_t sz)
void *operator new[] (size_t sz)
{
void * p = (void *) malloc (sz ? sz : 1);
void * p = (void *) NdbMem_Allocate(sz ? sz : 1);
if(p)
return p;
if(ndb_new_handler)
@ -30,13 +31,13 @@ void *operator new[] (size_t sz)
void operator delete (void *ptr)
{
if (ptr)
free(ptr);
NdbMem_Free(ptr);
}
void operator delete[] (void *ptr) throw ()
{
if (ptr)
free(ptr);
NdbMem_Free(ptr);
}
#endif // USE_MYSYS_NEW

View file

@ -45,16 +45,9 @@ Backup::Backup(const Configuration & conf) :
ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
// To allow for user tables AND SYSTAB
// See ClusterConfig
//TODO get this infor from NdbCntr
noTables += 2;
// Considering also TR527, this is a KISS work-around to be able to
// continue testing the real thing
noAttribs += 2 + 1;
noAttribs++; //RT 527 bug fix
c_backupPool.setSize(noBackups);
c_backupFilePool.setSize(3 * noBackups);
c_tablePool.setSize(noBackups * noTables);

View file

@ -54,6 +54,16 @@ Cmvmi::Cmvmi(const Configuration & conf) :
{
BLOCK_CONSTRUCTOR(Cmvmi);
Uint32 long_sig_buffer_size;
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_LONG_SIGNAL_BUFFER,
&long_sig_buffer_size);
long_sig_buffer_size= long_sig_buffer_size / 256;
g_sectionSegmentPool.setSize(long_sig_buffer_size);
// Add received signals
addRecSignal(GSN_CONNECT_REP, &Cmvmi::execCONNECT_REP);
addRecSignal(GSN_DISCONNECT_REP, &Cmvmi::execDISCONNECT_REP);
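
The division above is the entire sizing rule: the configured LongMessageBuffer (in bytes) is carved into 256-byte section segments. A minimal illustrative sketch of that arithmetic follows; the 256-byte segment size is inferred from the divisor, and the helper name is made up for the example.

#include <cstdio>

// Illustration only: number of long-signal section segments obtained from a
// LongMessageBuffer of the given size, mirroring the division above.
static unsigned sectionSegments(unsigned longMessageBufferBytes)
{
  return longMessageBufferBytes / 256;
}

int main()
{
  // The 1M default from ConfigInfo.cpp gives 4096 segments, replacing the
  // hard-coded setSize(2048) that is removed from the section segment pool
  // initialisation later in this commit.
  printf("%u\n", sectionSegments(1024 * 1024)); // 4096
  return 0;
}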

View file

@ -194,7 +194,6 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
#define ZTABLESIZE 16
#define ZTABMAXINDEX 3
#define ZUNDEFINED_OP 6
#define ZUNDOPAGESIZE 64
#define ZUNDOHEADSIZE 7
#define ZUNLOCKED 1
#define ZUNDOPAGE_BASE_ADD 2
@ -894,8 +893,8 @@ struct SrVersionRec {
/* TABREC */
/* --------------------------------------------------------------------------------- */
struct Tabrec {
Uint32 fragholder[NO_OF_FRAG_PER_NODE];
Uint32 fragptrholder[NO_OF_FRAG_PER_NODE];
Uint32 fragholder[MAX_FRAG_PER_NODE];
Uint32 fragptrholder[MAX_FRAG_PER_NODE];
Uint32 tabUserPtr;
BlockReference tabUserRef;
};

View file

@ -32,7 +32,6 @@ void Dbacc::initData()
crootfragmentsize = ZROOTFRAGMENTSIZE;
cdirrangesize = ZDIRRANGESIZE;
coverflowrecsize = ZOVERFLOWRECSIZE;
cundopagesize = ZUNDOPAGESIZE;
cfsConnectsize = ZFS_CONNECTSIZE;
cfsOpsize = ZFS_OPSIZE;
cscanRecSize = ZSCAN_REC_SIZE;
@ -136,8 +135,25 @@ void Dbacc::initRecords()
Dbacc::Dbacc(const class Configuration & conf):
SimulatedBlock(DBACC, conf)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbacc);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_INDEX_BUFFER,
&log_page_size);
/**
* Always set page size in half MBytes
*/
cundopagesize= (log_page_size / sizeof(Undopage));
Uint32 mega_byte_part= cundopagesize & 15;
if (mega_byte_part != 0) {
jam();
cundopagesize+= (16 - mega_byte_part);
}
// Transit signals
addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD);
addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG);
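
For reference, a standalone sketch of the rounding done above: the UndoIndexBuffer byte count becomes a page count, which is then rounded up to the next multiple of 16 pages; that is what the "half MBytes" comment means if one page is 32 KB (the 32 KB figure is an inference from that comment, not quoted from Dbacc.hpp). Dblqh (RedoBuffer) and Dbtup (UndoDataBuffer) apply the same rounding further down, using sizeof(LogPageRecord) and sizeof(UndoPage) as the page size.

#include <cassert>
#include <cstdio>

// Illustrative sketch of the buffer-to-pages rounding above. pageBytes
// defaults to 32 KB on the assumption that 16 pages equal half a megabyte,
// as the in-code comment implies.
static unsigned bufferPages(unsigned bufferBytes, unsigned pageBytes = 32 * 1024)
{
  assert(pageBytes > 0);
  unsigned pages = bufferBytes / pageBytes;    // truncating division, as above
  unsigned rest  = pages & 15;                 // pages modulo 16
  if (rest != 0)
    pages += 16 - rest;                        // round up to a multiple of 16
  return pages;
}

int main()
{
  printf("%u\n", bufferPages(2 * 1024 * 1024));              // 2M default -> 64
  printf("%u\n", bufferPages(2 * 1024 * 1024 + 256 * 1024)); // 2.25M -> 80 (2.5M)
  return 0;
}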

View file

@ -1021,7 +1021,7 @@ void Dbacc::initialiseTableRec(Signal* signal)
for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
refresh_watch_dog();
ptrAss(tabptr, tabrec);
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
tabptr.p->fragholder[i] = RNIL;
tabptr.p->fragptrholder[i] = RNIL;
}//for
@ -1187,7 +1187,7 @@ void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
TabrecPtr tabPtr;
tabPtr.i = tableId;
ptrCheckGuard(tabPtr, ctablesize, tabrec);
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabPtr.p->fragholder[i] != RNIL) {
jam();
@ -1419,7 +1419,7 @@ void Dbacc::execFSREMOVEREF(Signal* signal)
/* -------------------------------------------------------------------------- */
bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid)
{
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == RNIL) {
jam();
@ -2435,7 +2435,7 @@ void Dbacc::execACC_LOCKREQ(Signal* signal)
ptrCheckGuard(tabptr, ctablesize, tabrec);
// find fragment (TUX will know it)
if (req->fragPtrI == RNIL) {
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragptrholder[i] != RNIL) {
rootfragrecptr.i = tabptr.p->fragptrholder[i];
@ -12184,7 +12184,7 @@ void Dbacc::takeOutReadyScanQueue(Signal* signal)
bool Dbacc::getrootfragmentrec(Signal* signal, RootfragmentrecPtr& rootPtr, Uint32 fid)
{
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == fid) {
jam();

View file

@ -6196,7 +6196,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
}
if(err)
break;
NodeGroupRecordPtr NGPtr;
TabRecordPtr primTabPtr;
if (primaryTableId == RNIL) {

View file

@ -64,25 +64,12 @@
/* CONSTANTS OF THE LOG PAGES */
/* ------------------------------------------------------------------------- */
#define ZPAGE_HEADER_SIZE 32
#if defined NDB_OSE
/**
* Set the fragment log file size to 2Mb in OSE
* This is done in order to speed up the initial start
*/
#define ZNO_MBYTES_IN_FILE 2
#define ZPAGE_SIZE 2048
#define ZPAGES_IN_MBYTE 128
#define ZTWOLOG_NO_PAGES_IN_MBYTE 7
#define ZTWOLOG_PAGE_SIZE 11
#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
#else
#define ZNO_MBYTES_IN_FILE 16
#define ZPAGE_SIZE 8192
#define ZPAGES_IN_MBYTE 32
#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
#define ZTWOLOG_PAGE_SIZE 13
#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
#endif
#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config)
#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log
@ -1840,11 +1827,7 @@ public:
* - There is no more information needed.
* The next mbyte will always refer to the start of the next mbyte.
*/
#ifdef NDB_OSE
UintR logPageWord[2048]; // Size 8 kbytes
#else
UintR logPageWord[8192]; // Size 32 kbytes
#endif
};
typedef Ptr<LogPageRecord> LogPageRecordPtr;
@ -1866,8 +1849,8 @@ public:
PREP_DROP_TABLE_DONE = 4
};
UintR fragrec[NO_OF_FRAG_PER_NODE];
Uint16 fragid[NO_OF_FRAG_PER_NODE];
UintR fragrec[MAX_FRAG_PER_NODE];
Uint16 fragid[MAX_FRAG_PER_NODE];
/**
* Status of the table
*/
@ -2665,7 +2648,6 @@ private:
UintR cfirstfreeLfo;
UintR clfoFileSize;
#define ZLOG_PAGE_FILE_SIZE 256 // 8 MByte
LogPageRecord *logPageRecord;
LogPageRecordPtr logPagePtr;
UintR cfirstfreeLogPage;

View file

@ -34,7 +34,6 @@ void Dblqh::initData()
chostFileSize = MAX_NDB_NODES;
clcpFileSize = ZNO_CONCURRENT_LCP;
clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
clogPageFileSize = ZLOG_PAGE_FILE_SIZE;
clfoFileSize = ZLFO_FILE_SIZE;
clogFileFileSize = 0;
clogPartFileSize = ZLOG_PART_FILE_SIZE;
@ -177,8 +176,25 @@ Dblqh::Dblqh(const class Configuration & conf):
m_commitAckMarkerHash(m_commitAckMarkerPool),
c_scanTakeOverHash(c_scanRecordPool)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dblqh);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
&log_page_size);
/**
* Always set page size in half MBytes
*/
clogPageFileSize= (log_page_size / sizeof(LogPageRecord));
Uint32 mega_byte_part= clogPageFileSize & 15;
if (mega_byte_part != 0) {
jam();
clogPageFileSize+= (16 - mega_byte_part);
}
addRecSignal(GSN_PACKED_SIGNAL, &Dblqh::execPACKED_SIGNAL);
addRecSignal(GSN_DEBUG_SIG, &Dblqh::execDEBUG_SIG);
addRecSignal(GSN_ATTRINFO, &Dblqh::execATTRINFO);

View file

@ -991,7 +991,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
FragrecordPtr tFragPtr;
tFragPtr.i = RNIL;
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
jam();
tFragPtr.i = tTablePtr.p->fragrec[i];
@ -1916,7 +1916,7 @@ void Dblqh::removeTable(Uint32 tableId)
tabptr.i = tableId;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] != ZNIL) {
jam();
@ -15979,7 +15979,7 @@ void Dblqh::deleteFragrec(Uint32 fragId)
{
Uint32 indexFound= RNIL;
fragptr.i = RNIL;
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];
@ -16087,7 +16087,7 @@ void Dblqh::getFirstInLogQueue(Signal* signal)
/* ---------------------------------------------------------------- */
bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId)
{
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (UintR)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (UintR)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];
@ -16511,7 +16511,7 @@ void Dblqh::initialiseTabrec(Signal* signal)
ptrAss(tabptr, tablerec);
tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
tabptr.p->usageCount = 0;
for (Uint32 i = 0; i <= (NO_OF_FRAG_PER_NODE - 1); i++) {
for (Uint32 i = 0; i <= (MAX_FRAG_PER_NODE - 1); i++) {
tabptr.p->fragid[i] = ZNIL;
tabptr.p->fragrec[i] = RNIL;
}//for
@ -16833,7 +16833,7 @@ bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId)
return false;
}//if
seizeFragmentrec(signal);
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == ZNIL) {
jam();

View file

@ -542,13 +542,11 @@ public:
attrInfo(abp),
expectedTransIdAI(0),
transIdAI(abp),
tcIndxReq(new TcIndxReq()),
indexReadTcConnect(RNIL)
{}
~TcIndexOperation()
{
delete tcIndxReq;
}
// Index data
@ -561,7 +559,7 @@ public:
Uint32 expectedTransIdAI;
AttributeBuffer transIdAI; // For accumulating TransId_AI
TcIndxReq* tcIndxReq;
TcIndxReq tcIndxReq;
UintR connectionIndex;
UintR indexReadTcConnect; //
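
A minimal sketch of the pattern applied here, using hypothetical stand-in types: the saved request moves from a heap allocation owned through a raw pointer to a plain by-value member, so seizing and releasing an index operation no longer costs a new/delete pair, and call sites switch from -> to . access (as the DbtcMain.cpp hunks below show).

#include <cstdint>

// Hypothetical stand-ins for TcIndxReq / TcIndexOperation; illustration only.
struct Req { uint32_t senderData; uint32_t requestInfo; };

struct OpBefore {                 // old layout: request lives on the heap
  Req* req;
  OpBefore() : req(new Req()) {}
  ~OpBefore() { delete req; }
};

struct OpAfter {                  // new layout: request embedded by value
  Req req;
};

int main()
{
  OpAfter op;
  Req incoming = { 7, 0 };
  op.req = incoming;              // replaces "*op.req = incoming"
  return op.req.senderData == 7 ? 0 : 1;
}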

View file

@ -73,6 +73,7 @@ void Dbtc::initData()
void Dbtc::initRecords()
{
void *p;
// Records with dynamic sizes
cacheRecord = (CacheRecord*)allocRecord("CacheRecord",
sizeof(CacheRecord),
@ -83,7 +84,7 @@ void Dbtc::initRecords()
capiConnectFilesize);
for(unsigned i = 0; i<capiConnectFilesize; i++) {
void * p = &apiConnectRecord[i];
p = &apiConnectRecord[i];
new (p) ApiConnectRecord(c_theFiredTriggerPool,
c_theSeizedIndexOperationPool);
}
@ -91,7 +92,8 @@ void Dbtc::initRecords()
DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
FiredTriggerPtr tptr;
while(triggers.seize(tptr) == true) {
new (tptr.p) TcFiredTriggerData();
p= tptr.p;
new (p) TcFiredTriggerData();
}
triggers.release();
@ -109,7 +111,8 @@ void Dbtc::initRecords()
ArrayList<TcIndexOperation> indexOps(c_theIndexOperationPool);
TcIndexOperationPtr ioptr;
while(indexOps.seize(ioptr) == true) {
new (ioptr.p) TcIndexOperation(c_theAttributeBufferPool);
p= ioptr.p;
new (p) TcIndexOperation(c_theAttributeBufferPool);
}
indexOps.release();
@ -179,7 +182,6 @@ Dbtc::Dbtc(const class Configuration & conf):
c_maxNumberOfIndexOperations(0),
m_commitAckMarkerHash(m_commitAckMarkerPool)
{
BLOCK_CONSTRUCTOR(Dbtc);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
@ -191,7 +193,7 @@ Dbtc::Dbtc(const class Configuration & conf):
ndb_mgm_get_int_parameter(p, CFG_DB_TRANS_BUFFER_MEM,
&transactionBufferMemory);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEXES,
ndb_mgm_get_int_parameter(p, CFG_DB_NO_UNIQUE_HASH_INDEXES,
&maxNoOfIndexes);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEX_OPS,
&maxNoOfConcurrentIndexOperations);

View file

@ -11013,7 +11013,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
indexOp->indexOpId = indexOpPtr.i;
// Save original signal
*indexOp->tcIndxReq = *tcIndxReq;
indexOp->tcIndxReq = *tcIndxReq;
indexOp->connectionIndex = TapiIndex;
regApiPtr->accumulatingIndexOp = indexOp->indexOpId;
@ -11322,7 +11322,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@ -11341,7 +11341,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Double TCKEYCONF, should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@ -11362,7 +11362,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Copy reply from TcKeyConf
regApiPtr->noIndexOp--; // Decrease count
regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq->senderData;
regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData;
regApiPtr->tcIndxSendArray[Ttcindxrec + 1] =
tcKeyConf->operations[0].attrInfoLen;
regApiPtr->tcindxrec = Ttcindxrec + 2;
@ -11395,7 +11395,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
}
const UintR TconnectIndex = indexOp->connectionIndex;
ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
switch(indexOp->indexOpState) {
@ -11425,7 +11425,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
// Send TCINDXREF
jam();
TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
regApiPtr->noIndexOp--; // Decrease count
@ -11503,7 +11503,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Failed to allocate space for TransIdAI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
@ -11518,7 +11518,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@ -11546,7 +11546,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Too many TRANSID_AI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndexRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndexRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@ -11571,7 +11571,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
jam();
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@ -11591,7 +11591,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
indexOpPtr.p = indexOp;
tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
tcRollbackRep->connectPtr = indexOp->tcIndxReq->senderData;
tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
sendSignal(apiConnectptr.p->ndbapiBlockref,
GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB);
}
@ -11608,23 +11608,23 @@ void Dbtc::readIndexTable(Signal* signal,
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
AttributeBuffer::DataBufferIterator keyIter;
Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo);
TcIndexData* indexData;
Uint32 transId1 = indexOp->tcIndxReq->transId1;
Uint32 transId2 = indexOp->tcIndxReq->transId2;
Uint32 transId1 = indexOp->tcIndxReq.transId1;
Uint32 transId2 = indexOp->tcIndxReq.transId2;
const Operation_t opType =
(Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
// Find index table
if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq->indexId)) == NULL) {
if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.indexId)) == NULL) {
jam();
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
@ -11636,7 +11636,7 @@ void Dbtc::readIndexTable(Signal* signal,
tcKeyReq->transId2 = transId2;
tcKeyReq->tableId = indexData->indexId;
tcKeyLength += MIN(keyLength, keyBufSize);
tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq->indexSchemaVersion;
tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.indexSchemaVersion;
TcKeyReq::setOperationType(tcKeyRequestInfo,
opType == ZREAD ? opType : ZREAD_EX);
TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Allways send one AttrInfo
@ -11685,7 +11685,7 @@ void Dbtc::readIndexTable(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = transId1;
keyInfo->transId[1] = transId2;
dataPtr = (Uint32 *) &keyInfo->keyData;
@ -11725,7 +11725,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
Uint32 attrBufSize = 5;
Uint32 dataPos = 0;
TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
@ -11741,7 +11741,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@ -11841,7 +11841,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = regApiPtr->transid[0];
keyInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &keyInfo->keyData;
@ -11877,7 +11877,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
Uint32 attrInfoPos = 0;
attrInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
attrInfo->transId[0] = regApiPtr->transid[0];
attrInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &attrInfo->attrData;

View file

@ -85,21 +85,12 @@ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */
#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/
#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */
#define ZNO_OF_FRAGREC 64 /* SIZE OF FRAGMENT FILE. */
#define ZNO_OF_LCP_REC 10 /* NUMBER OF CONCURRENT CHECKPOINTS*/
#define ZNO_OF_OPREC 116 /* SIZE OF OPERATION RECORD FILE */
#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */
#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE
#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */
#define ZNO_OF_PARALLELL_UNDO_FILES 16 /* NUMBER OF PARALLEL UNDO FILES */
#define ZNO_OF_RESTART_INFO_REC 10 /* MAXIMUM PARALLELL RESTART INFOS */
#define ZNO_OF_TAB_DESCR_REC 484 /* SIZE OF TABLE DESCRIPTOR FILE */
#define ZNO_OF_TABLEREC 16 /* SIZE OF TABLE RECORD FILE. */
#ifdef NDB_OSE
#define ZNO_OF_UNDO_PAGE 80 // Must be multiple of 8
#else
#define ZNO_OF_UNDO_PAGE 500 // Must be multiple of 8
#endif
/* 24 SEGMENTS WITH 8 PAGES IN EACH*/
/* PLUS ONE UNDO BUFFER CACHE */
// Undo record identifiers are 32-bits with page index 13-bits
@ -826,8 +817,8 @@ struct Tablerec {
// List of ordered indexes
ArrayList<TupTriggerData> tuxCustomTriggers;
Uint32 fragid[2 * NO_OF_FRAG_PER_NODE];
Uint32 fragrec[2 * NO_OF_FRAG_PER_NODE];
Uint32 fragid[2 * MAX_FRAG_PER_NODE];
Uint32 fragrec[2 * MAX_FRAG_PER_NODE];
struct {
Uint32 tabUserPtr;

View file

@ -44,16 +44,10 @@ void Dbtup::initData()
cnoOfLcpRec = ZNO_OF_LCP_REC;
cnoOfConcurrentOpenOp = ZNO_OF_CONCURRENT_OPEN_OP;
cnoOfConcurrentWriteOp = ZNO_OF_CONCURRENT_WRITE_OP;
cnoOfFragoprec = 2 * NO_OF_FRAG_PER_NODE;
cnoOfFragrec = ZNO_OF_FRAGREC;
cnoOfOprec = ZNO_OF_OPREC;
cnoOfPage = ZNO_OF_PAGE;
cnoOfFragoprec = 2 * MAX_FRAG_PER_NODE;
cnoOfPageRangeRec = ZNO_OF_PAGE_RANGE_REC;
cnoOfParallellUndoFiles = ZNO_OF_PARALLELL_UNDO_FILES;
cnoOfRestartInfoRec = ZNO_OF_RESTART_INFO_REC;
cnoOfTablerec = ZNO_OF_TABLEREC;
cnoOfTabDescrRec = ZNO_OF_TAB_DESCR_REC;
cnoOfUndoPage = ZNO_OF_UNDO_PAGE;
c_maxTriggersPerTable = ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE;
c_noOfBuildIndexRec = 32;
@ -83,9 +77,25 @@ Dbtup::Dbtup(const class Configuration & conf)
c_storedProcPool(),
c_buildIndexList(c_buildIndexPool)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbtup);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_DATA_BUFFER,
&log_page_size);
/**
* Always set page size in half MBytes
*/
cnoOfUndoPage= (log_page_size / sizeof(UndoPage));
Uint32 mega_byte_part= cnoOfUndoPage & 15;
if (mega_byte_part != 0) {
jam();
cnoOfUndoPage+= (16 - mega_byte_part);
}
addRecSignal(GSN_DEBUG_SIG, &Dbtup::execDEBUG_SIG);
addRecSignal(GSN_CONTINUEB, &Dbtup::execCONTINUEB);
@ -628,6 +638,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &tmp));
Uint64 pages = (tmp * 2048 + (ZWORDS_ON_PAGE - 1))/ (Uint64)ZWORDS_ON_PAGE;
cnoOfPage = (Uint32)pages;
Uint32 noOfTriggers= 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp));
initPageRangeSize(tmp);
@ -637,10 +648,13 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
Uint32 noOfStoredProc;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_STORED_PROC,
&noOfStoredProc));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS,
&noOfTriggers));
cnoOfTabDescrRec = (cnoOfTabDescrRec & 0xFFFFFFF0) + 16;
c_storedProcPool.setSize(noOfStoredProc);
c_buildIndexPool.setSize(c_noOfBuildIndexRec);
c_triggerPool.setSize(noOfTriggers);
initRecords();
czero = 0;
@ -715,8 +729,6 @@ void Dbtup::initRecords()
sizeof(RestartInfoRecord),
cnoOfRestartInfoRec);
// Trigger data
c_triggerPool.setSize(cnoOfTablerec*c_maxTriggersPerTable);
tablerec = (Tablerec*)allocRecord("Tablerec",
sizeof(Tablerec),
@ -1049,7 +1061,7 @@ void Dbtup::initializeTablerec()
void
Dbtup::initTab(Tablerec* const regTabPtr)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
regTabPtr->fragid[i] = RNIL;
regTabPtr->fragrec[i] = RNIL;
}//for
@ -1160,7 +1172,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
return;
}//Dbtup::execTUPSEIZEREQ()
#define printFragment(t){ for(Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE);i++){\
#define printFragment(t){ for(Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE);i++){\
ndbout_c("table = %d fragid[%d] = %d fragrec[%d] = %d", \
t.i, t.p->fragid[i], i, t.p->fragrec[i]); }}

View file

@ -349,14 +349,14 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
do {
// get fragment
FragrecordPtr fragPtr;
if (buildPtr.p->m_fragNo == 2 * NO_OF_FRAG_PER_NODE) {
if (buildPtr.p->m_fragNo == 2 * MAX_FRAG_PER_NODE) {
ljam();
// build ready
buildIndexReply(signal, buildPtr.p);
c_buildIndexList.release(buildPtr);
return;
}
ndbrequire(buildPtr.p->m_fragNo < 2 * NO_OF_FRAG_PER_NODE);
ndbrequire(buildPtr.p->m_fragNo < 2 * MAX_FRAG_PER_NODE);
fragPtr.i = tablePtr.p->fragrec[buildPtr.p->m_fragNo];
if (fragPtr.i == RNIL) {
ljam();

View file

@ -188,7 +188,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
/* -------------------------------------------------------------------- */
bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == RNIL) {
ljam();
@ -202,7 +202,7 @@ bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIn
void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
@ -456,7 +456,7 @@ void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
@ -515,7 +515,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId)
Uint32 fragIndex = RNIL;
Uint32 fragId = RNIL;
Uint32 i = 0;
for (i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (tabPtr.p->fragid[i] != RNIL) {
ljam();

View file

@ -108,7 +108,7 @@ public:
private:
// sizes are in words (Uint32)
static const unsigned MaxIndexFragments = 2 * NO_OF_FRAG_PER_NODE;
static const unsigned MaxIndexFragments = 2 * MAX_FRAG_PER_NODE;
static const unsigned MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX;
static const unsigned MaxAttrDataSize = 2048;
public:

View file

@ -53,11 +53,7 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
}
// get new operation record
c_fragOpPool.seize(fragOpPtr);
if (fragOpPtr.i == RNIL) {
jam();
errorCode = TuxFragRef::NoFreeFragmentOper;
break;
}
ndbrequire(fragOpPtr.i != RNIL);
new (fragOpPtr.p) FragOp();
fragOpPtr.p->m_userPtr = req->userPtr;
fragOpPtr.p->m_userRef = req->userRef;
@ -66,11 +62,7 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags;
fragOpPtr.p->m_numAttrsRecvd = 0;
// check if index has place for more fragments
if (indexPtr.p->m_numFrags == MaxIndexFragments) {
jam();
errorCode = TuxFragRef::NoFreeIndexFragment;
break;
}
ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
// seize new fragment record
FragPtr fragPtr;
c_fragPool.seize(fragPtr);

View file

@ -112,7 +112,7 @@ DbUtil::DbUtil(const Configuration & conf) :
addRecSignal(GSN_UTIL_RELEASE_CONF, &DbUtil::execUTIL_RELEASE_CONF);
addRecSignal(GSN_UTIL_RELEASE_REF, &DbUtil::execUTIL_RELEASE_REF);
c_pagePool.setSize(100);
c_pagePool.setSize(10);
c_preparePool.setSize(1); // one parallel prepare at a time
c_preparedOperationPool.setSize(5); // three hardcoded, two for test
c_operationPool.setSize(64); // 64 parallel operations

View file

@ -37,7 +37,7 @@
#include <signaldata/UtilLock.hpp>
#include <SimpleProperties.hpp>
#define UTIL_WORDS_PER_PAGE 8191
#define UTIL_WORDS_PER_PAGE 1023
/**
* @class DbUtil

View file

@ -510,7 +510,7 @@ AsyncFile::extendfile(Request* request) {
DEBUG(ndbout_c("extendfile: maxOffset=%d, size=%d", maxOffset, maxSize));
// Allocate a buffer and fill it with zeros
void* pbuf = malloc(maxSize);
void* pbuf = NdbMem_Allocate(maxSize);
memset(pbuf, 0, maxSize);
for (int p = 0; p <= maxOffset; p = p + maxSize) {
int return_value;

View file

@ -67,7 +67,7 @@ Ndbfs::Ndbfs(const Configuration & conf) :
//ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles);
// Create idle AsyncFiles
Uint32 noIdleFiles = 16;
Uint32 noIdleFiles = 27;
for (Uint32 i = 0; i < noIdleFiles; i++){
theIdleFiles.push_back(createAsyncFile());
}

View file

@ -673,6 +673,7 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
*
*/
#if 0
void
SumaParticipant::convertNameToId(SubscriptionPtr subPtr, Signal * signal)
{
@ -703,6 +704,7 @@ SumaParticipant::convertNameToId(SubscriptionPtr subPtr, Signal * signal)
sendSubCreateConf(signal, subPtr.p->m_subscriberRef, subPtr);
}
}
#endif
void
@ -719,6 +721,7 @@ SumaParticipant::addTableId(Uint32 tableId,
psyncRec->m_tableList.append(&tableId, 1);
}
#if 0
void
SumaParticipant::execGET_TABLEID_CONF(Signal * signal)
{
@ -766,6 +769,8 @@ SumaParticipant::execGET_TABLEID_REF(Signal * signal)
SubCreateRef::SignalLength,
JBB);
}
#endif
/*************************************************************
*
@ -999,6 +1004,7 @@ SumaParticipant::execSUB_CREATE_REQ(Signal* signal) {
}
}
break;
#if 0
case SubCreateReq::SelectiveTableSnapshot:
/**
* Tables specified by the user that does not exist
@ -1041,6 +1047,7 @@ SumaParticipant::execSUB_CREATE_REQ(Signal* signal) {
return;
}
break;
#endif
case SubCreateReq::DatabaseSnapshot:
{
jam();

View file

@ -62,9 +62,10 @@ protected:
void execLIST_TABLES_CONF(Signal* signal);
void execGET_TABINFOREF(Signal* signal);
void execGET_TABINFO_CONF(Signal* signal);
#if 0
void execGET_TABLEID_CONF(Signal* signal);
void execGET_TABLEID_REF(Signal* signal);
#endif
/**
* Scan interface
*/
@ -275,7 +276,9 @@ public:
*/
// TODO we've got to fix this, this is to inefficient. Tomas
char m_tables[MAX_TABLES];
#if 0
char m_tableNames[MAX_TABLES][MAX_TAB_NAME_SIZE];
#endif
/**
* "Iterator" used to iterate through m_tableNames
*/

View file

@ -51,9 +51,10 @@ SumaParticipant::SumaParticipant(const Configuration & conf) :
//addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFO_REF);
addRecSignal(GSN_GET_TABINFO_CONF, &SumaParticipant::execGET_TABINFO_CONF);
addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFOREF);
#if 0
addRecSignal(GSN_GET_TABLEID_CONF, &SumaParticipant::execGET_TABLEID_CONF);
addRecSignal(GSN_GET_TABLEID_REF, &SumaParticipant::execGET_TABLEID_REF);
#endif
/**
* Dih interface
*/
@ -93,9 +94,15 @@ SumaParticipant::SumaParticipant(const Configuration & conf) :
/**
* @todo: fix pool sizes
*/
Uint32 noTables;
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
c_tablePool_.setSize(MAX_TABLES);
c_tables.setSize(MAX_TABLES);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES,
&noTables);
c_tablePool_.setSize(noTables);
c_tables.setSize(noTables);
c_subscriptions.setSize(20); //10
c_subscriberPool.setSize(64);

View file

@ -47,8 +47,7 @@ Trix::Trix(const Configuration & conf) :
c_masterTrixRef(0),
c_noNodesFailed(0),
c_noActiveNodes(0),
c_theSubscriptions(c_theSubscriptionRecPool),
c_thePages(c_thePagePool)
c_theSubscriptions(c_theSubscriptionRecPool)
{
BLOCK_CONSTRUCTOR(Trix);
@ -90,7 +89,6 @@ Trix::Trix(const Configuration & conf) :
// Allocate pool sizes
c_theAttrOrderBufferPool.setSize(100);
c_theSubscriptionRecPool.setSize(100);
c_thePagePool.setSize(16);
ArrayList<SubscriptionRecord> subscriptions(c_theSubscriptionRecPool);
SubscriptionRecPtr subptr;

View file

@ -138,19 +138,6 @@ private:
*/
ArrayList<SubscriptionRecord> c_theSubscriptions;
// Linear memory abstraction
#define TRIX_WORDS_PER_PAGE 8191
struct Page32 {
Uint32 data[TRIX_WORDS_PER_PAGE];
Uint32 nextPool;
};
typedef Ptr<Page32> Page32Ptr;
ArrayPool<Page32> c_thePagePool;
Array<Page32> c_thePages;
// Private methods
// System start
void execSTTOR(Signal* signal);
void execNDB_STTOR(Signal* signal);

View file

@ -289,7 +289,7 @@ Configuration::setupConfiguration(){
if(pFileSystemPath[strlen(pFileSystemPath) - 1] == '/')
_fsPath = strdup(pFileSystemPath);
else {
_fsPath = (char *)malloc(strlen(pFileSystemPath) + 2);
_fsPath = (char *)NdbMem_Allocate(strlen(pFileSystemPath) + 2);
strcpy(_fsPath, pFileSystemPath);
strcat(_fsPath, "/");
}
@ -385,7 +385,8 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
char buf[255];
unsigned int noOfTables = 0;
unsigned int noOfIndexes = 0;
unsigned int noOfUniqueHashIndexes = 0;
unsigned int noOfOrderedIndexes = 0;
unsigned int noOfReplicas = 0;
unsigned int noOfDBNodes = 0;
unsigned int noOfAPINodes = 0;
@ -393,33 +394,28 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
unsigned int noOfNodes = 0;
unsigned int noOfAttributes = 0;
unsigned int noOfOperations = 0;
unsigned int noOfLocalOperations = 0;
unsigned int noOfTransactions = 0;
unsigned int noOfIndexPages = 0;
unsigned int noOfDataPages = 0;
unsigned int noOfScanRecords = 0;
unsigned int noOfLocalScanRecords = 0;
unsigned int noBatchSize = 0;
m_logLevel = new LogLevel();
/**
* {"NoOfConcurrentCheckpointsDuringRestart", &cd.ispValues[1][5] },
* {"NoOfConcurrentCheckpointsAfterRestart", &cd.ispValues[2][4] },
* {"NoOfConcurrentProcessesHandleTakeover", &cd.ispValues[1][7] },
* {"TimeToWaitAlive", &cd.ispValues[0][0] },
*/
struct AttribStorage { int paramId; Uint32 * storage; };
struct AttribStorage { int paramId; Uint32 * storage; bool computable; };
AttribStorage tmp[] = {
{ CFG_DB_NO_SCANS, &noOfScanRecords },
{ CFG_DB_NO_TABLES, &noOfTables },
{ CFG_DB_NO_INDEXES, &noOfIndexes },
{ CFG_DB_NO_REPLICAS, &noOfReplicas },
{ CFG_DB_NO_ATTRIBUTES, &noOfAttributes },
{ CFG_DB_NO_OPS, &noOfOperations },
{ CFG_DB_NO_TRANSACTIONS, &noOfTransactions }
#if 0
{ "NoOfDiskPagesToDiskDuringRestartTUP", &cd.ispValues[3][8] },
{ "NoOfDiskPagesToDiskAfterRestartTUP", &cd.ispValues[3][9] },
{ "NoOfDiskPagesToDiskDuringRestartACC", &cd.ispValues[3][10] },
{ "NoOfDiskPagesToDiskAfterRestartACC", &cd.ispValues[3][11] },
#endif
{ CFG_DB_NO_SCANS, &noOfScanRecords, false },
{ CFG_DB_NO_LOCAL_SCANS, &noOfLocalScanRecords, true },
{ CFG_DB_BATCH_SIZE, &noBatchSize, false },
{ CFG_DB_NO_TABLES, &noOfTables, false },
{ CFG_DB_NO_ORDERED_INDEXES, &noOfOrderedIndexes, false },
{ CFG_DB_NO_UNIQUE_HASH_INDEXES, &noOfUniqueHashIndexes, false },
{ CFG_DB_NO_REPLICAS, &noOfReplicas, false },
{ CFG_DB_NO_ATTRIBUTES, &noOfAttributes, false },
{ CFG_DB_NO_OPS, &noOfOperations, false },
{ CFG_DB_NO_LOCAL_OPS, &noOfLocalOperations, true },
{ CFG_DB_NO_TRANSACTIONS, &noOfTransactions, false }
};
ndb_mgm_configuration_iterator db(*(ndb_mgm_configuration*)ownConfig, 0);
@ -427,8 +423,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
const int sz = sizeof(tmp)/sizeof(AttribStorage);
for(int i = 0; i<sz; i++){
if(ndb_mgm_get_int_parameter(&db, tmp[i].paramId, tmp[i].storage)){
snprintf(buf, sizeof(buf), "ConfigParam: %d not found", tmp[i].paramId);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
if (tmp[i].computable) {
*tmp[i].storage = 0;
} else {
snprintf(buf, sizeof(buf),"ConfigParam: %d not found", tmp[i].paramId);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
}
}
@ -512,37 +512,42 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
}
}
noOfNodes = nodeNo;
noOfTables+= 2; // Add System tables
noOfAttributes += 9; // Add System table attributes
ConfigValues::Iterator it2(*ownConfig, db.m_config);
it2.set(CFG_DB_NO_TABLES, noOfTables);
it2.set(CFG_DB_NO_ATTRIBUTES, noOfAttributes);
/**
* Do size calculations
*/
ConfigValuesFactory cfg(ownConfig);
noOfTables++; // Remove impact of system table
noOfTables += noOfIndexes; // Indexes are tables too
noOfAttributes += 2; // ---"----
noOfTables *= 2; // Remove impact of Dict need 2 ids for each table
if (noOfDBNodes > 15) {
noOfDBNodes = 15;
}//if
Uint32 noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
if (noOfLocalScanRecords == 0) {
noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
}
if (noOfLocalOperations == 0) {
noOfLocalOperations= (11 * noOfOperations) / 10;
}
Uint32 noOfTCScanRecords = noOfScanRecords;
{
Uint32 noOfAccTables= noOfTables + noOfUniqueHashIndexes;
/**
* Acc Size Alt values
*/
// Can keep 65536 pages (= 0.5 GByte)
cfg.put(CFG_ACC_DIR_RANGE,
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_DIR_ARRAY,
(noOfIndexPages >> 8) +
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_FRAGMENT,
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
/*-----------------------------------------------------------------------*/
// The extra operation records added are used by the scan and node
@ -552,25 +557,27 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
// The remainder are allowed for use by the scan processes.
/*-----------------------------------------------------------------------*/
cfg.put(CFG_ACC_OP_RECS,
((11 * noOfOperations) / 10 + 50) +
(noOfLocalScanRecords * MAX_PARALLEL_OP_PER_SCAN) +
(noOfLocalOperations + 50) +
(noOfLocalScanRecords * noBatchSize) +
NODE_RECOVERY_SCAN_OP_RECORDS);
cfg.put(CFG_ACC_OVERFLOW_RECS,
noOfIndexPages +
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_PAGE8,
noOfIndexPages + 32);
cfg.put(CFG_ACC_ROOT_FRAG,
NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_TABLE, noOfTables);
cfg.put(CFG_ACC_TABLE, noOfAccTables);
cfg.put(CFG_ACC_SCAN, noOfLocalScanRecords);
}
Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes +
noOfUniqueHashIndexes;
{
/**
* Dict Size Alt values
@ -579,7 +586,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfAttributes);
cfg.put(CFG_DICT_TABLE,
noOfTables);
noOfMetaTables);
}
{
@ -593,7 +600,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfOperations + noOfTransactions + 46);
cfg.put(CFG_DIH_FRAG_CONNECT,
NO_OF_FRAG_PER_NODE * noOfTables * noOfDBNodes);
NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfDBNodes);
int temp;
temp = noOfReplicas - 2;
@ -603,14 +610,14 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
temp++;
cfg.put(CFG_DIH_MORE_NODES,
temp * NO_OF_FRAG_PER_NODE *
noOfTables * noOfDBNodes);
noOfMetaTables * noOfDBNodes);
cfg.put(CFG_DIH_REPLICAS,
NO_OF_FRAG_PER_NODE * noOfTables *
NO_OF_FRAG_PER_NODE * noOfMetaTables *
noOfDBNodes * noOfReplicas);
cfg.put(CFG_DIH_TABLE,
noOfTables);
noOfMetaTables);
}
{
@ -618,13 +625,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Lqh Size Alt values
*/
cfg.put(CFG_LQH_FRAG,
NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfReplicas);
cfg.put(CFG_LQH_TABLE,
noOfTables);
noOfMetaTables);
cfg.put(CFG_LQH_TC_CONNECT,
(11 * noOfOperations) / 10 + 50);
noOfLocalOperations + 50);
cfg.put(CFG_LQH_SCAN,
noOfLocalScanRecords);
@ -641,7 +648,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
(2 * noOfOperations) + 16 + noOfTransactions);
cfg.put(CFG_TC_TABLE,
noOfTables);
noOfMetaTables);
cfg.put(CFG_TC_LOCAL_SCAN,
noOfLocalScanRecords);
@ -655,23 +662,23 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tup Size Alt values
*/
cfg.put(CFG_TUP_FRAG,
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);
cfg.put(CFG_TUP_OP_RECS,
(11 * noOfOperations) / 10 + 50);
noOfLocalOperations + 50);
cfg.put(CFG_TUP_PAGE,
noOfDataPages);
cfg.put(CFG_TUP_PAGE_RANGE,
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);
cfg.put(CFG_TUP_TABLE,
noOfTables);
noOfMetaTables);
cfg.put(CFG_TUP_TABLE_DESC,
4 * NO_OF_FRAG_PER_NODE * noOfAttributes* noOfReplicas +
12 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas );
12 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas );
cfg.put(CFG_TUP_STORED_PROC,
noOfLocalScanRecords);
@ -682,13 +689,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tux Size Alt values
*/
cfg.put(CFG_TUX_INDEX,
noOfTables);
noOfOrderedIndexes);
cfg.put(CFG_TUX_FRAGMENT,
2 * NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfOrderedIndexes * noOfReplicas);
cfg.put(CFG_TUX_ATTRIBUTE,
noOfIndexes * 4);
noOfOrderedIndexes * 4);
cfg.put(CFG_TUX_SCAN_OP, noOfLocalScanRecords);
}
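
The new computable flag in AttribStorage is what lets MaxNoOfLocalOperations and MaxNoOfLocalScans be omitted: a missing computable parameter is zeroed instead of raising a fatal config error, and the zero is then replaced by a value derived from the global limits, as above. A condensed sketch of just that fallback (the input values are invented for the example):

#include <cstdio>

int main()
{
  // Example inputs; only the two derivations below are taken from the diff.
  unsigned noOfDBNodes          = 4;
  unsigned noOfScanRecords      = 256;        // MaxNoOfConcurrentScans (example)
  unsigned noOfOperations       = 32 * 1024;  // MaxNoOfConcurrentOperations default
  unsigned noOfLocalScanRecords = 0;          // "computable": absent from config
  unsigned noOfLocalOperations  = 0;          // "computable": absent from config

  if (noOfLocalScanRecords == 0)
    noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;   // 1025
  if (noOfLocalOperations == 0)
    noOfLocalOperations = (11 * noOfOperations) / 10;             // 10% headroom

  printf("local scans = %u, local ops = %u\n",
         noOfLocalScanRecords, noOfLocalOperations);
  return 0;
}

The derived counts then feed the ACC, LQH and TUP record sizing in place of the earlier (11 * noOfOperations) / 10 expressions.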

View file

@ -38,13 +38,6 @@
*/
SectionSegmentPool g_sectionSegmentPool;
static int f(int v){
g_sectionSegmentPool.setSize(v);
return v;
}
static int v = f(2048);
bool
import(Ptr<SectionSegment> & first, const Uint32 * src, Uint32 len){
/**

View file

@ -147,7 +147,8 @@
// in future version since small tables want small value and large tables
// need large value.
/* ------------------------------------------------------------------------- */
#define NO_OF_FRAG_PER_NODE 8
#define NO_OF_FRAG_PER_NODE 1
#define MAX_FRAG_PER_NODE 8
/* ---------------------------------------------------------------- */
// To avoid synching too big chunks at a time we synch after writing

View file

@ -13,6 +13,9 @@ INCLUDES += -I$(top_srcdir)/ndb/include/mgmapi -I$(top_srcdir)/ndb/src/common/mg
LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/ndb/src/common/editline/libeditline.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a \
@TERMCAP_LIB@
ndb_mgm_LDFLAGS = @ndb_bin_am_ldflags@

View file

@ -19,7 +19,6 @@ INCLUDES_LOC = -I$(top_srcdir)/ndb/src/ndbapi \
LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/ndb/src/common/editline/libeditline.a \
$(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a