mirror of https://github.com/MariaDB/server.git
synced 2025-01-31 11:01:52 +01:00

commit e51de97ebe

Merge mysql.com:/home/jonas/src/mysql-4.1
into mysql.com:/home/jonas/src/mysql-4.1-ndb

18 changed files with 342 additions and 77 deletions
@@ -21,6 +21,28 @@ insert into t1 values(7,8,3);
 select * from t1 where b = 4 order by a;
 a b c
 3 4 6
+insert into t1 values(8, 2, 3);
+ERROR 23000: Can't write, because of unique constraint, to table 't1'
+select * from t1 order by a;
+a b c
+1 2 3
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+delete from t1 where a = 1;
+insert into t1 values(8, 2, 3);
+select * from t1 order by a;
+a b c
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+8 2 3
 drop table t1;
 CREATE TABLE t2 (
 a int unsigned NOT NULL PRIMARY KEY,
@@ -42,6 +64,28 @@ insert into t2 values(7,8,3);
 select * from t2 where b = 4 order by a;
 a b c
 3 4 6
+insert into t2 values(8, 2, 3);
+ERROR 23000: Can't write, because of unique constraint, to table 't2'
+select * from t2 order by a;
+a b c
+1 2 3
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+delete from t2 where a = 1;
+insert into t2 values(8, 2, 3);
+select * from t2 order by a;
+a b c
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+8 2 3
 drop table t2;
 CREATE TABLE t3 (
 a int unsigned NOT NULL,
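The expected results above pin down the semantics the rest of the commit implements: NDB backs a unique constraint with a hidden hash-index table that maps the unique-key value to the primary key, so inserting b=2 fails while row (1,2,3) exists and succeeds once that row is deleted. A toy model of the hidden mapping (plain C++, not NDB code):

    #include <unordered_map>

    // Toy model: the hidden unique index maps unique-key value -> primary key.
    static std::unordered_map<int, int> unique_b_index;

    bool insert_row(int a, int b) {
      // .second is false when b is already present: unique-constraint error
      return unique_b_index.emplace(b, a).second;
    }

    void delete_row(int b) { unique_b_index.erase(b); }

    int main() {
      insert_row(1, 2);                  // (a=1, b=2)
      bool dup = !insert_row(8, 2);      // fails: b=2 taken -> ERROR 23000
      delete_row(2);                     // delete from t1 where a = 1
      bool ok = insert_row(8, 2);        // now succeeds
      return (dup && ok) ? 0 : 1;
    }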
@@ -74,8 +118,10 @@ INSERT INTO t1 VALUES (8,'dummy');
 CREATE TABLE t2 (
   cid bigint(20) unsigned NOT NULL auto_increment,
   cap varchar(255) NOT NULL default '',
-  PRIMARY KEY (cid)
+  PRIMARY KEY (cid),
+  UNIQUE KEY (cid, cap)
 ) engine=ndbcluster;
 INSERT INTO t2 VALUES (NULL,'another dummy');
 CREATE TABLE t3 (
   gid bigint(20) unsigned NOT NULL auto_increment,
   gn varchar(255) NOT NULL default '',
@@ -132,6 +178,9 @@ cid cv
 8 dummy
+select * from t1 where cv = 'test';
+cid cv
 select * from t2 where cap = 'another dummy';
 cid cap
+0 another dummy
 select * from t4 where uid = 1 and gid=1 and rid=2 and cid=4;
 uid gid rid cid
 1 1 2 4
@@ -114,3 +114,23 @@ select * from t1 where b=4 and c<=5 order by a;
 select * from t1 where b<=4 and c<=5 order by a;
+select * from t1 where b<=5 and c=0 or b<=5 and c=2;
 drop table t1;
 
+#
+# Indexing NULL values
+#
+
+#CREATE TABLE t1 (
+#  a int unsigned NOT NULL PRIMARY KEY,
+#  b int unsigned,
+#  c int unsigned,
+#  KEY bc(b,c)
+#) engine = ndb;
+
+#insert into t1 values(1,1,1),(2,NULL,2),(3,NULL,NULL),(4,4,NULL);
+#select * from t1 use index (bc);
+#select count(*) from t1 use index (bc);
+#select count(*) from t1 use index (PRIMARY) where b IS NULL;
+#select count(*) from t1 use index (bc) where b IS NULL;
+#select count(*) from t1 use index (bc) where b IS NULL and c = 2;
+#select count(*) from t1 use index (bc) where b IS NOT NULL;
+#drop table t1;
@@ -21,6 +21,13 @@ select * from t1 where b = 4 order by b;
 insert into t1 values(7,8,3);
 select * from t1 where b = 4 order by a;
+
+-- error 1169
+insert into t1 values(8, 2, 3);
+select * from t1 order by a;
+delete from t1 where a = 1;
+insert into t1 values(8, 2, 3);
+select * from t1 order by a;
 
 drop table t1;
 
@@ -42,6 +49,13 @@ select * from t2 where c = 6;
 insert into t2 values(7,8,3);
 select * from t2 where b = 4 order by a;
+
+-- error 1169
+insert into t2 values(8, 2, 3);
+select * from t2 order by a;
+delete from t2 where a = 1;
+insert into t2 values(8, 2, 3);
+select * from t2 order by a;
 
 drop table t2;
 
 #
@@ -64,6 +78,48 @@ select * from t3 where b = 4 order by a;
 
 drop table t3;
 
+#
+# Indexes on NULL-able columns
+#
+
+#CREATE TABLE t1 (
+# pk int NOT NULL PRIMARY KEY,
+# a int unsigned,
+# UNIQUE KEY (a)
+#) engine=ndbcluster;
+
+#insert into t1 values (-1,NULL), (0,0), (1,NULL),(2,2),(3,NULL),(4,4);
+
+#select * from t1 order by pk;
+
+#--error 1169
+#insert into t1 values (5,0);
+#select * from t1 order by pk;
+#delete from t1 where a = 0;
+#insert into t1 values (5,0);
+#select * from t1 order by pk;
+
+#CREATE TABLE t2 (
+# pk int NOT NULL PRIMARY KEY,
+# a int unsigned,
+# b tinyint NOT NULL,
+# c VARCHAR(10),
+# UNIQUE KEY si(a, c)
+#) engine=ndbcluster;
+
+#insert into t2 values (-1,1,17,NULL),(0,NULL,18,NULL),(1,3,19,'abc');
+
+#select * from t2 order by pk;
+
+#--error 1169
+#insert into t2 values(2,3,19,'abc');
+#select * from t2 order by pk;
+#delete from t2 where c IS NOT NULL;
+#insert into t2 values(2,3,19,'abc');
+#select * from t2 order by pk;
+
+#drop table t1, t2;
+
 #
 # More complex tables
 #
@@ -78,8 +134,10 @@ INSERT INTO t1 VALUES (8,'dummy');
 CREATE TABLE t2 (
   cid bigint(20) unsigned NOT NULL auto_increment,
   cap varchar(255) NOT NULL default '',
-  PRIMARY KEY (cid)
+  PRIMARY KEY (cid),
+  UNIQUE KEY (cid, cap)
 ) engine=ndbcluster;
 INSERT INTO t2 VALUES (NULL,'another dummy');
 CREATE TABLE t3 (
   gid bigint(20) unsigned NOT NULL auto_increment,
   gn varchar(255) NOT NULL default '',
@@ -134,6 +192,7 @@ INSERT INTO t7 VALUES(10, 5, 1, 1, 10);
 
 select * from t1 where cv = 'dummy';
+select * from t1 where cv = 'test';
 select * from t2 where cap = 'another dummy';
 select * from t4 where uid = 1 and gid=1 and rid=2 and cid=4;
 select * from t4 where uid = 1 and gid=1 and rid=1 and cid=4;
 select * from t4 where uid = 1 order by cid;
@@ -154,11 +154,15 @@ bool add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
 bool add_server_ports(Vector<ConfigInfo::ConfigRuleSection>&sections,
                       struct InitConfigFileParser::Context &ctx,
                       const char * rule_data);
+bool check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
+                            struct InitConfigFileParser::Context &ctx,
+                            const char * rule_data);
 
 const ConfigInfo::ConfigRule
 ConfigInfo::m_ConfigRules[] = {
   { add_node_connections, 0 },
   { add_server_ports, 0 },
+  { check_node_vs_replicas, 0 },
   { 0, 0 }
 };
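m_ConfigRules is a null-terminated table of validation hooks that the config parser runs in order after reading the cluster configuration, so registering check_node_vs_replicas here is all that is needed to activate the new check. A minimal sketch of such a rule-table loop (illustrative only, not the parser's actual driver):

    #include <cstdio>

    // Illustrative null-terminated rule table in the style of m_ConfigRules.
    typedef bool (*ConfigRuleFunc)(const char* rule_data);

    static bool add_server_ports(const char*) { return true; }
    static bool check_node_vs_replicas(const char*) { return true; }

    struct ConfigRule { ConfigRuleFunc f; const char* data; };

    static const ConfigRule rules[] = {
      { add_server_ports, 0 },
      { check_node_vs_replicas, 0 },
      { 0, 0 }                       // sentinel ends the table
    };

    int main() {
      for (const ConfigRule* r = rules; r->f; r++)
        if (!r->f(r->data)) { std::printf("config rejected\n"); return 1; }
      return 0;
    }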
@@ -2197,6 +2201,13 @@ transformNode(InitConfigFileParser::Context & ctx, const char * data){
   ctx.m_userProperties.get("NoOfNodes", &nodes);
   ctx.m_userProperties.put("NoOfNodes", ++nodes, true);
 
+  /**
+   * Update count (per type)
+   */
+  nodes = 0;
+  ctx.m_userProperties.get(ctx.fname, &nodes);
+  ctx.m_userProperties.put(ctx.fname, ++nodes, true);
+
   return true;
 }
 
@@ -2991,6 +3002,7 @@ add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
   return true;
 }
 
+
 bool add_server_ports(Vector<ConfigInfo::ConfigRuleSection>&sections,
                       struct InitConfigFileParser::Context &ctx,
                       const char * rule_data)
@@ -3030,4 +3042,22 @@ bool add_server_ports(Vector<ConfigInfo::ConfigRuleSection>&sections,
   return true;
 }
 
+bool
+check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
+                       struct InitConfigFileParser::Context &ctx,
+                       const char * rule_data)
+{
+  Uint32 db_nodes = 0;
+  Uint32 replicas = 0;
+  ctx.m_userProperties.get("DB", &db_nodes);
+  ctx.m_userProperties.get("NoOfReplicas", &replicas);
+  if((db_nodes % replicas) != 0){
+    ctx.reportError("Invalid no of db nodes wrt no of replicas.\n"
+                    "No of nodes must be dividable with no or replicas");
+    return false;
+  }
+
+  return true;
+}
 
 template class Vector<ConfigInfo::ConfigRuleSection>;
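The new rule codifies how NDB forms node groups: every group holds NoOfReplicas copies of a data fragment, so the data-node count must be an exact multiple of the replica count. A standalone illustration of the arithmetic (not NDB code):

    #include <cstdio>

    int main() {
      // 4 data nodes with NoOfReplicas=2 form 2 node groups and pass the
      // check; 3 nodes with 2 replicas leave a partial group and fail.
      unsigned db_nodes = 3, replicas = 2;
      if (db_nodes % replicas != 0) {
        std::printf("rejected: %u nodes / %u replicas\n", db_nodes, replicas);
        return 1;
      }
      std::printf("%u node group(s)\n", db_nodes / replicas);
      return 0;
    }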
@@ -3,7 +3,7 @@ Next NDBCNTR 1000
 Next NDBFS 2000
 Next DBACC 3001
 Next DBTUP 4007
-Next DBLQH 5036
+Next DBLQH 5040
 Next DBDICT 6006
 Next DBDIH 7173
 Next DBTC 8035
@@ -190,6 +190,10 @@ Delay execution of ABORTREQ signal 2 seconds to generate time-out.
 
 5035: Delay ACC_CONTOPCONT
 
+5038: Drop LQHKEYREQ + set 5039
+5039: Drop ABORT + set 5003
+
 
 ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
 -------------------------------------------------
 8040:
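The two new codes script a multi-step failure: hitting 5038 makes DBLQH swallow an incoming LQHKEYREQ and arm 5039, which in turn swallows the following ABORT and arms 5040/5003 (5003 is a crash insertion), producing the time-outs exercised by the DBLQH and DBTC changes below. A minimal sketch of the gating pattern, with stand-in macros (the real ones live in the NDB kernel headers):

    // Illustrative stand-ins for the kernel's error-insert machinery.
    static unsigned g_error_insert_value = 0;
    #define ERROR_INSERTED(x) (g_error_insert_value == (x))
    #define SET_ERROR_INSERT_VALUE(x) (g_error_insert_value = (x))

    // Pattern used in DblqhMain.cpp below: drop the signal and arm the
    // next hook so a whole failure sequence can be scripted from a test.
    bool drop_lqhkeyreq() {
      if (ERROR_INSERTED(5038)) {
        SET_ERROR_INSERT_VALUE(5039);  // next ABORT will be dropped too
        return true;                   // caller returns without processing
      }
      return false;
    }

    int main() {
      SET_ERROR_INSERT_VALUE(5038);    // what the test harness would do
      return drop_lqhkeyreq() ? 0 : 1;
    }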
@@ -6255,16 +6255,6 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
       jam();
       found = true;
       const Uint32 a = aRec->attributeDescriptor;
-      bool isNullable = AttributeDescriptor::getNullable(a);
-      // We do not allow more than one NULLable attribute for hash index
-      if (isNullable &&
-          indexPtr.p->isHashIndex() &&
-          (opPtr.p->m_attrList.sz > 1)) {
-        jam();
-        opPtr.p->m_errorCode = CreateIndxRef::AttributeNullable;
-        opPtr.p->m_errorLine = __LINE__;
-        return;
-      }
       if (indexPtr.p->isHashIndex()) {
         const Uint32 s1 = AttributeDescriptor::getSize(a);
         const Uint32 s2 = AttributeDescriptor::getArraySize(a);
@@ -324,6 +324,31 @@ Dblqh::Dblqh(const class Configuration & conf):
   addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
 
   initData();
 
+#ifdef VM_TRACE
+  {
+    void* tmp[] = {
+      &addfragptr,
+      &attrinbufptr,
+      &databufptr,
+      &fragptr,
+      &gcpPtr,
+      &lcpPtr,
+      &lcpLocptr,
+      &logPartPtr,
+      &logFilePtr,
+      &lfoPtr,
+      &logPagePtr,
+      &pageRefPtr,
+      &scanptr,
+      &tabptr,
+      &tcConnectptr,
+      &tcNodeFailptr,
+    };
+    init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+  }
+#endif
+
 }//Dblqh::Dblqh()
 
 Dblqh::~Dblqh()
@@ -3190,6 +3190,13 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
     noFreeRecordLab(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR);
     return;
   }//if
 
+  if(ERROR_INSERTED(5038) &&
+     refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+    jam();
+    SET_ERROR_INSERT_VALUE(5039);
+    return;
+  }
+
   c_Counters.operations++;
 
@@ -3567,6 +3574,7 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
 /* -------------------------------------------------------------------------- */
 /* ALSO AFTER NORMAL PROCEDURE WE CONTINUE HERE                                */
 /* -------------------------------------------------------------------------- */
+  Uint32 tc_ptr_i = tcConnectptr.i;
   TcConnectionrec * const regTcPtr = tcConnectptr.p;
   if (regTcPtr->indTakeOver == ZTRUE) {
     jam();
@@ -3670,14 +3678,14 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
   EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACCKEYREQ,
                  signal, 7 + regTcPtr->primKeyLen);
   if (signal->theData[0] < RNIL) {
-    signal->theData[0] = tcConnectptr.i;
+    signal->theData[0] = tc_ptr_i;
     execACCKEYCONF(signal);
     return;
   } else if (signal->theData[0] == RNIL) {
     ;
   } else {
     ndbrequire(signal->theData[0] == (UintR)-1);
-    signal->theData[0] = tcConnectptr.i;
+    signal->theData[0] = tc_ptr_i;
     execACCKEYREF(signal);
   }//if
   return;
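The switch from tcConnectptr.i to the cached tc_ptr_i is a use-after-invalidation fix: EXECUTE_DIRECT runs another block's handler inline, and that handler may repoint the shared block globals before control returns. This is exactly the class of bug the VM_TRACE machinery added elsewhere in this commit is designed to flush out. A compressed, hypothetical illustration:

    // Hypothetical miniature of the hazard; Ptr-style globals as in NDB blocks.
    struct Rec { int payload; };
    struct PtrRec { unsigned i; Rec* p; };

    static PtrRec tcConnectptr;        // shared "current record" global

    static void inline_handler() {     // stands in for EXECUTE_DIRECT(...)
      tcConnectptr.i = 99;             // callee repoints the global
    }

    int main() {
      tcConnectptr.i = 7;
      unsigned tc_ptr_i = tcConnectptr.i;  // cache before the direct call
      inline_handler();
      return tc_ptr_i == 7 ? 0 : 1;        // still the record we started with
    }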
@@ -5692,9 +5700,7 @@ void Dblqh::execABORT(Signal* signal)
   BlockReference tcBlockref = signal->theData[1];
   Uint32 transid1 = signal->theData[2];
   Uint32 transid2 = signal->theData[3];
-  if (ERROR_INSERTED(5003)) {
-    systemErrorLab(signal);
-  }
+  CRASH_INSERTION(5003);
   if (ERROR_INSERTED(5015)) {
     CLEAR_ERROR_INSERT_VALUE;
     sendSignalWithDelay(cownref, GSN_ABORT, signal, 2000, 4);
@@ -5704,6 +5710,21 @@ void Dblqh::execABORT(Signal* signal)
                   transid2,
                   tcOprec) != ZOK) {
     jam();
+
+    if(ERROR_INSERTED(5039) &&
+       refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+      jam();
+      SET_ERROR_INSERT_VALUE(5040);
+      return;
+    }
+
+    if(ERROR_INSERTED(5040) &&
+       refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+      jam();
+      SET_ERROR_INSERT_VALUE(5003);
+      return;
+    }
+
 /* ------------------------------------------------------------------------- */
 // SEND ABORTED EVEN IF NOT FOUND.
 //THE TRANSACTION MIGHT NEVER HAVE ARRIVED HERE.
@@ -10618,6 +10639,8 @@ void Dblqh::execEND_LCPCONF(Signal* signal)
       clcpCompletedState = LCP_IDLE;
     }//if
   }//if
+  lcpPtr.i = 0;
+  ptrAss(lcpPtr, lcpRecord);
   sendLCP_COMPLETE_REP(signal, lcpPtr.p->currentFragment.lcpFragOrd.lcpId);
 }//Dblqh::execEND_LCPCONF()
 
@@ -139,6 +139,7 @@
 #define ZNOT_FOUND 626
 #define ZALREADYEXIST 630
 #define ZINCONSISTENTHASHINDEX 892
+#define ZNOTUNIQUE 893
 #endif
 
 class Dbtc: public SimulatedBlock {
@@ -293,6 +293,23 @@ Dbtc::Dbtc(const class Configuration & conf):
   addRecSignal(GSN_ALTER_TAB_REQ, &Dbtc::execALTER_TAB_REQ);
 
   initData();
 
+#ifdef VM_TRACE
+  {
+    void* tmp[] = { &apiConnectptr,
+                    &tcConnectptr,
+                    &cachePtr,
+                    &attrbufptr,
+                    &hostptr,
+                    &gcpPtr,
+                    &tmpApiConnectptr,
+                    &timeOutptr,
+                    &scanFragptr,
+                    &databufptr,
+                    &tmpDatabufptr };
+    init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+  }
+#endif
 }//Dbtc::Dbtc()
 
 Dbtc::~Dbtc()

@@ -348,5 +365,3 @@ Dbtc::~Dbtc()
 
 BLOCK_FUNCTIONS(Dbtc);
 
@@ -65,6 +65,7 @@
 #include <signaldata/DictTabInfo.hpp>
 
 #include <NdbOut.hpp>
+#include <DebuggerNames.hpp>
 
 // Use DEBUG to print messages that should be
 // seen only when we debug the product
@@ -260,6 +261,7 @@ void Dbtc::execCONTINUEB(Signal* signal)
     tcConnectptr.i = Tdata0;
     apiConnectptr.i = Tdata1;
     ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+    apiConnectptr.p->counter--;
     sendAbortedAfterTimeout(signal, 1);
     return;
   case TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS:
@@ -4925,7 +4927,9 @@ void Dbtc::execLQHKEYREF(Signal* signal)
 
       // The operation executed an index trigger
       const Uint32 opType = regTcPtr->operation;
-      if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) {
+      if (errCode == ZALREADYEXIST)
+        errCode = terrorCode = ZNOTUNIQUE;
+      else if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) {
         jam();
         /**
          * "Normal path"
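Without this mapping, a duplicate in the hidden unique-index table would surface from LQH as ZALREADYEXIST (630), i.e. as if the primary key collided; rewriting it to the new ZNOTUNIQUE (893) is what the handler layer reports as the unique-constraint failure seen in the test results above. Roughly, as a standalone sketch (ZDELETE's numeric value here is illustrative, not the kernel's):

    enum { ZNOT_FOUND = 626, ZALREADYEXIST = 630, ZNOTUNIQUE = 893, ZDELETE = 3 };

    unsigned map_index_trigger_error(unsigned opType, unsigned errCode) {
      if (errCode == ZALREADYEXIST)
        return ZNOTUNIQUE;   // duplicate in the index table = unique violation
      if (opType == ZDELETE && errCode == ZNOT_FOUND)
        return 0;            // benign: deleting an absent index entry is ignored
      return errCode;        // anything else propagates unchanged
    }

    int main() { return map_index_trigger_error(0, 630) == 893 ? 0 : 1; }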
@@ -5007,6 +5011,8 @@ void Dbtc::execLQHKEYREF(Signal* signal)
       regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
       tcKeyRef->connectPtr = indexOp;
       EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
+      apiConnectptr.i = regTcPtr->apiConnect;
+      apiConnectptr.p = regApiPtr;
     } else {
       jam();
       tcKeyRef->connectPtr = clientData;
@@ -6039,7 +6045,8 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
             << " H'" << apiConnectptr.p->transid[1] << "] " << dec
             << "Time-out in state = " << apiConnectptr.p->apiConnectstate
             << " apiConnectptr.i = " << apiConnectptr.i
-            << " - exec: " << apiConnectptr.p->m_exec_flag);
+            << " - exec: " << apiConnectptr.p->m_exec_flag
+            << " - place: " << c_apiConTimer_line[apiConnectptr.i]);
   switch (apiConnectptr.p->apiConnectstate) {
   case CS_STARTED:
     if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
@@ -6300,9 +6307,8 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
       warningEvent(buf);
       ndbout_c(buf);
-      ndbrequire(false);
-      releaseAbortResources(signal);
-      return;
     }//if
   }
+  releaseAbortResources(signal);
+  return;
   }//if
   TloopCount++;
@ -6313,6 +6319,7 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
|
|||
// away the job buffer.
|
||||
/*------------------------------------------------------------------*/
|
||||
setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
|
||||
apiConnectptr.p->counter++;
|
||||
signal->theData[0] = TcContinueB::ZABORT_TIMEOUT_BREAK;
|
||||
signal->theData[1] = tcConnectptr.i;
|
||||
signal->theData[2] = apiConnectptr.i;
|
||||
|
@@ -10039,7 +10046,8 @@ void Dbtc::releaseAbortResources(Signal* signal)
     }//if
 
   }
-  setApiConTimer(apiConnectptr.i, 0, __LINE__);
+  setApiConTimer(apiConnectptr.i, 0,
+                 100000+c_apiConTimer_line[apiConnectptr.i]);
   if (apiConnectptr.p->apiFailState == ZTRUE) {
     jam();
     handleApiFailState(signal, apiConnectptr.i);
@@ -11326,6 +11334,8 @@ void Dbtc::execTCKEYCONF(Signal* signal)
   }
   const UintR TconnectIndex = indexOp->connectionIndex;
   ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+  apiConnectptr.p = regApiPtr;
+  apiConnectptr.i = TconnectIndex;
   switch(indexOp->indexOpState) {
   case(IOS_NOOP): {
     jam();
@@ -12168,34 +12178,33 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
   // Calculate key length and renumber attribute id:s
   AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
   LocalDataBuffer<11> afterValues(pool, firedTriggerData->afterValues);
+  bool skipNull = false;
   for(bool moreKeyAttrs = afterValues.first(iter); moreKeyAttrs; attrId++) {
     jam();
     AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
 
+    // Filter out NULL valued attributes
+    if (attrHeader->isNULL()) {
+      skipNull = true;
+      break;
+    }
     attrHeader->setAttributeId(attrId);
     keyLength += attrHeader->getDataSize();
     hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
     moreKeyAttrs = afterValues.next(iter, hops);
   }
 
-  // Filter out single NULL attributes
-  if (attrId == 1) {
+  if (skipNull) {
     jam();
-    afterValues.first(iter);
-    AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-    if (attrHeader->isNULL() && !afterValues.next(iter)) {
-      opRecord->triggerExecutionCount--;
-      if (opRecord->triggerExecutionCount == 0) {
-        /*
-        We have completed current trigger execution
-        Continue triggering operation
-        */
-        jam();
-        continueTriggeringOp(signal, opRecord);
-      }//if
-      return;
-    }//if
+    opRecord->triggerExecutionCount--;
+    if (opRecord->triggerExecutionCount == 0) {
+      /*
+      We have completed current trigger execution
+      Continue triggering operation
+      */
+      jam();
+      continueTriggeringOp(signal, opRecord);
+    }//if
+    return;
   }//if
 
   // Calculate total length of primary key to be stored in index table
@@ -12523,36 +12532,36 @@ void Dbtc::deleteFromIndexTable(Signal* signal,
   // Calculate key length and renumber attribute id:s
   AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
   LocalDataBuffer<11> beforeValues(pool, firedTriggerData->beforeValues);
+  bool skipNull = false;
   for(bool moreKeyAttrs = beforeValues.first(iter);
       (moreKeyAttrs);
       attrId++) {
     jam();
     AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
 
+    // Filter out NULL valued attributes
+    if (attrHeader->isNULL()) {
+      skipNull = true;
+      break;
+    }
     attrHeader->setAttributeId(attrId);
     keyLength += attrHeader->getDataSize();
     hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
     moreKeyAttrs = beforeValues.next(iter, hops);
   }
 
-  // Filter out single NULL attributes
-  if (attrId == 1) {
+  if (skipNull) {
     jam();
-    beforeValues.first(iter);
-    AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-    if (attrHeader->isNULL() && !beforeValues.next(iter)) {
-      jam();
-      opRecord->triggerExecutionCount--;
-      if (opRecord->triggerExecutionCount == 0) {
-        /*
-        We have completed current trigger execution
-        Continue triggering operation
-        */
-        jam();
-        continueTriggeringOp(signal, opRecord);
-      }//if
-      return;
-    }//if
+    opRecord->triggerExecutionCount--;
+    if (opRecord->triggerExecutionCount == 0) {
+      /*
+      We have completed current trigger execution
+      Continue triggering operation
+      */
+      jam();
+      continueTriggeringOp(signal, opRecord);
+    }//if
+    return;
   }//if
 
   TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
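Both maintenance paths now treat a NULL anywhere in the key identically: the row simply gets no entry in the hidden unique-index table, and the trigger bookkeeping is unwound as if the index operation had completed. The old code special-cased only a single NULL in the first of two attributes; the Trix change below applies the same any-NULL rule when building an index from existing rows. A standalone sketch of the filter, with simplified types:

    #include <optional>
    #include <vector>

    // Simplified stand-in for the attribute headers the real loops walk.
    using KeyValue = std::optional<int>;

    // A row gets a unique-index entry only if no key attribute is NULL:
    // NULL never compares equal to NULL, so such rows cannot collide.
    bool needsIndexEntry(const std::vector<KeyValue>& keyAttrs) {
      for (const KeyValue& v : keyAttrs)
        if (!v.has_value())
          return false;                 // 'skipNull' in the kernel code
      return true;
    }

    int main() {
      std::vector<KeyValue> full = {1, 2}, withNull = {1, std::nullopt};
      return (needsIndexEntry(full) && !needsIndexEntry(withNull)) ? 0 : 1;
    }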
@@ -814,8 +814,8 @@ void Trix::executeInsertTransaction(Signal* signal,
   for(Uint32 i = 0; i < headerPtr.sz; i++) {
     AttributeHeader* keyAttrHead = (AttributeHeader *) headerBuffer + i;
 
-    // Filter out single NULL attributes
-    if (keyAttrHead->isNULL() && (i == (Uint32)0) && (headerPtr.sz == (Uint32)2))
+    // Filter out NULL attributes
+    if (keyAttrHead->isNULL())
       return;
 
     if (i < subRec->noOfIndexColumns)
@@ -202,7 +202,7 @@ NdbShutdown(NdbShutdownType type,
 
   if(type != NST_Normal && type != NST_Restart){
     ndbout << "Error handler shutdown completed - " << exitAbort << endl;
-#if defined VM_TRACE && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
+#if ( defined VM_TRACE || defined ERROR_INSERT ) && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
     signal(6, SIG_DFL);
     abort();
 #else
@@ -104,6 +104,11 @@ SimulatedBlock::SimulatedBlock(BlockNumber blockNumber,
   UpgradeStartup::installEXEC(this);
 
   CLEAR_ERROR_INSERT_VALUE;
+
+#ifdef VM_TRACE
+  m_global_variables = new Ptr<void> * [1];
+  m_global_variables[0] = 0;
+#endif
 }
 
 SimulatedBlock::~SimulatedBlock()

@@ -112,6 +117,10 @@ SimulatedBlock::~SimulatedBlock()
 #ifdef VM_TRACE_TIME
   printTimes(stdout);
 #endif
+
+#ifdef VM_TRACE
+  delete [] m_global_variables;
+#endif
 }
 
 void
@@ -1771,3 +1780,25 @@ SimulatedBlock::execUPGRADE(Signal* signal){
     break;
   }
 }
+
+#ifdef VM_TRACE
+void
+SimulatedBlock::clear_global_variables(){
+  Ptr<void> ** tmp = m_global_variables;
+  while(* tmp != 0){
+    (* tmp)->i = RNIL;
+    (* tmp)->p = 0;
+    tmp++;
+  }
+}
+
+void
+SimulatedBlock::init_globals_list(void ** tmp, size_t cnt){
+  m_global_variables = new Ptr<void> * [cnt+1];
+  for(size_t i = 0; i<cnt; i++){
+    m_global_variables[i] = (Ptr<void>*)tmp[i];
+  }
+  m_global_variables[cnt] = 0;
+}
+
+#endif
@@ -96,7 +96,7 @@ protected:
  * Handling of execFunctions
  */
 typedef void (SimulatedBlock::* ExecFunction)(Signal* signal);
-void addRecSignalImpl(GlobalSignalNumber g, ExecFunction fun, bool f = false);
+void addRecSignalImpl(GlobalSignalNumber g, ExecFunction fun, bool f =false);
 void installSimulatedBlockFunctions();
 ExecFunction theExecArray[MAX_GSN+1];
 public:
@@ -447,6 +447,12 @@ public:
   } m_timeTrace[MAX_GSN+1];
   Uint32 m_currentGsn;
 #endif
+
+#ifdef VM_TRACE
+  Ptr<void> **m_global_variables;
+  void clear_global_variables();
+  void init_globals_list(void ** tmp, size_t cnt);
+#endif
 };
 
 inline
@@ -454,6 +460,9 @@ void
 SimulatedBlock::executeFunction(GlobalSignalNumber gsn, Signal* signal){
   ExecFunction f = theExecArray[gsn];
   if(gsn <= MAX_GSN && f != 0){
+#ifdef VM_TRACE
+    clear_global_variables();
+#endif
     (this->*f)(signal);
     return;
   }
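Taken together, the SimulatedBlock pieces give debug builds a cheap staleness detector: each block registers its Ptr-style globals once in its constructor (as Dblqh and Dbtc now do), and executeFunction() poisons them before dispatching each signal, so a handler that trusts a global it never re-initialized fails immediately instead of corrupting state. A self-contained miniature, with hypothetical names:

    #include <cstddef>

    // Hypothetical miniature of the VM_TRACE global-variable guard.
    static const unsigned RNIL_LIKE = 0xffffff00;

    struct Rec { int v; };
    template <class T> struct BlockPtr { unsigned i; T* p; };

    struct Block {
      BlockPtr<Rec>** m_globals;

      void init_globals_list(void** tmp, std::size_t cnt) {
        m_globals = new BlockPtr<Rec>*[cnt + 1];
        for (std::size_t i = 0; i < cnt; i++)
          m_globals[i] = static_cast<BlockPtr<Rec>*>(tmp[i]);
        m_globals[cnt] = 0;                 // null-terminated list
      }
      void clear_globals() {                // run before dispatching a signal
        for (BlockPtr<Rec>** t = m_globals; *t; t++) {
          (*t)->i = RNIL_LIKE;              // poison: stale use faults fast
          (*t)->p = 0;
        }
      }
    };

    int main() {
      Block b;
      BlockPtr<Rec> scanptr = {7, 0};
      void* tmp[] = { &scanptr };
      b.init_globals_list(tmp, 1);
      b.clear_globals();
      return scanptr.i == RNIL_LIKE ? 0 : 1;
    }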
@@ -679,6 +688,5 @@ BLOCK::addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal f, bool force){ \
   addRecSignalImpl(gsn, (ExecFunction)f, force);\
 }
 
-
 #endif
 
@@ -1851,13 +1851,6 @@ NdbDictInterface::createIndex(Ndb & ndb,
       m_error.code = 4245;
       return -1;
     }
-
-    if (it == DictTabInfo::UniqueHashIndex &&
-        (col->m_nullable) && (attributeList.sz > 1)) {
-      // We only support one NULL attribute
-      m_error.code = 4246;
-      return -1;
-    }
     attributeList.id[i] = col->m_attrId;
   }
   if (it == DictTabInfo::UniqueHashIndex) {
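This client-side check was the twin of the Dbdict block removed earlier in the commit: together they rejected unique hash indexes over nullable columns at DDL time. With both gone (and error text 4246 retired below), the decision moves to runtime NULL filtering. A sketch of the policy shift, using hypothetical column metadata:

    #include <vector>

    struct Column { bool nullable; };

    // Old DDL-time policy (the block removed here): reject a multi-column
    // unique hash index that contains any nullable column.
    static bool acceptedBefore(const std::vector<Column>& cols) {
      if (cols.size() > 1)
        for (const Column& c : cols)
          if (c.nullable) return false;
      return true;
    }

    // New policy: accept the definition; rows whose key contains NULL just
    // get no index entry at runtime (the skipNull filtering shown above).
    static bool acceptedNow(const std::vector<Column>&) { return true; }

    int main() {
      std::vector<Column> idx = { {true}, {false} };
      return (!acceptedBefore(idx) && acceptedNow(idx)) ? 0 : 1;
    }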
@@ -91,6 +91,9 @@ ErrorBundle ErrorCodes[] = {
   { 4029, NR, "Node failure caused abort of transaction" },
   { 4031, NR, "Node failure caused abort of transaction" },
   { 4033, NR, "Send to NDB failed" },
+  { 4115, NR,
+    "Transaction was committed but all read information was not "
+    "received due to node crash" },
 
   /**
    * Node shutdown
 
@@ -114,9 +117,6 @@ ErrorBundle ErrorCodes[] = {
     "Time-out, most likely caused by simple read or cluster failure" },
   { 4024, UR,
     "Time-out, most likely caused by simple read or cluster failure" },
-  { 4115, UR,
-    "Transaction was committed but all read information was not "
-    "received due to node crash" },
 
   /**
    * TemporaryResourceError
@@ -404,7 +404,6 @@ ErrorBundle ErrorCodes[] = {
   { 4243, AE, "Index not found" },
   { 4244, AE, "Index or table with given name already exists" },
   { 4245, AE, "Index attribute must be defined as stored, i.e. the StorageAttributeType must be defined as NormalStorageAttribute"},
-  { 4246, AE, "Combined index attributes are not allowed to be NULL attributes" },
   { 4247, AE, "Illegal index/trigger create/drop/alter request" },
   { 4248, AE, "Trigger/index name invalid" },
   { 4249, AE, "Invalid table" },
@@ -1040,11 +1040,11 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
                          bounds[bound],
                          field->field_name));
     DBUG_DUMP("key", (char*)key_ptr, field_len);
 
 
     if (op->setBound(field->field_name,
                      bound,
-                     key_ptr,
-                     field_len) != 0)
+                     field->is_null() ? 0 : key_ptr,
+                     field->is_null() ? 0 : field_len) != 0)
       ERR_RETURN(op->getNdbError());
 
     key_ptr+= field_len;
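The two ternaries change the calling convention: a null value pointer with zero length now tells the NDB API that the bound itself is SQL NULL, letting conditions like b IS NULL be evaluated inside the ordered-index scan. A hypothetical stand-in showing the convention (not the real NdbIndexScanOperation):

    #include <cstdio>

    // Hypothetical stand-in for setBound(): value == 0 && len == 0 encodes
    // "the bound value is SQL NULL" rather than "no bound".
    static int setBound(const char* attr, const void* value, unsigned len) {
      if (value == 0 && len == 0)
        std::printf("%s: bound on NULL\n", attr);
      else
        std::printf("%s: bound on %u byte(s)\n", attr, len);
      return 0;
    }

    int main() {
      char key[4] = {0};
      bool is_null = true;            // field->is_null() in the handler
      return setBound("b", is_null ? 0 : key, is_null ? 0u : 4u);
    }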
@@ -1293,8 +1293,6 @@ int ha_ndbcluster::write_row(byte *record)
     update_timestamp(record+table->timestamp_default_now-1);
   has_auto_increment= (table->next_number_field && record == table->record[0]);
   skip_auto_increment= table->auto_increment_field_not_null;
-  if ((has_auto_increment) && (!skip_auto_increment))
-    update_auto_increment();
 
   if (!(op= trans->getNdbOperation(m_tabname)))
     ERR_RETURN(trans->getNdbError());
@@ -1313,6 +1311,10 @@ int ha_ndbcluster::write_row(byte *record)
   else
   {
     int res;
 
+    if ((has_auto_increment) && (!skip_auto_increment))
+      update_auto_increment();
+
     if ((res= set_primary_key(op)))
       return res;
   }
@@ -1323,7 +1325,10 @@ int ha_ndbcluster::write_row(byte *record)
     Field *field= table->field[i];
     if (!(field->flags & PRI_KEY_FLAG) &&
         set_ndb_value(op, field, i))
+    {
+      skip_auto_increment= true;
       ERR_RETURN(op->getNdbError());
+    }
   }
 
   /*
@@ -1345,7 +1350,10 @@ int ha_ndbcluster::write_row(byte *record)
                         (int)rows_inserted, (int)bulk_insert_rows));
     bulk_insert_not_flushed= false;
     if (trans->execute(NoCommit) != 0)
+    {
+      skip_auto_increment= true;
       DBUG_RETURN(ndb_err(trans));
+    }
   }
   if ((has_auto_increment) && (skip_auto_increment))
   {
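Two things move in these write_row() hunks: update_auto_increment() is deferred into the branch that actually needs a generated key, and the error exits now set skip_auto_increment so the auto-increment bookkeeping at the end of the function is not applied to a failed insert. A toy model of the deferred call (names hypothetical):

    #include <cstdio>

    // Toy model: the auto-increment value is fetched only on the code path
    // that actually builds the key from it, not up front for every insert.
    static unsigned long g_next_value = 1;
    static unsigned long update_auto_increment() { return g_next_value++; }

    void write_row(bool user_supplied_key) {
      if (user_supplied_key) {
        // primary-key path: no auto-increment value is consumed
        std::printf("using caller's key\n");
      } else {
        unsigned long v = update_auto_increment();  // moved into this branch
        std::printf("generated key %lu\n", v);
      }
    }

    int main() {
      write_row(true);   // consumes nothing
      write_row(false);  // generates 1
      std::printf("next: %lu\n", g_next_value);
      return 0;
    }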
@@ -3068,6 +3076,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_ndb(NULL),
   m_table(NULL),
   m_table_flags(HA_REC_NOT_IN_SEQ |
+                //HA_NULL_IN_KEY |
                 HA_NOT_EXACT_COUNT |
                 HA_NO_PREFIX_CHAR_KEYS),
   m_use_write(false),