Merge joreland@bk-internal.mysql.com:/home/bk/mysql-5.0
into  eel.(none):/home/jonas/src/mysql-5.0-push
unknown 2005-08-19 07:52:42 +02:00
commit d2797ea959
32 changed files with 344 additions and 127 deletions

View file

@ -179,8 +179,24 @@ a b c
2 two two
alter table t1 drop index c;
select * from t1 where b = 'two';
ERROR HY000: Table definition has changed, please retry transaction
a b c
2 two two
select * from t1 where b = 'two';
a b c
2 two two
drop table t1;
create table t3 (a int primary key) engine=ndbcluster;
begin;
insert into t3 values (1);
alter table t3 rename t4;
delete from t3;
insert into t3 values (1);
commit;
select * from t3;
ERROR HY000: Can't lock file (errno: 155)
select * from t4;
a
1
drop table t4;
show tables;
Tables_in_test

View file

@ -151,7 +151,6 @@ select * from t1 where b = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
--error 1412
select * from t1 where b = 'two';
select * from t1 where b = 'two';
connection server1;
@ -180,4 +179,29 @@ drop table t1;
#select count(*) from t2;
#drop table t2;
connection server1;
create table t3 (a int primary key) engine=ndbcluster;
connection server2;
begin;
insert into t3 values (1);
connection server1;
alter table t3 rename t4;
connection server2;
# This should work as transaction is ongoing...
delete from t3;
insert into t3 values (1);
commit;
# This should fail as it's a new transaction
--error 1015
select * from t3;
select * from t4;
drop table t4;
show tables;
connection server1;
# End of 4.1 tests

View file

@ -1,4 +1,5 @@
-- source include/have_ndb.inc
-- source include/ndb_default_cluster.inc
-- source include/not_embedded.inc
--exec $NDB_TOOLS_DIR/ndb_config --no-defaults --query=type,nodeid,host 2> /dev/null

View file

@ -44,8 +44,7 @@
#define TRIX 0xFF
#define DBUTIL 0x100
#define SUMA 0x101
#define GREP 0x102
#define DBTUX 0x103
#define DBTUX 0x102
const BlockReference BACKUP_REF = numberToRef(BACKUP, 0);
const BlockReference DBTC_REF = numberToRef(DBTC, 0);
@ -61,7 +60,6 @@ const BlockReference CMVMI_REF = numberToRef(CMVMI, 0);
const BlockReference TRIX_REF = numberToRef(TRIX, 0);
const BlockReference DBUTIL_REF = numberToRef(DBUTIL, 0);
const BlockReference SUMA_REF = numberToRef(SUMA, 0);
const BlockReference GREP_REF = numberToRef(GREP, 0);
const BlockReference DBTUX_REF = numberToRef(DBTUX, 0);
const BlockNumber MIN_BLOCK_NO = BACKUP;

View file

@ -777,8 +777,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
/**
* Grep signals
*/
#define GSN_GREP_SUB_CREATE_REQ 606
#define GSN_GREP_SUB_CREATE_REF 607
#define GSN_ALTER_TABLE_REP 606
#define GSN_API_BROADCAST_REP 607
#define GSN_GREP_SUB_CREATE_CONF 608
#define GSN_GREP_CREATE_REQ 609
#define GSN_GREP_CREATE_REF 610

View file

@ -36,6 +36,13 @@ enum Operation_t {
#endif
};
inline
Uint32
table_version_major(Uint32 ver)
{
return ver & 0x00FFFFFF;
}
#endif
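
A standalone sketch (illustrative only, not part of the patch) of the version split that table_version_major() relies on, assuming the low 24 bits are bumped on CREATE and the high 8 bits on ALTER, as the Dbdict helpers further down in this commit suggest; table_version_minor() is assumed here as the counterpart and is not shown in this hunk.

// Illustrative only: mirrors the helpers added by this patch to show the
// 24/8 bit split.  Code that compares only table_version_major() tolerates
// an ALTER TABLE but rejects a dropped-and-recreated table.
#include <cstdint>
#include <cstdio>

typedef std::uint32_t Uint32;

static Uint32 table_version_major(Uint32 ver) { return ver & 0x00FFFFFF; }
// Assumed counterpart (not part of this hunk):
static Uint32 table_version_minor(Uint32 ver) { return (ver >> 24) & 0xFF; }

static Uint32 create_table_inc_schema_version(Uint32 old)
{ return (old + 0x00000001) & 0x00FFFFFF; }

static Uint32 alter_table_inc_schema_version(Uint32 old)
{ return (old & 0x00FFFFFF) + ((old + 0x1000000) & 0xFF000000); }

int main()
{
  Uint32 created = create_table_inc_schema_version(0);      // major=1, minor=0
  Uint32 altered = alter_table_inc_schema_version(created); // major=1, minor=1
  std::printf("created: major=%u minor=%u\n",
              table_version_major(created), table_version_minor(created));
  std::printf("altered: major=%u minor=%u\n",
              table_version_major(altered), table_version_minor(altered));
  return 0;
}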

View file

@ -129,7 +129,8 @@ public:
InvalidPrimaryKeySize = 739,
NullablePrimaryKey = 740,
UnsupportedChange = 741,
BackupInProgress = 762
BackupInProgress = 762,
IncompatibleVersions = 763
};
private:
@ -177,4 +178,26 @@ private:
Uint32 tableVersion;
};
/**
* Inform API about change of table definition
*/
struct AlterTableRep
{
friend bool printALTER_TABLE_REP(FILE*, const Uint32*, Uint32, Uint16);
STATIC_CONST( SignalLength = 3 );
enum Change_type
{
CT_ALTERED = 0x1,
CT_DROPPED = 0x2
};
Uint32 tableId;
Uint32 tableVersion;
Uint32 changeType;
SECTION( TABLE_NAME = 0 );
};
#endif

View file

@ -0,0 +1,31 @@
/* Copyright (C) 2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef API_BROADCAST_HPP
#define API_BROADCAST_HPP
#include "SignalData.hpp"
struct ApiBroadcastRep
{
STATIC_CONST( SignalLength = 2 );
Uint32 gsn;
Uint32 minVersion;
Uint32 theData[1];
};
#endif
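
A hedged standalone sketch of how this wrapper is used, pieced together from the Dbdict and Qmgr hunks later in this commit: the two-word header travels in front of an embedded signal, and QMGR strips it before forwarding the payload to API nodes whose version is at least minVersion. The buffer, GSN, and version values below are stand-ins, not kernel code.

// Illustrative only: models the ApiBroadcastRep framing with a plain array
// standing in for signal->theData; the constants are made up.
#include <cstdint>
#include <cstring>
#include <cstdio>

typedef std::uint32_t Uint32;

struct ApiBroadcastRep {
  static const Uint32 SignalLength = 2;
  Uint32 gsn;         // signal number to deliver to the API nodes
  Uint32 minVersion;  // only nodes at or above this version receive it
  Uint32 theData[1];  // embedded signal payload starts here
};

int main()
{
  Uint32 sigData[25] = {0};                     // stand-in for signal->theData
  const Uint32 embeddedLen = 3;                 // e.g. an AlterTableRep payload
  const Uint32 sigLen = ApiBroadcastRep::SignalLength + embeddedLen;

  // Sender side (cf. Dbdict::alterTab_writeTableConf below):
  ApiBroadcastRep* api = reinterpret_cast<ApiBroadcastRep*>(sigData);
  api->gsn = 606;                               // stand-in for GSN_ALTER_TABLE_REP
  api->minVersion = 0x0004010F;                 // stand-in for MAKE_VERSION(4,1,15)
  api->theData[0] = 7;                          // first word of the embedded signal

  // Receiver side (cf. Qmgr::execAPI_BROADCAST_REP below): strip the header
  // and forward the remaining words under api->gsn to the selected API nodes.
  const Uint32 fwdLen = sigLen - ApiBroadcastRep::SignalLength;
  std::memmove(sigData, sigData + ApiBroadcastRep::SignalLength, 4 * fwdLen);
  std::printf("forward len=%u word0=%u\n", fwdLen, sigData[0]);
  return 0;
}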

View file

@ -78,9 +78,10 @@ public:
///< changes to take effect
Retrieved, ///< The object exist and has been read
///< into main memory from NDB Kernel
Invalid ///< The object has been invalidated
Invalid, ///< The object has been invalidated
///< and should not be used
Altered ///< Table has been altered in NDB kernel
///< but is still valid for usage
};
/**

View file

@ -32,7 +32,6 @@ const BlockName BlockNames[] = {
{ "BACKUP", BACKUP },
{ "DBUTIL", DBUTIL },
{ "SUMA", SUMA },
{ "GREP", GREP },
{ "DBTUX", DBTUX }
};

View file

@ -92,9 +92,10 @@ void ndbSetOwnVersion() {}
#ifndef TEST_VERSION
struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
{ MAKE_VERSION(5,0,NDB_VERSION_BUILD), MAKE_VERSION(5,0,3), UG_Range},
{ MAKE_VERSION(5,0,3), MAKE_VERSION(5,0,2), UG_Exact },
{ MAKE_VERSION(4,1,12), MAKE_VERSION(4,1,10), UG_Range },
{ MAKE_VERSION(5,0,NDB_VERSION_BUILD), MAKE_VERSION(5,0,12), UG_Range},
{ MAKE_VERSION(5,0,11), MAKE_VERSION(5,0,2), UG_Range},
{ MAKE_VERSION(4,1,NDB_VERSION_BUILD), MAKE_VERSION(4,1,15), UG_Range },
{ MAKE_VERSION(4,1,14), MAKE_VERSION(4,1,10), UG_Range },
{ MAKE_VERSION(4,1,10), MAKE_VERSION(4,1,9), UG_Exact },
{ MAKE_VERSION(4,1,9), MAKE_VERSION(4,1,8), UG_Exact },
{ MAKE_VERSION(3,5,2), MAKE_VERSION(3,5,1), UG_Exact },
@ -102,7 +103,9 @@ struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
};
struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = {
{ MAKE_VERSION(5,0,12), MAKE_VERSION(5,0,11), UG_Exact },
{ MAKE_VERSION(5,0,2), MAKE_VERSION(4,1,8), UG_Exact },
{ MAKE_VERSION(4,1,15), MAKE_VERSION(4,1,14), UG_Exact },
{ MAKE_VERSION(3,5,4), MAKE_VERSION(3,5,3), UG_Exact },
{ 0, 0, UG_Null }
};

View file

@ -30,7 +30,6 @@
#include <Backup.hpp>
#include <DbUtil.hpp>
#include <Suma.hpp>
#include <Grep.hpp>
#include <Dbtux.hpp>
#include <NdbEnv.h>
@ -97,12 +96,13 @@ SimBlockList::load(const Configuration & conf){
theList[11] = NEW_BLOCK(Backup)(conf);
theList[12] = NEW_BLOCK(DbUtil)(conf);
theList[13] = NEW_BLOCK(Suma)(conf);
theList[14] = NEW_BLOCK(Grep)(conf);
theList[14] = 0; //NEW_BLOCK(Grep)(conf);
theList[15] = NEW_BLOCK(Dbtux)(conf);
// Metadata common part shared by block instances
ptrMetaDataCommon = new MetaData::Common(*dbdict, *dbdih);
for (int i = 0; i < noOfBlocks; i++)
if(theList[i])
theList[i]->setMetaDataCommon(ptrMetaDataCommon);
}

View file

@ -13,7 +13,6 @@ SUBDIRS = \
backup \
dbutil \
suma \
grep \
dbtux
windoze-dsp:

View file

@ -988,7 +988,6 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
sendSignal(BACKUP_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
sendSignal(DBUTIL_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
sendSignal(SUMA_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
sendSignal(GREP_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
sendSignal(TRIX_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
sendSignal(DBTUX_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);

View file

@ -77,6 +77,7 @@
#include <signaldata/CreateFragmentation.hpp>
#include <signaldata/CreateTab.hpp>
#include <NdbSleep.h>
#include <signaldata/ApiBroadcast.hpp>
#define ZNOT_FOUND 626
#define ZALREADYEXIST 630
@ -91,6 +92,27 @@
#define DIV(x,y) (((x)+(y)-1)/(y))
#include <ndb_version.h>
static
Uint32
alter_table_inc_schema_version(Uint32 old)
{
return (old & 0x00FFFFFF) + ((old + 0x1000000) & 0xFF000000);
}
static
Uint32
alter_table_dec_schema_version(Uint32 old)
{
return (old & 0x00FFFFFF) + ((old - 0x1000000) & 0xFF000000);
}
static
Uint32
create_table_inc_schema_version(Uint32 old)
{
return (old + 0x00000001) & 0x00FFFFFF;
}
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* MODULE: GENERAL MODULE -------------------------------- */
@ -604,7 +626,7 @@ void Dbdict::openTableFile(Signal* signal,
jam();
fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
}//if
ndbrequire(tablePtr.p->tableVersion < ZNIL);
fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes
FsOpenReq::setVersion(fsOpenReq->fileNumber, 1);
FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_TABLELIST);
@ -794,7 +816,7 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
case SchemaFile::ADD_STARTED:
jam();
ok = true;
ndbrequire((oldVersion + 1) == newVersion);
ndbrequire(create_table_inc_schema_version(oldVersion) == newVersion);
ndbrequire(oldState == SchemaFile::INIT ||
oldState == SchemaFile::DROP_TABLE_COMMITTED);
break;
@ -807,7 +829,7 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
case SchemaFile::ALTER_TABLE_COMMITTED:
jam();
ok = true;
ndbrequire((oldVersion + 1) == newVersion);
ndbrequire(alter_table_inc_schema_version(oldVersion) == newVersion);
ndbrequire(oldState == SchemaFile::TABLE_ADD_COMMITTED ||
oldState == SchemaFile::ALTER_TABLE_COMMITTED);
break;
@ -2982,6 +3004,21 @@ Dbdict::execBACKUP_FRAGMENT_REQ(Signal* signal)
}
}
bool
Dbdict::check_ndb_versions() const
{
Uint32 node = 0;
Uint32 version = getNodeInfo(getOwnNodeId()).m_version;
while((node = c_aliveNodes.find(node + 1)) != BitmaskImpl::NotFound)
{
if(getNodeInfo(node).m_version != version)
{
return false;
}
}
return true;
}
void
Dbdict::execALTER_TABLE_REQ(Signal* signal)
{
@ -3019,6 +3056,13 @@ Dbdict::execALTER_TABLE_REQ(Signal* signal)
return;
}
if (!check_ndb_versions())
{
jam();
alterTableRef(signal, req, AlterTableRef::IncompatibleVersions);
return;
}
const TableRecord::TabState tabState = tablePtr.p->tabState;
bool ok = false;
switch(tabState){
@ -3168,7 +3212,7 @@ Dbdict::alterTable_backup_mutex_locked(Signal* signal,
lreq->clientData = alterTabPtr.p->m_senderData;
lreq->changeMask = alterTabPtr.p->m_changeMask;
lreq->tableId = tablePtr.p->tableId;
lreq->tableVersion = tablePtr.p->tableVersion + 1;
lreq->tableVersion = alter_table_inc_schema_version(tablePtr.p->tableVersion);
lreq->gci = tablePtr.p->gciTableCreated;
lreq->requestType = AlterTabReq::AlterTablePrepare;
@ -3248,6 +3292,14 @@ Dbdict::execALTER_TAB_REQ(Signal * signal)
alterTabRef(signal, req, AlterTableRef::Busy);
return;
}
if (!check_ndb_versions())
{
jam();
alterTabRef(signal, req, AlterTableRef::IncompatibleVersions);
return;
}
alterTabPtr.p->m_alterTableId = tableId;
alterTabPtr.p->m_coordinatorRef = senderRef;
@ -3290,7 +3342,7 @@ Dbdict::execALTER_TAB_REQ(Signal * signal)
}
ndbrequire(ok);
if(tablePtr.p->tableVersion + 1 != tableVersion){
if(alter_table_inc_schema_version(tablePtr.p->tableVersion) != tableVersion){
jam();
alterTabRef(signal, req, AlterTableRef::InvalidTableVersion);
return;
@ -3775,7 +3827,7 @@ void Dbdict::revertAlterTable(Signal * signal,
// Restore name
strcpy(tablePtr.p->tableName, alterTabPtrP->previousTableName);
// Revert schema version
tablePtr.p->tableVersion = tablePtr.p->tableVersion - 1;
tablePtr.p->tableVersion = alter_table_dec_schema_version(tablePtr.p->tableVersion);
// Put it back
#ifdef VM_TRACE
ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
@ -3835,6 +3887,27 @@ Dbdict::alterTab_writeTableConf(Signal* signal,
conf->requestType = AlterTabReq::AlterTableCommit;
sendSignal(coordinatorRef, GSN_ALTER_TAB_CONF, signal,
AlterTabConf::SignalLength, JBB);
{
ApiBroadcastRep* api= (ApiBroadcastRep*)signal->getDataPtrSend();
api->gsn = GSN_ALTER_TABLE_REP;
api->minVersion = MAKE_VERSION(4,1,15);
AlterTableRep* rep = (AlterTableRep*)api->theData;
rep->tableId = tabPtr.p->tableId;
rep->tableVersion = alter_table_dec_schema_version(tabPtr.p->tableVersion);
rep->changeType = AlterTableRep::CT_ALTERED;
LinearSectionPtr ptr[3];
ptr[0].p = (Uint32*)alterTabPtr.p->previousTableName;
ptr[0].sz = (sizeof(alterTabPtr.p->previousTableName) + 3) >> 2;
sendSignal(QMGR_REF, GSN_API_BROADCAST_REP, signal,
ApiBroadcastRep::SignalLength + AlterTableRep::SignalLength,
JBB, ptr,1);
}
if(coordinatorRef != reference()) {
jam();
// Release resources
@ -3886,7 +3959,7 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
SchemaFile::TableEntry * tabEntry = getTableEntry(xsf, tabPtr.i);
tabPtr.p->tableVersion = tabEntry->m_tableVersion + 1;
tabPtr.p->tableVersion = create_table_inc_schema_version(tabEntry->m_tableVersion);
/**
* Pack
@ -3915,7 +3988,7 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
req->gci = 0;
req->tableId = tabPtr.i;
req->tableVersion = tabEntry->m_tableVersion + 1;
req->tableVersion = create_table_inc_schema_version(tabEntry->m_tableVersion);
sendFragmentedSignal(rg, GSN_CREATE_TAB_REQ, signal,
CreateTabReq::SignalLength, JBB);

View file

@ -587,6 +587,7 @@ private:
void execALTER_TAB_REQ(Signal* signal);
void execALTER_TAB_REF(Signal* signal);
void execALTER_TAB_CONF(Signal* signal);
bool check_ndb_versions() const;
/*
* 2.4 COMMON STORED VARIABLES

View file

@ -85,7 +85,8 @@ print_old(const char * filename, const SchemaFile * sf)
te.m_tableState != SchemaFile::DROP_TABLE_COMMITTED)) {
ndbout << "Table " << i << ":"
<< " State = " << te.m_tableState
<< " version = " << te.m_tableVersion
<< " version = " << table_version_major(te.m_tableVersion) <<
<< "(" << table_version_minor(te.m_tableVersion) << ")"
<< " type = " << te.m_tableType
<< " noOfPages = " << te.m_noOfPages
<< " gcp: " << te.m_gcp << endl;

View file

@ -1886,8 +1886,6 @@ void Dbdih::execINCL_NODECONF(Signal* signal)
// Suma will not send response to this for now, later...
sendSignal(SUMA_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
// Grep will not send response to this for now, later...
sendSignal(GREP_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
return;
}//if
if (TstartNode_or_blockref == numberToRef(BACKUP, getOwnNodeId())){

View file

@ -3522,7 +3522,8 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
LQHKEY_abort(signal, 4);
return;
}
if(tabptr.p->schemaVersion != schemaVersion){
if(table_version_major(tabptr.p->schemaVersion) !=
table_version_major(schemaVersion)){
LQHKEY_abort(signal, 5);
return;
}
@ -4461,7 +4462,7 @@ void Dblqh::packLqhkeyreqLab(Signal* signal)
lqhKeyReq->requestInfo = Treqinfo;
lqhKeyReq->tcBlockref = sig4;
sig0 = regTcPtr->tableref + (regTcPtr->schemaVersion << 16);
sig0 = regTcPtr->tableref + ((regTcPtr->schemaVersion << 16) & 0xFFFF0000);
sig1 = regTcPtr->fragmentid + (regTcPtr->nodeAfterNext[0] << 16);
sig2 = regTcPtr->transid[0];
sig3 = regTcPtr->transid[1];
@ -15922,7 +15923,7 @@ Uint32 Dblqh::checkIfExecLog(Signal* signal)
tabptr.i = tcConnectptr.p->tableref;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
if (getFragmentrec(signal, tcConnectptr.p->fragmentid) &&
(tabptr.p->schemaVersion == tcConnectptr.p->schemaVersion)) {
(table_version_major(tabptr.p->schemaVersion) == table_version_major(tcConnectptr.p->schemaVersion))) {
if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
if (fragptr.p->execSrNoReplicas > logPartPtr.p->execSrExecuteIndex) {
ndbrequire((fragptr.p->execSrNoReplicas - 1) < 4);

View file

@ -968,7 +968,8 @@ public:
} keyAttr[MAX_ATTRIBUTES_IN_INDEX];
bool checkTable(Uint32 schemaVersion) const {
return enabled && !dropping && (schemaVersion == currentSchemaVersion);
return enabled && !dropping &&
(table_version_major(schemaVersion) == table_version_major(currentSchemaVersion));
}
Uint32 getErrorCode(Uint32 schemaVersion) const;

View file

@ -3347,7 +3347,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
lqhKeyReq->tcBlockref = sig4;
lqhKeyReq->savePointId = sig5;
sig0 = regCachePtr->tableref + (regCachePtr->schemaVersion << 16);
sig0 = regCachePtr->tableref + ((regCachePtr->schemaVersion << 16) & 0xFFFF0000);
sig1 = regCachePtr->fragmentid + (regTcPtr->tcNodedata[1] << 16);
sig2 = regApiPtr->transid[0];
sig3 = regApiPtr->transid[1];
@ -13093,7 +13093,7 @@ Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const {
return ZNO_SUCH_TABLE;
if(dropping)
return ZDROP_TABLE_IN_PROGRESS;
if(schemaVersion != currentSchemaVersion)
if(table_version_major(schemaVersion) != table_version_major(currentSchemaVersion))
return ZWRONG_SCHEMA_VERSION_ERROR;
ErrorReporter::handleAssert("Dbtc::TableRecord::getErrorCode",
__FILE__, __LINE__);

View file

@ -76,7 +76,6 @@ static BlockInfo ALL_BLOCKS[] = {
{ BACKUP_REF, 1 , 10000, 10999 },
{ DBUTIL_REF, 1 , 11000, 11999 },
{ SUMA_REF, 1 , 13000, 13999 },
{ GREP_REF, 1 , 0, 0 },
{ DBTUX_REF, 1 , 12000, 12999 }
};
@ -1453,9 +1452,6 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal)
sendSignal(SUMA_REF, GSN_NODE_FAILREP, signal,
NodeFailRep::SignalLength, JBB);
sendSignal(GREP_REF, GSN_NODE_FAILREP, signal,
NodeFailRep::SignalLength, JBB);
Uint32 nodeId = 0;
while(!allFailed.isclear()){
nodeId = allFailed.find(nodeId + 1);
@ -2377,7 +2373,6 @@ void Ndbcntr::execREAD_CONFIG_CONF(Signal* signal){
void Ndbcntr::execSTART_ORD(Signal* signal){
jamEntry();
ndbrequire(NO_OF_BLOCKS == ALL_BLOCKS_SZ);
c_missra.execSTART_ORD(signal);
}
@ -2452,7 +2447,7 @@ void Ndbcntr::Missra::sendNextREAD_CONFIG_REQ(Signal* signal){
* Finished...
*/
currentStartPhase = 0;
for(Uint32 i = 0; i<NO_OF_BLOCKS; i++){
for(Uint32 i = 0; i<ALL_BLOCKS_SZ; i++){
if(ALL_BLOCKS[i].NextSP < currentStartPhase)
currentStartPhase = ALL_BLOCKS[i].NextSP;
}

View file

@ -219,6 +219,7 @@ private:
void execAPI_VERSION_REQ(Signal* signal);
void execAPI_BROADCAST_REP(Signal* signal);
// Arbitration signals
void execARBIT_CFG(Signal* signal);

View file

@ -83,6 +83,7 @@ Qmgr::Qmgr(const class Configuration & conf)
addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF);
addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ);
addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ);
addRecSignal(GSN_API_BROADCAST_REP, &Qmgr::execAPI_BROADCAST_REP);
// Arbitration signals
addRecSignal(GSN_ARBIT_PREPREQ, &Qmgr::execARBIT_PREPREQ);

View file

@ -34,6 +34,7 @@
#include <signaldata/BlockCommitOrd.hpp>
#include <signaldata/FailRep.hpp>
#include <signaldata/DisconnectRep.hpp>
#include <signaldata/ApiBroadcast.hpp>
#include <ndb_version.h>
@ -1703,16 +1704,6 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
/**
* GREP also need the information that an API node
* (actually a REP node) has failed.
*
* GREP does however NOT send a CONF on this signal, i.e.
* the API_FAILREQ signal to GREP is like a REP signal
* (i.e. without any confirmation).
*/
sendSignal(GREP_REF, GSN_API_FAILREQ, signal, 2, JBA);
/**-------------------------------------------------------------------------
* THE OTHER NODE WAS AN API NODE. THE COMMUNICATION LINK IS ALREADY
* BROKEN AND THUS NO ACTION IS NEEDED TO BREAK THE CONNECTION.
@ -3926,3 +3917,30 @@ void Qmgr::execSET_VAR_REQ(Signal* signal)
}// switch
#endif
}//execSET_VAR_REQ()
void
Qmgr::execAPI_BROADCAST_REP(Signal* signal)
{
jamEntry();
ApiBroadcastRep api= *(const ApiBroadcastRep*)signal->getDataPtr();
Uint32 len = signal->getLength() - ApiBroadcastRep::SignalLength;
memmove(signal->theData, signal->theData+ApiBroadcastRep::SignalLength,
4*len);
NodeBitmask mask;
NodeRecPtr nodePtr;
for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++)
{
jam();
ptrAss(nodePtr, nodeRec);
if (nodePtr.p->phase == ZAPI_ACTIVE &&
getNodeInfo(nodePtr.i).m_version >= api.minVersion)
{
mask.set(nodePtr.i);
}
}
NodeReceiverGroup rg(API_CLUSTERMGR, mask);
sendSignal(rg, api.gsn, signal, len, JBB); // forward sections
}

View file

@ -2185,7 +2185,8 @@ SumaParticipant::execSUB_START_REQ(Signal* signal){
case SubCreateReq::DatabaseSnapshot:
case SubCreateReq::SelectiveTableSnapshot:
jam();
subbPtr.p->m_subscriberRef = GREP_REF;
ndbrequire(false);
//subbPtr.p->m_subscriberRef = GREP_REF;
subbPtr.p->m_subscriberData = subPtr.p->m_subscriberData;
break;
case SubCreateReq::SingleTableScan:
@ -3017,16 +3018,6 @@ SumaParticipant::execSUB_GCP_COMPLETE_REP(Signal* signal){
Uint32 gci = rep->gci;
c_lastCompleteGCI = gci;
/**
* always send SUB_GCP_COMPLETE_REP to Grep (so
* Lars can do funky stuff calculating intervals,
* even before the subscription is started
*/
rep->senderRef = reference();
rep->senderData = 0; //ignored in grep
EXECUTE_DIRECT(refToBlock(GREP_REF), GSN_SUB_GCP_COMPLETE_REP, signal,
SubGcpCompleteRep::SignalLength);
/**
* Signal to subscriber(s)
*/
@ -3051,13 +3042,6 @@ SumaParticipant::execSUB_GCP_COMPLETE_REP(Signal* signal){
ndbout_c("GSN_SUB_GCP_COMPLETE_REP to %s:",
getBlockName(refToBlock(ref)));
#else
/**
* Ignore sending to GREP (since we sent earlier)
*/
if (ref == GREP_REF) {
jam();
continue;
}
CRASH_INSERTION(13018);

View file

@ -2581,44 +2581,7 @@ MgmtSrvr::backupCallback(BackupEvent & event)
int
MgmtSrvr::repCommand(Uint32* repReqId, Uint32 request, bool waitCompleted)
{
bool next;
NodeId nodeId = 0;
while((next = getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB)) == true &&
theFacade->get_node_alive(nodeId) == false);
if(!next){
return NO_CONTACT_WITH_DB_NODES;
}
NdbApiSignal* signal = getSignal();
if (signal == NULL) {
return COULD_NOT_ALLOCATE_MEMORY;
}
GrepReq* req = CAST_PTR(GrepReq, signal->getDataPtrSend());
signal->set(TestOrd::TraceAPI, GREP, GSN_GREP_REQ, GrepReq::SignalLength);
req->senderRef = _ownReference;
req->request = request;
int result;
if (waitCompleted)
result = sendRecSignal(nodeId, NO_WAIT, signal, true);
else
result = sendRecSignal(nodeId, NO_WAIT, signal, true);
if (result == -1) {
return SEND_OR_RECEIVE_FAILED;
}
/**
* @todo
* Maybe add that we should receive a confirmation that the
* request was received ok.
* Then we should give the user the correct repReqId.
*/
*repReqId = 4711;
abort();
return 0;
}

View file

@ -21,6 +21,9 @@
#include <NdbCondition.h>
#include <NdbSleep.h>
static NdbTableImpl f_invalid_table;
static NdbTableImpl f_altered_table;
Ndb_local_table_info *
Ndb_local_table_info::create(NdbTableImpl *table_impl, Uint32 sz)
{
@ -203,21 +206,41 @@ GlobalDictCache::put(const char * name, NdbTableImpl * tab)
TableVersion & ver = vers->back();
if(ver.m_status != RETREIVING ||
ver.m_impl != 0 ||
!(ver.m_impl == 0 ||
ver.m_impl == &f_invalid_table || ver.m_impl == &f_altered_table) ||
ver.m_version != 0 ||
ver.m_refCount == 0){
abort();
}
if(tab == 0){
if(tab == 0)
{
DBUG_PRINT("info", ("No table found in db"));
vers->erase(sz - 1);
} else {
}
else if (ver.m_impl == 0) {
ver.m_impl = tab;
ver.m_version = tab->m_version;
ver.m_status = OK;
}
else if (ver.m_impl == &f_invalid_table)
{
ver.m_impl = tab;
ver.m_version = tab->m_version;
ver.m_status = DROPPED;
ver.m_impl->m_status = NdbDictionary::Object::Invalid;
}
else if(ver.m_impl == &f_altered_table)
{
ver.m_impl = tab;
ver.m_version = tab->m_version;
ver.m_status = DROPPED;
ver.m_impl->m_status = NdbDictionary::Object::Altered;
}
else
{
abort();
}
NdbCondition_Broadcast(m_waitForTableCondition);
DBUG_RETURN(tab);
}
@ -325,4 +348,45 @@ GlobalDictCache::release(NdbTableImpl * tab)
abort();
}
void
GlobalDictCache::alter_table_rep(const char * name,
Uint32 tableId,
Uint32 tableVersion,
bool altered)
{
const Uint32 len = strlen(name);
Vector<TableVersion> * vers =
m_tableHash.getData(name, len);
if(vers == 0)
{
return;
}
const Uint32 sz = vers->size();
if(sz == 0)
{
return;
}
for(Uint32 i = 0; i < sz; i++)
{
TableVersion & ver = (* vers)[i];
if(ver.m_version == tableVersion && ver.m_impl &&
ver.m_impl->m_tableId == tableId)
{
ver.m_status = DROPPED;
ver.m_impl->m_status = altered ?
NdbDictionary::Object::Altered : NdbDictionary::Object::Invalid;
return;
}
if(i == sz - 1 && ver.m_status == RETREIVING)
{
ver.m_impl = altered ? &f_altered_table : &f_invalid_table;
return;
}
}
}
template class Vector<GlobalDictCache::TableVersion>;
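
The two static placeholder tables added at the top of this file act as sentinels: when an ALTER_TABLE_REP arrives while a fetch for the same name is still in flight (status RETREIVING), there is no real object to flag yet, so alter_table_rep() parks &f_altered_table or &f_invalid_table in the slot, and put() later turns that into Altered or Invalid status on the freshly retrieved table. A minimal sketch of that pattern, with hypothetical names only:

// Illustrative only; all names are hypothetical, not NdbApi types.
#include <cstdio>

struct Table { bool invalid; bool altered; };

static Table sentinel_invalid;   // cf. f_invalid_table
static Table sentinel_altered;   // cf. f_altered_table

struct Slot {
  Table* impl;                   // 0 while a fetch is still in flight
};

// Change notification arrives before the fetch completes: park a sentinel.
static void mark_pending(Slot& s, bool altered)
{
  s.impl = altered ? &sentinel_altered : &sentinel_invalid;
}

// Fetch completes: translate a parked sentinel into the final status.
static void put(Slot& s, Table* fetched)
{
  if (s.impl == &sentinel_invalid)      fetched->invalid = true;
  else if (s.impl == &sentinel_altered) fetched->altered = true;
  s.impl = fetched;
}

int main()
{
  Slot slot = { 0 };             // a dictionary fetch for "t1" is in flight
  mark_pending(slot, true);      // ALTER_TABLE_REP for "t1" arrives first
  Table t1 = { false, false };
  put(slot, &t1);                // fetch completes afterwards
  std::printf("altered=%d invalid=%d\n", t1.altered, t1.invalid);
  return 0;
}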

View file

@ -68,6 +68,9 @@ public:
NdbTableImpl* put(const char * name, NdbTableImpl *);
void drop(NdbTableImpl *);
void release(NdbTableImpl *);
void alter_table_rep(const char * name,
Uint32 tableId, Uint32 tableVersion, bool altered);
public:
enum Status {
OK = 0,

View file

@ -1534,25 +1534,22 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
const char * originalInternalName = internalName.c_str();
DBUG_ENTER("NdbDictionaryImpl::alterTable");
if(!get_local_table_info(internalName, false)){
m_error.code= 709;
Ndb_local_table_info * local = 0;
if((local= get_local_table_info(originalInternalName, false)) == 0)
{
m_error.code = 709;
DBUG_RETURN(-1);
}
// Alter the table
int ret = m_receiver.alterTable(m_ndb, impl);
if(ret == 0){
// Remove cached information and let it be refreshed at next access
if (m_localHash.get(originalInternalName) != NULL) {
m_localHash.drop(originalInternalName);
m_globalHash->lock();
NdbTableImpl * cachedImpl = m_globalHash->get(originalInternalName);
// If in local cache it must be in global
if (!cachedImpl)
abort();
cachedImpl->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(cachedImpl);
local->m_table_impl->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(local->m_table_impl);
m_globalHash->unlock();
}
m_localHash.drop(originalInternalName);
}
DBUG_RETURN(ret);
}

View file

@ -35,6 +35,7 @@
#include <ndb_version.h>
#include <SignalLoggerManager.hpp>
#include <kernel/ndb_limits.h>
#include <signaldata/AlterTable.hpp>
//#define REPORT_TRANSPORTER
//#define API_TRACE;
@ -309,6 +310,17 @@ execute(void * callbackObj, SignalHeader * const header,
theFacade->theArbitMgr->doStop(theData);
break;
case GSN_ALTER_TABLE_REP:
{
const AlterTableRep* rep = (const AlterTableRep*)theData;
theFacade->m_globalDictCache.lock();
theFacade->m_globalDictCache.
alter_table_rep((const char*)ptr[0].p,
rep->tableId,
rep->tableVersion,
rep->changeType == AlterTableRep::CT_ALTERED);
theFacade->m_globalDictCache.unlock();
}
default:
break;

View file

@ -3298,7 +3298,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_PRINT("info", ("Table schema version: %d",
tab->getObjectVersion()));
// Check if thread has stale local cache
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
// New transaction must not use old tables... (trans != 0)
// Running might...
if ((trans && tab->getObjectStatus() != NdbDictionary::Object::Retrieved)
|| tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
invalidate_dictionary_cache(FALSE);
if (!(tab= dict->getTable(m_tabname, &tab_info)))