Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb
commit 3ab132bdeb
17 changed files with 703 additions and 416 deletions
mysql-test/r/ndb_rename.result (new file, 24 lines added)

@@ -0,0 +1,24 @@
+DROP TABLE IF EXISTS t1,t2;
+drop database if exists mysqltest;
+CREATE TABLE t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+attr1 INT NOT NULL,
+attr2 INT,
+attr3 VARCHAR(10),
+INDEX i1(attr1)
+) ENGINE=ndbcluster;
+INSERT INTO t1 VALUES (0,0,0,"zero"),(1,1,1,"one"),(2,2,2,"two");
+SELECT * FROM t1 WHERE attr1 = 1;
+pk1 attr1 attr2 attr3
+1 1 1 one
+alter table t1 rename t2;
+SELECT * FROM t2 WHERE attr1 = 1;
+pk1 attr1 attr2 attr3
+1 1 1 one
+create database ndbtest;
+alter table t2 rename ndbtest.t2;
+SELECT * FROM ndbtest.t2 WHERE attr1 = 1;
+pk1 attr1 attr2 attr3
+1 1 1 one
+drop table ndbtest.t2;
+drop database ndbtest;
@@ -47,6 +47,10 @@ master-bin.000001 # Table_map 1 # table_id: # (test.t1)
 flush logs;
 create table t3 (a int)ENGINE=NDB;
 start slave;
+
+let $result_pattern= '%127.0.0.1%root%master-bin.000002%slave-relay-bin.000005%Yes%Yes%0%0%None%' ;
+
+--source include/wait_slave_status.inc
 flush logs;
 stop slave;
 create table t2 (n int)ENGINE=NDB;
@@ -28,7 +28,7 @@ rpl_ndb_commit_afterflush : BUG#19328 2006-05-04 tomas Slave timeout with COM_RE
 rpl_ndb_dd_partitions : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on s/AMD
 rpl_ndb_ddl : BUG#18946 result file needs update + test needs to checked
 rpl_ndb_innodb2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement
-rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ
+#rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ
 rpl_ndb_myisam2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement
 rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian
 rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly
mysql-test/t/ndb_rename.test (new file, 36 lines added)

@@ -0,0 +1,36 @@
+-- source include/have_ndb.inc
+-- source include/not_embedded.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+drop database if exists mysqltest;
+--enable_warnings
+
+#
+# Table rename tests
+#
+
+#
+# Create a normal table with primary key
+#
+CREATE TABLE t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+attr1 INT NOT NULL,
+attr2 INT,
+attr3 VARCHAR(10),
+INDEX i1(attr1)
+) ENGINE=ndbcluster;
+
+INSERT INTO t1 VALUES (0,0,0,"zero"),(1,1,1,"one"),(2,2,2,"two");
+SELECT * FROM t1 WHERE attr1 = 1;
+alter table t1 rename t2;
+SELECT * FROM t2 WHERE attr1 = 1;
+
+create database ndbtest;
+alter table t2 rename ndbtest.t2;
+SELECT * FROM ndbtest.t2 WHERE attr1 = 1;
+
+drop table ndbtest.t2;
+drop database ndbtest;
+
+# End of 4.1 tests
@@ -4648,7 +4648,7 @@ int ha_ndbcluster::create(const char *name,
 share->db, share->table_name,
 m_table->getObjectId(),
 m_table->getObjectVersion(),
-SOT_CREATE_TABLE);
+SOT_CREATE_TABLE, 0, 0, 1);
 break;
 }
 }
@@ -4921,13 +4921,17 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
 {
 NDBDICT *dict;
 char old_dbname[FN_HEADLEN];
+char new_dbname[FN_HEADLEN];
 char new_tabname[FN_HEADLEN];
 const NDBTAB *orig_tab;
 int result;
+bool recreate_indexes= FALSE;
+NDBDICT::List index_list;

 DBUG_ENTER("ha_ndbcluster::rename_table");
 DBUG_PRINT("info", ("Renaming %s to %s", from, to));
 set_dbname(from, old_dbname);
+set_dbname(to, new_dbname);
 set_tabname(from);
 set_tabname(to, new_tabname);
@@ -4952,6 +4956,11 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
 DBUG_ASSERT(r == 0);
 }
 #endif
+if (my_strcasecmp(system_charset_info, new_dbname, old_dbname))
+{
+dict->listIndexes(index_list, *orig_tab);
+recreate_indexes= TRUE;
+}
 // Change current database to that of target table
 set_dbname(to);
 ndb->setDatabaseName(m_dbname);
@@ -5030,7 +5039,33 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
 old_dbname, m_tabname,
 ndb_table_id, ndb_table_version,
 SOT_RENAME_TABLE,
-m_dbname, new_tabname);
+m_dbname, new_tabname, 1);
 }

+// If we are moving tables between databases, we need to recreate
+// indexes
+if (recreate_indexes)
+{
+for (unsigned i = 0; i < index_list.count; i++)
+{
+NDBDICT::List::Element& index_el = index_list.elements[i];
+// Recreate any indexes not stored in the system database
+if (my_strcasecmp(system_charset_info,
+index_el.database, NDB_SYSTEM_DATABASE))
+{
+set_dbname(from);
+ndb->setDatabaseName(m_dbname);
+const NDBINDEX * index= dict->getIndexGlobal(index_el.name, new_tab);
+DBUG_PRINT("info", ("Creating index %s/%s",
+index_el.database, index->getName()));
+dict->createIndex(*index, new_tab);
+DBUG_PRINT("info", ("Dropping index %s/%s",
+index_el.database, index->getName()));
+set_dbname(from);
+ndb->setDatabaseName(m_dbname);
+dict->dropIndexGlobal(*index);
+}
+}
+}
 if (share)
 free_share(&share);
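Taken together, the rename_table hunks above list the old table's indexes when the target database differs, then recreate every index that is not stored in the NDB system database against the renamed table and drop the old copy. A condensed, self-contained sketch of that recreation loop follows; the helper name and standalone framing are ours, not the patch's, while the dictionary calls are the ones used in the diff:

#include <string.h>
#include <NdbApi.hpp>

// Hypothetical helper mirroring the recreate-indexes loop in the hunk above.
static int recreate_moved_indexes(Ndb *ndb,
                                  const NdbDictionary::Table &new_tab,
                                  NdbDictionary::Dictionary::List &index_list)
{
  NdbDictionary::Dictionary *dict= ndb->getDictionary();
  for (unsigned i= 0; i < index_list.count; i++)
  {
    NdbDictionary::Dictionary::List::Element &el= index_list.elements[i];
    if (strcmp(el.database, "sys") == 0)   // already under the system database
      continue;
    const NdbDictionary::Index *index= dict->getIndexGlobal(el.name, new_tab);
    if (!index)
      return -1;
    dict->createIndex(*index, new_tab);    // recreate against the renamed table
    dict->dropIndexGlobal(*index);         // drop the old per-database copy
  }
  return 0;
}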
@@ -5053,6 +5088,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
 const char *db,
 const char *table_name)
 {
+THD *thd= current_thd;
 DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
 NDBDICT *dict= ndb->getDictionary();
 #ifdef HAVE_NDB_BINLOG
@@ -5084,7 +5120,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
 ndb_table_version= h->m_table->getObjectVersion();
 }
 #endif
-h->release_metadata(current_thd, ndb);
+h->release_metadata(thd, ndb);
 }
 else
 {
@@ -5150,11 +5186,11 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,

 if (!IS_TMP_PREFIX(table_name) && share)
 {
-ndbcluster_log_schema_op(current_thd, share,
-current_thd->query, current_thd->query_length,
+ndbcluster_log_schema_op(thd, share,
+thd->query, thd->query_length,
 share->db, share->table_name,
 ndb_table_id, ndb_table_version,
-SOT_DROP_TABLE);
+SOT_DROP_TABLE, 0, 0, 1);
 }
 else if (table_dropped && share && share->op) /* ndbcluster_log_schema_op
 will do a force GCP */
@@ -5733,6 +5769,7 @@ int ndbcluster_drop_database_impl(const char *path)

 static void ndbcluster_drop_database(char *path)
 {
+THD *thd= current_thd;
 DBUG_ENTER("ndbcluster_drop_database");
 #ifdef HAVE_NDB_BINLOG
 /*
@@ -5750,9 +5787,9 @@ static void ndbcluster_drop_database(char *path)
 #ifdef HAVE_NDB_BINLOG
 char db[FN_REFLEN];
 ha_ndbcluster::set_dbname(path, db);
-ndbcluster_log_schema_op(current_thd, 0,
-current_thd->query, current_thd->query_length,
-db, "", 0, 0, SOT_DROP_DB);
+ndbcluster_log_schema_op(thd, 0,
+thd->query, thd->query_length,
+db, "", 0, 0, SOT_DROP_DB, 0, 0, 0);
 #endif
 DBUG_VOID_RETURN;
 }
@@ -6831,6 +6868,7 @@ static void dbug_print_open_tables()
 */
 int handle_trailing_share(NDB_SHARE *share)
 {
+THD *thd= current_thd;
 static ulong trailing_share_id= 0;
 DBUG_ENTER("handle_trailing_share");
@@ -6841,7 +6879,7 @@ int handle_trailing_share(NDB_SHARE *share)
 bzero((char*) &table_list,sizeof(table_list));
 table_list.db= share->db;
 table_list.alias= table_list.table_name= share->table_name;
-close_cached_tables(current_thd, 0, &table_list, TRUE);
+close_cached_tables(thd, 0, &table_list, TRUE);

 pthread_mutex_lock(&ndbcluster_mutex);
 if (!--share->use_count)
@@ -9944,13 +9982,13 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
 thd->query, thd->query_length,
 "", info->tablespace_name,
 0, 0,
-SOT_TABLESPACE);
+SOT_TABLESPACE, 0, 0, 0);
 else
 ndbcluster_log_schema_op(thd, 0,
 thd->query, thd->query_length,
 "", info->logfile_group_name,
 0, 0,
-SOT_LOGFILE_GROUP);
+SOT_LOGFILE_GROUP, 0, 0, 0);
 #endif
 DBUG_RETURN(FALSE);
(File diff suppressed because it is too large.)
@@ -138,8 +138,9 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
 uint32 ndb_table_id,
 uint32 ndb_table_version,
 enum SCHEMA_OP_TYPE type,
-const char *new_db= 0,
-const char *new_table_name= 0);
+const char *new_db,
+const char *new_table_name,
+int have_lock_open);
 int ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
 NDB_SHARE *share,
 const char *type_str);
@@ -1002,6 +1002,9 @@ typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*);
 #define WAITFOR_RESPONSE_TIMEOUT 120000 // Milliseconds
 #endif

+#define NDB_SYSTEM_DATABASE "sys"
+#define NDB_SYSTEM_SCHEMA "def"
+
 /**
  * @class Ndb
  * @brief Represents the NDB kernel and is the main class of the NDB API.
@@ -1672,6 +1675,8 @@ private:
 const char * externalizeIndexName(const char * internalIndexName,
 bool fullyQualifiedNames);
 const char * externalizeIndexName(const char * internalIndexName);
+const BaseString old_internalize_index_name(const NdbTableImpl * table,
+const char * external_name) const;
 const BaseString internalize_index_name(const NdbTableImpl * table,
 const char * external_name) const;
@@ -1635,6 +1635,16 @@ public:
 int listIndexes(List & list, const char * tableName);
 int listIndexes(List & list, const char * tableName) const;

+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
+ * Fetch list of indexes of given table.
+ * @param list  Reference to list where to store the listed indexes
+ * @param table  Reference to table that index belongs to.
+ * @return 0 if successful, otherwise -1
+ */
+int listIndexes(List & list, const Table &table) const;
+#endif
+
 /** @} *******************************************************************/
 /**
  * @name Events
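The new overload added here resolves the listing by table object rather than by name. A minimal usage sketch, assuming a connected Ndb object and a hypothetical table "t1", with error handling elided:

#include <stdio.h>
#include <NdbApi.hpp>

// Sketch: enumerate the indexes of an NDB table via the new const overload.
static void print_indexes(Ndb *ndb)
{
  NdbDictionary::Dictionary *dict= ndb->getDictionary();
  const NdbDictionary::Table *tab= dict->getTable("t1");  // hypothetical table
  NdbDictionary::Dictionary::List list;
  if (tab && dict->listIndexes(list, *tab) == 0)
    for (unsigned i= 0; i < list.count; i++)
      printf("index %s (database %s)\n",
             list.elements[i].name, list.elements[i].database);
}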
@@ -1311,6 +1311,35 @@ Ndb::internalize_table_name(const char *external_name) const
 DBUG_RETURN(ret);
 }

+const BaseString
+Ndb::old_internalize_index_name(const NdbTableImpl * table,
+const char * external_name) const
+{
+BaseString ret;
+DBUG_ENTER("old_internalize_index_name");
+DBUG_PRINT("enter", ("external_name: %s, table_id: %d",
+external_name, table ? table->m_id : ~0));
+if (!table)
+{
+DBUG_PRINT("error", ("!table"));
+DBUG_RETURN(ret);
+}
+
+if (fullyQualifiedNames)
+{
+/* Internal index name format <db>/<schema>/<tabid>/<table> */
+ret.assfmt("%s%d%c%s",
+theImpl->m_prefix.c_str(),
+table->m_id,
+table_name_separator,
+external_name);
+}
+else
+ret.assign(external_name);
+
+DBUG_PRINT("exit", ("internal_name: %s", ret.c_str()));
+DBUG_RETURN(ret);
+}
+
 const BaseString
 Ndb::internalize_index_name(const NdbTableImpl * table,
@@ -1328,9 +1357,9 @@ Ndb::internalize_index_name(const NdbTableImpl * table,

 if (fullyQualifiedNames)
 {
-/* Internal index name format <db>/<schema>/<tabid>/<table> */
+/* Internal index name format sys/def/<tabid>/<table> */
 ret.assfmt("%s%d%c%s",
-theImpl->m_prefix.c_str(),
+theImpl->m_systemPrefix.c_str(),
 table->m_id,
 table_name_separator,
 external_name);
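For concreteness, the old and new internal index names differ only in their prefix. With assumed example values (database "test", table id 17, index "i1") and table_name_separator taken to be '/' as elsewhere in the NDB API, the two formats compare as below:

#include <stdio.h>

// Illustration of the old and new internal index-name formats, built with
// assumed example values; not code from the patch.
int main()
{
  const char sep= '/';
  char old_name[64], new_name[64];
  // old format: <db>/<schema>/<tabid>/<index>  (built from m_prefix)
  snprintf(old_name, sizeof(old_name), "%s%c%s%c%d%c%s",
           "test", sep, "def", sep, 17, sep, "i1");
  // new format: sys/def/<tabid>/<index>  (built from m_systemPrefix)
  snprintf(new_name, sizeof(new_name), "%s%c%s%c%d%c%s",
           "sys", sep, "def", sep, 17, sep, "i1");
  printf("old: %s\nnew: %s\n", old_name, new_name);  // test/def/17/i1, sys/def/17/i1
  return 0;
}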
@@ -1618,6 +1618,14 @@ NdbDictionary::Dictionary::listIndexes(List& list,
 return m_impl.listIndexes(list, tab->getTableId());
 }

+int
+NdbDictionary::Dictionary::listIndexes(List& list,
+const NdbDictionary::Table &table) const
+{
+return m_impl.listIndexes(list, table.getTableId());
+}
+
 const struct NdbError &
 NdbDictionary::Dictionary::getNdbError() const {
 return m_impl.getNdbError();
@@ -56,7 +56,6 @@
 DBUG_RETURN(b);\
 }

-extern Uint64 g_latest_trans_gci;
 int ndb_dictionary_is_mysqld = 0;

 bool
@@ -1509,9 +1508,21 @@ NdbTableImpl *
 NdbDictionaryImpl::getIndexTable(NdbIndexImpl * index,
 NdbTableImpl * table)
 {
+const char *current_db= m_ndb.getDatabaseName();
+NdbTableImpl *index_table;
 const BaseString internalName(
 m_ndb.internalize_index_name(table, index->getName()));
-return getTable(m_ndb.externalizeTableName(internalName.c_str()));
+// Get index table in system database
+m_ndb.setDatabaseName(NDB_SYSTEM_DATABASE);
+index_table= getTable(m_ndb.externalizeTableName(internalName.c_str()));
+m_ndb.setDatabaseName(current_db);
+if (!index_table)
+{
+// Index table not found
+// Try geting index table in current database (old format)
+index_table= getTable(m_ndb.externalizeTableName(internalName.c_str()));
+}
+return index_table;
 }

 #if 0
@@ -4223,7 +4234,6 @@ NdbDictInterface::execWAIT_GCP_CONF(NdbApiSignal* signal,
 {
 const WaitGCPConf * const conf=
 CAST_CONSTPTR(WaitGCPConf, signal->getDataPtr());
-g_latest_trans_gci= conf->gcp;
 m_waiter.signal(NO_WAIT);
 }
@@ -1020,6 +1020,33 @@ NdbDictionaryImpl::getIndexGlobal(const char * index_name,
 }
 break;
 }
+{
+// Index not found, try old format
+const BaseString
+old_internal_indexname(m_ndb.old_internalize_index_name(&ndbtab,
+index_name));
+retry= 2;
+while (retry)
+{
+NdbTableImpl *tab=
+fetchGlobalTableImplRef(InitIndex(old_internal_indexname,
+index_name, ndbtab));
+if (tab)
+{
+// tab->m_index sould be set. otherwise tab == 0
+NdbIndexImpl *idx= tab->m_index;
+if (idx->m_table_id != (unsigned)ndbtab.getObjectId() ||
+idx->m_table_version != (unsigned)ndbtab.getObjectVersion())
+{
+releaseIndexGlobal(*idx, 1);
+retry--;
+continue;
+}
+DBUG_RETURN(idx);
+}
+break;
+}
+}
 m_error.code= 4243;
 DBUG_RETURN(0);
 }
@@ -1086,17 +1113,41 @@ NdbDictionaryImpl::getIndex(const char* index_name,
 index_name,
 prim));
 if (!tab)
-goto err;
+goto retry;

 info= Ndb_local_table_info::create(tab, 0);
 if (!info)
-goto err;
+goto retry;
 m_localHash.put(internal_indexname.c_str(), info);
 }
 else
 tab= info->m_table_impl;

 return tab->m_index;

+retry:
+// Index not found, try fetching it from current database
+const BaseString
+old_internal_indexname(m_ndb.old_internalize_index_name(&prim, index_name));
+
+info= m_localHash.get(old_internal_indexname.c_str());
+if (info == 0)
+{
+tab= fetchGlobalTableImplRef(InitIndex(old_internal_indexname,
+index_name,
+prim));
+if (!tab)
+goto err;
+
+info= Ndb_local_table_info::create(tab, 0);
+if (!info)
+goto err;
+m_localHash.put(old_internal_indexname.c_str(), info);
+}
+else
+tab= info->m_table_impl;
+
+return tab->m_index;
+
 err:
 m_error.code= 4243;
@@ -93,6 +93,8 @@ public:
 m_schemaname.c_str(), table_name_separator);
 }

+BaseString m_systemPrefix; // Buffer for preformatted for <sys>/<def>/
+
 /**
  * NOTE free lists must be _after_ theNdbObjectIdMap take
  * assure that destructors are run in correct order
@@ -32,6 +32,8 @@
 #include <signaldata/TcKeyFailConf.hpp>
 #include <signaldata/TcHbRep.hpp>

+Uint64 g_latest_trans_gci = 0;
+
 /*****************************************************************************
 NdbTransaction( Ndb* aNdb );
@@ -1568,6 +1570,9 @@ NdbTransaction::receiveTC_COMMITCONF(const TcCommitConf * commitConf)
 theCommitStatus = Committed;
 theCompletionStatus = CompletedSuccess;
 theGlobalCheckpointId = commitConf->gci;
+// theGlobalCheckpointId == 0 if NoOp transaction
+if (theGlobalCheckpointId)
+g_latest_trans_gci = theGlobalCheckpointId;
 return 0;
 } else {
 #ifdef NDB_NO_DROPPED_SIGNAL
@@ -1746,6 +1751,8 @@ from other transactions.
 if (tCommitFlag == 1) {
 theCommitStatus = Committed;
 theGlobalCheckpointId = tGCI;
+assert(tGCI);
+g_latest_trans_gci = tGCI;
 } else if ((tNoComp >= tNoSent) &&
 (theLastExecOpInList->theCommitIndicator == 1)){
@@ -1922,6 +1929,8 @@ NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf,
 if (tCommitFlag == 1) {
 theCommitStatus = Committed;
 theGlobalCheckpointId = tGCI;
+assert(tGCI);
+g_latest_trans_gci = tGCI;
 } else if ((tNoComp >= tNoSent) &&
 (theLastExecOpInList->theCommitIndicator == 1)){
 /**********************************************************************/
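With these hunks, g_latest_trans_gci is maintained where the commit outcome is validated (receiveTC_COMMITCONF, receiveTCKEYCONF, receiveTCINDXCONF) rather than in the raw signal handler below, so only transactions that pass the magic-number and send-status checks update it. Application code does not read this global directly; the committed GCI is visible per transaction through the public API. A small sketch, assuming a connected Ndb object, with operations and error handling elided:

#include <stdio.h>
#include <NdbApi.hpp>

// Sketch: observing the global checkpoint id (GCI) of a committed
// transaction; the hunks above keep the library-internal
// g_latest_trans_gci in step with this per-transaction value.
static void commit_and_print_gci(Ndb *ndb)
{
  NdbTransaction *trans= ndb->startTransaction();
  // ... define operations on trans here ...
  if (trans && trans->execute(NdbTransaction::Commit) == 0)
    printf("committed in GCI %d\n", trans->getGCI());
  if (trans)
    ndb->closeTransaction(trans);
}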
@@ -46,7 +46,6 @@

 #include <EventLogger.hpp>
 extern EventLogger g_eventLogger;
-Uint64 g_latest_trans_gci= 0;

 /******************************************************************************
  * int init( int aNrOfCon, int aNrOfOp );
@@ -367,7 +366,6 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
 tCon = void2con(tFirstDataPtr);
 if ((tCon->checkMagicNumber() == 0) &&
 (tCon->theSendStatus == NdbTransaction::sendTC_OP)) {
-g_latest_trans_gci= keyConf->gci;
 tReturnCode = tCon->receiveTCKEYCONF(keyConf, tLen);
 if (tReturnCode != -1) {
 completedTransaction(tCon);
@@ -520,7 +518,6 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
 tCon = void2con(tFirstDataPtr);
 if ((tCon->checkMagicNumber() == 0) &&
 (tCon->theSendStatus == NdbTransaction::sendTC_COMMIT)) {
-g_latest_trans_gci= commitConf->gci;
 tReturnCode = tCon->receiveTC_COMMITCONF(commitConf);
 if (tReturnCode != -1) {
 completedTransaction(tCon);
@@ -855,7 +852,6 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
 tCon = void2con(tFirstDataPtr);
 if ((tCon->checkMagicNumber() == 0) &&
 (tCon->theSendStatus == NdbTransaction::sendTC_OP)) {
-g_latest_trans_gci= indxConf->gci;
 tReturnCode = tCon->receiveTCINDXCONF(indxConf, tLen);
 if (tReturnCode != -1) {
 completedTransaction(tCon);
@@ -219,6 +219,9 @@ NdbImpl::NdbImpl(Ndb_cluster_connection *ndb_cluster_connection,
 }
 m_optimized_node_selection=
 m_ndb_cluster_connection.m_optimized_node_selection;
+
+m_systemPrefix.assfmt("%s%c%s%c", NDB_SYSTEM_DATABASE, table_name_separator,
+NDB_SYSTEM_SCHEMA, table_name_separator);
 }

 NdbImpl::~NdbImpl()
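The constructor now precomputes the prefix that internalize_index_name uses for the new name format. A standalone check of the string it builds, assuming table_name_separator is '/' as elsewhere in the NDB API:

#include <stdio.h>

// Quick check of what the m_systemPrefix.assfmt() call above produces,
// using the NDB_SYSTEM_DATABASE/NDB_SYSTEM_SCHEMA values from the diff.
int main()
{
  char buf[16];
  snprintf(buf, sizeof(buf), "%s%c%s%c", "sys", '/', "def", '/');
  printf("%s\n", buf);  // prints: sys/def/
  return 0;
}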