Merge mskold@bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/usr/local/home/marty/MySQL/mysql-5.0

BitKeeper/deleted/.del-acinclude.m4~f4ab416bac5003:
  Auto merged
configure.in:
  Auto merged
commit ae587036bc
6 changed files with 122 additions and 24 deletions
@@ -13,6 +13,26 @@ a
show status like 'handler_discover%';
Variable_name	Value
Handler_discover	0
select * from t1;
a
2
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
select * from t1;
a
2
show status like 'handler_discover%';
Variable_name	Value
Handler_discover	0
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
select * from t1;
ERROR HY000: Got error 241 'Invalid schema object version' from ndbcluster
select * from t1;
a
2
flush status;
select * from t1;
a

@@ -20,7 +40,7 @@ a
update t1 set a=3 where a=2;
show status like 'handler_discover%';
Variable_name	Value
Handler_discover	1
Handler_discover	0
create table t3 (a int not null primary key, b varchar(22),
c int, last_col text) engine=ndb;
insert into t3 values(1, 'Hi!', 89, 'Longtext column');

@@ -18,6 +18,30 @@ select * from t1;
select * from t2;
show status like 'handler_discover%';

# Check dropping and recreating table on same server
connect (con1,localhost,,,test);
connect (con2,localhost,,,test);
connection con1;
select * from t1;
connection con2;
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection con1;
select * from t1;

# Check dropping and recreating table on different server
connection server2;
show status like 'handler_discover%';
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection server1;
# Currently a retry is required remotely
--error 1296
select * from t1;
select * from t1;

# Connect to server2 and use the tables from there
connection server2;
flush status;

@@ -76,8 +76,11 @@ public:
    Changed,    ///< The object has been modified in memory
                ///< and has to be commited in NDB Kernel for
                ///< changes to take effect
    Retrieved   ///< The object exist and has been read
    Retrieved,  ///< The object exist and has been read
                ///< into main memory from NDB Kernel
    Invalid     ///< The object has been invalidated
                ///< and should not be used

  };

  /**

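The Invalid status added to NdbDictionary::Object::Status above is the signal the handler code later in this commit keys on. As a rough illustration only (not part of the diff), an NDB API client that holds a cached table definition could react to that status by dropping the stale entry and re-reading it; the helper refresh_table() and its Ndb argument are hypothetical, while getDictionary(), getTable(), getObjectStatus(), and removeCachedTable() are the same calls used in ha_ndbcluster.cc below.

// Illustrative sketch only: refetch a table definition once its cached
// dictionary entry has been marked NdbDictionary::Object::Invalid.
// refresh_table() is a hypothetical helper, not part of this commit.
#include <NdbApi.hpp>

static const NdbDictionary::Table *
refresh_table(Ndb &ndb, const char *name)
{
  NdbDictionary::Dictionary *dict= ndb.getDictionary();
  const NdbDictionary::Table *tab= dict->getTable(name);
  if (tab == NULL)
    return NULL;                        // table dropped or NDB error
  if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
  {
    // Another mysqld altered or recreated the table: discard the stale
    // cached definition and read it again from the NDB kernel.
    dict->removeCachedTable(name);
    tab= dict->getTable(name);
  }
  return tab;
}
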
@@ -1526,6 +1526,7 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
      // If in local cache it must be in global
      if (!cachedImpl)
        abort();
      cachedImpl->m_status = NdbDictionary::Object::Invalid;
      m_globalHash->drop(cachedImpl);
      m_globalHash->unlock();
    }

@@ -1830,8 +1831,8 @@ NdbDictionaryImpl::dropTable(const char * name)

    DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName));
    m_localHash.drop(internalTableName);

    m_globalHash->lock();
    tab->m_status = NdbDictionary::Object::Invalid;
    m_globalHash->drop(tab);
    m_globalHash->unlock();
    DBUG_RETURN(dropTable(name));

@@ -1875,10 +1876,11 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
  int ret = m_receiver.dropTable(impl);
  if(ret == 0 || m_error.code == 709){
    const char * internalTableName = impl.m_internalName.c_str();


    m_localHash.drop(internalTableName);

    m_globalHash->lock();
    impl.m_status = NdbDictionary::Object::Invalid;
    m_globalHash->drop(&impl);
    m_globalHash->unlock();

@@ -1976,6 +1978,7 @@ NdbDictionaryImpl::invalidateObject(NdbTableImpl & impl)

  m_localHash.drop(internalTableName);
  m_globalHash->lock();
  impl.m_status = NdbDictionary::Object::Invalid;
  m_globalHash->drop(&impl);
  m_globalHash->unlock();
  return 0;

@@ -2242,8 +2245,8 @@ NdbDictionaryImpl::dropIndex(const char * indexName,
    m_ndb.internalizeTableName(indexName); // Index is also a table

    m_localHash.drop(internalIndexName);

    m_globalHash->lock();
    idx->m_table->m_status = NdbDictionary::Object::Invalid;
    m_globalHash->drop(idx->m_table);
    m_globalHash->unlock();
    return dropIndex(indexName, tableName);

@@ -2277,8 +2280,8 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
  int ret = m_receiver.dropIndex(impl, *timpl);
  if(ret == 0){
    m_localHash.drop(internalIndexName);

    m_globalHash->lock();
    impl.m_table->m_status = NdbDictionary::Object::Invalid;
    m_globalHash->drop(impl.m_table);
    m_globalHash->unlock();
  }

@@ -418,11 +418,28 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
  # The mapped error code
*/

void ha_ndbcluster::invalidateDictionaryCache()
void ha_ndbcluster::invalidate_dictionary_cache(bool global)
{
  NDBDICT *dict= get_ndb()->getDictionary();
  DBUG_ENTER("invalidate_dictionary_cache");
  DBUG_PRINT("info", ("invalidating %s", m_tabname));
  dict->invalidateTable(m_tabname);

  if (global)
  {
    const NDBTAB *tab= dict->getTable(m_tabname);
    if (!tab)
      DBUG_VOID_RETURN;
    if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
    {
      // Global cache has already been invalidated
      dict->removeCachedTable(m_tabname);
      global= FALSE;
    }
    else
      dict->invalidateTable(m_tabname);
  }
  else
    dict->removeCachedTable(m_tabname);
  table->s->version=0L;			/* Free when thread is ready */
  /* Invalidate indexes */
  for (uint i= 0; i < table->s->keys; i++)

@@ -434,18 +451,28 @@ void ha_ndbcluster::invalidateDictionaryCache()
    switch(idx_type) {
    case(PRIMARY_KEY_ORDERED_INDEX):
    case(ORDERED_INDEX):
      dict->invalidateIndex(index->getName(), m_tabname);
      if (global)
        dict->invalidateIndex(index->getName(), m_tabname);
      else
        dict->removeCachedIndex(index->getName(), m_tabname);
      break;
    case(UNIQUE_ORDERED_INDEX):
      dict->invalidateIndex(index->getName(), m_tabname);
      if (global)
        dict->invalidateIndex(index->getName(), m_tabname);
      else
        dict->removeCachedIndex(index->getName(), m_tabname);
    case(UNIQUE_INDEX):
      dict->invalidateIndex(unique_index->getName(), m_tabname);
      if (global)
        dict->invalidateIndex(unique_index->getName(), m_tabname);
      else
        dict->removeCachedIndex(unique_index->getName(), m_tabname);
      break;
    case(PRIMARY_KEY_INDEX):
    case(UNDEFINED_INDEX):
      break;
    }
  }
  DBUG_VOID_RETURN;
}

int ha_ndbcluster::ndb_err(NdbTransaction *trans)

@@ -457,7 +484,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
  ERR_PRINT(err);
  switch (err.classification) {
  case NdbError::SchemaError:
    invalidateDictionaryCache();
    invalidate_dictionary_cache(TRUE);

    if (err.code==284)
    {

@@ -882,7 +909,14 @@ int ha_ndbcluster::get_metadata(const char *path)

  if (!(tab= dict->getTable(m_tabname)))
    ERR_RETURN(dict->getNdbError());
  DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
  // Check if thread has stale local cache
  if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
  {
    invalidate_dictionary_cache(FALSE);
    if (!(tab= dict->getTable(m_tabname)))
      ERR_RETURN(dict->getNdbError());
    DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
  }
  /*
    Compare FrmData in NDB with frm file from disk.
  */

@@ -901,7 +935,7 @@ int ha_ndbcluster::get_metadata(const char *path)
      if (!invalidating_ndb_table)
      {
        DBUG_PRINT("info", ("Invalidating table"));
        invalidateDictionaryCache();
        invalidate_dictionary_cache(TRUE);
        invalidating_ndb_table= TRUE;
      }
      else

@@ -927,7 +961,7 @@ int ha_ndbcluster::get_metadata(const char *path)
  if (error)
    DBUG_RETURN(error);

  m_tableVersion= tab->getObjectVersion();
  m_table_version= tab->getObjectVersion();
  m_table= (void *)tab;
  m_table_info= NULL;			// Set in external lock

@@ -3269,15 +3303,25 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
      void *tab_info;
      if (!(tab= dict->getTable(m_tabname, &tab_info)))
        ERR_RETURN(dict->getNdbError());
      DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
      if (m_table != (void *)tab || m_tableVersion != tab->getObjectVersion())
      DBUG_PRINT("info", ("Table schema version: %d",
                          tab->getObjectVersion()));
      // Check if thread has stale local cache
      if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
      {
        invalidate_dictionary_cache(FALSE);
        if (!(tab= dict->getTable(m_tabname)))
          ERR_RETURN(dict->getNdbError());
        DBUG_PRINT("info", ("Table schema version: %d",
                            tab->getObjectVersion()));
      }
      if (m_table != (void *)tab || m_table_version < tab->getObjectVersion())
      {
        /*
          The table has been altered, refresh the index list
        */
        build_index_list(ndb, table, ILBP_OPEN);
        m_table= (void *)tab;
        m_tableVersion = tab->getObjectVersion();
        m_table_version = tab->getObjectVersion();
      }
      m_table_info= tab_info;
    }

@@ -3321,7 +3365,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
        thd_ndb->stmt= NULL;
      }
    }
    m_table= NULL;
    m_table_info= NULL;

    /*

@@ -4036,7 +4079,13 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
  dict= ndb->getDictionary();
  if (!(orig_tab= dict->getTable(m_tabname)))
    ERR_RETURN(dict->getNdbError());

  // Check if thread has stale local cache
  if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
  {
    dict->removeCachedTable(m_tabname);
    if (!(orig_tab= dict->getTable(m_tabname)))
      ERR_RETURN(dict->getNdbError());
  }
  m_table= (void *)orig_tab;
  // Change current database to that of target table
  set_dbname(to);

@@ -4159,7 +4208,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
  m_active_trans(NULL),
  m_active_cursor(NULL),
  m_table(NULL),
  m_tableVersion(-1),
  m_table_version(-1),
  m_table_info(NULL),
  m_table_flags(HA_REC_NOT_IN_SEQ |
                HA_NULL_IN_KEY |

@@ -4409,7 +4458,6 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
      DBUG_RETURN(1);
    ERR_RETURN(err);
  }

  DBUG_PRINT("info", ("Found table %s", tab->getName()));

  len= tab->getFrmLength();

@@ -558,7 +558,7 @@ private:
  void print_results();

  ulonglong get_auto_increment();
  void invalidateDictionaryCache();
  void invalidate_dictionary_cache(bool global);
  int ndb_err(NdbTransaction*);
  bool uses_blob_value(bool all_fields);

@@ -596,7 +596,7 @@ private:
  NdbTransaction *m_active_trans;
  NdbScanOperation *m_active_cursor;
  void *m_table;
  int m_tableVersion;
  int m_table_version;
  void *m_table_info;
  char m_dbname[FN_HEADLEN];
  //char m_schemaname[FN_HEADLEN];