Merge bk-internal.mysql.com:/home/bk/mysql-5.0
into neptunus.(none):/home/msvensson/mysql/mysql-5.0
commit f9dd856d9e
5 changed files with 87 additions and 16 deletions
@@ -6,7 +6,7 @@ AC_PREREQ(2.50)dnl Minimum Autoconf version required.
 AC_INIT(sql/mysqld.cc)
 AC_CANONICAL_SYSTEM
 # Don't forget to also update the NDB lines below.
-AM_INIT_AUTOMAKE(mysql, 5.0.4-beta)
+AM_INIT_AUTOMAKE(mysql, 5.0.5-beta)
 AM_CONFIG_HEADER(config.h)
 
 PROTOCOL_VERSION=10

@@ -607,3 +607,33 @@ primary key (a))
 engine=ndb
 max_rows=1;
 drop table t1;
+create table t1
+(counter int(64) NOT NULL auto_increment,
+datavalue char(40) default 'XXXX',
+primary key (counter)
+) ENGINE=ndbcluster;
+insert into t1 (datavalue) values ('newval');
+insert into t1 (datavalue) values ('newval');
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+3 newval
+4 newval
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+3 newval
+4 newval
+35 newval
+36 newval
+37 newval
+38 newval
+drop table t1;

@@ -577,3 +577,28 @@ create table t1
 engine=ndb
 max_rows=1;
 drop table t1;
+
+#
+# Test auto_increment
+#
+
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+
+create table t1
+(counter int(64) NOT NULL auto_increment,
+datavalue char(40) default 'XXXX',
+primary key (counter)
+) ENGINE=ndbcluster;
+
+connection con1;
+insert into t1 (datavalue) values ('newval');
+insert into t1 (datavalue) values ('newval');
+select * from t1 order by counter;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+connection con2;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+
+drop table t1;

@@ -756,26 +756,28 @@ Remark: Returns a new TupleId to the application.
 Uint64
 Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
 {
-  DEBUG_TRACE("getAutoIncrementValue");
+  DBUG_ENTER("getAutoIncrementValue");
   const char * internalTableName = internalizeTableName(aTableName);
   Ndb_local_table_info *info=
     theDictionary->get_local_table_info(internalTableName, false);
   if (info == 0)
-    return ~0;
+    DBUG_RETURN(~0);
   const NdbTableImpl *table= info->m_table_impl;
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64
 Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize)
 {
-  DEBUG_TRACE("getAutoIncrementValue");
+  DBUG_ENTER("getAutoIncrementValue");
   if (aTable == 0)
-    return ~0;
+    DBUG_RETURN(~0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64

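The hunk above swaps DEBUG_TRACE and plain return statements for the paired DBUG_ENTER/DBUG_PRINT/DBUG_RETURN macros. A self-contained sketch (toy TRACE_* macros standing in for the real dbug library, not part of the patch) of why the pairing matters: the enter macro pushes a per-function frame onto a trace stack and only the return macro pops it, so any early plain return would leave that stack unbalanced.

// Illustration only: hypothetical macros mimicking the DBUG_ENTER/DBUG_RETURN pairing rule.
#include <cstdio>
#include <string>
#include <vector>

static std::vector<std::string> trace_stack;      // stands in for dbug's per-thread call stack

#define TRACE_ENTER(name) trace_stack.push_back(name)
#define TRACE_RETURN(val) \
  do { trace_stack.pop_back(); return (val); } while (0)

static unsigned long long get_value(bool found, unsigned long long v)
{
  TRACE_ENTER("get_value");
  if (!found)
    TRACE_RETURN(~0ULL);      // early exit still pops the frame (a bare return would not)
  TRACE_RETURN(v);            // normal exit pops the frame too
}

int main()
{
  get_value(true, 42);
  get_value(false, 0);
  std::printf("frames left on trace stack: %zu\n", trace_stack.size());  // prints 0
  return 0;
}
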
@@ -790,39 +792,45 @@ Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize)
 Uint64
 Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize)
 {
+  DBUG_ENTER("getTupleIdFromNdb");
   if ( theFirstTupleId[aTableId] != theLastTupleId[aTableId] )
   {
     theFirstTupleId[aTableId]++;
-    return theFirstTupleId[aTableId];
+    DBUG_PRINT("info", ("next cached value %u", theFirstTupleId[aTableId]));
+    DBUG_RETURN(theFirstTupleId[aTableId]);
   }
   else // theFirstTupleId == theLastTupleId
   {
-    return opTupleIdOnNdb(aTableId, cacheSize, 0);
+    DBUG_PRINT("info",("reading %u values from database",
+                       (cacheSize == 0) ? 1 : cacheSize));
+    DBUG_RETURN(opTupleIdOnNdb(aTableId, (cacheSize == 0) ? 1 : cacheSize, 0));
   }
 }
 
 Uint64
 Ndb::readAutoIncrementValue(const char* aTableName)
 {
-  DEBUG_TRACE("readtAutoIncrementValue");
+  DBUG_ENTER("readtAutoIncrementValue");
   const NdbTableImpl* table = theDictionary->getTable(aTableName);
   if (table == 0) {
     theError= theDictionary->getNdbError();
-    return ~0;
+    DBUG_RETURN(~0);
   }
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64
 Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
 {
-  DEBUG_TRACE("readtAutoIncrementValue");
+  DBUG_ENTER("readtAutoIncrementValue");
   if (aTable == 0)
-    return ~0;
+    DBUG_RETURN(~0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64

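Ndb::getTupleIdFromNdb() above is the per-connection cache behind NDB auto_increment: it hands out values from a pre-fetched range and only goes back to the data nodes when that range is exhausted, fetching (cacheSize == 0 ? 1 : cacheSize) values at a time. A minimal, self-contained sketch with hypothetical names (the real code uses theFirstTupleId/theLastTupleId and opTupleIdOnNdb); it also suggests why con2 in the test above continues at a much higher counter value: each connection drains its own pre-fetched block.

// Illustration only: hypothetical id cache mimicking the pattern in the hunk above.
#include <cstdio>

struct IdCache {
  unsigned long long first= 0;   // last id handed out from the cache
  unsigned long long last= 0;    // end of the pre-fetched range
};

static unsigned long long g_db_next= 1;   // stands in for the counter kept on the data nodes

// Stand-in for the round trip to the data nodes (opTupleIdOnNdb in the patch).
static unsigned long long fetch_block(IdCache &c, unsigned int cache_size)
{
  unsigned int n= (cache_size == 0) ? 1 : cache_size;   // same guard the patch adds
  c.first= g_db_next;
  c.last= g_db_next + n - 1;
  g_db_next+= n;
  return c.first;
}

static unsigned long long get_id(IdCache &c, unsigned int cache_size)
{
  if (c.first != c.last)               // cached values remain
    return ++c.first;
  return fetch_block(c, cache_size);   // cache exhausted: read from the "database"
}

int main()
{
  IdCache a, b;                        // two connections, two caches
  for (int i= 0; i < 3; i++)
    std::printf("conn A: %llu\n", get_id(a, 32));   // 1, 2, 3
  std::printf("conn B: %llu\n", get_id(b, 32));     // 33: jumps past A's cached block
  return 0;
}
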
@@ -2989,6 +2989,10 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
   DBUG_PRINT("enter", ("rows: %d", (int)rows));
 
   m_rows_inserted= 0;
+  if (rows == 0)
+    /* We don't know how many will be inserted, guess */
+    m_rows_to_insert= m_autoincrement_prefetch;
+  else
   m_rows_to_insert= rows;
 
   /*

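The start_bulk_insert() change above handles the case where the server cannot predict how many rows will arrive (rows == 0, which is presumably what the INSERT ... SELECT statements in the test exercise), falling back to the autoincrement prefetch size instead of a zero estimate. A one-function sketch with hypothetical names:

// Illustration only: hypothetical stand-alone version of the estimate fallback shown above.
static unsigned long long plan_rows_to_insert(unsigned long long rows,
                                              unsigned long long autoincrement_prefetch)
{
  if (rows == 0)
    return autoincrement_prefetch;   /* unknown row count: guess the prefetch size */
  return rows;
}
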
@@ -4099,6 +4103,10 @@ ulonglong ha_ndbcluster::get_auto_increment()
   DBUG_ENTER("get_auto_increment");
   DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
   Ndb *ndb= get_ndb();
+
+  if (m_rows_inserted > m_rows_to_insert)
+    /* We guessed too low */
+    m_rows_to_insert+= m_autoincrement_prefetch;
   cache_size=
     (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
     m_rows_to_insert - m_rows_inserted
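
get_auto_increment() above widens its row estimate when the earlier guess proved too low and then sizes the tuple-id cache from the rows still expected. The hunk is cut off mid-expression, so the fallback branch of the ternary (using the full prefetch size) is an assumption in this sketch, not something shown in the diff; all names here are stand-ins.

// Illustration only: hypothetical cache sizing mirroring the logic above.
#include <cstdio>

typedef unsigned long long ha_rows_t;   // stand-in for MySQL's ha_rows

static ha_rows_t auto_inc_cache_size(ha_rows_t &rows_to_insert,
                                     ha_rows_t rows_inserted,
                                     ha_rows_t prefetch)
{
  if (rows_inserted > rows_to_insert)
    rows_to_insert+= prefetch;          // we guessed too low: widen the estimate
  ha_rows_t remaining= rows_to_insert - rows_inserted;
  return (remaining < prefetch) ? remaining : prefetch;  // assumed fallback branch
}

int main()
{
  ha_rows_t to_insert= 10;
  std::printf("%llu\n", auto_inc_cache_size(to_insert, 0, 32));   // 10: only 10 rows left
  std::printf("%llu\n", auto_inc_cache_size(to_insert, 12, 32));  // 30: estimate widened first
  return 0;
}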