Mirror of https://github.com/MariaDB/server.git
Merge bk-internal:/home/bk/mysql-4.1
into mysql.com:/home/jimw/my/mysql-4.1-clean
commit 02e1937910
5 changed files with 36 additions and 33 deletions
@@ -1447,7 +1447,8 @@ fil_write_flushed_lsn_to_data_files(
 	cache. Note that all data files in the system tablespace 0 are
 	always open. */

-	if (space->purpose == FIL_TABLESPACE) {
+	if (space->purpose == FIL_TABLESPACE
+	    && space->id == 0) {
 		sum_of_sizes = 0;

 		node = UT_LIST_GET_FIRST(space->chain);
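The functional change in this first hunk is easy to miss: the flushed-LSN stamp is now written only to data files of the system tablespace (space id 0), not to every data tablespace. A minimal standalone sketch of the new guard, using hypothetical stand-in types rather than the real InnoDB structs:

    // Sketch only: stand-in struct and constant, not InnoDB's definitions.
    struct space_like {
        int      purpose;   // FIL_TABLESPACE-style tag vs. e.g. a log space
        unsigned id;        // tablespace id; id 0 is the system tablespace
    };

    const int FIL_TABLESPACE_LIKE = 501;  // hypothetical tag value

    // Old guard: every data tablespace matched. New guard: only the system
    // tablespace, whose files the surrounding comment notes are always open.
    bool write_flushed_lsn_here(const space_like& space)
    {
        return space.purpose == FIL_TABLESPACE_LIKE && space.id == 0;
    }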
@@ -3305,12 +3305,12 @@ rec_loop:
 			err = sel_set_rec_lock(rec, index,
 						prebuilt->select_lock_type,
 						LOCK_ORDINARY, thr);
-			if (err != DB_SUCCESS) {
-
-				goto lock_wait_or_error;
-			}
-		}
+			if (err != DB_SUCCESS) {
+
+				goto lock_wait_or_error;
+			}
+		}

 	/* A page supremum record cannot be in the result set: skip
 	it now that we have placed a possible lock on it */
@@ -3413,12 +3413,12 @@ rec_loop:
 			err = sel_set_rec_lock(rec, index,
 						prebuilt->select_lock_type,
 						LOCK_GAP, thr);
-			if (err != DB_SUCCESS) {
-
-				goto lock_wait_or_error;
-			}
-		}
+			if (err != DB_SUCCESS) {
+
+				goto lock_wait_or_error;
+			}
+		}

 		btr_pcur_store_position(pcur, &mtr);
@@ -3446,12 +3446,12 @@ rec_loop:
 			err = sel_set_rec_lock(rec, index,
 						prebuilt->select_lock_type,
 						LOCK_GAP, thr);
-			if (err != DB_SUCCESS) {
-
-				goto lock_wait_or_error;
-			}
-		}
+			if (err != DB_SUCCESS) {
+
+				goto lock_wait_or_error;
+			}
+		}

 		btr_pcur_store_position(pcur, &mtr);
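All three rec_loop hunks reshape the same error-propagation idiom: every record-lock attempt in the scan loop funnels failures to the single lock_wait_or_error exit label. A compilable toy of that shape, with hypothetical error codes standing in for the real DB_SUCCESS and sel_set_rec_lock:

    #include <cstdio>

    enum db_err_like { DB_SUCCESS_LIKE = 0, DB_LOCK_WAIT_LIKE = 1 };  // hypothetical values

    static db_err_like try_rec_lock(int rec)
    {
        return rec == 3 ? DB_LOCK_WAIT_LIKE : DB_SUCCESS_LIKE;  // fail on one record
    }

    static db_err_like scan_loop()
    {
        db_err_like err = DB_SUCCESS_LIKE;

        for (int rec = 0; rec < 5; rec++) {
            err = try_rec_lock(rec);
            if (err != DB_SUCCESS_LIKE) {
                goto lock_wait_or_error;  // single exit, as in the hunks above
            }
        }
        return DB_SUCCESS_LIKE;

    lock_wait_or_error:
        std::printf("scan stopped, err=%d\n", (int) err);
        return err;
    }

    int main()
    {
        return (int) scan_loop();
    }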
@@ -727,7 +727,7 @@ Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
   Ndb_local_table_info *info=
     theDictionary->get_local_table_info(internalTableName, false);
   if (info == 0)
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   const NdbTableImpl *table= info->m_table_impl;
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -739,7 +739,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize
 {
   DBUG_ENTER("getAutoIncrementValue");
   if (aTable == 0)
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -751,7 +751,7 @@ Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize)
 {
   const NdbTableImpl* table = theDictionary->getTable(aTableName);
   if (table == 0)
-    return ~0;
+    return ~(Uint64)0;
   return getTupleIdFromNdb(table->m_tableId, cacheSize);
 }

@@ -780,7 +780,7 @@ Ndb::readAutoIncrementValue(const char* aTableName)
   const NdbTableImpl* table = theDictionary->getTable(aTableName);
   if (table == 0) {
     theError= theDictionary->getNdbError();
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   }
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -792,7 +792,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
 {
   DBUG_ENTER("readtAutoIncrementValue");
   if (aTable == 0)
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -829,7 +829,7 @@ Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, Uint64 val, bool
 {
   DEBUG_TRACE("setAutoIncrementValue " << val);
   if (aTable == 0)
-    return ~0;
+    return ~(Uint64)0;
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   return setTupleIdInNdb(table->m_tableId, val, increase);
 }
@@ -979,7 +979,7 @@ Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
   setDatabaseName(currentDb.c_str());
   setDatabaseSchemaName(currentSchema.c_str());

-  return ~0;
+  return ~(Uint64)0;
 }

 Uint32
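Every auto-increment hunk above makes the same substitution: the bare ~0 error sentinel becomes ~(Uint64)0. The spelling matters because ~0 has type int, and whether an all-ones 64-bit value results depends on implicit sign extension, while the superficially similar ~0u silently yields only 32 set bits. A self-contained sketch of the three spellings:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t from_int   = ~0;            // int -1, sign-extended: 0xffffffffffffffff
        uint64_t from_uint  = ~0u;           // unsigned int: only 0x00000000ffffffff
        uint64_t explicit64 = ~(uint64_t)0;  // unambiguously all 64 bits set

        std::printf("%016llx\n%016llx\n%016llx\n",
                    (unsigned long long) from_int,
                    (unsigned long long) from_uint,
                    (unsigned long long) explicit64);
        return 0;
    }

Note that the adjacent DBUG_PRINT("info", ("value %u", tupleId)) context lines pass a Uint64 to a %u format, the mirror image of the same mismatch; this commit leaves those alone.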
@@ -1880,7 +1880,7 @@ int ha_ndbcluster::write_row(byte *record)
     m_rows_inserted++;
     no_uncommitted_rows_update(1);
     m_bulk_insert_not_flushed= TRUE;
-    if ((m_rows_to_insert == 1) ||
+    if ((m_rows_to_insert == (ha_rows) 1) ||
         ((m_rows_inserted % m_bulk_insert_rows) == 0) ||
         set_blob_value)
     {
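The condition this hunk touches (only to make the literal's type explicit) is the bulk-insert flush trigger: buffered rows go to the cluster immediately for single-row inserts, at every m_bulk_insert_rows boundary, or whenever a row carries blob values. A toy model of that decision, with hypothetical stand-in names:

    #include <cstdint>
    #include <cstdio>

    // Toy model of write_row's flush trigger; all names are stand-ins.
    static bool should_flush(uint64_t rows_to_insert, uint64_t rows_inserted,
                             uint64_t bulk_insert_rows, bool blob_value)
    {
        return rows_to_insert == 1 ||                    // not batching at all
               rows_inserted % bulk_insert_rows == 0 ||  // batch boundary reached
               blob_value;                               // blobs bypass batching
    }

    int main()
    {
        // Inserting 10 rows with a batch size of 4 flushes after rows 4 and 8;
        // an end-of-statement path would send the remainder.
        for (uint64_t i = 1; i <= 10; i++)
            if (should_flush(10, i, 4, false))
                std::printf("flush after row %llu\n", (unsigned long long) i);
        return 0;
    }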
@@ -2945,8 +2945,8 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
   DBUG_ENTER("start_bulk_insert");
   DBUG_PRINT("enter", ("rows: %d", (int)rows));

-  m_rows_inserted= 0;
-  if (rows == 0)
+  m_rows_inserted= (ha_rows) 0;
+  if (rows == (ha_rows) 0)
     /* We don't know how many will be inserted, guess */
     m_rows_to_insert= m_autoincrement_prefetch;
   else
@@ -2984,7 +2984,7 @@ int ha_ndbcluster::end_bulk_insert()
     // Send rows to NDB
     DBUG_PRINT("info", ("Sending inserts to NDB, "\
                         "rows_inserted:%d, bulk_insert_rows: %d",
-                        m_rows_inserted, m_bulk_insert_rows));
+                        (int) m_rows_inserted, (int) m_bulk_insert_rows));
     m_bulk_insert_not_flushed= FALSE;
     if (execute_no_commit(this,trans) != 0) {
       no_uncommitted_rows_execute_failure();
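This hunk fixes the caller side of a printf-style vararg call: the %d conversions expect int, but m_rows_inserted and m_bulk_insert_rows are ha_rows, which is wider than int on many builds, so the arguments are now cast down to match the format. A minimal sketch of the bug class, with plain printf standing in for DBUG_PRINT:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t rows = 42;  // stand-in for an ha_rows counter

        // Mismatched: %d consumes an int, but 8 bytes were passed. This is
        // undefined behavior and can print garbage or skew later arguments:
        //   std::printf("rows: %d\n", rows);

        // Matched, as in the hunk above: cast the argument down to the
        // format's expected type.
        std::printf("rows: %d\n", (int) rows);
        return 0;
    }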
@@ -2992,8 +2992,8 @@ int ha_ndbcluster::end_bulk_insert()
     }
   }

-  m_rows_inserted= 0;
-  m_rows_to_insert= 1;
+  m_rows_inserted= (ha_rows) 0;
+  m_rows_to_insert= (ha_rows) 1;
   DBUG_RETURN(error);
 }

@@ -3182,7 +3182,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
       // store thread specific data first to set the right context
       m_force_send= thd->variables.ndb_force_send;
       m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
-      m_autoincrement_prefetch= thd->variables.ndb_autoincrement_prefetch_sz;
+      m_autoincrement_prefetch=
+        (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;
       if (!thd->transaction.on)
         m_transaction_on= FALSE;
       else
@@ -3596,7 +3597,7 @@ static int create_ndb_column(NDBCOL &col,

 static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
 {
-  if (form->max_rows == 0) /* default setting, don't set fragmentation */
+  if (form->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */
     return;
   /**
    * get the number of fragments right
@@ -3964,6 +3965,7 @@ longlong ha_ndbcluster::get_auto_increment()
     /* We guessed too low */
     m_rows_to_insert+= m_autoincrement_prefetch;
   int cache_size=
+    (int)
     (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
     m_rows_to_insert - m_rows_inserted
     : (m_rows_to_insert > m_autoincrement_prefetch) ?
@@ -3998,9 +4000,9 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_primary_key_update(FALSE),
   m_retrieve_all_fields(FALSE),
   m_retrieve_primary_key(FALSE),
-  m_rows_to_insert(1),
-  m_rows_inserted(0),
-  m_bulk_insert_rows(1024),
+  m_rows_to_insert((ha_rows) 1),
+  m_rows_inserted((ha_rows) 0),
+  m_bulk_insert_rows((ha_rows) 1024),
   m_bulk_insert_not_flushed(FALSE),
   m_ops_pending(0),
   m_skip_auto_increment(TRUE),
@@ -4010,7 +4012,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_dupkey((uint) -1),
   m_ha_not_exact_count(FALSE),
   m_force_send(TRUE),
-  m_autoincrement_prefetch(32),
+  m_autoincrement_prefetch((ha_rows) 32),
   m_transaction_on(TRUE),
   m_use_local_query_cache(FALSE)
 {
@@ -4551,7 +4551,7 @@ Disable with --skip-ndbcluster (will save memory).",
    "Specify number of autoincrement values that are prefetched.",
    (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
    (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
-   0, GET_INT, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
+   0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
   {"ndb-force-send", OPT_NDB_FORCE_SEND,
    "Force send of buffers to ndb immediately without waiting for "
    "other threads.",
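In this my_getopt-style option table, the type tag is corrected from GET_INT to GET_ULONG so it matches the ulong ndb_autoincrement_prefetch_sz variable the pointers reference. The tag tells the option parser how wide a store to perform through the destination pointer; an undersized tag leaves part of a wider variable unwritten. A self-contained sketch of that failure mode, using a hypothetical parser rather than the real my_getopt code:

    #include <cstdio>
    #include <cstring>

    // Hypothetical stand-in for the parser's behavior: the type tag controls
    // how many bytes are stored through the destination pointer.
    enum opt_type_like { GET_INT_LIKE, GET_ULONG_LIKE };

    static void store_option(void* dest, opt_type_like t, unsigned long value)
    {
        if (t == GET_INT_LIKE) {
            int v = (int) value;
            std::memcpy(dest, &v, sizeof v);  // writes sizeof(int) bytes only
        } else {
            unsigned long v = value;
            std::memcpy(dest, &v, sizeof v);  // writes the variable's full width
        }
    }

    int main()
    {
        // Start from a stale bit pattern (truncated if ulong is 32-bit).
        unsigned long prefetch_sz = (unsigned long) 0xA5A5A5A5A5A5A5A5ull;

        // With the undersized tag, only sizeof(int) of the ulong's bytes are
        // overwritten where ulong is 8 bytes, so the stored value is garbage:
        store_option(&prefetch_sz, GET_INT_LIKE, 32);
        std::printf("GET_INT-style store:   %lu\n", prefetch_sz);

        store_option(&prefetch_sz, GET_ULONG_LIKE, 32);
        std::printf("GET_ULONG-style store: %lu\n", prefetch_sz);
        return 0;
    }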