diff --git a/ndb/include/ndbapi/AttrType.hpp b/ndb/include/ndbapi/AttrType.hpp
deleted file mode 100644
index e6e00c77130..00000000000
--- a/ndb/include/ndbapi/AttrType.hpp
+++ /dev/null
@@ -1,329 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/**
- * @file AttrType.hpp
- */
-
-#ifndef AttrType_H
-#define AttrType_H
-
-/**
- * Max number of Ndb objects in different threads.
- * (Ndb objects should not be shared by different threads.)
- */
-const unsigned MAX_NO_THREADS = 4711;
-
-/**
- * Max number of attributes in a table.
- */
-const unsigned MAXNROFATTRIBUTES = 128;
-
-/**
- * Max number of tuple keys for a table in NDB Cluster.
- *
- * A tuple key of a table is an attribute
- * which is either part of the
- * primary key or the tuple id of a table.
- */
-const unsigned MAXNROFTUPLEKEY = 16;
-
-/**
- * Max number of words in a tuple key attribute.
- *
- * Tuple keys can not have values larger than
- * 4092 bytes (i.e. 1023 words).
- */
-const unsigned MAXTUPLEKEYLENOFATTERIBUTEINWORD = 1023;
-
-/**
- * Max number of ErrorCode in NDB Cluster range 0 - 1999.
- */
-const unsigned MAXNDBCLUSTERERROR = 1999;
-
-/**
- * Max number of theErrorCode NDB API range 4000 - 4999.
- */
-const unsigned MAXNROFERRORCODE = 5000;
-
-/**
- * Missing explanation
- */
-enum ReturnType {
- ReturnSuccess, ///< Missing explanation
- ReturnFailure ///< Missing explanation
-};
-
-/**
- *
- */
-enum SendStatusType {
- NotInit, ///< Missing explanation
- InitState, ///< Missing explanation
- sendOperations, ///< Missing explanation
- sendCompleted, ///< Missing explanation
- sendCOMMITstate, ///< Missing explanation
- sendABORT, ///< Missing explanation
- sendABORTfail, ///< Missing explanation
- sendTC_ROLLBACK, ///< Missing explanation
- sendTC_COMMIT, ///< Missing explanation
- sendTC_OP ///< Missing explanation
-};
-
-/**
- * Missing explanation
- */
-enum ListState {
- NotInList, ///< Missing explanation
- InPreparedList, ///< Missing explanation
- InSendList, ///< Missing explanation
- InCompletedList ///< Missing explanation
-};
-
-/**
- * Commit status of the transaction
- */
-enum CommitStatusType {
- NotStarted, ///< Transaction not yet started
- Started, ///< Missing explanation
- Committed, ///< Transaction has been committed
- Aborted, ///< Transaction has been aborted
- NeedAbort ///< Missing explanation
-};
-
-/**
- * Commit type of transaction
- */
-enum AbortOption {
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- CommitIfFailFree = 0,
- CommitAsMuchAsPossible = 2, ///< Commit transaction with as many
- TryCommit = 0, ///< Missing explanation
-#endif
- AbortOnError = 0, ///< Abort transaction on failed operation
- IgnoreError = 2 ///< Transaction continues on failed operation
-};
-
-typedef AbortOption CommitType;
-
-/**
- * Missing explanation
- */
-enum InitType {
- NotConstructed, ///< Missing explanation
- NotInitialised, ///< Missing explanation
- StartingInit, ///< Missing explanation
- Initialised, ///< Missing explanation
- InitConfigError ///< Missing explanation
-};
-
-/**
- * Type of attribute
- */
-enum AttrType {
- Signed, ///< Attributes of this type can be read with:
- ///< NdbRecAttr::int64_value,
- ///< NdbRecAttr::int32_value,
- ///< NdbRecAttr::short_value,
- ///< NdbRecAttr::char_value
- UnSigned, ///< Attributes of this type can be read with:
- ///< NdbRecAttr::u_64_value,
- ///< NdbRecAttr::u_32_value,
- ///< NdbRecAttr::u_short_value,
- ///< NdbRecAttr::u_char_value
- Float, ///< Attributes of this type can be read with:
- ///< NdbRecAttr::float_value and
- ///< NdbRecAttr::double_value
- String, ///< Attributes of this type can be read with:
- ///< NdbRecAttr::aRef,
- ///< NdbRecAttr::getAttributeObject
- NoAttrTypeDef ///< Used for debugging only
-};
-
-/**
- * Execution type of transaction
- */
-enum ExecType {
- NoExecTypeDef = -1, ///< Erroneous type (Used for debugging only)
- Prepare, ///< Missing explanation
- NoCommit, ///< Execute the transaction as far as it has
- ///< been defined, but do not yet commit it
- Commit, ///< Execute and try to commit the transaction
- Rollback ///< Rollback transaction
-};
-
-/**
- * Indicates whether the attribute is part of a primary key or not
- */
-enum KeyType {
- Undefined = -1, ///< Used for debugging only
- NoKey, ///< Attribute is not part of primary key
- ///< or tuple identity
- TupleKey, ///< Attribute is part of primary key
- TupleId ///< Attribute is part of tuple identity
- ///< (This type of attribute is created
- ///< internally, and should not be
- ///< manually created.)
-};
-
-/**
- * Indicate whether the attribute should be stored on disk or not
- */
-enum StorageMode {
- MMBased = 0, ///< Main memory
- DiskBased = 1, ///< Disk (Not yet supported.)
- NoStorageTypeDef ///< Used for debugging only
-};
-
-/**
- * Where attribute is stored.
- *
- * This is used to indicate whether a primary key
- * should only be stored in the index storage and not in the data storage
- * or if it should be stored in both places.
- * The first alternative makes the attribute take less space,
- * but makes it impossible to scan using attribute.
- *
- * @note Use NormalStorageAttribute for most cases.
- * (IndexStorageAttribute should only be used on primary key
- * attributes and only if you do not want to scan using the attribute.)
- */
-enum StorageAttributeType {
- NoStorageAttributeTypeDefined = -1, ///< Missing explanation
- IndexStorageAttribute, ///< Attribute is only stored in
- ///< index storage (ACC)
- NormalStorageAttribute ///< Attribute values are stored
- ///< both in the index (ACC) and
- ///< in the data storage (TUP)
-};
-
-/**
- * Missing explanation
- */
-enum OperationStatus{
- Init, ///< Missing explanation
- OperationDefined, ///< Missing explanation
- TupleKeyDefined, ///< Missing explanation
- GetValue, ///< Missing explanation
- SetValue, ///< Missing explanation
- ExecInterpretedValue, ///< Missing explanation
- SetValueInterpreted, ///< Missing explanation
- FinalGetValue, ///< Missing explanation
- SubroutineExec, ///< Missing explanation
- SubroutineEnd, ///< Missing explanation
- SetBound, ///< Setting bounds in range scan
- WaitResponse, ///< Missing explanation
- WaitCommitResponse, ///< Missing explanation
- Finished, ///< Missing explanation
- ReceiveFinished ///< Missing explanation
-};
-
-/**
- * Type of operation
- */
-enum OperationType {
- ReadRequest = 0, ///< Read operation
- UpdateRequest = 1, ///< Update Operation
- InsertRequest = 2, ///< Insert Operation
- DeleteRequest = 3, ///< Delete Operation
- WriteRequest = 4, ///< Write Operation
- ReadExclusive = 5, ///< Read exclusive
- OpenScanRequest, ///< Scan Operation
- OpenRangeScanRequest, ///< Range scan operation
- NotDefined2, ///< Missing explanation
- NotDefined ///< Missing explanation
-};
-
-/**
- * Missing explanation
- */
-enum ConStatusType {
- NotConnected, ///< Missing explanation
- Connecting, ///< Missing explanation
- Connected, ///< Missing explanation
- DisConnecting, ///< Missing explanation
- ConnectFailure ///< Missing explanation
-};
-
-/**
- * Missing explanation
- */
-enum CompletionStatus {
- NotCompleted, ///< Missing explanation
- CompletedSuccess, ///< Missing explanation
- CompletedFailure, ///< Missing explanation
- DefinitionFailure ///< Missing explanation
-};
-
-/**
- * Type of fragmentation used for a table
- */
-enum FragmentType {
- Default = 0, ///< (All is default!)
- Single = 1, ///< Only one fragment
- All = 2, ///< Default value. One fragment per node group
- DistributionGroup = 3, ///< Distribution Group used for fragmentation.
- ///< One fragment per node group
- DistributionKey = 4, ///< Distribution Key used for fragmentation.
- ///< One fragment per node group.
- AllLarge = 5, ///< Sixten fragments per node group.
- DGroupLarge = 6, ///< Distribution Group used for fragmentation.
- ///< Sixten fragments per node group
- DKeyLarge = 7 ///< Distribution Key used for fragmentation.
- ///< Sixten fragments per node group
-};
-
-/**
- * Type of table or index.
- */
-enum TableType {
- UndefTableType = 0,
- SystemTable = 1, ///< Internal. Table cannot be updated by user
- UserTable = 2, ///< Normal application table
- UniqueHashIndex = 3, ///< Unique un-ordered hash index
- HashIndex = 4, ///< Non-unique un-ordered hash index
- UniqueOrderedIndex = 5, ///< Unique ordered index
- OrderedIndex = 6 ///< Non-unique ordered index
-};
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
-/**
- * Different types of tampering with the NDB Cluster.
- * Only for debugging purposes only.
- */
-enum TamperType {
- LockGlbChp = 1, ///< Lock GCP
- UnlockGlbChp, ///< Unlock GCP
- CrashNode, ///< Crash an NDB node
- ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster
- InsertError ///< Execute an error in NDB Cluster
- ///< (may crash system)
-};
-#endif
-
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
-/**
- * @deprecated
- */
-enum NullAttributeType {
- NoNullTypeDefined = -1,
- NotNullAttribute,
- NullAttribute,
- AttributeDefined
-};
-#endif
-
-#endif
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 98ddf07b654..960b6c18b20 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -35,21 +35,16 @@
#define USE_DISCOVER_ON_STARTUP
//#define USE_NDB_POOL
-#define USE_EXTRA_ORDERED_INDEX
// Default value for parallelism
static const int parallelism= 240;
+// Default value for max number of transactions
+// creatable against NDB from this handler
+static const int max_transactions= 256;
+
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
-/*
- All error messages returned from ha_ndbcluster that are
- not mapped to the corresponding handler(HA_ERR_*) error code
- have NDB_ERR_CODE_OFFSET added to it so that it does not clash with
- the handler error codes. The error number is then "restored"
- to the original error number when get_error_message is called.
-*/
-#define NDB_ERR_CODE_OFFSET 30000
#define ERR_PRINT(err) \
DBUG_PRINT("error", ("Error: %d message: %s", err.code, err.message))
@@ -68,10 +63,6 @@ typedef NdbDictionary::Dictionary NDBDICT;
bool ndbcluster_inited= false;
-#ifdef USE_EXTRA_ORDERED_INDEX
-static const char* unique_suffix= "$unique";
-#endif
-
static Ndb* g_ndb= NULL;
// Handler synchronization
@@ -131,7 +122,7 @@ static int ndb_to_mysql_error(const NdbError *err)
for (i=0 ; err_map[i].ndb_err != err->code ; i++)
{
if (err_map[i].my_err == -1)
- return err->code+NDB_ERR_CODE_OFFSET;
+ return err->code;
}
return err_map[i].my_err;
}
@@ -173,24 +164,20 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
error message of NDB
*/
-const char* ha_ndbcluster::get_error_message(int *org_error,
- bool *temporary)
+bool ha_ndbcluster::get_error_message(int error,
+ String *buf)
{
DBUG_ENTER("ha_ndbcluster::get_error_message");
- DBUG_PRINT("enter", ("error: %d", *org_error));
+ DBUG_PRINT("enter", ("error: %d", error));
- int error= *org_error;
- if (error < NDB_ERR_CODE_OFFSET)
- DBUG_RETURN(NULL);
+ if (!m_ndb)
+ DBUG_RETURN(false);
- error-= NDB_ERR_CODE_OFFSET;
- DBUG_ASSERT(m_ndb); // What should be done if not m_ndb is available?
const NdbError err= m_ndb->getNdbError(error);
- *temporary= (err.status==NdbError::TemporaryError);
-
- *org_error= error;
- DBUG_PRINT("exit", ("error: %d, msg: %s", error, err.message));
- DBUG_RETURN(err.message);
+ bool temporary= err.status==NdbError::TemporaryError;
+ buf->set(err.message, strlen(err.message), &my_charset_bin);
+ DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
+ DBUG_RETURN(temporary);
}
@@ -348,7 +335,7 @@ int ha_ndbcluster::get_metadata(const char *path)
const NDBTAB *tab;
const void *data, *pack_data;
const char **key_name;
- uint ndb_columns, mysql_columns, length, pack_length, i;
+ uint ndb_columns, mysql_columns, length, pack_length;
int error;
DBUG_ENTER("get_metadata");
DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
@@ -404,24 +391,28 @@ int ha_ndbcluster::get_metadata(const char *path)
// All checks OK, lets use the table
m_table= (void*)tab;
- for (i= 0; i < MAX_KEY; i++)
- {
- m_indextype[i]= UNDEFINED_INDEX;
- m_unique_index_name[i]= NULL;
- }
+ DBUG_RETURN(build_index_list());
+}
+int ha_ndbcluster::build_index_list()
+{
+ char *name;
+ const char *index_name;
+ static const char* unique_suffix= "$unique";
+ uint i, name_len;
+ DBUG_ENTER("build_index_list");
+
// Save information about all known indexes
- for (i= 0; i < table->keys; i++)
+  for (i= 0; i < table->keys; i++)
{
- m_indextype[i]= get_index_type_from_table(i);
-
-#ifdef USE_EXTRA_ORDERED_INDEX
- if (m_indextype[i] == UNIQUE_INDEX)
+ NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
+ m_indextype[i]= idx_type;
+
+ if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
{
- char *name;
- const char *index_name= get_index_name(i);
- int name_len= strlen(index_name)+strlen(unique_suffix)+1;
-
+ index_name= get_index_name(i);
+ name_len= strlen(index_name)+strlen(unique_suffix)+1;
+      // Create name for unique index by appending "$unique"
if (!(name= my_malloc(name_len, MYF(MY_WME))))
DBUG_RETURN(2);
strxnmov(name, name_len, index_name, unique_suffix, NullS);
@@ -429,40 +420,42 @@ int ha_ndbcluster::get_metadata(const char *path)
DBUG_PRINT("info", ("Created unique index name: %s for index %d",
name, i));
}
-#endif
}
-
- DBUG_RETURN(0);
+ DBUG_RETURN(0);
}
+
/*
Decode the type of an index from information
provided in table object
*/
-NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint index_no) const
+NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const
{
- if (index_no == table->primary_key)
- return PRIMARY_KEY_INDEX;
+ bool is_hash_index= (table->key_info[inx].algorithm == HA_KEY_ALG_HASH);
+ if (inx == table->primary_key)
+ return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX;
else
- return ((table->key_info[index_no].flags & HA_NOSAME) ?
- UNIQUE_INDEX :
+ return ((table->key_info[inx].flags & HA_NOSAME) ?
+ (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) :
ORDERED_INDEX);
}
-
+
void ha_ndbcluster::release_metadata()
{
- int i;
+ uint i;
DBUG_ENTER("release_metadata");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
m_table= NULL;
+ // Release index list
for (i= 0; i < MAX_KEY; i++)
{
- my_free((char*)m_unique_index_name[i], MYF(MY_ALLOW_ZERO_PTR));
+ if (m_unique_index_name[i])
+ my_free((char*)m_unique_index_name[i], MYF(0));
m_unique_index_name[i]= NULL;
}
@@ -481,6 +474,9 @@ static const ulong index_type_flags[]=
0,
/* PRIMARY_KEY_INDEX */
+ HA_NOT_READ_PREFIX_LAST,
+
+ /* PRIMARY_KEY_ORDERED_INDEX */
/*
Enable HA_KEY_READ_ONLY when "sorted" indexes are supported,
thus ORDERD BY clauses can be optimized by reading directly
@@ -491,6 +487,9 @@ static const ulong index_type_flags[]=
/* UNIQUE_INDEX */
HA_NOT_READ_PREFIX_LAST,
+ /* UNIQUE_ORDERED_INDEX */
+ HA_NOT_READ_PREFIX_LAST,
+
/* ORDERED_INDEX */
HA_READ_NEXT |
HA_READ_PREV |
@@ -506,15 +505,8 @@ inline const char* ha_ndbcluster::get_index_name(uint idx_no) const
inline const char* ha_ndbcluster::get_unique_index_name(uint idx_no) const
{
-#ifdef USE_EXTRA_ORDERED_INDEX
- DBUG_ASSERT(idx_no < MAX_KEY);
- DBUG_ASSERT(m_unique_index_name[idx_no]);
return m_unique_index_name[idx_no];
-#else
- return get_index_name(idx_no);
-#endif
-
- }
+}
inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const
{
@@ -1521,7 +1513,7 @@ int ha_ndbcluster::index_read(byte *buf,
start_key.key= key;
start_key.length= key_len;
start_key.flag= find_flag;
- DBUG_RETURN(read_range_first(&start_key, NULL, true));
+ DBUG_RETURN(read_range_first(&start_key, NULL, false, true));
}
@@ -1573,18 +1565,19 @@ int ha_ndbcluster::index_last(byte *buf)
int ha_ndbcluster::read_range_first(const key_range *start_key,
const key_range *end_key,
- bool sorted)
+ bool eq_range, bool sorted)
{
KEY* key_info;
int error= 1;
byte* buf= table->record[0];
DBUG_ENTER("ha_ndbcluster::read_range_first");
- DBUG_PRINT("info", ("sorted: %d", sorted));
+ DBUG_PRINT("info", ("eq_range: %d, sorted: %d", eq_range, sorted));
if (m_active_cursor)
close_scan();
switch (get_index_type(active_index)){
+ case PRIMARY_KEY_ORDERED_INDEX:
case PRIMARY_KEY_INDEX:
key_info= table->key_info + active_index;
if (start_key &&
@@ -1595,6 +1588,7 @@ int ha_ndbcluster::read_range_first(const key_range *start_key,
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
}
break;
+ case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
key_info= table->key_info + active_index;
if (start_key &&
@@ -1618,7 +1612,7 @@ int ha_ndbcluster::read_range_first(const key_range *start_key,
}
-int ha_ndbcluster::read_range_next(bool eq_range)
+int ha_ndbcluster::read_range_next()
{
DBUG_ENTER("ha_ndbcluster::read_range_next");
DBUG_RETURN(next_result(table->record[0]));
@@ -2042,6 +2036,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
if (lock_type != F_UNLCK)
{
+ DBUG_PRINT("info", ("lock_type != F_UNLCK"));
if (!thd->transaction.ndb_lock_count++)
{
PRINT_OPTION_FLAGS(thd);
@@ -2114,6 +2109,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
}
else
{
+ DBUG_PRINT("info", ("lock_type == F_UNLCK"));
if (!--thd->transaction.ndb_lock_count)
{
DBUG_PRINT("trans", ("Last external_lock"));
@@ -2390,15 +2386,8 @@ int ha_ndbcluster::create(const char *name,
DBUG_PRINT("info", ("Table %s/%s created successfully",
m_dbname, m_tabname));
- // Fetch table from NDB, check that it exists
- const NDBTAB *tab2= dict->getTable(m_tabname);
- if (tab2 == NULL)
- {
- const NdbError err= dict->getNdbError();
- ERR_PRINT(err);
- my_errno= ndb_to_mysql_error(&err);
+ if ((my_errno= build_index_list()))
DBUG_RETURN(my_errno);
- }
// Create secondary indexes
KEY* key_info= form->key_info;
@@ -2407,18 +2396,30 @@ int ha_ndbcluster::create(const char *name,
{
int error= 0;
DBUG_PRINT("info", ("Index %u: %s", i, *key_name));
- if (i == form->primary_key)
- {
-#ifdef USE_EXTRA_ORDERED_INDEX
- error= create_ordered_index(*key_name, key_info);
-#endif
- }
- else if (key_info->flags & HA_NOSAME)
- error= create_unique_index(*key_name, key_info);
- else
- error= create_ordered_index(*key_name, key_info);
-
+ switch (get_index_type_from_table(i)){
+
+ case PRIMARY_KEY_INDEX:
+ // Do nothing, already created
+ break;
+ case PRIMARY_KEY_ORDERED_INDEX:
+ error= create_ordered_index(*key_name, key_info);
+ break;
+ case UNIQUE_ORDERED_INDEX:
+ if (!(error= create_ordered_index(*key_name, key_info)))
+ error= create_unique_index(get_unique_index_name(i), key_info);
+ break;
+ case UNIQUE_INDEX:
+ error= create_unique_index(get_unique_index_name(i), key_info);
+ break;
+ case ORDERED_INDEX:
+ error= create_ordered_index(*key_name, key_info);
+ break;
+ default:
+ DBUG_ASSERT(false);
+ break;
+ }
+
if (error)
{
DBUG_PRINT("error", ("Failed to create index %u", i));
@@ -2442,29 +2443,9 @@ int ha_ndbcluster::create_ordered_index(const char *name,
int ha_ndbcluster::create_unique_index(const char *name,
KEY *key_info)
{
- int error;
- const char* unique_name= name;
+
DBUG_ENTER("create_unique_index");
-
-#ifdef USE_EXTRA_ORDERED_INDEX
- char buf[FN_HEADLEN];
- strxnmov(buf, FN_HEADLEN, name, unique_suffix, NullS);
- unique_name= buf;
-#endif
-
- error= create_index(unique_name, key_info, true);
- if (error)
- DBUG_RETURN(error);
-
-#ifdef USE_EXTRA_ORDERED_INDEX
- /*
- If unique index contains more then one attribute
- an ordered index should be created to support
- partial key search
- */
- error= create_ordered_index(name, key_info);
-#endif
- DBUG_RETURN(error);
+ DBUG_RETURN(create_index(name, key_info, true));
}
@@ -2751,7 +2732,7 @@ Ndb* ha_ndbcluster::seize_ndb()
#else
ndb= new Ndb("");
#endif
- if (ndb->init(NDB_MAX_TRANSACTIONS) != 0)
+ if (ndb->init(max_transactions) != 0)
{
ERR_PRINT(ndb->getNdbError());
/*
@@ -3051,49 +3032,27 @@ ha_rows
ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
key_range *max_key)
{
- ha_rows records= 10; /* Good guess when you don't know anything */
KEY *key_info= table->key_info + inx;
uint key_length= key_info->key_length;
+ NDB_INDEX_TYPE idx_type= get_index_type(inx);
DBUG_ENTER("records_in_range");
DBUG_PRINT("enter", ("inx: %u", inx));
- DBUG_DUMP("start_key", min_key->key, min_key->length);
- DBUG_DUMP("end_key", max_key->key, max_key->length);
- DBUG_PRINT("enter", ("start_search_flag: %u end_search_flag: %u",
- min_key->flag, max_key->flag));
-#ifndef USE_EXTRA_ORDERED_INDEX
- /*
- Check that start_key_len is equal to
- the length of the used index and
- prevent partial scan/read of hash indexes by returning HA_POS_ERROR
- */
- NDB_INDEX_TYPE idx_type= get_index_type(inx);
- if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) &&
- min_key->length < key_length)
- {
- DBUG_PRINT("warning", ("Tried to use index which required"
- "full key length: %d, HA_POS_ERROR",
- key_length));
- records= HA_POS_ERROR;
- }
-#else
- /*
- Extra ordered indexes are created primarily
- to support partial key scan/read and range scans of hash indexes.
- I.e. the ordered index are used instead of the hash indexes for
- these queries.
- */
- NDB_INDEX_TYPE idx_type= get_index_type(inx);
- if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) &&
- start_key_len == key_length)
- {
- // this is a "const" table which returns only one record!
- records= 1;
- }
-#endif
- DBUG_PRINT("exit", ("records: %d", records));
- DBUG_RETURN(records);
+ // Prevent partial read of hash indexes by returning HA_POS_ERROR
+ if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) &&
+ ((min_key && min_key->length < key_length) ||
+ (max_key && max_key->length < key_length)))
+ DBUG_RETURN(HA_POS_ERROR);
+
+ // Read from hash index with full key
+ // This is a "const" table which returns only one record!
+ if ((idx_type != ORDERED_INDEX) &&
+ ((min_key && min_key->length == key_length) ||
+ (max_key && max_key->length == key_length)))
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(10); /* Good guess when you don't know anything */
}
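
The rewritten records_in_range() above collapses the old #ifdef USE_EXTRA_ORDERED_INDEX branches into a single rule: refuse partial-key ranges on pure hash indexes, treat a full key on any hash-backed index as a single-row ("const") lookup, and otherwise fall back to the old guess of 10 rows. A minimal standalone sketch of that rule, using hypothetical stand-in types instead of ha_rows/HA_POS_ERROR/NDB_INDEX_TYPE:

  // Sketch only: mirrors the estimate above with stand-in types.
  #include <cstddef>

  typedef unsigned long long rows_t;             // stand-in for ha_rows
  static const rows_t POS_ERROR = ~(rows_t)0;    // stand-in for HA_POS_ERROR

  // HASH_ONLY         ~ PRIMARY_KEY_INDEX, UNIQUE_INDEX
  // HASH_PLUS_ORDERED ~ PRIMARY_KEY_ORDERED_INDEX, UNIQUE_ORDERED_INDEX
  // ORDERED_ONLY      ~ ORDERED_INDEX
  enum Kind { HASH_ONLY, HASH_PLUS_ORDERED, ORDERED_ONLY };

  // key_len: full key length of the index; min_len/max_len: lengths of the
  // supplied range bounds, 0 meaning "no bound given".
  rows_t estimate_records_in_range(Kind kind, size_t key_len,
                                   size_t min_len, size_t max_len)
  {
    // A pure hash index cannot serve a partial-key range: refuse it.
    if (kind == HASH_ONLY &&
        ((min_len && min_len < key_len) || (max_len && max_len < key_len)))
      return POS_ERROR;

    // A full key on any hash-backed index hits at most one row.
    if (kind != ORDERED_ONLY &&
        (min_len == key_len || max_len == key_len))
      return 1;

    return 10;  // good guess when you don't know anything
  }
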
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 7b182d81ff5..df296648272 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -37,8 +37,10 @@ class NdbResultSet; // Forward declaration
typedef enum ndb_index_type {
UNDEFINED_INDEX = 0,
PRIMARY_KEY_INDEX = 1,
- UNIQUE_INDEX = 2,
- ORDERED_INDEX = 3
+ PRIMARY_KEY_ORDERED_INDEX = 2,
+ UNIQUE_INDEX = 3,
+ UNIQUE_ORDERED_INDEX = 4,
+ ORDERED_INDEX = 5
} NDB_INDEX_TYPE;
@@ -78,10 +80,10 @@ class ha_ndbcluster: public handler
void position(const byte *record);
int read_range_first(const key_range *start_key,
const key_range *end_key,
- bool sorted);
- int read_range_next(bool eq_range);
+ bool eq_range, bool sorted);
+ int read_range_next();
- const char* get_error_message(int *error, bool *temporary);
+ bool get_error_message(int error, String *buf);
void info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
@@ -117,6 +119,8 @@ class ha_ndbcluster: public handler
const char* index_type(uint key_number) {
switch (get_index_type(key_number)) {
case ORDERED_INDEX:
+ case UNIQUE_ORDERED_INDEX:
+ case PRIMARY_KEY_ORDERED_INDEX:
return "BTREE";
case UNIQUE_INDEX:
case PRIMARY_KEY_INDEX:
@@ -141,6 +145,7 @@ class ha_ndbcluster: public handler
int create_ordered_index(const char *name, KEY *key_info);
int create_unique_index(const char *name, KEY *key_info);
int initialize_autoincrement(const void* table);
+ int build_index_list();
int get_metadata(const char* path);
void release_metadata();
const char* get_index_name(uint idx_no) const;
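
The extended NDB_INDEX_TYPE above separates hash-only indexes from those that get a companion ordered index. A hedged sketch of the classification done by get_index_type_from_table(), reduced to plain flags (the real code inspects table->key_info[inx].algorithm, HA_NOSAME and table->primary_key; classify() and the _S names are illustrative, not part of the handler):

  // Sketch only: same value layout as NDB_INDEX_TYPE above.
  enum ndb_index_type_sketch {
    UNDEFINED_INDEX_S = 0,
    PRIMARY_KEY_INDEX_S = 1,          // PRIMARY KEY ... USING HASH
    PRIMARY_KEY_ORDERED_INDEX_S = 2,  // PRIMARY KEY, hash plus ordered companion
    UNIQUE_INDEX_S = 3,               // UNIQUE ... USING HASH
    UNIQUE_ORDERED_INDEX_S = 4,       // UNIQUE, hash plus ordered companion
    ORDERED_INDEX_S = 5               // plain non-unique index
  };

  ndb_index_type_sketch classify(bool is_primary, bool is_unique, bool uses_hash)
  {
    if (is_primary)
      return uses_hash ? PRIMARY_KEY_INDEX_S : PRIMARY_KEY_ORDERED_INDEX_S;
    if (is_unique)
      return uses_hash ? UNIQUE_INDEX_S : UNIQUE_ORDERED_INDEX_S;
    return ORDERED_INDEX_S;
  }

build_index_list() derives the "$unique" NDB index name for the two UNIQUE variants, and create() builds the ordered companion for the *_ORDERED_INDEX variants.
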
diff --git a/sql/handler.cc b/sql/handler.cc
index 3301dd7c04e..0a8e09e4145 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1123,14 +1123,15 @@ void handler::print_error(int error, myf errflag)
/* The error was "unknown" to this function.
Ask handler if it has got a message for this error */
bool temporary= FALSE;
- const char* msg= get_error_message(&error, &temporary);
- if (msg)
+ String str;
+ temporary= get_error_message(error, &str);
+ if (!str.is_empty())
{
const char* engine= ha_get_storage_engine(table->db_type);
if (temporary)
- my_error(ER_GET_TEMPORARY_ERRMSG,MYF(0),error,msg,engine);
+ my_error(ER_GET_TEMPORARY_ERRMSG,MYF(0),error,str.ptr(),engine);
else
- my_error(ER_GET_ERRMSG,MYF(0),error,msg,engine);
+ my_error(ER_GET_ERRMSG,MYF(0),error,str.ptr(),engine);
}
else
my_error(ER_GET_ERRNO,errflag,error);
@@ -1146,15 +1147,15 @@ void handler::print_error(int error, myf errflag)
Return an error message specific to this handler
SYNOPSIS
- error [in/out] error code previously returned by handler
- temporary [out] temporary error, transaction should be retried if true
+ error error code previously returned by handler
+    buf            Pointer to String where the error message is stored
- The returned pointer to error message should not be freed.
+ Returns true if this is a temporary error
*/
-const char* handler::get_error_message(int *error, bool *temporary)
+bool handler::get_error_message(int error, String* buf)
{
- return NULL;
+ return false;
}
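
With the new signature, get_error_message() fills a caller-supplied String and reports "temporary error" through its return value instead of an out-parameter. A minimal sketch of the producer/consumer pairing, using a stand-in buffer type rather than the server's String class (names below are illustrative only):

  // Sketch only: stand-ins for the server types, not the real handler API.
  #include <cstdio>
  #include <string>

  struct Buf {                        // stand-in for String
    std::string s;
    bool is_empty() const { return s.empty(); }
    const char *ptr() const { return s.c_str(); }
  };

  // Engine side (cf. ha_ndbcluster::get_error_message): fill *buf and return
  // whether the error is temporary. Error texts here are made up.
  static bool engine_get_error_message(int error, Buf *buf)
  {
    bool temporary = (error == 1);
    buf->s = temporary ? "example temporary failure"
                       : "example permanent failure";
    return temporary;
  }

  // Caller side (cf. handler::print_error): report only if a message was set.
  static void report(int error)
  {
    Buf str;
    bool temporary = engine_get_error_message(error, &str);
    if (!str.is_empty())
      std::printf("Got error %d '%s' (%s)\n", error, str.ptr(),
                  temporary ? "temporary" : "permanent");
  }

  int main() { report(1); report(2); return 0; }
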
diff --git a/sql/handler.h b/sql/handler.h
index 17151877286..0f8edc2cf12 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -288,7 +288,7 @@ public:
void update_timestamp(byte *record);
void update_auto_increment();
virtual void print_error(int error, myf errflag);
- virtual const char* get_error_message(int *error, bool *temporary);
+ virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
void change_table_ptr(TABLE *table_arg) { table=table_arg; }
virtual double scan_time()