Merge mysql.com:/windows/Linux_space/MySQL/mysql-5.0-ndb
into  mysql.com:/windows/Linux_space/MySQL/mysql-5.1-new-ndb

mysql-test/suite/ndb/r/ndb_update.result: Auto merged
mysql-test/suite/ndb/t/ndb_update.test: Auto merged
sql/ha_ndbcluster.cc: Merge
sql/ha_ndbcluster.h: Merge

commit 1acfc89033
4 changed files with 50 additions and 5 deletions

mysql-test/suite/ndb/r/ndb_update.result
@@ -41,6 +41,14 @@ pk1 b c
 10 0 0
 12 2 2
 14 1 1
+create unique index ib on t1(b);
+update t1 set c = 4 where pk1 = 12;
+update ignore t1 set b = 55 where pk1 = 14;
+select * from t1 order by pk1;
+pk1 b c
+10 0 0
+12 2 4
+14 55 1
 DROP TABLE IF EXISTS t1;
 CREATE TABLE t1 (a int, b int, KEY (a, b)) ENGINE=ndbcluster;
 CREATE TABLE t2 (a int, b int, UNIQUE KEY (a, b)) ENGINE=ndbcluster;

mysql-test/suite/ndb/t/ndb_update.test
@@ -35,6 +35,11 @@ UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4;
 select * from t1 order by pk1;
 UPDATE t1 set pk1 = pk1 + 10;
 select * from t1 order by pk1;
+# bug#25817
+create unique index ib on t1(b);
+update t1 set c = 4 where pk1 = 12;
+update ignore t1 set b = 55 where pk1 = 14;
+select * from t1 order by pk1;
 
 --disable_warnings
 DROP TABLE IF EXISTS t1;

sql/ha_ndbcluster.cc
@@ -1643,6 +1643,30 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const uchar *record)
   DBUG_RETURN(0);
 }
 
+bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
+{
+  KEY* key_info= table->key_info + keyno;
+  KEY_PART_INFO* key_part= key_info->key_part;
+  KEY_PART_INFO* end= key_part+key_info->key_parts;
+  uint i;
+  DBUG_ENTER("check_index_fields_in_write_set");
+
+  if (m_retrieve_all_fields)
+  {
+    DBUG_RETURN(true);
+  }
+  for (i= 0; key_part != end; key_part++, i++)
+  {
+    Field* field= key_part->field;
+    if (field->query_id != current_thd->query_id)
+    {
+      DBUG_RETURN(false);
+    }
+  }
+
+  DBUG_RETURN(true);
+}
+
 int ha_ndbcluster::set_index_key_from_record(NdbOperation *op,
                                              const uchar *record, uint keyno)
 {
@@ -1961,8 +1985,8 @@ check_null_in_record(const KEY* key_info, const uchar *record)
  * primary key or unique index values
  */
 
-int ha_ndbcluster::peek_indexed_rows(const uchar *record,
-                                     bool check_pk)
+int ha_ndbcluster::peek_indexed_rows(const uchar *record,
+                                     NDB_WRITE_OP write_op)
 {
   NdbTransaction *trans= m_active_trans;
   NdbOperation *op;
@@ -2721,7 +2745,7 @@ int ha_ndbcluster::write_row(uchar *record)
       start_bulk_insert will set parameters to ensure that each
       write_row is committed individually
     */
-    int peek_res= peek_indexed_rows(record, TRUE);
+    int peek_res= peek_indexed_rows(record, NDB_INSERT);
 
     if (!peek_res)
     {
@@ -2965,7 +2989,8 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
   if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
                            thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
   {
-    int peek_res= peek_indexed_rows(new_data, pk_update);
+    NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE;
+    int peek_res= peek_indexed_rows(new_data, write_op);
 
     if (!peek_res)
     {
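
Note on the new check: check_index_fields_in_write_set() reports whether every column of a given index was assigned by the current statement (this tree tracks that by comparing Field::query_id with the current query id), so the duplicate-key peek can be limited to unique indexes whose key values are actually valid in the record buffer; the call site inside peek_indexed_rows() is not part of the hunks shown here. A minimal standalone sketch of that decision, using a toy column type instead of the real Field/KEY structures, is:

// Toy model only: the real code walks KEY_PART_INFO entries and compares
// Field::query_id against current_thd->query_id, as in the hunk above.
#include <cstdint>
#include <iostream>
#include <vector>

struct ToyColumn {
  uint64_t last_assigned_query_id;   // stands in for Field::query_id
};

// Stands in for ha_ndbcluster::check_index_fields_in_write_set(): true when
// every column of the index was assigned by the current statement.
bool index_fully_assigned(const std::vector<ToyColumn>& key_columns,
                          uint64_t current_query_id,
                          bool retrieve_all_fields)
{
  if (retrieve_all_fields)
    return true;                     // all fields are handled, key values valid
  for (const ToyColumn& col : key_columns)
  {
    if (col.last_assigned_query_id != current_query_id)
      return false;                  // key column untouched by this statement
  }
  return true;
}

int main()
{
  const uint64_t query_id = 42;
  std::vector<ToyColumn> b_not_assigned = {{41}};  // like UPDATE ... SET c = 4
  std::vector<ToyColumn> b_assigned     = {{42}};  // like UPDATE IGNORE ... SET b = 55
  std::cout << index_fully_assigned(b_not_assigned, query_id, false) << "\n"; // 0
  std::cout << index_fully_assigned(b_assigned, query_id, false) << "\n";     // 1
  return 0;
}

Compiled with any C++11 compiler this prints 0 for the statement that leaves the unique column b untouched and 1 for the one that assigns it, mirroring the two UPDATEs added to ndb_update.test.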

sql/ha_ndbcluster.h
@@ -81,6 +81,12 @@ typedef struct ndb_index_data {
   uint index_stat_query_count;
 } NDB_INDEX_DATA;
 
+typedef enum ndb_write_op {
+  NDB_INSERT = 0,
+  NDB_UPDATE = 1,
+  NDB_PK_UPDATE = 2
+} NDB_WRITE_OP;
+
 typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
 
 int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
@@ -438,7 +444,7 @@ private:
                             const NdbOperation *first,
                             const NdbOperation *last,
                             uint errcode);
-  int peek_indexed_rows(const uchar *record, bool check_pk);
+  int peek_indexed_rows(const uchar *record, NDB_WRITE_OP write_op);
   int fetch_next(NdbScanOperation* op);
   int next_result(uchar *buf);
   int define_read_attrs(uchar* buf, NdbOperation* op);
@@ -463,6 +469,7 @@ private:
   friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
   int set_primary_key(NdbOperation *op, const uchar *key);
   int set_primary_key_from_record(NdbOperation *op, const uchar *record);
+  bool check_index_fields_in_write_set(uint keyno);
   int set_index_key_from_record(NdbOperation *op, const uchar *record,
                                 uint keyno);
  int set_bounds(NdbIndexScanOperation*, uint inx, bool rir,
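
Note on NDB_WRITE_OP: the enum replaces the old bool check_pk argument of peek_indexed_rows() so the caller can say which kind of write is being verified. In the ha_ndbcluster.cc hunks above, write_row() passes NDB_INSERT and update_row() passes NDB_PK_UPDATE when the primary key changes and NDB_UPDATE otherwise. Judging only from the flag it replaces (check_pk was TRUE for inserts and pk_update for updates), NDB_UPDATE would be the one case that skips the primary-key peek; the sketch below models that dispatch with hypothetical helper names (choose_write_op and must_peek_primary_key are not part of the patch):

// Illustrative only, assumed from the call sites and the old check_pk flag,
// not copied from peek_indexed_rows(). Helper names are hypothetical.
#include <iostream>

enum NDB_WRITE_OP      // same values as the enum added to ha_ndbcluster.h
{
  NDB_INSERT = 0,
  NDB_UPDATE = 1,
  NDB_PK_UPDATE = 2
};

// Hypothetical: mirrors the update_row() hunk, where pk_update selects
// NDB_PK_UPDATE and any other UPDATE selects NDB_UPDATE.
NDB_WRITE_OP choose_write_op(bool inserting, bool pk_changed)
{
  if (inserting)
    return NDB_INSERT;
  return pk_changed ? NDB_PK_UPDATE : NDB_UPDATE;
}

// Hypothetical: only writes that can introduce a new primary-key value need
// the primary-key duplicate peek (the old code passed check_pk=TRUE for
// inserts and check_pk=pk_update for updates).
bool must_peek_primary_key(NDB_WRITE_OP op)
{
  return op != NDB_UPDATE;
}

int main()
{
  std::cout << must_peek_primary_key(choose_write_op(true,  false)) << "\n"; // insert: 1
  std::cout << must_peek_primary_key(choose_write_op(false, false)) << "\n"; // update: 0
  std::cout << must_peek_primary_key(choose_write_op(false, true))  << "\n"; // pk update: 1
  return 0;
}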