Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0-ndb
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.0-ndb
commit 77a1d31fbc
12 changed files with 421 additions and 9 deletions
@@ -160,7 +160,15 @@ enum ha_extra_function {
    Off by default.
  */
  HA_EXTRA_WRITE_CAN_REPLACE,
  HA_EXTRA_WRITE_CANNOT_REPLACE
  HA_EXTRA_WRITE_CANNOT_REPLACE,
  /*
    Inform handler that delete_row()/update_row() cannot batch deletes/updates
    and should perform them immediately. This may be needed when the table has
    AFTER DELETE/UPDATE triggers which access the subject table.
    These flags are reset by the handler::extra(HA_EXTRA_RESET) call.
  */
  HA_EXTRA_DELETE_CANNOT_BATCH,
  HA_EXTRA_UPDATE_CANNOT_BATCH
};

/* The following is parameter to ha_panic() */
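The contract behind the two new flags: the SQL layer calls handler::extra() with HA_EXTRA_DELETE_CANNOT_BATCH or HA_EXTRA_UPDATE_CANNOT_BATCH before row processing when the table has AFTER DELETE/UPDATE triggers, the handler then applies each delete_row()/update_row() immediately instead of deferring it into a batch, and the flags are cleared again at statement end. The following is a minimal standalone sketch of that protocol, not MySQL source: the member names mirror the ha_ndbcluster changes further down in this commit, while the toy_handler class and its method bodies are purely illustrative.

// Standalone sketch (not MySQL source): models how the two new flags travel
// from the SQL layer to a storage engine and back to their defaults at reset().
#include <cstdio>

enum ha_extra_function { HA_EXTRA_DELETE_CANNOT_BATCH, HA_EXTRA_UPDATE_CANNOT_BATCH };

class toy_handler {
  bool m_delete_cannot_batch{false};   // same member names as the ha_ndbcluster change
  bool m_update_cannot_batch{false};
public:
  int extra(ha_extra_function op) {
    if (op == HA_EXTRA_DELETE_CANNOT_BATCH) m_delete_cannot_batch = true;
    if (op == HA_EXTRA_UPDATE_CANNOT_BATCH) m_update_cannot_batch = true;
    return 0;
  }
  int reset() {                        // statement end: the flags never persist
    m_delete_cannot_batch = m_update_cannot_batch = false;
    return 0;
  }
  int update_row() {
    if (m_update_cannot_batch) {
      std::puts("execute update immediately (AFTER UPDATE trigger present)");
      return 0;
    }
    std::puts("defer update into the current batch");
    return 0;
  }
};

int main() {
  toy_handler h;
  h.update_row();                          // batched as usual
  h.extra(HA_EXTRA_UPDATE_CANNOT_BATCH);   // SQL layer saw an AFTER UPDATE trigger
  h.update_row();                          // executed immediately
  h.reset();                               // next statement starts with batching again
}

Keeping the flags as plain handler members and clearing them in reset() means a statement that disables batching cannot leak that state into the next statement.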
@@ -116,4 +116,175 @@ op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
drop tables t1, t2, t3;
CREATE TABLE t1 (
id INT NOT NULL PRIMARY KEY,
xy INT
) ENGINE=ndbcluster;
INSERT INTO t1 VALUES (1, 0);
CREATE TRIGGER t1_update AFTER UPDATE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id = NEW.id; END //
CREATE TABLE t2 (
id INT NOT NULL PRIMARY KEY,
xy INT
) ENGINE=ndbcluster;
INSERT INTO t2 VALUES (2, 0);
CREATE TABLE t3 (id INT NOT NULL PRIMARY KEY) ENGINE=ndbcluster;
INSERT INTO t3 VALUES (1);
CREATE TABLE t4 LIKE t1;
CREATE TRIGGER t4_update AFTER UPDATE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id = NEW.id; END //
CREATE TABLE t5 LIKE t2;
UPDATE t1 SET xy = 3 WHERE id = 1;
SELECT xy FROM t1 where id = 1;
xy
3
SELECT xy FROM t2 where id = 1;
xy
3
UPDATE t1 SET xy = 4 WHERE id IN (SELECT id FROM t3 WHERE id = 1);
SELECT xy FROM t1 where id = 1;
xy
4
SELECT xy FROM t2 where id = 1;
xy
4
INSERT INTO t4 SELECT * FROM t1;
INSERT INTO t5 SELECT * FROM t2;
UPDATE t1,t4 SET t1.xy = 3, t4.xy = 3 WHERE t1.id = 1 AND t4.id = 1;
SELECT xy FROM t1 where id = 1;
xy
3
SELECT xy FROM t2 where id = 1;
xy
3
SELECT xy FROM t4 where id = 1;
xy
3
SELECT xy FROM t5 where id = 1;
xy
3
UPDATE t1,t4 SET t1.xy = 4, t4.xy = 4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 1) AND t4.id IN (SELECT id FROM t3 WHERE id = 1);
SELECT xy FROM t1 where id = 1;
xy
4
SELECT xy FROM t2 where id = 1;
xy
4
SELECT xy FROM t4 where id = 1;
xy
4
SELECT xy FROM t5 where id = 1;
xy
4
INSERT INTO t1 VALUES (1,0) ON DUPLICATE KEY UPDATE xy = 5;
SELECT xy FROM t1 where id = 1;
xy
5
SELECT xy FROM t2 where id = 1;
xy
5
DROP TRIGGER t1_update;
DROP TRIGGER t4_update;
CREATE TRIGGER t1_delete AFTER DELETE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id > 4; END //
CREATE TRIGGER t4_delete AFTER DELETE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id > 4; END //
INSERT INTO t1 VALUES (5, 0),(6,0);
INSERT INTO t2 VALUES (5, 1),(6,1);
INSERT INTO t3 VALUES (5);
SELECT * FROM t1 order by id;
id xy
1 5
5 0
6 0
SELECT * FROM t2 order by id;
id xy
1 5
2 0
5 1
6 1
DELETE FROM t1 WHERE id IN (SELECT id FROM t3 WHERE id = 5);
SELECT * FROM t1 order by id;
id xy
1 5
6 0
SELECT * FROM t2 order by id;
id xy
1 5
2 0
5 1
6 0
INSERT INTO t1 VALUES (5,0);
UPDATE t2 SET xy = 1 WHERE id = 6;
TRUNCATE t4;
INSERT INTO t4 SELECT * FROM t1;
TRUNCATE t5;
INSERT INTO t5 SELECT * FROM t2;
SELECT * FROM t1 order by id;
id xy
1 5
5 0
6 0
SELECT * FROM t2 order by id;
id xy
1 5
2 0
5 1
6 1
SELECT * FROM t4 order by id;
id xy
1 5
5 0
6 0
SELECT * FROM t5 order by id;
id xy
1 5
2 0
5 1
6 1
DELETE FROM t1,t4 USING t1,t3,t4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 5) AND t4.id IN (SELECT id FROM t3 WHERE id = 5);
SELECT * FROM t1 order by id;
id xy
1 5
6 0
SELECT * FROM t2 order by id;
id xy
1 5
2 0
5 1
6 0
SELECT * FROM t4 order by id;
id xy
1 5
6 0
SELECT * FROM t5 order by id;
id xy
1 5
2 0
5 1
6 0
INSERT INTO t1 VALUES (5, 0);
REPLACE INTO t2 VALUES (6,1);
SELECT * FROM t1 order by id;
id xy
1 5
5 0
6 0
SELECT * FROM t2 order by id;
id xy
1 5
2 0
5 1
6 1
REPLACE INTO t1 VALUES (5, 1);
SELECT * FROM t1 order by id;
id xy
1 5
5 1
6 0
SELECT * FROM t2 order by id;
id xy
1 5
2 0
5 1
6 0
DROP TRIGGER t1_delete;
DROP TRIGGER t4_delete;
DROP TABLE t1, t2, t3, t4, t5;
End of 5.0 tests
@@ -89,4 +89,112 @@ select * from t2 order by op, a, b;

drop tables t1, t2, t3;

# Test for bug#26242
# Verify that AFTER UPDATE/DELETE triggers are executed
# after the change has actually taken place

CREATE TABLE t1 (
  id INT NOT NULL PRIMARY KEY,
  xy INT
) ENGINE=ndbcluster;

INSERT INTO t1 VALUES (1, 0);

DELIMITER //;
CREATE TRIGGER t1_update AFTER UPDATE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id = NEW.id; END //
DELIMITER ;//

CREATE TABLE t2 (
  id INT NOT NULL PRIMARY KEY,
  xy INT
) ENGINE=ndbcluster;

INSERT INTO t2 VALUES (2, 0);

CREATE TABLE t3 (id INT NOT NULL PRIMARY KEY) ENGINE=ndbcluster;

INSERT INTO t3 VALUES (1);

CREATE TABLE t4 LIKE t1;

DELIMITER //;
CREATE TRIGGER t4_update AFTER UPDATE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id = NEW.id; END //
DELIMITER ;//

CREATE TABLE t5 LIKE t2;

UPDATE t1 SET xy = 3 WHERE id = 1;
SELECT xy FROM t1 where id = 1;
SELECT xy FROM t2 where id = 1;

UPDATE t1 SET xy = 4 WHERE id IN (SELECT id FROM t3 WHERE id = 1);
SELECT xy FROM t1 where id = 1;
SELECT xy FROM t2 where id = 1;

INSERT INTO t4 SELECT * FROM t1;
INSERT INTO t5 SELECT * FROM t2;
UPDATE t1,t4 SET t1.xy = 3, t4.xy = 3 WHERE t1.id = 1 AND t4.id = 1;
SELECT xy FROM t1 where id = 1;
SELECT xy FROM t2 where id = 1;
SELECT xy FROM t4 where id = 1;
SELECT xy FROM t5 where id = 1;

UPDATE t1,t4 SET t1.xy = 4, t4.xy = 4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 1) AND t4.id IN (SELECT id FROM t3 WHERE id = 1);
SELECT xy FROM t1 where id = 1;
SELECT xy FROM t2 where id = 1;
SELECT xy FROM t4 where id = 1;
SELECT xy FROM t5 where id = 1;

INSERT INTO t1 VALUES (1,0) ON DUPLICATE KEY UPDATE xy = 5;
SELECT xy FROM t1 where id = 1;
SELECT xy FROM t2 where id = 1;

DROP TRIGGER t1_update;
DROP TRIGGER t4_update;

DELIMITER //;
CREATE TRIGGER t1_delete AFTER DELETE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id > 4; END //
DELIMITER ;//

DELIMITER //;
CREATE TRIGGER t4_delete AFTER DELETE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id > 4; END //
DELIMITER ;//

INSERT INTO t1 VALUES (5, 0),(6,0);
INSERT INTO t2 VALUES (5, 1),(6,1);
INSERT INTO t3 VALUES (5);
SELECT * FROM t1 order by id;
SELECT * FROM t2 order by id;
DELETE FROM t1 WHERE id IN (SELECT id FROM t3 WHERE id = 5);
SELECT * FROM t1 order by id;
SELECT * FROM t2 order by id;

INSERT INTO t1 VALUES (5,0);
UPDATE t2 SET xy = 1 WHERE id = 6;
TRUNCATE t4;
INSERT INTO t4 SELECT * FROM t1;
TRUNCATE t5;
INSERT INTO t5 SELECT * FROM t2;
SELECT * FROM t1 order by id;
SELECT * FROM t2 order by id;
SELECT * FROM t4 order by id;
SELECT * FROM t5 order by id;
DELETE FROM t1,t4 USING t1,t3,t4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 5) AND t4.id IN (SELECT id FROM t3 WHERE id = 5);
SELECT * FROM t1 order by id;
SELECT * FROM t2 order by id;
SELECT * FROM t4 order by id;
SELECT * FROM t5 order by id;

INSERT INTO t1 VALUES (5, 0);
REPLACE INTO t2 VALUES (6,1);
SELECT * FROM t1 order by id;
SELECT * FROM t2 order by id;
REPLACE INTO t1 VALUES (5, 1);
SELECT * FROM t1 order by id;
SELECT * FROM t2 order by id;

DROP TRIGGER t1_delete;
DROP TRIGGER t4_delete;
DROP TABLE t1, t2, t3, t4, t5;

--echo End of 5.0 tests
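Why batching breaks these tests: when the engine is allowed to batch, the actual row change may not be applied until the pending operations are flushed, so an AFTER UPDATE/DELETE trigger that re-reads the subject table (the REPLACE INTO t2 SELECT * FROM t1 triggers above) still sees the pre-change value. A standalone sketch of that ordering problem, independent of any MySQL API and purely illustrative:

// Standalone illustration (not MySQL code) of why deferred/batched writes
// break AFTER triggers that re-read the subject table.
#include <cstdio>
#include <functional>
#include <vector>

struct Table { int xy = 0; };

int main() {
  Table t1;
  std::vector<std::function<void()>> batch;

  // Stand-in for an AFTER UPDATE trigger that reads the current value of t1.xy.
  auto after_update_trigger = [&]{ std::printf("trigger sees t1.xy = %d\n", t1.xy); };

  // Batched behaviour (the bug): the write is queued, the trigger fires first.
  batch.push_back([&]{ t1.xy = 3; });
  after_update_trigger();                  // prints 0 - stale value
  for (auto &op : batch) op();             // batch flushed too late
  batch.clear();

  // HA_EXTRA_UPDATE_CANNOT_BATCH behaviour: apply the write immediately.
  t1.xy = 4;
  after_update_trigger();                  // prints 4 - the updated value
}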
@@ -2523,8 +2523,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
      ERR_RETURN(op->getNdbError());
  }

  // Execute update operation
  if (!cursor && execute_no_commit(this,trans,false) != 0) {
  /*
    Execute the update operation immediately if we are not doing a
    scan for update, or if batching is not allowed (e.g. because
    AFTER UPDATE triggers exist)
  */

  if ((!cursor || m_update_cannot_batch) &&
      execute_no_commit(this,trans,false) != 0) {
    no_uncommitted_rows_execute_failure();
    DBUG_RETURN(ndb_err(trans));
  }
@@ -2565,7 +2570,7 @@ int ha_ndbcluster::delete_row(const byte *record)

    no_uncommitted_rows_update(-1);

    if (!m_primary_key_update)
    if (!(m_primary_key_update || m_delete_cannot_batch))
      // If deleting from cursor, NoCommit will be handled in next_result
      DBUG_RETURN(0);
  }
@@ -3406,6 +3411,16 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
    DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
    m_use_write= FALSE;
    break;
  case HA_EXTRA_DELETE_CANNOT_BATCH:
    DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH"));
    m_delete_cannot_batch= TRUE;
    break;
  case HA_EXTRA_UPDATE_CANNOT_BATCH:
    DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH"));
    m_update_cannot_batch= TRUE;
    break;
  default:
    break;
  }

  DBUG_RETURN(0);
@@ -3422,6 +3437,8 @@ int ha_ndbcluster::reset()
  m_retrieve_primary_key= FALSE;
  m_ignore_dup_key= FALSE;
  m_use_write= FALSE;
  m_delete_cannot_batch= FALSE;
  m_update_cannot_batch= FALSE;
  DBUG_RETURN(0);
}

@@ -4788,6 +4805,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
  m_bulk_insert_rows((ha_rows) 1024),
  m_rows_changed((ha_rows) 0),
  m_bulk_insert_not_flushed(FALSE),
  m_delete_cannot_batch(FALSE),
  m_update_cannot_batch(FALSE),
  m_ops_pending(0),
  m_skip_auto_increment(TRUE),
  m_blobs_pending(0),
@@ -7127,7 +7146,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
        Check that the field is part of the table of the handler
        instance and that we expect a field of this result type.
      */
      if (context->table == field->table)
      if (context->table->s == field->table->s)
      {
        const NDBTAB *tab= (const NDBTAB *) context->ndb_table;
        DBUG_PRINT("info", ("FIELD_ITEM"));
@@ -774,6 +774,8 @@ bool uses_blob_value(bool all_fields);
  ha_rows m_bulk_insert_rows;
  ha_rows m_rows_changed;
  bool m_bulk_insert_not_flushed;
  bool m_delete_cannot_batch;
  bool m_update_cannot_batch;
  ha_rows m_ops_pending;
  bool m_skip_auto_increment;
  bool m_blobs_pending;
@@ -230,6 +230,8 @@ void Item_func::traverse_cond(Cond_traverser traverser,
        (*traverser)(this, argument);
      }
    }
    else
      (*traverser)(this, argument);
}

@@ -844,6 +844,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
                  bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
                                           TABLE_LIST *table_list);
void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
                                      enum_duplicates duplic);
void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
                                                  enum_duplicates duplic);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
@@ -217,7 +217,19 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
  thd->proc_info="updating";

  if (table->triggers)
  {
    table->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
    if (table->triggers->has_triggers(TRG_EVENT_DELETE,
                                      TRG_ACTION_AFTER))
    {
      /*
        The table has AFTER DELETE triggers that might access the subject
        table and therefore might need the delete to be done immediately.
        So we turn off batching.
      */
      (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
    }
  }

  while (!(error=info.read_record(&info)) && !thd->killed &&
         !thd->net.report_error)
@@ -540,7 +552,19 @@ multi_delete::initialize_tables(JOIN *join)
      else
        normal_tables= 1;
      if (tbl->triggers)
      {
        tbl->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
        if (tbl->triggers->has_triggers(TRG_EVENT_DELETE,
                                        TRG_ACTION_AFTER))
        {
          /*
            The table has AFTER DELETE triggers that might access the subject
            table and therefore might need the delete to be done immediately.
            So we turn off batching.
          */
          (void) tbl->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
        }
      }
    }
    else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
             walk == delete_tables)
@@ -329,6 +329,51 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
}


/*
  Prepare triggers for INSERT-like statement.

  SYNOPSIS
    prepare_triggers_for_insert_stmt()
      thd     The current thread
      table   Table to which insert will happen
      duplic  Type of duplicate handling for insert which will happen

  NOTE
    Prepare triggers for an INSERT-like statement by marking the fields
    used by triggers and informing handlers that batching of UPDATE/DELETE
    cannot be done if there are AFTER UPDATE/DELETE triggers.
*/

void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
                                      enum_duplicates duplic)
{
  if (table->triggers)
  {
    if (table->triggers->has_triggers(TRG_EVENT_DELETE,
                                      TRG_ACTION_AFTER))
    {
      /*
        The table has AFTER DELETE triggers that might access the subject
        table and therefore might need the delete to be done immediately.
        So we turn off batching.
      */
      (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
    }
    if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                      TRG_ACTION_AFTER))
    {
      /*
        The table has AFTER UPDATE triggers that might access the subject
        table and therefore might need the update to be done immediately.
        So we turn off batching.
      */
      (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
    }
    mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
  }
}


/*
  Mark fields used by triggers for INSERT-like statement.

@@ -589,7 +634,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
                    (MODE_STRICT_TRANS_TABLES |
                     MODE_STRICT_ALL_TABLES)));

  mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
  prepare_triggers_for_insert_stmt(thd, table, duplic);

  if (table_list->prepare_where(thd, 0, TRUE) ||
      table_list->prepare_check_option(thd))
@@ -2528,8 +2573,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
          table_list->prepare_check_option(thd));

  if (!res)
    mark_fields_used_by_triggers_for_insert_stmt(thd, table,
                                                 info.handle_duplicates);
    prepare_triggers_for_insert_stmt(thd, table,
                                     info.handle_duplicates);
  DBUG_RETURN(res);
}

@@ -222,7 +222,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
    DBUG_RETURN(TRUE);
  }

  mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);
  prepare_triggers_for_insert_stmt(thd, table, handle_duplicates);

  uint tot_length=0;
  bool use_blobs= 0, use_vars= 0;
@@ -110,6 +110,11 @@ public:
                             const char *old_table,
                             const char *new_db,
                             const char *new_table);
  bool has_triggers(trg_event_type event_type,
                    trg_action_time_type action_time)
  {
    return (bodies[event_type][action_time] != NULL);
  }
  bool has_delete_triggers()
  {
    return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
@@ -436,7 +436,19 @@ int mysql_update(THD *thd,
                   MODE_STRICT_ALL_TABLES)));

  if (table->triggers)
  {
    table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
    if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                      TRG_ACTION_AFTER))
    {
      /*
        The table has AFTER UPDATE triggers that might access the subject
        table and therefore might need the update to be done immediately.
        So we turn off batching.
      */
      (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
    }
  }

  /*
    We can use compare_record() to optimize away updates if
@@ -1001,6 +1013,20 @@ int multi_update::prepare(List<Item> &not_used_values,
    table->no_keyread=1;
    table->used_keys.clear_all();
    table->pos_in_table_list= tl;
    if (table->triggers)
    {
      table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
      if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                        TRG_ACTION_AFTER))
      {
        /*
          The table has AFTER UPDATE triggers that might access the subject
          table and therefore might need the update to be done immediately.
          So we turn off batching.
        */
        (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
      }
    }
  }
}