2009-03-23 15:22:31 +01:00
|
|
|
# Bug38499 flush tables and multitable table update with derived table cause crash
|
|
|
|
# MySQL >= 5.0
|
|
|
|
#
|
|
|
|
|
2021-07-24 20:32:52 +02:00
|
|
|
# The test can take hours with valgrind
|
|
|
|
--source include/not_valgrind.inc
|
|
|
|
|
2009-03-23 15:22:31 +01:00
|
|
|
# Save the initial number of concurrent sessions
|
|
|
|
--source include/count_sessions.inc
|
|
|
|
|
2009-08-28 23:49:16 +02:00
|
|
|
# Speed up this test: the loops below issue hundreds of ALTER TABLE
# statements, so do not fsync .frm files after each DDL operation.
# The original setting is saved and restored at the end of the test.
SET @odl_sync_frm = @@global.sync_frm;
|
|
|
|
SET @@global.sync_frm = OFF;
|
|
|
|
|
2009-03-23 15:22:31 +01:00
|
|
|
# Two auxiliary connections:
#  'locker' - concurrently runs ALTER TABLE statements (the DDL side)
#  'writer' - runs multi-table UPDATEs with derived tables (the DML side)
# Their race is what used to crash the server (Bug#38499).
connect (locker,localhost,root,,);
|
|
|
|
connect (writer,localhost,root,,);
|
|
|
|
|
|
|
|
--connection default
|
|
|
|
--disable_warnings
|
|
|
|
DROP TABLE IF EXISTS t1;
|
|
|
|
--enable_warnings
|
|
|
|
# Fixture: t1 is the update target, t2 feeds the derived tables used
# by the multi-table UPDATE statements below.
CREATE TABLE t1( a INT, b INT );
|
2010-05-26 22:18:18 +02:00
|
|
|
CREATE TABLE t2( a INT, b INT );
|
2009-03-23 15:22:31 +01:00
|
|
|
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4);
|
2010-05-26 22:18:18 +02:00
|
|
|
INSERT INTO t2 VALUES (1, 1), (2, 2), (3, 3), (4, 4);
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--echo # 1. test regular tables
|
|
|
|
--echo # 1.1. test altering of columns that multiupdate doesn't use
|
|
|
|
--echo # 1.1.1. normal mode
|
|
|
|
|
|
|
|
--disable_query_log
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
--dec $i
|
|
|
|
|
|
|
|
--connection writer
|
2010-05-26 22:18:18 +02:00
|
|
|
# Asynchronously start a multi-table UPDATE that opens a derived table
# (WHERE 1=0 so no rows change); it races against the concurrent
# ALTER TABLEs issued by the 'locker' connection below.
send UPDATE t1, (SELECT 1 FROM t2 t1i) d SET a = 0 WHERE 1=0;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection locker
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both with and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better than sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top of Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction to finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE will persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store the caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
# Concurrently add and then drop a column that the pending UPDATE does
# not reference; LOCK=SHARED keeps the table readable during the DDL.
ALTER TABLE t1 ADD COLUMN (c INT), LOCK=SHARED;
|
|
|
|
ALTER TABLE t1 DROP COLUMN c, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
# Collect the result of the UPDATE started with 'send' above.
--reap
|
|
|
|
}
|
|
|
|
|
|
|
|
--echo # 1.1.2. PS mode
|
|
|
|
|
|
|
|
--connection writer
|
2010-05-26 22:18:18 +02:00
|
|
|
PREPARE stmt FROM 'UPDATE t1, (SELECT 1 FROM t2 t1i) d SET a = 0 WHERE 1=0';
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
--dec $i
|
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--send EXECUTE stmt
|
|
|
|
|
|
|
|
--connection locker
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both with and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better than sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top of Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction to finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE will persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store the caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN (c INT), LOCK=SHARED;
|
|
|
|
ALTER TABLE t1 DROP COLUMN c, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--reap
|
|
|
|
}
|
|
|
|
--enable_query_log
|
|
|
|
|
|
|
|
--echo # 1.2. test altering of columns that multiupdate uses
|
|
|
|
--echo # 1.2.1. normal mode
|
|
|
|
|
|
|
|
--connection default
|
|
|
|
|
|
|
|
--disable_query_log
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
dec $i;
|
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_DUP_FIELDNAME
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN a int(11) unsigned default NULL, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
UPDATE t1 SET a=b;
|
|
|
|
|
|
|
|
--connection writer
|
2010-05-26 22:18:18 +02:00
|
|
|
--send UPDATE t1, (SELECT 1 FROM t2 t1i) d SET a = 0 WHERE 1=0;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_CANT_DROP_FIELD_OR_KEY
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 DROP COLUMN a, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--error 0,ER_BAD_FIELD_ERROR # unknown column error
|
|
|
|
--reap
|
|
|
|
}
|
|
|
|
--enable_query_log
|
|
|
|
|
|
|
|
--echo # 1.2.2. PS mode
|
|
|
|
|
|
|
|
--disable_query_log
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
dec $i;
|
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_DUP_FIELDNAME
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN a INT, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
UPDATE t1 SET a=b;
|
|
|
|
|
|
|
|
--connection writer
|
2010-05-26 22:18:18 +02:00
|
|
|
PREPARE stmt FROM 'UPDATE t1, (SELECT 1 FROM t2 t1i) d SET a = 0 WHERE 1=0';
|
2009-03-23 15:22:31 +01:00
|
|
|
--send EXECUTE stmt
|
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_CANT_DROP_FIELD_OR_KEY
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 DROP COLUMN a, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--error 0,ER_BAD_FIELD_ERROR # Unknown column 'a' in 'field list'
|
|
|
|
--reap
|
|
|
|
}
|
|
|
|
--enable_query_log
|
|
|
|
--connection default
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN a INT, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--echo # 2. test UNIONs
|
|
|
|
--echo # 2.1. test altering of columns that multiupdate doesn't use
|
|
|
|
--echo # 2.1.1. normal mode
|
|
|
|
|
|
|
|
--disable_query_log
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
--dec $i
|
|
|
|
|
|
|
|
--connection writer
|
|
|
|
send UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0;
|
|
|
|
|
|
|
|
--connection locker
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN (c INT), LOCK=SHARED;
|
|
|
|
ALTER TABLE t1 DROP COLUMN c, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--reap
|
|
|
|
}
|
|
|
|
|
|
|
|
--echo # 2.1.2. PS mode
|
|
|
|
|
|
|
|
--connection writer
|
|
|
|
PREPARE stmt FROM 'UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0';
|
|
|
|
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
--dec $i
|
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--send EXECUTE stmt
|
|
|
|
|
|
|
|
--connection locker
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN (c INT), LOCK=SHARED;
|
|
|
|
ALTER TABLE t1 DROP COLUMN c, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--reap
|
|
|
|
}
|
|
|
|
--enable_query_log
|
|
|
|
|
|
|
|
--echo # 2.2. test altering of columns that multiupdate uses
|
|
|
|
--echo # 2.2.1. normal mode
|
|
|
|
|
|
|
|
--connection default
|
|
|
|
|
|
|
|
--disable_query_log
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
dec $i;
|
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_DUP_FIELDNAME
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN a int(11) unsigned default NULL, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
UPDATE t1 SET a=b;
|
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--send UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0;
|
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_CANT_DROP_FIELD_OR_KEY
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 DROP COLUMN a, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--error 0,ER_BAD_FIELD_ERROR # Unknown column 'a' in 'field list'
|
|
|
|
--reap
|
|
|
|
}
|
|
|
|
--enable_query_log
|
|
|
|
|
|
|
|
--echo # 2.2.2. PS mode
|
|
|
|
|
|
|
|
--disable_query_log
|
|
|
|
let $i = 100;
|
|
|
|
while ($i) {
|
|
|
|
dec $i;
|
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_DUP_FIELDNAME
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 ADD COLUMN a INT, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
UPDATE t1 SET a=b;
|
|
|
|
|
|
|
|
--connection writer
|
|
|
|
PREPARE stmt FROM 'UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0';
|
|
|
|
--send EXECUTE stmt
|
|
|
|
|
|
|
|
--connection locker
|
|
|
|
--error 0,ER_CANT_DROP_FIELD_OR_KEY
|
MDEV-16329 [5/5] ALTER ONLINE TABLE
* Log rows in online_alter_binlog.
* Table online data is replicated within dedicated binlog file
* Cached data is written on commit.
* Versioning is fully supported.
* Works both wit and without binlog enabled.
* For now savepoints setup is forbidden while ONLINE ALTER goes on.
Extra support is required. We can simply log the SAVEPOINT query events
and replicate them together with row events. But it's not implemented
for now.
* Cache flipping:
We want to care for the possible bottleneck in the online alter binlog
reading/writing in advance.
IO_CACHE does not provide anything better that sequential access,
besides, only a single write is mutex-protected, which is not suitable,
since we should write a transaction atomically.
To solve this, a special layer on top Event_log is implemented.
There are two IO_CACHE files underneath: one for reading, and one for
writing.
Once the read cache is empty, an exclusive lock is acquired (we can wait
for a currently active transaction finish writing), and flip() is emitted,
i.e. the write cache is reopened for read, and the read cache is emptied,
and reopened for writing.
This reminds a buffer flip that happens in accelerated graphics
(DirectX/OpenGL/etc).
Cache_flip_event_log is considered non-blocking for a single reader and a
single writer in this sense, with the only lock held by reader during flip.
An alternative approach by implementing a fair concurrent circular buffer
is described in MDEV-24676.
* Cache managers:
We have two cache sinks: statement and transactional.
It is important that the changes are first cached per-statement and
per-transaction.
If a statement fails, then only statement data is rolled back. The
transaction moves along, however.
Turns out, there's no guarantee that TABLE well persist in
thd->open_tables to the transaction commit moment.
If an error occurs, tables from statement are purged.
Therefore, we can't store te caches in TABLE. Ideally, it should be
handlerton, but we cut the corner and store it in THD in a list.
2020-11-26 12:08:58 +01:00
|
|
|
ALTER TABLE t1 DROP COLUMN a, LOCK=SHARED;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
--connection writer
|
|
|
|
--error 0,ER_BAD_FIELD_ERROR # Unknown column 'a' in 'field list'
|
|
|
|
--reap
|
|
|
|
}
|
|
|
|
--enable_query_log
|
|
|
|
--connection default
|
2010-05-26 22:18:18 +02:00
|
|
|
# Cleanup: remove the fixture tables created at the start of the test.
DROP TABLE t1,t2;
|
2009-03-23 15:22:31 +01:00
|
|
|
|
|
|
|
|
|
|
|
# Close connections
|
|
|
|
--disconnect locker
|
|
|
|
--disconnect writer
|
|
|
|
|
2009-08-28 23:49:16 +02:00
|
|
|
# Restore the sync_frm setting saved at the beginning of the test.
SET @@global.sync_frm = @odl_sync_frm;
|
|
|
|
|
2009-03-23 15:22:31 +01:00
|
|
|
# End of 5.0 tests
|
|
|
|
|
|
|
|
# Wait till all disconnects are completed
|
|
|
|
--source include/wait_until_count_sessions.inc
|
|
|
|
|