Mirror of https://github.com/MariaDB/server.git, synced 2025-01-30 18:41:56 +01:00
2fcff8988a
command-line" and BUG#34062 "Maria table corruption on master". Use 5 bytes (instead of 4) to store page's number in the checkpoint record, to allow bigger table (1PB with maria-block-size=1kB). Help pushbuild not run out of memory by moving the portion of maria-recovery.test which generates lots of data into a -big.test. mysql-test/r/maria-recovery.result: result moved mysql-test/t/maria-recovery.test: piece which generates much data moved to maria-recovery-big.test mysys/my_pread.c: To fix BUG#34062, where a 1.1TB file was generated due to a wrong pwrite offset, it was useful to not lose precision on 'offset' in DBUG_PRINT, so that the crazy value is visible. mysys/my_read.c: To fix BUG#34062, where a 1.1TB file was generated due to a wrong pwrite offset, it was useful to not lose precision on 'offset' in DBUG_PRINT, so that the crazy value is visible. mysys/my_write.c: To fix BUG#34062, where a 1.1TB file was generated due to a wrong pwrite offset, it was useful to not lose precision on 'offset' in DBUG_PRINT, so that the crazy value is visible. storage/maria/ha_maria.cc: When starting a bulk insert, we throw away dirty index pages from the cache. Unique (non disabled) key insertions thus read out-of-date pages from the disk leading to BUG#34062 "Maria table corruption on master": a DELETE in procedure viewer_sp() had deleted all rows of viewer_tbl2 one by one, putting index page 1 into key_del; that page was thrown away at start of INSERT SELECT, then the INSERT SELECT needed a page to insert keys, looked at key_del, found 1, read page 1 from disk, and its out-of-date content was used to set the new value of key_del (crazy value of 1TB), then a later insertion needed another index page, tried to read page at this crazy offset and failed, leading to corruption mark. The fix is to destroy out-of-date pages and make the state consistent with that, i.e. call maria_delete_all_rows(). storage/maria/ma_blockrec.c: Special hook for UNDO_BULK_INSERT storage/maria/ma_blockrec.h: special hook for UNDO_BULK_INSERT storage/maria/ma_check.c: Fix for BUG#34114 "maria_chk reports false error when several tables on command-line": if the Nth (on the command line) table was BLOCK_RECORD it would start checks by using the param->record_checksum computed by checks of table N-1. storage/maria/ma_delete_all.c: comment storage/maria/ma_loghandler.c: special hook for UNDO_BULK_INSERT storage/maria/ma_page.c: comment storage/maria/ma_pagecache.c: page number is 5 bytes in checkpoint record now (allows bigger tables) storage/maria/ma_recovery.c: page number is 5 bytes in checkpoint record now storage/maria/ma_recovery_util.c: page number is 5 bytes now storage/maria/ma_write.c: typo mysql-test/r/maria-recovery-big.result: result is correct mysql-test/t/maria-recovery-big-master.opt: usual options for recovery tests mysql-test/t/maria-recovery-big.test: Moving out the big blob test to a -big test (it exhausts memory when using /dev/shm on certain machines)
mysql-test/t/maria-recovery-big.test · 68 lines · 2 KiB · Text
# Maria recovery test which cannot run in shared memory
# because it generates too much data, or which takes a lot of time.

--source include/not_embedded.inc
# Don't test this under valgrind, memory leaks will occur as we crash
--source include/not_valgrind.inc
# Binary must be compiled with debug for crash to occur
--source include/have_debug.inc
--source include/have_maria.inc

set global maria_log_file_size=4294967295;
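# 4294967295 is 2^32-1 (roughly 4GB); presumably set this high so that a new
# log file does not have to be created in the middle of the test.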
--disable_warnings
drop database if exists mysqltest;
--enable_warnings
create database mysqltest;

# Include scripts can perform SQL. For it to not influence the main test
# they use a separate connection. This way if they use a DDL it would
# not autocommit in the main test.
connect (admin, 127.0.0.1, root,,mysqltest,,);
--enable_reconnect

connection default;
use mysqltest;
--enable_reconnect

#
# Test with big blobs
#

--echo * TEST of recovery with blobs
-- source include/maria_empty_logs.inc
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
let $mms_tables=1;
-- source include/maria_make_snapshot_for_feeding_recovery.inc
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
-- source include/maria_make_snapshot_for_comparison.inc
lock table t1 write;
let $loop=20;
while ($loop)
{
  update t1 set b=CONCAT(b,b);
  dec $loop;
}
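# The two rows start at 30 and 20 characters, so 20 doublings give
# 30*2^20 = 31457280 and 20*2^20 = 20971520 bytes; this is why
# max_allowed_packet was raised to 32000000 above (a CONCAT() result
# larger than max_allowed_packet would become NULL).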
select a,length(b) from t1;
let $loop=22;
while ($loop)
{
  update t1 set b=mid(b,1,length(b)/2);
  dec $loop;
}
select a,length(b) from t1;
# we want recovery to run on the first snapshot made above
let $mvr_restore_old_snapshot=1;
let $mms_compare_physically=0;
let $mvr_debug_option="+d,maria_flush_whole_log,maria_crash";
let $mvr_crash_statement= set global maria_checkpoint_interval=1;
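# maria_verify_recovery.inc presumably uses the variables set above: it runs
# $mvr_crash_statement to trigger the maria_flush_whole_log and maria_crash
# debug points, restarts the server so that recovery runs, and compares the
# recovered t1 against the snapshots taken earlier.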
-- source include/maria_verify_recovery.inc
drop table t1;

# clean up everything
let $mms_purpose=feeding_recovery;
eval drop database mysqltest_for_$mms_purpose;
let $mms_purpose=comparison;
eval drop database mysqltest_for_$mms_purpose;
drop database mysqltest;