Mirror of https://github.com/MariaDB/server.git
BUG#11751793 - 42784: ARCHIVE TABLES CAUSE 100% CPU USAGE
AND HANG IN SHOW TABLE STATUS

ISSUE: Table corruption caused by concurrent queries: when one thread
runs INSERT while another runs CHECK TABLE, the check scan is not
properly locked against the writer, so rows are inserted in the middle
of the check and the table is falsely reported corrupt.

SOLUTION: In CHECK TABLE, hold the share mutex for a longer time, so
that rows inserted concurrently with the check are accounted for
consistently.

NOTE: Additionally, we backported the fix for the CHECKSUM issue
(BUG#11758979).
This commit is contained in:
parent 82a5902f07 / commit 75c08c7935
3 changed files with 72 additions and 9 deletions
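To make the failure mode concrete before the diffs, here is a minimal
single-threaded C++ simulation of the old, under-locked check logic. All
names are invented for illustration; none of this is MariaDB code. The old
ha_archive::check() snapshotted the committed row count and then scanned to
EOF without the lock, so a row inserted mid-scan left the counter nonzero
and the table was declared corrupt:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> archive = {1, 2, 3};  // rows already committed
  long long count = archive.size();      // snapshot of share->rows_recorded

  // A concurrent INSERT lands after the snapshot was taken.
  archive.push_back(4);

  // Old logic: read rows until EOF, decrementing the snapshot counter.
  for (size_t pos = 0; pos < archive.size(); pos++)
    count--;                             // one extra decrement: count == -1

  // check() treated a leftover count as corruption:
  //   if ((rc && rc != HA_ERR_END_OF_FILE) || count) -> HA_ADMIN_CORRUPT
  std::printf("leftover count = %lld -> %s\n", count,
              count ? "spurious HA_ADMIN_CORRUPT" : "HA_ADMIN_OK");
}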
mysql-test/r/archive.result
@@ -12772,3 +12772,22 @@ a b c d e f
 -1 b c d e 1
 DROP TABLE t1;
 SET sort_buffer_size=DEFAULT;
+#
+# BUG#11758979 - 51252: ARCHIVE TABLES CAUSE 100% CPU USAGE
+# AND HANG IN SHOW TABLE STATUS
+# (to be executed with valgrind)
+CREATE TABLE t1(a BLOB, b VARCHAR(200)) ENGINE=ARCHIVE;
+INSERT INTO t1 VALUES(NULL, '');
+FLUSH TABLE t1;
+# we need this select to workaround BUG#11764364
+SELECT * FROM t1;
+a b
+NULL
+CHECKSUM TABLE t1 EXTENDED;
+Table Checksum
+test.t1 286155052
+FLUSH TABLE t1;
+OPTIMIZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+DROP TABLE t1;
mysql-test/t/archive.test
@@ -1693,3 +1693,17 @@ INSERT INTO t1 SELECT t1.* FROM t1,t1 t2,t1 t3,t1 t4,t1 t5,t1 t6;
 SELECT * FROM t1 ORDER BY f LIMIT 1;
 DROP TABLE t1;
 SET sort_buffer_size=DEFAULT;
+
+--echo #
+--echo # BUG#11758979 - 51252: ARCHIVE TABLES CAUSE 100% CPU USAGE
+--echo # AND HANG IN SHOW TABLE STATUS
+--echo # (to be executed with valgrind)
+CREATE TABLE t1(a BLOB, b VARCHAR(200)) ENGINE=ARCHIVE;
+INSERT INTO t1 VALUES(NULL, '');
+FLUSH TABLE t1;
+--echo # we need this select to workaround BUG#11764364
+SELECT * FROM t1;
+CHECKSUM TABLE t1 EXTENDED;
+FLUSH TABLE t1;
+OPTIMIZE TABLE t1;
+DROP TABLE t1;
storage/archive/ha_archive.cc
@@ -760,6 +760,7 @@ uint32 ha_archive::max_row_length(const uchar *buf)
        ptr != end ;
        ptr++)
   {
+    if (!table->field[*ptr]->is_null())
     length += 2 + ((Field_blob*)table->field[*ptr])->get_length();
   }
 
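A note on the one-line guard above: for a field that is SQL NULL,
Field_blob::get_length() can return a meaningless length left over from a
previous row, so the row-size estimate must skip NULL blobs entirely. A
hedged standalone sketch of the same pattern, with invented stub types:

#include <stdint.h>
#include <vector>

// Invented stand-in for Field_blob: when null_flag is set, data_length
// holds whatever the previous row left behind and must not be trusted.
struct BlobStub {
  bool null_flag;
  uint32_t data_length;
  bool is_null() const { return null_flag; }
  uint32_t get_length() const { return data_length; }
};

uint32_t max_row_length(const std::vector<BlobStub> &blobs, uint32_t fixed) {
  uint32_t length = fixed;
  for (const BlobStub &b : blobs)
    if (!b.is_null())                 // the added guard: NULL blobs add nothing
      length += 2 + b.get_length();   // 2-byte length prefix + payload
  return length;
}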
@@ -1110,6 +1111,17 @@ int ha_archive::unpack_row(azio_stream *file_to_read, uchar *record)
 
   /* Copy null bits */
   const uchar *ptr= record_buffer->buffer;
+  /*
+    Field::unpack() is not called when field is NULL. For VARCHAR
+    Field::unpack() only unpacks as much bytes as occupied by field
+    value. In these cases respective memory area on record buffer is
+    not initialized.
+
+    These uninitialized areas may be accessed by CHECKSUM TABLE or
+    by optimizer using temporary table (BUG#12997905). We may remove
+    this memset() when they're fixed.
+  */
+  memset(record, 0, table->s->reclength);
   memcpy(record, ptr, table->s->null_bytes);
   ptr+= table->s->null_bytes;
   for (Field **field=table->field ; *field ; field++)
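Why the memset matters for CHECKSUM TABLE ... EXTENDED: as the in-tree
comment says, the checksum can read record-buffer areas that unpacking never
wrote, so two rows with identical SQL values can hash differently when the
untouched bytes contain different leftover garbage. A self-contained
demonstration, using an invented byte-wise checksum in place of the real one:

#include <cstdio>
#include <cstring>
#include <stdint.h>

// Invented checksum standing in for the server's row checksum.
static uint32_t byte_checksum(const unsigned char *rec, size_t len) {
  uint32_t sum = 0;
  for (size_t i = 0; i < len; i++)
    sum = sum * 31 + rec[i];
  return sum;
}

int main() {
  unsigned char rec1[16], rec2[16];
  memset(rec1, 0x00, sizeof(rec1));  // buffer that happens to be clean
  memset(rec2, 0xAA, sizeof(rec2));  // buffer with leftover garbage

  // Unpack the same logical row into both buffers: one short VARCHAR
  // occupying bytes 1..4; the remaining bytes are never written.
  memcpy(rec1 + 1, "abc", 4);
  memcpy(rec2 + 1, "abc", 4);
  std::printf("without memset: %u vs %u\n",
              (unsigned) byte_checksum(rec1, 16),
              (unsigned) byte_checksum(rec2, 16));

  memset(rec2, 0, sizeof(rec2));     // the fix: zero-fill before unpacking
  memcpy(rec2 + 1, "abc", 4);
  std::printf("with memset:    %u vs %u\n",
              (unsigned) byte_checksum(rec1, 16),
              (unsigned) byte_checksum(rec2, 16));
}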
@@ -1578,13 +1590,15 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
 {
   int rc= 0;
   const char *old_proc_info;
-  ha_rows count= share->rows_recorded;
+  ha_rows count;
   DBUG_ENTER("ha_archive::check");
 
   old_proc_info= thd_proc_info(thd, "Checking table");
-  /* Flush any waiting data */
   pthread_mutex_lock(&share->mutex);
-  azflush(&(share->archive_write), Z_SYNC_FLUSH);
+  count= share->rows_recorded;
+  /* Flush any waiting data */
+  if (share->archive_write_open)
+    azflush(&(share->archive_write), Z_SYNC_FLUSH);
   pthread_mutex_unlock(&share->mutex);
 
   if (init_archive_reader())
@@ -1594,18 +1608,34 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
     start of the file.
   */
   read_data_header(&archive);
+  for (ha_rows cur_count= count; cur_count; cur_count--)
+  {
+    if ((rc= get_row(&archive, table->record[0])))
+      goto error;
+  }
+  /*
+    Now read records that may have been inserted concurrently.
+    Acquire share->mutex so tail of the table is not modified by
+    concurrent writers.
+  */
+  pthread_mutex_lock(&share->mutex);
+  count= share->rows_recorded - count;
+  if (share->archive_write_open)
+    azflush(&(share->archive_write), Z_SYNC_FLUSH);
   while (!(rc= get_row(&archive, table->record[0])))
     count--;
-
-  thd_proc_info(thd, old_proc_info);
+  pthread_mutex_unlock(&share->mutex);
 
   if ((rc && rc != HA_ERR_END_OF_FILE) || count)
-  {
-    share->crashed= FALSE;
-    DBUG_RETURN(HA_ADMIN_CORRUPT);
-  }
+    goto error;
 
+  thd_proc_info(thd, old_proc_info);
   DBUG_RETURN(HA_ADMIN_OK);
+
+error:
+  thd_proc_info(thd, old_proc_info);
+  share->crashed= FALSE;
+  DBUG_RETURN(HA_ADMIN_CORRUPT);
 }
 
 /*
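Putting the two check() hunks together: the fixed algorithm snapshots the
committed row count under share->mutex, verifies that prefix without
blocking writers, then re-acquires the mutex so the tail cannot grow while
the concurrently inserted rows are verified. A hedged, runnable C++ sketch
of that pattern, with an invented Share type and std::mutex in place of
pthreads:

#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct Share {
  std::mutex mutex;          // plays the role of share->mutex
  std::vector<int> rows;     // plays the role of the archive data file
  size_t rows_recorded = 0;  // rows committed so far
};

static bool check_table(Share &share) {
  size_t count;
  {
    std::lock_guard<std::mutex> lock(share.mutex);
    count = share.rows_recorded;             // count= share->rows_recorded
  }
  // Phase 1: scan the snapshot prefix; writers may append meanwhile.
  for (size_t i = 0; i < count; i++)
    if (share.rows[i] < 0) return false;     // goto error
  // Phase 2: lock out writers, then scan whatever arrived concurrently.
  std::lock_guard<std::mutex> lock(share.mutex);
  for (size_t i = count; i < share.rows_recorded; i++)
    if (share.rows[i] < 0) return false;
  return true;
}

int main() {
  Share share;
  share.rows.reserve(100000);  // no reallocation, so phase 1 reads are safe
  std::thread writer([&] {
    for (int i = 0; i < 100000; i++) {
      std::lock_guard<std::mutex> lock(share.mutex);
      share.rows.push_back(i);
      share.rows_recorded++;
    }
  });
  for (int i = 0; i < 10; i++)
    std::printf("check #%d: %s\n", i, check_table(share) ? "OK" : "corrupt");
  writer.join();
}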