mirror of
https://github.com/MariaDB/server.git
synced 2025-03-29 18:35:35 +01:00
MDEV-16429: Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index))' fails upon attempt to update virtual column on partitioned versioned table
When using buffered sort in `UPDATE`, keyread is used. In this case, `TABLE::update_virtual_field` should be aborted, but it actually isn't, because it is called not with a top-level handler, but with the one that is actually going to access the disk. Here the problemm is issued with partitioning, so the solution is to recursively mark for keyread all the underlying partition handlers. * ha_partition: update keyread state for child partitions Closes #800
This commit is contained in:
parent
8893d199ef
commit
c16a54c02e
5 changed files with 106 additions and 42 deletions
|
@ -1866,3 +1866,12 @@ pUpTo10 p-10sp1 This is a long comment (2050 ascii characters) 50 pUpTo10 part
|
|||
pMax pMaxsp0 This is a long comment (2050 ascii characters) 50 pMax partition comment .80-!
|
||||
pMax pMaxsp1 This is a long comment (2050 ascii characters) 50 pMax partition comment .80-!
|
||||
DROP TABLE t1;
|
||||
CREATE OR REPLACE TABLE t1 (
|
||||
pk INT PRIMARY KEY,
|
||||
c CHAR(3) NOT NULL,
|
||||
v CHAR(4) AS (c) VIRTUAL
|
||||
) WITH SYSTEM VERSIONING PARTITION BY HASH(pk);
|
||||
INSERT INTO t1 (pk,c) VALUES (1,'foo'),(2,'bar');
|
||||
UPDATE t1 SET v = 'qux' WHERE pk = 2;
|
||||
ERROR HY000: The value specified for generated column 'v' in table 't1' ignored
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -2074,3 +2074,21 @@ SELECT PARTITION_NAME, SUBPARTITION_NAME, PARTITION_COMMENT FROM INFORMATION_SCH
|
|||
WHERE TABLE_NAME = 't1' AND TABLE_SCHEMA = 'test';
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-16429
|
||||
# Assertion `!table || (!table->read_set
|
||||
# || bitmap_is_set(table->read_set, field_index))'
|
||||
# fails upon attempt to update virtual column on partitioned versioned table
|
||||
#
|
||||
CREATE OR REPLACE TABLE t1 (
|
||||
pk INT PRIMARY KEY,
|
||||
c CHAR(3) NOT NULL,
|
||||
v CHAR(4) AS (c) VIRTUAL
|
||||
) WITH SYSTEM VERSIONING PARTITION BY HASH(pk);
|
||||
|
||||
INSERT INTO t1 (pk,c) VALUES (1,'foo'),(2,'bar');
|
||||
-- error ER_WARNING_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN
|
||||
UPDATE t1 SET v = 'qux' WHERE pk = 2;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -8471,6 +8471,24 @@ err_handler:
|
|||
}
|
||||
|
||||
|
||||
/*
  Callback for loop_partitions(): forward an extra() call to one child
  partition handler. 'operation' points at the enum ha_extra_function
  value to pass through.
*/
static int extra_cb(handler *h, void *operation)
{
  return h->extra(*(enum ha_extra_function*)operation);
}
|
||||
|
||||
|
||||
/*
  Callback for loop_partitions(): enable keyread on one child partition
  handler. 'p' points at the uint index number to read from.
*/
static int start_keyread_cb(handler* h, void *p)
{
  return h->ha_start_keyread(*(uint*)p);
}
|
||||
|
||||
|
||||
/*
  Callback for loop_partitions(): disable keyread on one child partition
  handler. 'unused' is ignored; it exists only to match the common
  handler_callback signature.
*/
static int end_keyread_cb(handler* h, void *unused)
{
  return h->ha_end_keyread();
}
|
||||
|
||||
|
||||
/**
|
||||
General function to prepare handler for certain behavior.
|
||||
|
||||
|
@ -8799,11 +8817,12 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
|
||||
switch (operation) {
|
||||
/* Category 1), used by most handlers */
|
||||
case HA_EXTRA_KEYREAD:
|
||||
case HA_EXTRA_NO_KEYREAD:
|
||||
DBUG_RETURN(loop_partitions(end_keyread_cb, NULL));
|
||||
case HA_EXTRA_KEYREAD:
|
||||
case HA_EXTRA_FLUSH:
|
||||
case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE:
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
case HA_EXTRA_PREPARE_FOR_RENAME:
|
||||
case HA_EXTRA_FORCE_REOPEN:
|
||||
DBUG_RETURN(loop_extra_alter(operation));
|
||||
|
@ -8815,7 +8834,7 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
|
||||
{
|
||||
if (!m_myisam)
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -8842,7 +8861,7 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
case HA_EXTRA_REMEMBER_POS:
|
||||
case HA_EXTRA_RESTORE_POS:
|
||||
{
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
}
|
||||
case HA_EXTRA_NO_READCHECK:
|
||||
{
|
||||
|
@ -8874,7 +8893,7 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
m_extra_cache_size= 0;
|
||||
m_extra_prepare_for_update= FALSE;
|
||||
m_extra_cache_part_id= NO_CURRENT_PART_ID;
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
}
|
||||
case HA_EXTRA_IGNORE_NO_KEY:
|
||||
case HA_EXTRA_NO_IGNORE_NO_KEY:
|
||||
|
@ -8895,11 +8914,11 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
|
||||
At this time, this is safe by limitation of ha_partition
|
||||
*/
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
}
|
||||
/* Category 7), used by federated handlers */
|
||||
case HA_EXTRA_INSERT_WITH_UPDATE:
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
/* Category 8) Operations only used by NDB */
|
||||
case HA_EXTRA_DELETE_CANNOT_BATCH:
|
||||
case HA_EXTRA_UPDATE_CANNOT_BATCH:
|
||||
|
@ -8909,13 +8928,13 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
}
|
||||
/* Category 9) Operations only used by MERGE */
|
||||
case HA_EXTRA_ADD_CHILDREN_LIST:
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
case HA_EXTRA_ATTACH_CHILDREN:
|
||||
{
|
||||
int result;
|
||||
uint num_locks;
|
||||
handler **file;
|
||||
if ((result= loop_extra(operation)))
|
||||
if ((result= loop_partitions(extra_cb, &operation)))
|
||||
DBUG_RETURN(result);
|
||||
|
||||
/* Recalculate lock count as each child may have different set of locks */
|
||||
|
@ -8930,9 +8949,9 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
break;
|
||||
}
|
||||
case HA_EXTRA_IS_ATTACHED_CHILDREN:
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
case HA_EXTRA_DETACH_CHILDREN:
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
case HA_EXTRA_MARK_AS_LOG_TABLE:
|
||||
/*
|
||||
http://dev.mysql.com/doc/refman/5.1/en/partitioning-limitations.html
|
||||
|
@ -8944,7 +8963,7 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
case HA_EXTRA_BEGIN_ALTER_COPY:
|
||||
case HA_EXTRA_END_ALTER_COPY:
|
||||
case HA_EXTRA_FAKE_START_STMT:
|
||||
DBUG_RETURN(loop_extra(operation));
|
||||
DBUG_RETURN(loop_partitions(extra_cb, &operation));
|
||||
default:
|
||||
{
|
||||
/* Temporary crash to discover what is wrong */
|
||||
|
@ -8952,7 +8971,7 @@ int ha_partition::extra(enum ha_extra_function operation)
|
|||
break;
|
||||
}
|
||||
}
|
||||
DBUG_RETURN(0);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
|
||||
|
@ -8990,25 +9009,41 @@ int ha_partition::reset(void)
|
|||
}
|
||||
|
||||
/*
|
||||
Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter
|
||||
Special extra method with additional parameter
|
||||
See @ref ha_partition::extra
|
||||
|
||||
SYNOPSIS
|
||||
extra_opt()
|
||||
operation Must be HA_EXTRA_CACHE
|
||||
cachesize Size of cache in full table scan
|
||||
@param[in] operation operation to execute
|
||||
@param[in] arg extra argument
|
||||
|
||||
RETURN VALUE
|
||||
>0 Error code
|
||||
0 Success
|
||||
@return status
|
||||
@retval 0 success
|
||||
@retval >0 error code
|
||||
|
||||
@detail
|
||||
Operations supported by extra_opt:
|
||||
HA_EXTRA_KEYREAD:
|
||||
arg is interpreted as key index
|
||||
HA_EXTRA_CACHE:
|
||||
arg is interpreted as size of cache in full table scan
|
||||
|
||||
For detailed description refer to @ref ha_partition::extra
|
||||
*/
|
||||
|
||||
int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
|
||||
int ha_partition::extra_opt(enum ha_extra_function operation, ulong arg)
|
||||
{
|
||||
DBUG_ENTER("ha_partition::extra_opt()");
|
||||
DBUG_ENTER("ha_partition::extra_opt");
|
||||
|
||||
DBUG_ASSERT(HA_EXTRA_CACHE == operation);
|
||||
prepare_extra_cache(cachesize);
|
||||
DBUG_RETURN(0);
|
||||
switch (operation)
|
||||
{
|
||||
case HA_EXTRA_KEYREAD:
|
||||
DBUG_RETURN(loop_partitions(start_keyread_cb, &arg));
|
||||
case HA_EXTRA_CACHE:
|
||||
prepare_extra_cache(arg);
|
||||
DBUG_RETURN(0);
|
||||
default:
|
||||
DBUG_ASSERT(0);
|
||||
}
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
|
||||
|
@ -9071,28 +9106,28 @@ int ha_partition::loop_extra_alter(enum ha_extra_function operation)
|
|||
if ((tmp= (*file)->extra(operation)))
|
||||
result= tmp;
|
||||
}
|
||||
if ((tmp= loop_extra(operation)))
|
||||
if ((tmp= loop_partitions(extra_cb, &operation)))
|
||||
result= tmp;
|
||||
DBUG_RETURN(result);
|
||||
}
|
||||
|
||||
/*
|
||||
Call extra on all partitions
|
||||
|
||||
SYNOPSIS
|
||||
loop_extra()
|
||||
operation extra operation type
|
||||
/**
|
||||
Call callback(part, param) on all partitions
|
||||
|
||||
RETURN VALUE
|
||||
>0 Error code
|
||||
0 Success
|
||||
@param callback a callback to call for each partition
|
||||
@param param a void*-parameter passed to callback
|
||||
|
||||
@return Operation status
|
||||
@retval >0 Error code
|
||||
@retval 0 Success
|
||||
*/
|
||||
|
||||
int ha_partition::loop_extra(enum ha_extra_function operation)
|
||||
int ha_partition::loop_partitions(handler_callback callback, void *param)
|
||||
{
|
||||
int result= 0, tmp;
|
||||
uint i;
|
||||
DBUG_ENTER("ha_partition::loop_extra()");
|
||||
DBUG_ENTER("ha_partition::loop_partitions");
|
||||
|
||||
for (i= bitmap_get_first_set(&m_part_info->lock_partitions);
|
||||
i < m_tot_parts;
|
||||
|
@ -9103,7 +9138,7 @@ int ha_partition::loop_extra(enum ha_extra_function operation)
|
|||
In this case calling 'extra' can crash.
|
||||
*/
|
||||
if (bitmap_is_set(&m_opened_partitions, i) &&
|
||||
(tmp= m_file[i]->extra(operation)))
|
||||
(tmp= callback(m_file[i], param)))
|
||||
result= tmp;
|
||||
}
|
||||
/* Add all used partitions to be called in reset(). */
|
||||
|
|
|
@ -844,7 +844,7 @@ public:
|
|||
int change_partitions_to_open(List<String> *partition_names);
|
||||
int open_read_partitions(char *name_buff, size_t name_buff_size);
|
||||
virtual int extra(enum ha_extra_function operation);
|
||||
virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
|
||||
virtual int extra_opt(enum ha_extra_function operation, ulong arg);
|
||||
virtual int reset(void);
|
||||
virtual uint count_query_cache_dependant_tables(uint8 *tables_type);
|
||||
virtual my_bool
|
||||
|
@ -854,6 +854,8 @@ public:
|
|||
uint *n);
|
||||
|
||||
private:
|
||||
typedef int handler_callback(handler *, void *);
|
||||
|
||||
my_bool reg_query_cache_dependant_table(THD *thd,
|
||||
char *engine_key,
|
||||
uint engine_key_len,
|
||||
|
@ -864,7 +866,7 @@ private:
|
|||
**block_table,
|
||||
handler *file, uint *n);
|
||||
static const uint NO_CURRENT_PART_ID= NOT_A_PARTITION_ID;
|
||||
int loop_extra(enum ha_extra_function operation);
|
||||
int loop_partitions(handler_callback callback, void *param);
|
||||
int loop_extra_alter(enum ha_extra_function operations);
|
||||
void late_extra_cache(uint partition_id);
|
||||
void late_extra_no_cache(uint partition_id);
|
||||
|
|
|
@ -3097,7 +3097,7 @@ public:
|
|||
bool keyread_enabled() { return keyread < MAX_KEY; }
|
||||
int ha_start_keyread(uint idx)
|
||||
{
|
||||
int res= keyread_enabled() ? 0 : extra(HA_EXTRA_KEYREAD);
|
||||
int res= keyread_enabled() ? 0 : extra_opt(HA_EXTRA_KEYREAD, idx);
|
||||
keyread= idx;
|
||||
return res;
|
||||
}
|
||||
|
@ -3609,7 +3609,7 @@ public:
|
|||
{ return 0; }
|
||||
virtual int extra(enum ha_extra_function operation)
|
||||
{ return 0; }
|
||||
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
|
||||
virtual int extra_opt(enum ha_extra_function operation, ulong arg)
|
||||
{ return extra(operation); }
|
||||
|
||||
/**
|
||||
|
|
Loading…
Add table
Reference in a new issue