Mirror of https://github.com/MariaDB/server.git

commit 38cd1a6cb5 (parent 6646250537)

    handle cardinality with alter add drop

2 changed files with 12 additions and 8 deletions
@@ -22,6 +22,7 @@ public:
         alter_txn(NULL),
         add_index_changed(false),
         drop_index_changed(false),
+        reset_card(false),
         compression_changed(false),
         expand_varchar_update_needed(false),
         expand_fixed_update_needed(false),
@@ -38,6 +39,7 @@ public:
     bool add_index_changed;
     bool incremented_num_DBs, modified_DBs;
     bool drop_index_changed;
+    bool reset_card;
     bool compression_changed;
     enum toku_compression_method orig_compression_method;
     bool expand_varchar_update_needed;
@@ -366,7 +368,6 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
     }
     if (error == 0 && (ctx->handler_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) && (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
         // Get the current compression
-        tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
         DB *db = share->key_file[0];
         error = db->get_compression_method(db, &ctx->orig_compression_method);
         assert(error == 0);
@@ -388,6 +389,9 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
     if (error == 0 && ctx->expand_fixed_update_needed)
         error = alter_table_expand_columns(altered_table, ha_alter_info);

+    if (error == 0 && ctx->reset_card)
+        tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s);
+
     bool result = false; // success
     if (error) {
         print_error(error, MYF(0));
@@ -422,7 +426,7 @@ int ha_tokudb::alter_table_add_index(TABLE *altered_table, Alter_inplace_info *h
     my_free(key_info);

     if (error == 0)
-        tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s);
+        ctx->reset_card = true;

     return error;
 }
@@ -469,7 +473,7 @@ int ha_tokudb::alter_table_drop_index(TABLE *altered_table, Alter_inplace_info *
     int error = drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count, key_info, ctx->alter_txn);

     if (error == 0)
-        tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s);
+        ctx->reset_card = true;

     return error;
 }
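Taken together, the hunks above change when cardinality data is rewritten: alter_table_add_index() and alter_table_drop_index() no longer call tokudb::set_card_from_status() themselves, they only set the new reset_card flag, and inplace_alter_table() performs a single rewrite after all the index work has finished. A minimal standalone sketch of that deferred-reset pattern (hypothetical names, plain C++, not the actual TokuDB classes):

    #include <cstdio>

    // stand-in for tokudb_alter_ctx: only the flag that matters here
    struct alter_ctx {
        bool reset_card = false;
    };

    // stand-ins for alter_table_add_index / alter_table_drop_index:
    // each sub-step only records that the stored cardinality is now stale
    static void add_index(alter_ctx &ctx)  { ctx.reset_card = true; }
    static void drop_index(alter_ctx &ctx) { ctx.reset_card = true; }

    // stand-in for tokudb::set_card_from_status
    static void set_card_from_status() { std::printf("cardinality rewritten once\n"); }

    int main() {
        alter_ctx ctx;
        add_index(ctx);
        drop_index(ctx);
        // single, final rewrite, mirroring the new check in inplace_alter_table
        if (ctx.reset_card)
            set_card_from_status();
        return 0;
    }

One practical effect of this layout is that the status dictionary is touched once per ALTER, under the same alter_txn as the rest of the work, rather than separately in each add/drop helper.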
@@ -76,25 +76,24 @@ namespace tokudb {
         return false;
     }

     // Altered table cardinality = select cardinality data from current table cardinality for keys that exist
     // in the altered table and the current table.
     void set_card_from_status(DB *status_db, DB_TXN *txn, TABLE_SHARE *table_share, TABLE_SHARE *altered_table_share) {
         int error;

         // read existing cardinality data from status
         uint64_t rec_per_key[table_share->key_parts];
         error = get_card_from_status(status_db, txn, table_share->key_parts, rec_per_key);

         // set altered records per key to unknown
         uint64_t altered_rec_per_key[altered_table_share->key_parts];
         for (uint i = 0; i < altered_table_share->key_parts; i++)
             altered_rec_per_key[i] = 0;

-        // compute the beginning of the key offsets
+        // compute the beginning of the key offsets in the original table
         uint orig_key_offset[table_share->keys];
         uint orig_key_parts = 0;
         for (uint i = 0; i < table_share->keys; i++) {
             orig_key_offset[i] = orig_key_parts;
             orig_key_parts += table_share->key_info[i].key_parts;
         }

         // if orig card data exists, then use it to compute new card data
         if (error == 0) {
             uint key_parts = 0;
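The comment block in this hunk describes the mapping: for every key that exists in both the original and the altered table, the stored rec_per_key entries are carried across, and anything else is left as "unknown" (0). Since rec_per_key is one flat array covering all key parts of all keys, each key first needs its starting offset. A self-contained illustration of that offset bookkeeping (plain C++ with made-up numbers, not the MySQL TABLE_SHARE/KEY structures):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        // original table: key 0 has 2 parts, key 1 has 1 part, key 2 has 3 parts
        std::vector<unsigned> key_parts_per_key = {2, 1, 3};
        std::vector<uint64_t> rec_per_key = {10, 3, 50, 7, 4, 2};  // 6 key parts total

        // same computation as the orig_key_offset loop in the hunk above
        std::vector<unsigned> orig_key_offset(key_parts_per_key.size());
        unsigned orig_key_parts = 0;
        for (std::size_t i = 0; i < key_parts_per_key.size(); i++) {
            orig_key_offset[i] = orig_key_parts;
            orig_key_parts += key_parts_per_key[i];
        }

        // orig_key_offset == {0, 2, 3}: if the altered table keeps keys 0 and 2,
        // their cardinality entries can be copied from offsets 0..1 and 3..5.
        for (std::size_t i = 0; i < orig_key_offset.size(); i++)
            std::printf("key %zu starts at key-part offset %u\n", i, orig_key_offset[i]);
        return 0;
    }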
@@ -123,6 +122,7 @@ namespace tokudb {
             uint64_t rows = 0;
             uint64_t unique_rows[num_key_parts];
             if (is_unique && num_key_parts == 1) {
                 // dont compute for unique keys with a single part. we already know the answer.
                 rows = unique_rows[0] = 1;
             } else {
                 DBC *cursor = NULL;
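A short aside on the is_unique && num_key_parts == 1 shortcut shown above: for a single-column unique key, every distinct value occurs in exactly one row, so the records-per-key estimate is 1 by definition and no cursor scan is needed. A throwaway sketch with made-up data (plain C++, not TokuDB code):

    #include <cassert>
    #include <cstdint>
    #include <set>
    #include <vector>

    int main() {
        // made-up single-column unique key values, one per row
        std::vector<int> key_values = {7, 42, 3, 19};

        uint64_t rows = key_values.size();
        uint64_t unique_rows = std::set<int>(key_values.begin(), key_values.end()).size();

        // uniqueness guarantees rows == unique_rows, so rows per distinct value is 1
        assert(rows / unique_rows == 1);
        return 0;
    }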