From 076550285b4939d3620e9f185f1925882fcd872b Mon Sep 17 00:00:00 2001
From: marko <>
Date: Wed, 13 Jan 2010 20:01:10 +0000
Subject: [PATCH] branches/innodb+: Merge revisions 6364:6447 from branches/zip:

------------------------------------------------------------------------
r6367 | marko | 2009-12-28 15:39:19 +0200 (Mon, 28 Dec 2009) | 2 lines
Changed paths:
   M /branches/zip/dict/dict0dict.c

branches/zip: dict_index_add_to_cache(): Always free the index object,
also when returning DB_CORRUPTION.
------------------------------------------------------------------------
r6425 | marko | 2010-01-12 13:47:11 +0200 (Tue, 12 Jan 2010) | 45 lines
Changed paths:
   M /branches/zip/ChangeLog
   M /branches/zip/handler/ha_innodb.cc
   M /branches/zip/handler/ha_innodb.h
   M /branches/zip/row/row0mysql.c

branches/zip: Merge revisions 6350:6424 from branches/5.1:

------------------------------------------------------------------------
r6421 | jyang | 2010-01-12 07:59:16 +0200 (Tue, 12 Jan 2010) | 8 lines
Changed paths:
   M /branches/5.1/row/row0mysql.c

branches/5.1: Fix bug #49238: Creating/Dropping a temporary table
while at 1023 transactions will cause assert. Handle possible
DB_TOO_MANY_CONCURRENT_TRXS when deleting metadata in
row_drop_table_for_mysql().

rb://220, approved by Marko
------------------------------------------------------------------------
r6422 | marko | 2010-01-12 11:34:27 +0200 (Tue, 12 Jan 2010) | 3 lines
Changed paths:
   M /branches/5.1/handler/ha_innodb.cc
   M /branches/5.1/handler/ha_innodb.h

branches/5.1: Non-functional change:
Make innobase_get_int_col_max_value() a static function.
It does not access any fields of class ha_innobase.
------------------------------------------------------------------------
r6424 | marko | 2010-01-12 12:22:19 +0200 (Tue, 12 Jan 2010) | 16 lines
Changed paths:
   M /branches/5.1/handler/ha_innodb.cc
   M /branches/5.1/handler/ha_innodb.h

branches/5.1: In innobase_initialize_autoinc(), do not attempt to read
the maximum auto-increment value from the table if innodb_force_recovery
is set to at least 4, so that writes are disabled. (Bug #46193)

innobase_get_int_col_max_value(): Move the function definition before
ha_innobase::innobase_initialize_autoinc(), because that function now
calls this function.

ha_innobase::innobase_initialize_autoinc(): Change the return type to
void. Do not attempt to read the maximum auto-increment value from the
table if innodb_force_recovery is set to at least 4. Issue
ER_AUTOINC_READ_FAILED to the client when the auto-increment value
cannot be read.

rb://144 by Sunny, revised by Marko
------------------------------------------------------------------------
------------------------------------------------------------------------
r6426 | marko | 2010-01-12 15:36:14 +0200 (Tue, 12 Jan 2010) | 2 lines
Changed paths:
   M /branches/zip/row/row0sel.c

branches/zip: row_sel_sec_rec_is_for_clust_rec(): Document the return
value more accurately.
------------------------------------------------------------------------
r6433 | marko | 2010-01-13 13:19:00 +0200 (Wed, 13 Jan 2010) | 2 lines
Changed paths:
   M /branches/zip/dict/dict0crea.c
   M /branches/zip/dict/dict0load.c

branches/zip: dict_sys_tables_get_flags(), dict_create_sys_*_tuple():
Add some const qualifiers and comments.
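
For illustration, a minimal standalone C sketch of the auto-increment
initialization policy described in r6422 and r6424 above: when
innodb_force_recovery disables writes, the counter is pinned to the
column type's maximum without reading the table; otherwise it continues
from MAX(col) + 1, and a read failure is reported to the client. The
enum names, the constant 4, and the helpers int_col_max_value() and
init_autoinc() below are hypothetical stand-ins, not the MySQL/InnoDB
API.

#include <stdio.h>

/* Hypothetical stand-ins for the MySQL key types and read outcomes
used by the real innobase_get_int_col_max_value() and
row_search_max_autoinc(); only the decision flow is modelled here. */
enum col_type { COL_INT24, COL_UINT32, COL_INT64 };
enum read_result { READ_OK, READ_RECORD_NOT_FOUND };

static unsigned long long
int_col_max_value(enum col_type t)
{
	switch (t) {
	case COL_INT24:  return 0x7FFFFFULL;
	case COL_UINT32: return 0xFFFFFFFFULL;
	case COL_INT64:  return 0x7FFFFFFFFFFFFFFFULL;
	}
	return 0;
}

static unsigned long long
init_autoinc(enum col_type t, int force_recovery,
	     enum read_result r, unsigned long long stored_max)
{
	if (force_recovery >= 4) {
		/* Writes are disabled: pin the counter to the column
		maximum and skip reading the table entirely. */
		return int_col_max_value(t);
	}

	switch (r) {
	case READ_OK:
		/* SELECT MAX(col) succeeded: continue from there. */
		return stored_max + 1;
	case READ_RECORD_NOT_FOUND:
		/* The column cannot be read: report the failure (the
		real code issues ER_AUTOINC_READ_FAILED) and disable
		further generation by using the column maximum. */
		fprintf(stderr, "auto-increment value cannot be read\n");
		return int_col_max_value(t);
	}
	return 0;
}

int
main(void)
{
	printf("normal startup:  %llu\n",
	       init_autoinc(COL_UINT32, 0, READ_OK, 41ULL));
	printf("forced recovery: %llu\n",
	       init_autoinc(COL_UINT32, 4, READ_OK, 41ULL));
	return 0;
}
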
------------------------------------------------------------------------ r6445 | marko | 2010-01-13 17:15:29 +0200 (Wed, 13 Jan 2010) | 3 lines Changed paths: M /branches/zip/ChangeLog M /branches/zip/buf/buf0buf.c branches/zip: buf_pool_drop_hash_index(): Check block->page.state before checking block->is_hashed, because the latter may be uninitialized right after server startup. ------------------------------------------------------------------------ r6446 | marko | 2010-01-13 17:20:10 +0200 (Wed, 13 Jan 2010) | 3 lines Changed paths: M /branches/zip/include/mem0dbg.h M /branches/zip/include/mem0dbg.ic M /branches/zip/mem/mem0dbg.c M /branches/zip/sync/sync0sync.c branches/zip: Treat mem_hash_mutex specially in mutex_free(), and explicitly free mem_hash_mutex in mem_close(). This fixes the breakage of UNIV_MEM_DEBUG that was filed as Issue #434. ------------------------------------------------------------------------ r6447 | marko | 2010-01-13 17:43:44 +0200 (Wed, 13 Jan 2010) | 5 lines Changed paths: M /branches/zip/ChangeLog M /branches/zip/row/row0sel.c branches/zip: row_sel_get_clust_rec_for_mysql(): On the READ UNCOMMITTED isolation level, do not attempt to access a clustered index record that has been marked for deletion. This fixes Issue #433. Approved by Heikki over the IM. ------------------------------------------------------------------------ --- ChangeLog | 30 +++++- buf/buf0buf.c | 4 +- dict/dict0crea.c | 102 +++++++++--------- dict/dict0dict.c | 1 + dict/dict0load.c | 2 +- handler/ha_innodb.cc | 239 ++++++++++++++++++++++++------------------- handler/ha_innodb.h | 3 +- include/mem0dbg.h | 7 ++ include/mem0dbg.ic | 3 - mem/mem0dbg.c | 4 + row/row0mysql.c | 32 ++++-- row/row0sel.c | 4 +- sync/sync0sync.c | 19 +++- 13 files changed, 279 insertions(+), 171 deletions(-) diff --git a/ChangeLog b/ChangeLog index dddf37e2334..c0ad21cd132 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,34 @@ +2010-01-13 The InnoDB Team + + * row/row0sel.c: + On the READ UNCOMMITTED isolation level, do not attempt to access + a clustered index record that has been marked for deletion. The + built-in InnoDB in MySQL 5.1 and earlier would attempt to retrieve + a previous version of the record in this case. + +2010-01-13 The InnoDB Team + + * buf/buf0buf.c: + When disabling the adaptive hash index, check the block state + before checking block->is_hashed, because the latter may be + uninitialized right after server startup. + +2010-01-12 The InnoDB Team + + * handler/ha_innodb.cc, handler/ha_innodb.h: + Fix Bug #46193 crash when accessing tables after enabling + innodb_force_recovery option + +2010-01-12 The InnoDB Team + + * row/row0mysql.c: + Fix Bug#49238 Creating/Dropping a temporary table while at 1023 + transactions will cause assert. + 2009-12-02 The InnoDB Team - * srv/srv0start.c: Display the zlib version number at startup. + * srv/srv0start.c: + Display the zlib version number at startup. InnoDB compressed tables use zlib, and the implementation depends on the zlib function compressBound(), whose definition was slightly changed in zlib version 1.2.3.1 in 2006. 
MySQL bundles zlib 1.2.3 diff --git a/buf/buf0buf.c b/buf/buf0buf.c index cff102aa92c..2e44bc89ca0 100644 --- a/buf/buf0buf.c +++ b/buf/buf0buf.c @@ -1060,7 +1060,9 @@ buf_pool_drop_hash_index(void) when we have an x-latch on btr_search_latch; see the comment in buf0buf.h */ - if (!block->is_hashed) { + if (buf_block_get_state(block) + != BUF_BLOCK_FILE_PAGE + || !block->is_hashed) { continue; } diff --git a/dict/dict0crea.c b/dict/dict0crea.c index b0341e5eeab..5bbc2d17ddd 100644 --- a/dict/dict0crea.c +++ b/dict/dict0crea.c @@ -51,16 +51,18 @@ static dtuple_t* dict_create_sys_tables_tuple( /*=========================*/ - dict_table_t* table, /*!< in: table */ - mem_heap_t* heap) /*!< in: memory heap from which the memory for - the built tuple is allocated */ + const dict_table_t* table, /*!< in: table */ + mem_heap_t* heap) /*!< in: memory heap from + which the memory for the built + tuple is allocated */ { dict_table_t* sys_tables; dtuple_t* entry; dfield_t* dfield; byte* ptr; - ut_ad(table && heap); + ut_ad(table); + ut_ad(heap); sys_tables = dict_sys->sys_tables; @@ -69,18 +71,18 @@ dict_create_sys_tables_tuple( dict_table_copy_types(entry, sys_tables); /* 0: NAME -----------------------------*/ - dfield = dtuple_get_nth_field(entry, 0); + dfield = dtuple_get_nth_field(entry, 0/*NAME*/); dfield_set_data(dfield, table->name, ut_strlen(table->name)); /* 3: ID -------------------------------*/ - dfield = dtuple_get_nth_field(entry, 1); + dfield = dtuple_get_nth_field(entry, 1/*ID*/); ptr = mem_heap_alloc(heap, 8); mach_write_to_8(ptr, table->id); dfield_set_data(dfield, ptr, 8); /* 4: N_COLS ---------------------------*/ - dfield = dtuple_get_nth_field(entry, 2); + dfield = dtuple_get_nth_field(entry, 2/*N_COLS*/); #if DICT_TF_COMPACT != 1 #error @@ -91,7 +93,7 @@ dict_create_sys_tables_tuple( | ((table->flags & DICT_TF_COMPACT) << 31)); dfield_set_data(dfield, ptr, 4); /* 5: TYPE -----------------------------*/ - dfield = dtuple_get_nth_field(entry, 3); + dfield = dtuple_get_nth_field(entry, 3/*TYPE*/); ptr = mem_heap_alloc(heap, 4); if (table->flags & (~DICT_TF_COMPACT & ~(~0 << DICT_TF_BITS))) { @@ -107,25 +109,25 @@ dict_create_sys_tables_tuple( dfield_set_data(dfield, ptr, 4); /* 6: MIX_ID (obsolete) ---------------------------*/ - dfield = dtuple_get_nth_field(entry, 4); + dfield = dtuple_get_nth_field(entry, 4/*MIX_ID*/); ptr = mem_heap_zalloc(heap, 8); dfield_set_data(dfield, ptr, 8); /* 7: MIX_LEN (additional flags) --------------------------*/ - dfield = dtuple_get_nth_field(entry, 5); + dfield = dtuple_get_nth_field(entry, 5/*MIX_LEN*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, table->flags >> DICT_TF2_SHIFT); dfield_set_data(dfield, ptr, 4); /* 8: CLUSTER_NAME ---------------------*/ - dfield = dtuple_get_nth_field(entry, 6); + dfield = dtuple_get_nth_field(entry, 6/*CLUSTER_NAME*/); dfield_set_null(dfield); /* not supported */ /* 9: SPACE ----------------------------*/ - dfield = dtuple_get_nth_field(entry, 7); + dfield = dtuple_get_nth_field(entry, 7/*SPACE*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, table->space); @@ -144,19 +146,21 @@ static dtuple_t* dict_create_sys_columns_tuple( /*==========================*/ - dict_table_t* table, /*!< in: table */ - ulint i, /*!< in: column number */ - mem_heap_t* heap) /*!< in: memory heap from which the memory for - the built tuple is allocated */ + const dict_table_t* table, /*!< in: table */ + ulint i, /*!< in: column number */ + mem_heap_t* heap) /*!< in: memory heap from + which the memory for the built + 
tuple is allocated */ { dict_table_t* sys_columns; dtuple_t* entry; const dict_col_t* column; dfield_t* dfield; byte* ptr; - const char* col_name; + const char* col_name; - ut_ad(table && heap); + ut_ad(table); + ut_ad(heap); column = dict_table_get_nth_col(table, i); @@ -167,47 +171,47 @@ dict_create_sys_columns_tuple( dict_table_copy_types(entry, sys_columns); /* 0: TABLE_ID -----------------------*/ - dfield = dtuple_get_nth_field(entry, 0); + dfield = dtuple_get_nth_field(entry, 0/*TABLE_ID*/); ptr = mem_heap_alloc(heap, 8); mach_write_to_8(ptr, table->id); dfield_set_data(dfield, ptr, 8); /* 1: POS ----------------------------*/ - dfield = dtuple_get_nth_field(entry, 1); + dfield = dtuple_get_nth_field(entry, 1/*POS*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, i); dfield_set_data(dfield, ptr, 4); /* 4: NAME ---------------------------*/ - dfield = dtuple_get_nth_field(entry, 2); + dfield = dtuple_get_nth_field(entry, 2/*NAME*/); col_name = dict_table_get_col_name(table, i); dfield_set_data(dfield, col_name, ut_strlen(col_name)); /* 5: MTYPE --------------------------*/ - dfield = dtuple_get_nth_field(entry, 3); + dfield = dtuple_get_nth_field(entry, 3/*MTYPE*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, column->mtype); dfield_set_data(dfield, ptr, 4); /* 6: PRTYPE -------------------------*/ - dfield = dtuple_get_nth_field(entry, 4); + dfield = dtuple_get_nth_field(entry, 4/*PRTYPE*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, column->prtype); dfield_set_data(dfield, ptr, 4); /* 7: LEN ----------------------------*/ - dfield = dtuple_get_nth_field(entry, 5); + dfield = dtuple_get_nth_field(entry, 5/*LEN*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, column->len); dfield_set_data(dfield, ptr, 4); /* 8: PREC ---------------------------*/ - dfield = dtuple_get_nth_field(entry, 6); + dfield = dtuple_get_nth_field(entry, 6/*PREC*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, 0/* unused */); @@ -325,9 +329,10 @@ static dtuple_t* dict_create_sys_indexes_tuple( /*==========================*/ - dict_index_t* index, /*!< in: index */ - mem_heap_t* heap) /*!< in: memory heap from which the memory for - the built tuple is allocated */ + const dict_index_t* index, /*!< in: index */ + mem_heap_t* heap) /*!< in: memory heap from + which the memory for the built + tuple is allocated */ { dict_table_t* sys_indexes; dict_table_t* table; @@ -336,7 +341,8 @@ dict_create_sys_indexes_tuple( byte* ptr; ut_ad(mutex_own(&(dict_sys->mutex))); - ut_ad(index && heap); + ut_ad(index); + ut_ad(heap); sys_indexes = dict_sys->sys_indexes; @@ -347,32 +353,32 @@ dict_create_sys_indexes_tuple( dict_table_copy_types(entry, sys_indexes); /* 0: TABLE_ID -----------------------*/ - dfield = dtuple_get_nth_field(entry, 0); + dfield = dtuple_get_nth_field(entry, 0/*TABLE_ID*/); ptr = mem_heap_alloc(heap, 8); mach_write_to_8(ptr, table->id); dfield_set_data(dfield, ptr, 8); /* 1: ID ----------------------------*/ - dfield = dtuple_get_nth_field(entry, 1); + dfield = dtuple_get_nth_field(entry, 1/*ID*/); ptr = mem_heap_alloc(heap, 8); mach_write_to_8(ptr, index->id); dfield_set_data(dfield, ptr, 8); /* 4: NAME --------------------------*/ - dfield = dtuple_get_nth_field(entry, 2); + dfield = dtuple_get_nth_field(entry, 2/*NAME*/); dfield_set_data(dfield, index->name, ut_strlen(index->name)); /* 5: N_FIELDS ----------------------*/ - dfield = dtuple_get_nth_field(entry, 3); + dfield = dtuple_get_nth_field(entry, 3/*N_FIELDS*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, 
index->n_fields); dfield_set_data(dfield, ptr, 4); /* 6: TYPE --------------------------*/ - dfield = dtuple_get_nth_field(entry, 4); + dfield = dtuple_get_nth_field(entry, 4/*TYPE*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, index->type); @@ -384,7 +390,7 @@ dict_create_sys_indexes_tuple( #error "DICT_SYS_INDEXES_SPACE_NO_FIELD != 7" #endif - dfield = dtuple_get_nth_field(entry, 5); + dfield = dtuple_get_nth_field(entry, 5/*SPACE*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, index->space); @@ -396,7 +402,7 @@ dict_create_sys_indexes_tuple( #error "DICT_SYS_INDEXES_PAGE_NO_FIELD != 8" #endif - dfield = dtuple_get_nth_field(entry, 6); + dfield = dtuple_get_nth_field(entry, 6/*PAGE_NO*/); ptr = mem_heap_alloc(heap, 4); mach_write_to_4(ptr, FIL_NULL); @@ -415,10 +421,11 @@ static dtuple_t* dict_create_sys_fields_tuple( /*=========================*/ - dict_index_t* index, /*!< in: index */ - ulint i, /*!< in: field number */ - mem_heap_t* heap) /*!< in: memory heap from which the memory for - the built tuple is allocated */ + const dict_index_t* index, /*!< in: index */ + ulint i, /*!< in: field number */ + mem_heap_t* heap) /*!< in: memory heap from + which the memory for the built + tuple is allocated */ { dict_table_t* sys_fields; dtuple_t* entry; @@ -428,7 +435,8 @@ dict_create_sys_fields_tuple( ibool index_contains_column_prefix_field = FALSE; ulint j; - ut_ad(index && heap); + ut_ad(index); + ut_ad(heap); for (j = 0; j < index->n_fields; j++) { if (dict_index_get_nth_field(index, j)->prefix_len > 0) { @@ -446,7 +454,7 @@ dict_create_sys_fields_tuple( dict_table_copy_types(entry, sys_fields); /* 0: INDEX_ID -----------------------*/ - dfield = dtuple_get_nth_field(entry, 0); + dfield = dtuple_get_nth_field(entry, 0/*INDEX_ID*/); ptr = mem_heap_alloc(heap, 8); mach_write_to_8(ptr, index->id); @@ -454,7 +462,7 @@ dict_create_sys_fields_tuple( dfield_set_data(dfield, ptr, 8); /* 1: POS + PREFIX LENGTH ----------------------------*/ - dfield = dtuple_get_nth_field(entry, 1); + dfield = dtuple_get_nth_field(entry, 1/*POS*/); ptr = mem_heap_alloc(heap, 4); @@ -474,7 +482,7 @@ dict_create_sys_fields_tuple( dfield_set_data(dfield, ptr, 4); /* 4: COL_NAME -------------------------*/ - dfield = dtuple_get_nth_field(entry, 2); + dfield = dtuple_get_nth_field(entry, 2/*COL_NAME*/); dfield_set_data(dfield, field->name, ut_strlen(field->name)); @@ -605,6 +613,7 @@ dict_create_index_tree_step( dict_table_t* sys_indexes; dict_table_t* table; dtuple_t* search_tuple; + ulint zip_size; btr_pcur_t pcur; mtr_t mtr; @@ -629,8 +638,9 @@ dict_create_index_tree_step( btr_pcur_move_to_next_user_rec(&pcur, &mtr); - node->page_no = btr_create(index->type, index->space, - dict_table_zip_size(index->table), + zip_size = dict_table_zip_size(index->table); + + node->page_no = btr_create(index->type, index->space, zip_size, index->id, index, &mtr); /* printf("Created a new index tree in space %lu root page %lu\n", index->space, index->page_no); */ diff --git a/dict/dict0dict.c b/dict/dict0dict.c index 4c62e8de748..ca129c29d20 100644 --- a/dict/dict0dict.c +++ b/dict/dict0dict.c @@ -1460,6 +1460,7 @@ dict_index_add_to_cache( if (!dict_index_find_cols(table, index)) { + dict_mem_index_free(index); return(DB_CORRUPTION); } diff --git a/dict/dict0load.c b/dict/dict0load.c index 2867125e39d..0c72a2e8f81 100644 --- a/dict/dict0load.c +++ b/dict/dict0load.c @@ -260,7 +260,7 @@ dict_sys_tables_get_flags( return(0); } - field = rec_get_nth_field_old(rec, 4, &len); + field = rec_get_nth_field_old(rec, 
4/*N_COLS*/, &len); n_cols = mach_read_from_4(field); if (UNIV_UNLIKELY(!(n_cols & 0x80000000UL))) { diff --git a/handler/ha_innodb.cc b/handler/ha_innodb.cc index 5509d0381d3..972430a5976 100644 --- a/handler/ha_innodb.cc +++ b/handler/ha_innodb.cc @@ -3010,59 +3010,150 @@ normalize_table_name( } /********************************************************************//** +Get the upper limit of the MySQL integral and floating-point type. +@return maximum allowed value for the field */ +static +ulonglong +innobase_get_int_col_max_value( +/*===========================*/ + const Field* field) /*!< in: MySQL field */ +{ + ulonglong max_value = 0; + + switch(field->key_type()) { + /* TINY */ + case HA_KEYTYPE_BINARY: + max_value = 0xFFULL; + break; + case HA_KEYTYPE_INT8: + max_value = 0x7FULL; + break; + /* SHORT */ + case HA_KEYTYPE_USHORT_INT: + max_value = 0xFFFFULL; + break; + case HA_KEYTYPE_SHORT_INT: + max_value = 0x7FFFULL; + break; + /* MEDIUM */ + case HA_KEYTYPE_UINT24: + max_value = 0xFFFFFFULL; + break; + case HA_KEYTYPE_INT24: + max_value = 0x7FFFFFULL; + break; + /* LONG */ + case HA_KEYTYPE_ULONG_INT: + max_value = 0xFFFFFFFFULL; + break; + case HA_KEYTYPE_LONG_INT: + max_value = 0x7FFFFFFFULL; + break; + /* BIG */ + case HA_KEYTYPE_ULONGLONG: + max_value = 0xFFFFFFFFFFFFFFFFULL; + break; + case HA_KEYTYPE_LONGLONG: + max_value = 0x7FFFFFFFFFFFFFFFULL; + break; + case HA_KEYTYPE_FLOAT: + /* We use the maximum as per IEEE754-2008 standard, 2^24 */ + max_value = 0x1000000ULL; + break; + case HA_KEYTYPE_DOUBLE: + /* We use the maximum as per IEEE754-2008 standard, 2^53 */ + max_value = 0x20000000000000ULL; + break; + default: + ut_error; + } + + return(max_value); +} + +/************************************************************************ Set the autoinc column max value. This should only be called once from -ha_innobase::open(). Therefore there's no need for a covering lock. -@return DB_SUCCESS or error code */ +ha_innobase::open(). Therefore there's no need for a covering lock. */ UNIV_INTERN -ulint +void ha_innobase::innobase_initialize_autoinc() /*======================================*/ { - dict_index_t* index; ulonglong auto_inc; - const char* col_name; - ulint error; - - col_name = table->found_next_number_field->field_name; - index = innobase_get_index(table->s->next_number_index); - - /* Execute SELECT MAX(col_name) FROM TABLE; */ - error = row_search_max_autoinc(index, col_name, &auto_inc); - - switch (error) { - case DB_SUCCESS: - - /* At the this stage we don't know the increment - or the offset, so use default inrement of 1. */ - ++auto_inc; - break; - - case DB_RECORD_NOT_FOUND: - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: MySQL and InnoDB data " - "dictionaries are out of sync.\n" - "InnoDB: Unable to find the AUTOINC column %s in the " - "InnoDB table %s.\n" - "InnoDB: We set the next AUTOINC column value to the " - "maximum possible value,\n" - "InnoDB: in effect disabling the AUTOINC next value " - "generation.\n" - "InnoDB: You can either set the next AUTOINC value " - "explicitly using ALTER TABLE\n" - "InnoDB: or fix the data dictionary by recreating " - "the table.\n", - col_name, index->table->name); + const Field* field = table->found_next_number_field; + if (field != NULL) { + auto_inc = innobase_get_int_col_max_value(field); + } else { + /* We have no idea what's been passed in to us as the + autoinc column. We set it to the MAX_INT of our table + autoinc type. 
*/ auto_inc = 0xFFFFFFFFFFFFFFFFULL; - break; - default: - return(error); + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB: Unable to determine the AUTOINC " + "column name\n"); + } + + if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { + /* If the recovery level is set so high that writes + are disabled we force the AUTOINC counter to the MAX + value effectively disabling writes to the table. + Secondly, we avoid reading the table in case the read + results in failure due to a corrupted table/index. + + We will not return an error to the client, so that the + tables can be dumped with minimal hassle. If an error + were returned in this case, the first attempt to read + the table would fail and subsequent SELECTs would succeed. */ + } else if (field == NULL) { + my_error(ER_AUTOINC_READ_FAILED, MYF(0)); + } else { + dict_index_t* index; + const char* col_name; + ulonglong read_auto_inc; + ulint err; + + update_thd(ha_thd()); + col_name = field->field_name; + index = innobase_get_index(table->s->next_number_index); + + /* Execute SELECT MAX(col_name) FROM TABLE; */ + err = row_search_max_autoinc(index, col_name, &read_auto_inc); + + switch (err) { + case DB_SUCCESS: + /* At the this stage we do not know the increment + or the offset, so use a default increment of 1. */ + auto_inc = read_auto_inc + 1; + break; + + case DB_RECORD_NOT_FOUND: + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB: MySQL and InnoDB data " + "dictionaries are out of sync.\n" + "InnoDB: Unable to find the AUTOINC column " + "%s in the InnoDB table %s.\n" + "InnoDB: We set the next AUTOINC column " + "value to the maximum possible value,\n" + "InnoDB: in effect disabling the AUTOINC " + "next value generation.\n" + "InnoDB: You can either set the next " + "AUTOINC value explicitly using ALTER TABLE\n" + "InnoDB: or fix the data dictionary by " + "recreating the table.\n", + col_name, index->table->name); + + my_error(ER_AUTOINC_READ_FAILED, MYF(0)); + break; + default: + /* row_search_max_autoinc() should only return + one of DB_SUCCESS or DB_RECORD_NOT_FOUND. */ + ut_error; + } } dict_table_autoinc_initialize(prebuilt->table, auto_inc); - - return(DB_SUCCESS); } /*****************************************************************//** @@ -3269,8 +3360,6 @@ retry: /* Only if the table has an AUTOINC column. */ if (prebuilt->table != NULL && table->found_next_number_field != NULL) { - ulint error; - dict_table_autoinc_lock(prebuilt->table); /* Since a table can already be "open" in InnoDB's internal @@ -3279,8 +3368,7 @@ retry: autoinc value from a previous MySQL open. */ if (dict_table_autoinc_read(prebuilt->table) == 0) { - error = innobase_initialize_autoinc(); - ut_a(error == DB_SUCCESS); + innobase_initialize_autoinc(); } dict_table_autoinc_unlock(prebuilt->table); @@ -4096,67 +4184,6 @@ skip_field: } } -/********************************************************************//** -Get the upper limit of the MySQL integral and floating-point type. 
*/ -UNIV_INTERN -ulonglong -ha_innobase::innobase_get_int_col_max_value( -/*========================================*/ - const Field* field) -{ - ulonglong max_value = 0; - - switch(field->key_type()) { - /* TINY */ - case HA_KEYTYPE_BINARY: - max_value = 0xFFULL; - break; - case HA_KEYTYPE_INT8: - max_value = 0x7FULL; - break; - /* SHORT */ - case HA_KEYTYPE_USHORT_INT: - max_value = 0xFFFFULL; - break; - case HA_KEYTYPE_SHORT_INT: - max_value = 0x7FFFULL; - break; - /* MEDIUM */ - case HA_KEYTYPE_UINT24: - max_value = 0xFFFFFFULL; - break; - case HA_KEYTYPE_INT24: - max_value = 0x7FFFFFULL; - break; - /* LONG */ - case HA_KEYTYPE_ULONG_INT: - max_value = 0xFFFFFFFFULL; - break; - case HA_KEYTYPE_LONG_INT: - max_value = 0x7FFFFFFFULL; - break; - /* BIG */ - case HA_KEYTYPE_ULONGLONG: - max_value = 0xFFFFFFFFFFFFFFFFULL; - break; - case HA_KEYTYPE_LONGLONG: - max_value = 0x7FFFFFFFFFFFFFFFULL; - break; - case HA_KEYTYPE_FLOAT: - /* We use the maximum as per IEEE754-2008 standard, 2^24 */ - max_value = 0x1000000ULL; - break; - case HA_KEYTYPE_DOUBLE: - /* We use the maximum as per IEEE754-2008 standard, 2^53 */ - max_value = 0x20000000000000ULL; - break; - default: - ut_error; - } - - return(max_value); -} - /********************************************************************//** This special handling is really to overcome the limitations of MySQL's binlogging. We need to eliminate the non-determinism that will arise in diff --git a/handler/ha_innodb.h b/handler/ha_innodb.h index 31e88ed8530..0e366a1eb2c 100644 --- a/handler/ha_innodb.h +++ b/handler/ha_innodb.h @@ -91,9 +91,8 @@ class ha_innobase: public handler ulint innobase_reset_autoinc(ulonglong auto_inc); ulint innobase_get_autoinc(ulonglong* value); ulint innobase_update_autoinc(ulonglong auto_inc); - ulint innobase_initialize_autoinc(); + void innobase_initialize_autoinc(); dict_index_t* innobase_get_index(uint keynr); - ulonglong innobase_get_int_col_max_value(const Field* field); /* Init values for the class: */ public: diff --git a/include/mem0dbg.h b/include/mem0dbg.h index a064af5c678..8ddf4a13cba 100644 --- a/include/mem0dbg.h +++ b/include/mem0dbg.h @@ -28,6 +28,13 @@ Created 6/9/1994 Heikki Tuuri check fields whose sizes are given below */ #ifdef UNIV_MEM_DEBUG +# ifndef UNIV_HOTBACKUP +/* The mutex which protects in the debug version the hash table +containing the list of live memory heaps, and also the global +variables in mem0dbg.c. 
*/ +extern mutex_t mem_hash_mutex; +# endif /* !UNIV_HOTBACKUP */ + #define MEM_FIELD_HEADER_SIZE ut_calc_align(2 * sizeof(ulint),\ UNIV_MEM_ALIGNMENT) #define MEM_FIELD_TRAILER_SIZE sizeof(ulint) diff --git a/include/mem0dbg.ic b/include/mem0dbg.ic index cb9245411dc..9c6e5a78263 100644 --- a/include/mem0dbg.ic +++ b/include/mem0dbg.ic @@ -25,9 +25,6 @@ Created 6/8/1994 Heikki Tuuri *************************************************************************/ #ifdef UNIV_MEM_DEBUG -# ifndef UNIV_HOTBACKUP -extern mutex_t mem_hash_mutex; -# endif /* !UNIV_HOTBACKUP */ extern ulint mem_current_allocated_memory; /******************************************************************//** diff --git a/mem/mem0dbg.c b/mem/mem0dbg.c index 01eda20ec45..4973ead4213 100644 --- a/mem/mem0dbg.c +++ b/mem/mem0dbg.c @@ -180,6 +180,10 @@ mem_close(void) { mem_pool_free(mem_comm_pool); mem_comm_pool = NULL; +#ifdef UNIV_MEM_DEBUG + mutex_free(&mem_hash_mutex); + mem_hash_initialized = FALSE; +#endif /* UNIV_MEM_DEBUG */ } #endif /* !UNIV_HOTBACKUP */ diff --git a/row/row0mysql.c b/row/row0mysql.c index 7a43d0f3b92..e9fd12e9747 100644 --- a/row/row0mysql.c +++ b/row/row0mysql.c @@ -3255,19 +3255,13 @@ check_next_foreign: "END;\n" , FALSE, trx); - if (err != DB_SUCCESS) { - ut_a(err == DB_OUT_OF_FILE_SPACE); - - err = DB_MUST_GET_MORE_FILE_SPACE; - - row_mysql_handle_errors(&err, trx, NULL, NULL); - - ut_error; - } else { + switch (err) { ibool is_temp; const char* name_or_path; mem_heap_t* heap; + case DB_SUCCESS: + heap = mem_heap_create(200); /* Clone the name, in case it has been allocated @@ -3333,7 +3327,27 @@ check_next_foreign: } mem_heap_free(heap); + break; + + case DB_TOO_MANY_CONCURRENT_TRXS: + /* Cannot even find a free slot for the + the undo log. We can directly exit here + and return the DB_TOO_MANY_CONCURRENT_TRXS + error. */ + break; + + case DB_OUT_OF_FILE_SPACE: + err = DB_MUST_GET_MORE_FILE_SPACE; + + row_mysql_handle_errors(&err, trx, NULL, NULL); + + /* Fall through to raise error */ + + default: + /* No other possible error returns */ + ut_error; } + funct_exit: if (locked_dictionary) { diff --git a/row/row0sel.c b/row/row0sel.c index 23cd97f6826..e14f29d8d64 100644 --- a/row/row0sel.c +++ b/row/row0sel.c @@ -132,7 +132,8 @@ index record. NOTE: the comparison is NOT done as a binary comparison, but character fields are compared with collation! 
@return TRUE if the secondary record is equal to the corresponding -fields in the clustered record, when compared with collation */ +fields in the clustered record, when compared with collation; +FALSE if not equal or if the clustered record has been marked for deletion */ static ibool row_sel_sec_rec_is_for_clust_rec( @@ -2977,6 +2978,7 @@ row_sel_get_clust_rec_for_mysql( if (clust_rec && (old_vers + || trx->isolation_level <= TRX_ISO_READ_UNCOMMITTED || rec_get_deleted_flag(rec, dict_table_is_comp( sec_index->table))) && !row_sel_sec_rec_is_for_clust_rec( diff --git a/sync/sync0sync.c b/sync/sync0sync.c index c1f9ecd5fe1..01c809ec1f8 100644 --- a/sync/sync0sync.c +++ b/sync/sync0sync.c @@ -315,6 +315,15 @@ mutex_free( ut_a(mutex_get_lock_word(mutex) == 0); ut_a(mutex_get_waiters(mutex) == 0); +#ifdef UNIV_MEM_DEBUG + if (mutex == &mem_hash_mutex) { + ut_ad(UT_LIST_GET_LEN(mutex_list) == 1); + ut_ad(UT_LIST_GET_FIRST(mutex_list) == &mem_hash_mutex); + UT_LIST_REMOVE(list, mutex_list, mutex); + goto func_exit; + } +#endif /* UNIV_MEM_DEBUG */ + if (mutex != &mutex_list_mutex #ifdef UNIV_SYNC_DEBUG && mutex != &sync_thread_mutex @@ -336,7 +345,9 @@ mutex_free( } os_event_free(mutex->event); - +#ifdef UNIV_MEM_DEBUG +func_exit: +#endif /* UNIV_MEM_DEBUG */ #if !defined(HAVE_ATOMIC_BUILTINS) os_fast_mutex_free(&(mutex->os_fast_mutex)); #endif @@ -1371,6 +1382,12 @@ sync_close(void) mutex = UT_LIST_GET_FIRST(mutex_list); while (mutex) { +#ifdef UNIV_MEM_DEBUG + if (mutex == &mem_hash_mutex) { + mutex = UT_LIST_GET_NEXT(list, mutex); + continue; + } +#endif /* UNIV_MEM_DEBUG */ mutex_free(mutex); mutex = UT_LIST_GET_FIRST(mutex_list); }
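
For reference, a small standalone C sketch of the error handling that the
row0mysql.c hunk above introduces for bug #49238: the old code asserted
that the only possible failure after deleting the metadata was
DB_OUT_OF_FILE_SPACE, while the merged code dispatches on the error so
that DB_TOO_MANY_CONCURRENT_TRXS is returned to the caller unchanged.
The enum db_err values and handle_drop_result() are illustrative
stand-ins, not the InnoDB API; only the dispatch pattern mirrors the
patch.

#include <assert.h>
#include <stdio.h>

/* Hypothetical error codes standing in for InnoDB's DB_* values. */
enum db_err {
	DB_SUCCESS,
	DB_OUT_OF_FILE_SPACE,
	DB_MUST_GET_MORE_FILE_SPACE,
	DB_TOO_MANY_CONCURRENT_TRXS
};

static enum db_err
handle_drop_result(enum db_err err)
{
	switch (err) {
	case DB_SUCCESS:
		/* ... drop the tablespace, free the in-memory table ... */
		break;
	case DB_TOO_MANY_CONCURRENT_TRXS:
		/* No free undo log slot: return the error as-is
		instead of asserting. */
		break;
	case DB_OUT_OF_FILE_SPACE:
		err = DB_MUST_GET_MORE_FILE_SPACE;
		/* fall through: still treated as fatal here */
	default:
		assert(!"unexpected error while dropping a table");
	}
	return err;
}

int
main(void)
{
	printf("%d\n", (int) handle_drop_result(DB_TOO_MANY_CONCURRENT_TRXS));
	return 0;
}
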
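
Similarly, a brief illustration of the buf0buf.c change (r6445): a field
that is only meaningful for blocks in a particular state must be guarded
by a check of that state, because for other blocks the field may never
have been initialized. The struct block type and block_needs_hash_drop()
below are hypothetical, not the buffer-pool code.

#include <stdbool.h>
#include <stdio.h>

enum block_state { BLOCK_NOT_USED, BLOCK_FILE_PAGE };

struct block {
	enum block_state state;
	bool is_hashed;	/* meaningful only when state == BLOCK_FILE_PAGE */
};

static bool
block_needs_hash_drop(const struct block *b)
{
	/* Check the state discriminant first: for blocks that never
	held a file page, is_hashed may be uninitialized right after
	startup and must not be read. */
	return b->state == BLOCK_FILE_PAGE && b->is_hashed;
}

int
main(void)
{
	struct block b = { BLOCK_FILE_PAGE, true };
	printf("%d\n", (int) block_needs_hash_drop(&b));
	return 0;
}
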