Merge tag 'mariadb-10.0.23' into 10.0-galera

This commit is contained in:
Nirbhay Choubey 2015-12-19 14:24:38 -05:00
commit dad555a09c
3579 changed files with 645162 additions and 223253 deletions

View file

@ -112,6 +112,9 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "ha_innodb.h"
#include "i_s.h"
#include <string>
#include <sstream>
# ifndef MYSQL_PLUGIN_IMPORT
# define MYSQL_PLUGIN_IMPORT /* nothing */
# endif /* MYSQL_PLUGIN_IMPORT */
@ -2097,10 +2100,11 @@ innobase_next_autoinc(
if (next_value == 0) {
ulonglong next;
if (current > offset) {
if (current >= offset) {
next = (current - offset) / step;
} else {
next = (offset - current) / step;
next = 0;
block -= step;
}
ut_a(max_value > next);
@ -13115,8 +13119,9 @@ ha_innobase::update_table_comment(
const char* comment)/*!< in: table comment defined by user */
{
uint length = (uint) strlen(comment);
char* str;
char* str=0;
long flen;
std::string fk_str;
/* We do not know if MySQL can call this function before calling
external_lock(). To be safe, update the thd of the current table
@ -13134,50 +13139,40 @@ ha_innobase::update_table_comment(
possible adaptive hash latch to avoid deadlocks of threads */
trx_search_latch_release_if_reserved(prebuilt->trx);
str = NULL;
/* output the data to a temporary file */
#define SSTR( x ) reinterpret_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
if (!srv_read_only_mode) {
fk_str.append("InnoDB free: ");
fk_str.append(SSTR(fsp_get_available_space_in_free_extents(
prebuilt->table->space)));
mutex_enter(&srv_dict_tmpfile_mutex);
fk_str.append(dict_print_info_on_foreign_keys(
FALSE, prebuilt->trx,
prebuilt->table));
rewind(srv_dict_tmpfile);
flen = fk_str.length();
fprintf(srv_dict_tmpfile, "InnoDB free: %llu kB",
fsp_get_available_space_in_free_extents(
prebuilt->table->space));
if (flen < 0) {
flen = 0;
} else if (length + flen + 3 > 64000) {
flen = 64000 - 3 - length;
}
dict_print_info_on_foreign_keys(
FALSE, srv_dict_tmpfile, prebuilt->trx,
prebuilt->table);
/* allocate buffer for the full string */
flen = ftell(srv_dict_tmpfile);
str = (char*) my_malloc(length + flen + 3, MYF(0));
if (flen < 0) {
flen = 0;
} else if (length + flen + 3 > 64000) {
flen = 64000 - 3 - length;
if (str) {
char* pos = str + length;
if (length) {
memcpy(str, comment, length);
*pos++ = ';';
*pos++ = ' ';
}
/* allocate buffer for the full string, and
read the contents of the temporary file */
str = (char*) my_malloc(length + flen + 3, MYF(0));
if (str) {
char* pos = str + length;
if (length) {
memcpy(str, comment, length);
*pos++ = ';';
*pos++ = ' ';
}
rewind(srv_dict_tmpfile);
flen = (uint) fread(pos, 1, flen, srv_dict_tmpfile);
pos[flen] = 0;
}
mutex_exit(&srv_dict_tmpfile_mutex);
memcpy(pos, fk_str.c_str(), flen);
pos[flen] = 0;
}
prebuilt->trx->op_info = (char*)"";
@ -13195,8 +13190,7 @@ char*
ha_innobase::get_foreign_key_create_info(void)
/*==========================================*/
{
long flen;
char* str = 0;
char* fk_str = 0;
ut_a(prebuilt != NULL);
@ -13214,38 +13208,22 @@ ha_innobase::get_foreign_key_create_info(void)
trx_search_latch_release_if_reserved(prebuilt->trx);
if (!srv_read_only_mode) {
mutex_enter(&srv_dict_tmpfile_mutex);
rewind(srv_dict_tmpfile);
/* Output the data to a temporary file */
dict_print_info_on_foreign_keys(
TRUE, srv_dict_tmpfile, prebuilt->trx,
/* Output the data to a temporary file */
std::string str = dict_print_info_on_foreign_keys(
TRUE, prebuilt->trx,
prebuilt->table);
prebuilt->trx->op_info = (char*)"";
prebuilt->trx->op_info = (char*)"";
flen = ftell(srv_dict_tmpfile);
/* Allocate buffer for the string */
fk_str = (char*) my_malloc(str.length() + 1, MYF(0));
if (flen < 0) {
flen = 0;
}
/* Allocate buffer for the string, and
read the contents of the temporary file */
str = (char*) my_malloc(flen + 1, MYF(0));
if (str) {
rewind(srv_dict_tmpfile);
flen = (uint) fread(str, 1, flen, srv_dict_tmpfile);
str[flen] = 0;
}
mutex_exit(&srv_dict_tmpfile_mutex);
if (fk_str) {
memcpy(fk_str, str.c_str(), str.length());
fk_str[str.length()]='\0';
}
return(str);
return(fk_str);
}
@ -14864,10 +14842,7 @@ ha_innobase::get_auto_increment(
current = *first_value;
/* If the increment step of the auto increment column
decreases then it is not affecting the immediate
next value in the series. */
if (prebuilt->autoinc_increment > increment) {
if (prebuilt->autoinc_increment != increment) {
#ifdef WITH_WSREP
WSREP_DEBUG("autoinc decrease: %llu -> %llu\n"
@ -14885,7 +14860,7 @@ ha_innobase::get_auto_increment(
#endif /* WITH_WSREP */
current = innobase_next_autoinc(
current, 1, increment, 1, col_max_value);
current, 1, increment, offset, col_max_value);
dict_table_autoinc_initialize(prebuilt->table, current);
@ -18012,6 +17987,11 @@ static MYSQL_SYSVAR_BOOL(buffer_pool_dump_at_shutdown, srv_buffer_pool_dump_at_s
"Dump the buffer pool into a file named @@innodb_buffer_pool_filename",
NULL, NULL, FALSE);
static MYSQL_SYSVAR_ULONG(buffer_pool_dump_pct, srv_buf_pool_dump_pct,
PLUGIN_VAR_RQCMDARG,
"Dump only the hottest N% of each buffer pool, defaults to 100",
NULL, NULL, 100, 1, 100, 0);
#ifdef UNIV_DEBUG
static MYSQL_SYSVAR_STR(buffer_pool_evict, srv_buffer_pool_evict,
PLUGIN_VAR_RQCMDARG,
@ -18519,6 +18499,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(buffer_pool_filename),
MYSQL_SYSVAR(buffer_pool_dump_now),
MYSQL_SYSVAR(buffer_pool_dump_at_shutdown),
MYSQL_SYSVAR(buffer_pool_dump_pct),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(buffer_pool_evict),
#endif /* UNIV_DEBUG */

View file

@ -166,10 +166,13 @@ my_error_innodb(
/* TODO: report the row, as we do for DB_DUPLICATE_KEY */
my_error(ER_INVALID_USE_OF_NULL, MYF(0));
break;
case DB_TABLESPACE_EXISTS:
my_error(ER_TABLESPACE_EXISTS, MYF(0), table);
break;
#ifdef UNIV_DEBUG
case DB_SUCCESS:
case DB_DUPLICATE_KEY:
case DB_TABLESPACE_EXISTS:
case DB_ONLINE_LOG_TOO_BIG:
/* These codes should not be passed here. */
ut_error;
@ -5145,6 +5148,61 @@ commit_cache_rebuild(
DBUG_VOID_RETURN;
}
/** Collect the column numbers of all user-defined columns referenced by
the indexes that are scheduled to be dropped by this in-place ALTER.
@param[in]	ctx		In-place ALTER TABLE context
@param[out]	drop_col_list	set filled with the column numbers of the
				columns used by the indexes being dropped */
static
void
get_col_list_to_be_dropped(
	ha_innobase_inplace_ctx*	ctx,
	std::set<ulint>&		drop_col_list)
{
	for (ulint i = 0; i < ctx->num_to_drop_index; i++) {
		dict_index_t*	drop_index = ctx->drop_index[i];

		for (ulint j = 0; j < drop_index->n_user_defined_cols; j++) {
			drop_col_list.insert(
				dict_index_get_nth_col_no(drop_index, j));
		}
	}
}
/** For each column, which is part of an index which is not going to be
dropped, it checks if the column number of the column is same as col_no
argument passed.
@param[in] table table object
@param[in] col_no column number of the column which is to be checked
@retval true column exists
@retval false column does not exist. */
static
bool
check_col_exists_in_indexes(
const dict_table_t* table,
ulint col_no)
{
for (dict_index_t* index = dict_table_get_first_index(table); index;
index = dict_table_get_next_index(index)) {
if (index->to_be_dropped) {
continue;
}
for (ulint col = 0; col < index->n_user_defined_cols; col++) {
ulint index_col_no = dict_index_get_nth_col_no(
index, col);
if (col_no == index_col_no) {
return(true);
}
}
}
return(false);
}
/** Commit the changes made during prepare_inplace_alter_table()
and inplace_alter_table() inside the data dictionary tables,
when not rebuilding the table.
@ -5280,6 +5338,20 @@ commit_cache_norebuild(
DBUG_ASSERT(!ctx->need_rebuild());
std::set<ulint> drop_list;
std::set<ulint>::const_iterator col_it;
/* For each column that is part of an index being dropped, check
whether it is still part of some index that is not being dropped.
If it is not, reset the column's ord_part to 0. */
get_col_list_to_be_dropped(ctx, drop_list);
for(col_it = drop_list.begin(); col_it != drop_list.end(); ++col_it) {
if (!check_col_exists_in_indexes(ctx->new_table, *col_it)) {
ctx->new_table->cols[*col_it].ord_part = 0;
}
}
for (ulint i = 0; i < ctx->num_to_add_index; i++) {
dict_index_t* index = ctx->add_index[i];
DBUG_ASSERT(dict_index_get_online_status(index)
@ -5480,6 +5552,7 @@ ha_innobase::commit_inplace_alter_table(
Alter_inplace_info* ha_alter_info,
bool commit)
{
dberr_t error;
ha_innobase_inplace_ctx* ctx0
= static_cast<ha_innobase_inplace_ctx*>
(ha_alter_info->handler_ctx);
@ -5561,7 +5634,7 @@ ha_innobase::commit_inplace_alter_table(
transactions collected during crash recovery could be
holding InnoDB locks only, not MySQL locks. */
dberr_t error = row_merge_lock_table(
error = row_merge_lock_table(
prebuilt->trx, ctx->old_table, LOCK_X);
if (error != DB_SUCCESS) {
@ -5696,14 +5769,20 @@ ha_innobase::commit_inplace_alter_table(
= static_cast<ha_innobase_inplace_ctx*>(*pctx);
DBUG_ASSERT(ctx->need_rebuild());
/* Generate the redo log for the file
operations that will be performed in
commit_cache_rebuild(). */
fil_mtr_rename_log(ctx->old_table->space,
ctx->old_table->name,
ctx->new_table->space,
ctx->new_table->name,
ctx->tmp_name, &mtr);
/* Check for any possible problems for any
file operations that will be performed in
commit_cache_rebuild(), and if none, generate
the redo log for these operations. */
error = fil_mtr_rename_log(ctx->old_table,
ctx->new_table,
ctx->tmp_name, &mtr);
if (error != DB_SUCCESS) {
/* Out of memory or a problem will occur
when renaming files. */
fail = true;
my_error_innodb(error, ctx->old_table->name,
ctx->old_table->flags);
}
DBUG_INJECT_CRASH("ib_commit_inplace_crash",
crash_inject_count++);
}
@ -5716,18 +5795,25 @@ ha_innobase::commit_inplace_alter_table(
DBUG_EXECUTE_IF("innodb_alter_commit_crash_before_commit",
log_buffer_flush_to_disk();
DBUG_SUICIDE(););
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
ut_ad(!trx->fts_trx);
ut_ad(trx->insert_undo || trx->update_undo);
/* The following call commits the
mini-transaction, making the data dictionary
transaction committed at mtr.end_lsn. The
transaction becomes 'durable' by the time when
log_buffer_flush_to_disk() returns. In the
logical sense the commit in the file-based
data structures happens here. */
trx_commit_low(trx, &mtr);
if (fail) {
mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
mtr_commit(&mtr);
trx_rollback_for_mysql(trx);
} else {
/* The following call commits the
mini-transaction, making the data dictionary
transaction committed at mtr.end_lsn. The
transaction becomes 'durable' by the time when
log_buffer_flush_to_disk() returns. In the
logical sense the commit in the file-based
data structures happens here. */
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
ut_ad(trx->insert_undo || trx->update_undo);
trx_commit_low(trx, &mtr);
}
/* If server crashes here, the dictionary in
InnoDB and MySQL will differ. The .ibd files
@ -5749,7 +5835,6 @@ ha_innobase::commit_inplace_alter_table(
update the in-memory structures, close some handles, release
temporary files, and (unless we rolled back) update persistent
statistics. */
dberr_t error = DB_SUCCESS;
for (inplace_alter_handler_ctx** pctx = ctx_array;
*pctx; pctx++) {