/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2017, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file row/row0ins.cc
Insert into a table

Created 4/20/1996 Heikki Tuuri
*******************************************************/
#include "ha_prototypes.h"

#include "row0ins.h"
#include "dict0dict.h"
#include "dict0boot.h"
#include "trx0rec.h"
#include "trx0undo.h"
#include "btr0btr.h"
#include "btr0cur.h"
#include "mach0data.h"
#include "que0que.h"
#include "row0upd.h"
#include "row0sel.h"
#include "row0row.h"
#include "row0log.h"
#include "rem0cmp.h"
#include "lock0lock.h"
#include "log0log.h"
#include "eval0eval.h"
#include "data0data.h"
#include "usr0sess.h"
#include "buf0lru.h"
#include "fts0fts.h"
#include "fts0types.h"
#include "m_string.h"
#include "gis0geo.h"

/*************************************************************************
IMPORTANT NOTE: Any operation that generates redo MUST check that there
is enough space in the redo log before performing that operation. This is
done by calling log_free_check(). The reason for checking the
availability of the redo log space before the start of the operation is
that we MUST not hold any synchronization objects when performing the
check.
If you make a change in this module make sure that no codepath is
introduced where a call to log_free_check() is bypassed. */

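/* For example, the intended call pattern is roughly the following
(an illustrative sketch only; the concrete callers are the row_ins_*()
functions below):

	log_free_check();	<- called while this thread holds no latches
	mtr_start(&mtr);
	... B-tree operations that generate redo ...
	mtr_commit(&mtr);
*/
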
/*********************************************************************//**
Creates an insert node struct.
@return own: insert node struct */
ins_node_t*
ins_node_create(
/*============*/
	ulint		ins_type,	/*!< in: INS_VALUES, ... */
	dict_table_t*	table,	/*!< in: table where to insert */
	mem_heap_t*	heap)	/*!< in: mem heap where created */
{
	ins_node_t*	node;

	node = static_cast<ins_node_t*>(
		mem_heap_alloc(heap, sizeof(ins_node_t)));

	node->common.type = QUE_NODE_INSERT;

	node->ins_type = ins_type;

	node->state = INS_NODE_SET_IX_LOCK;
	node->table = table;
	node->index = NULL;
	node->entry = NULL;

	node->select = NULL;

	node->trx_id = 0;

	node->duplicate = NULL;

	node->entry_sys_heap = mem_heap_create(128);

	node->magic_n = INS_NODE_MAGIC_N;

	return(node);
}

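/* A typical caller allocates the node from its own heap, for example
(illustrative sketch; INS_DIRECT is one of the insert node types declared
next to INS_VALUES and INS_SEARCHED in row0ins.h):

	mem_heap_t*	heap = mem_heap_create(450);
	ins_node_t*	node = ins_node_create(INS_DIRECT, table, heap);

The node has no row yet; see ins_node_set_new_row() below. */
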
/***********************************************************//**
Creates an entry template for each index of a table. */
static
void
ins_node_create_entry_list(
/*=======================*/
	ins_node_t*	node)	/*!< in: row insert node */
{
	dict_index_t*	index;
	dtuple_t*	entry;

	ut_ad(node->entry_sys_heap);

	UT_LIST_INIT(node->entry_list, &dtuple_t::tuple_list);

	/* We will include all indexes (including corrupted
	secondary indexes) in the entry list. Filtering of
	these corrupted indexes is done in row_ins(). */

	for (index = dict_table_get_first_index(node->table);
	     index != 0;
	     index = dict_table_get_next_index(index)) {

		entry = row_build_index_entry_low(
			node->row, NULL, index, node->entry_sys_heap,
			ROW_BUILD_FOR_INSERT);

		UT_LIST_ADD_LAST(node->entry_list, entry);
	}
}

/*****************************************************************//**
Adds system field buffers to a row. */
static
void
row_ins_alloc_sys_fields(
/*=====================*/
	ins_node_t*	node)	/*!< in: insert node */
{
	dtuple_t*		row;
	dict_table_t*		table;
	mem_heap_t*		heap;
	const dict_col_t*	col;
	dfield_t*		dfield;
	byte*			ptr;

	row = node->row;
	table = node->table;
	heap = node->entry_sys_heap;

	ut_ad(row && table && heap);
	ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table));

	/* allocate buffer to hold the needed system created hidden columns. */
	const uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
	ptr = static_cast<byte*>(mem_heap_zalloc(heap, len));

	/* 1. Populate row-id */
	col = dict_table_get_sys_col(table, DATA_ROW_ID);

	dfield = dtuple_get_nth_field(row, dict_col_get_no(col));

	dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN);

	node->row_id_buf = ptr;

	ptr += DATA_ROW_ID_LEN;

	/* 2. Populate trx id */
	col = dict_table_get_sys_col(table, DATA_TRX_ID);

	dfield = dtuple_get_nth_field(row, dict_col_get_no(col));

	dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN);

	node->trx_id_buf = ptr;

	ptr += DATA_TRX_ID_LEN;

	col = dict_table_get_sys_col(table, DATA_ROLL_PTR);

	dfield = dtuple_get_nth_field(row, dict_col_get_no(col));

	dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN);
}

/*********************************************************************//**
Sets a new row to insert for an INS_DIRECT node. This function is only used
if we have constructed the row separately, which is a rare case; this
function is quite slow. */
void
ins_node_set_new_row(
/*=================*/
	ins_node_t*	node,	/*!< in: insert node */
	dtuple_t*	row)	/*!< in: new row (or first row) for the node */
{
	node->state = INS_NODE_SET_IX_LOCK;
	node->index = NULL;
	node->entry = NULL;
	node->duplicate = NULL;

	node->row = row;

	mem_heap_empty(node->entry_sys_heap);

	/* Create templates for index entries */

	ins_node_create_entry_list(node);

	/* Allocate from entry_sys_heap buffers for sys fields */

	row_ins_alloc_sys_fields(node);

	/* As we allocated a new trx id buf, the trx id should be written
	there again: */

	node->trx_id = 0;
}

/*******************************************************************//**
Does an insert operation by updating a delete-marked existing record
in the index. This situation can occur if the delete-marked record is
kept in the index for consistent reads.
@return DB_SUCCESS, DB_FAIL, or error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_sec_index_entry_by_modify(
/*==============================*/
	ulint		flags,	/*!< in: undo logging and locking flags */
	ulint		mode,	/*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
				depending on whether mtr holds just a leaf
				latch or also a tree latch */
	btr_cur_t*	cursor,	/*!< in: B-tree cursor */
	ulint**		offsets,/*!< in/out: offsets on cursor->page_cur.rec */
	mem_heap_t*	offsets_heap,
				/*!< in/out: memory heap that can be emptied */
	mem_heap_t*	heap,	/*!< in/out: memory heap */
	const dtuple_t*	entry,	/*!< in: index entry to insert */
	que_thr_t*	thr,	/*!< in: query thread */
	mtr_t*		mtr)	/*!< in: mtr; must be committed before
				latching any further pages */
{
	big_rec_t*	dummy_big_rec;
	upd_t*		update;
	rec_t*		rec;
	dberr_t		err;

	rec = btr_cur_get_rec(cursor);

	ut_ad(!dict_index_is_clust(cursor->index));
	ut_ad(rec_offs_validate(rec, cursor->index, *offsets));
	ut_ad(!entry->info_bits);

	/* We know that in the alphabetical ordering, entry and rec are
	identical. But in their binary form there may be differences if
	there are char fields in them. Therefore we have to calculate the
	difference. */

	update = row_upd_build_sec_rec_difference_binary(
		rec, cursor->index, *offsets, entry, heap);

	if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) {
		/* We should never insert in place of a record that
		has not been delete-marked. The only exception is when
		online CREATE INDEX copied the changes that we already
		made to the clustered index, and completed the
		secondary index creation before we got here. In this
		case, the change would already be there. The CREATE
		INDEX should be waiting for a MySQL meta-data lock
		upgrade at least until this INSERT or UPDATE
		returns. After that point, set_committed(true)
		would be invoked in commit_inplace_alter_table(). */
		ut_a(update->n_fields == 0);
		ut_a(!cursor->index->is_committed());
		ut_ad(!dict_index_is_online_ddl(cursor->index));
		return(DB_SUCCESS);
	}

	if (mode == BTR_MODIFY_LEAF) {
		/* Try an optimistic updating of the record, keeping changes
		within the page */

		/* TODO: pass only *offsets */
		err = btr_cur_optimistic_update(
			flags | BTR_KEEP_SYS_FLAG, cursor,
			offsets, &offsets_heap, update, 0, thr,
			thr_get_trx(thr)->id, mtr);
		switch (err) {
		case DB_OVERFLOW:
		case DB_UNDERFLOW:
		case DB_ZIP_OVERFLOW:
			err = DB_FAIL;
		default:
			break;
		}
	} else {
		ut_a(mode == BTR_MODIFY_TREE);
		if (buf_LRU_buf_pool_running_out()) {

			return(DB_LOCK_TABLE_FULL);
		}

		err = btr_cur_pessimistic_update(
			flags | BTR_KEEP_SYS_FLAG, cursor,
			offsets, &offsets_heap,
			heap, &dummy_big_rec, update, 0,
			thr, thr_get_trx(thr)->id, mtr);
		ut_ad(!dummy_big_rec);
	}

	return(err);
}

/*******************************************************************//**
Does an insert operation by delete unmarking and updating a delete marked
existing record in the index. This situation can occur if the delete marked
record is kept in the index for consistent reads.
@return DB_SUCCESS, DB_FAIL, or error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_clust_index_entry_by_modify(
/*================================*/
	btr_pcur_t*	pcur,	/*!< in/out: a persistent cursor pointing
				to the clust_rec that is being modified. */
	ulint		flags,	/*!< in: undo logging and locking flags */
	ulint		mode,	/*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
				depending on whether mtr holds just a leaf
				latch or also a tree latch */
	ulint**		offsets,/*!< out: offsets on cursor->page_cur.rec */
	mem_heap_t**	offsets_heap,
				/*!< in/out: pointer to memory heap that can
				be emptied, or NULL */
	mem_heap_t*	heap,	/*!< in/out: memory heap */
	const dtuple_t*	entry,	/*!< in: index entry to insert */
	que_thr_t*	thr,	/*!< in: query thread */
	mtr_t*		mtr)	/*!< in: mtr; must be committed before
				latching any further pages */
{
	const rec_t*	rec;
	upd_t*		update;
	dberr_t		err;
	btr_cur_t*	cursor	= btr_pcur_get_btr_cur(pcur);
	TABLE*		mysql_table = NULL;
	ut_ad(dict_index_is_clust(cursor->index));

	rec = btr_cur_get_rec(cursor);

	ut_ad(rec_get_deleted_flag(rec,
				   dict_table_is_comp(cursor->index->table)));

	/* Build an update vector containing all the fields to be modified;
	NOTE that this vector may NOT contain system columns trx_id or
	roll_ptr */
	if (thr->prebuilt != NULL) {
		mysql_table = thr->prebuilt->m_mysql_table;
		ut_ad(thr->prebuilt->trx == thr_get_trx(thr));
	}

	update = row_upd_build_difference_binary(
		cursor->index, entry, rec, NULL, true,
		thr_get_trx(thr), heap, mysql_table);
	if (mode != BTR_MODIFY_TREE) {
		ut_ad((mode & ~BTR_ALREADY_S_LATCHED) == BTR_MODIFY_LEAF);

		/* Try optimistic updating of the record, keeping changes
		within the page */

		err = btr_cur_optimistic_update(
			flags, cursor, offsets, offsets_heap, update, 0, thr,
			thr_get_trx(thr)->id, mtr);
		switch (err) {
		case DB_OVERFLOW:
		case DB_UNDERFLOW:
		case DB_ZIP_OVERFLOW:
			err = DB_FAIL;
		default:
			break;
		}
	} else {
		if (buf_LRU_buf_pool_running_out()) {

			return(DB_LOCK_TABLE_FULL);

		}

		big_rec_t*	big_rec	= NULL;

		err = btr_cur_pessimistic_update(
			flags | BTR_KEEP_POS_FLAG,
			cursor, offsets, offsets_heap, heap,
			&big_rec, update, 0, thr, thr_get_trx(thr)->id, mtr);

		if (big_rec) {
			ut_a(err == DB_SUCCESS);

			DEBUG_SYNC_C("before_row_ins_upd_extern");
			err = btr_store_big_rec_extern_fields(
				pcur, update, *offsets, big_rec, mtr,
				BTR_STORE_INSERT_UPDATE);
			DEBUG_SYNC_C("after_row_ins_upd_extern");
			dtuple_big_rec_free(big_rec);
		}
	}

	return(err);
}

/*********************************************************************//**
Returns TRUE if in a cascaded update/delete an ancestor node of node
updates (not DELETE, but UPDATE) table.
@return TRUE if an ancestor updates table */
static
ibool
row_ins_cascade_ancestor_updates_table(
/*===================================*/
	que_node_t*	node,	/*!< in: node in a query graph */
	dict_table_t*	table)	/*!< in: table */
{
	que_node_t*	parent;

	for (parent = que_node_get_parent(node);
	     que_node_get_type(parent) == QUE_NODE_UPDATE;
	     parent = que_node_get_parent(parent)) {

		upd_node_t*	upd_node;

		upd_node = static_cast<upd_node_t*>(parent);

		if (upd_node->table == table && upd_node->is_delete == FALSE) {

			return(TRUE);
		}
	}

	return(FALSE);
}

/*********************************************************************//**
Returns the number of ancestor UPDATE or DELETE nodes of a
cascaded update/delete node.
@return number of ancestors */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
ulint
row_ins_cascade_n_ancestors(
/*========================*/
	que_node_t*	node)	/*!< in: node in a query graph */
{
	que_node_t*	parent;
	ulint		n_ancestors = 0;

	for (parent = que_node_get_parent(node);
	     que_node_get_type(parent) == QUE_NODE_UPDATE;
	     parent = que_node_get_parent(parent)) {

		n_ancestors++;
	}

	return(n_ancestors);
}

/******************************************************************//**
Calculates the update vector cascade->update for a child table in
a cascaded update.
@return number of fields in the calculated update vector; the value
can also be 0 if no foreign key fields changed; the returned value is
ULINT_UNDEFINED if the column type in the child table is too short to
fit the new value in the parent table: that means the update fails */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
ulint
row_ins_cascade_calc_update_vec(
/*============================*/
	upd_node_t*	node,		/*!< in: update node of the parent
					table */
	dict_foreign_t*	foreign,	/*!< in: foreign key constraint whose
					type is != 0 */
	mem_heap_t*	heap,		/*!< in: memory heap to use as
					temporary storage */
	trx_t*		trx,		/*!< in: update transaction */
	ibool*		fts_col_affected,
					/*!< out: is FTS column affected */
	upd_node_t*	cascade)	/*!< in: cascade update node */
{
	dict_table_t*	table		= foreign->foreign_table;
	dict_index_t*	index		= foreign->foreign_index;
	upd_t*		update;
	dict_table_t*	parent_table;
	dict_index_t*	parent_index;
	upd_t*		parent_update;
	ulint		n_fields_updated;
	ulint		parent_field_no;
	ulint		i;
	ulint		j;
	ibool		doc_id_updated = FALSE;
	ulint		doc_id_pos = 0;
	doc_id_t	new_doc_id = FTS_NULL_DOC_ID;
	ulint		prefix_col;

	ut_a(node);
	ut_a(foreign);
	ut_a(cascade);
	ut_a(table);
	ut_a(index);

	/* Calculate the appropriate update vector which will set the fields
	in the child index record to the same value (possibly padded with
	spaces if the column is a fixed length CHAR or FIXBINARY column) as
	the referenced index record will get in the update. */

	parent_table = node->table;
	ut_a(parent_table == foreign->referenced_table);
	parent_index = foreign->referenced_index;
	parent_update = node->update;

	update = cascade->update;

	update->info_bits = 0;

	n_fields_updated = 0;

	*fts_col_affected = FALSE;

	if (table->fts) {
		doc_id_pos = dict_table_get_nth_col_pos(
			table, table->fts->doc_col, &prefix_col);
	}

	for (i = 0; i < foreign->n_fields; i++) {

		parent_field_no = dict_table_get_nth_col_pos(
			parent_table,
			dict_index_get_nth_col_no(parent_index, i),
			&prefix_col);

		for (j = 0; j < parent_update->n_fields; j++) {
			const upd_field_t*	parent_ufield
				= &parent_update->fields[j];

			if (parent_ufield->field_no == parent_field_no) {

				ulint			min_size;
				const dict_col_t*	col;
				ulint			ufield_len;
				upd_field_t*		ufield;

				col = dict_index_get_nth_col(index, i);

				/* A field in the parent index record is
				updated. Let us make the update vector
				field for the child table. */

				ufield = update->fields + n_fields_updated;

				ufield->field_no
					= dict_table_get_nth_col_pos(
					table, dict_col_get_no(col),
					&prefix_col);

				ufield->orig_len = 0;
				ufield->exp = NULL;

				ufield->new_val = parent_ufield->new_val;
				ufield_len = dfield_get_len(&ufield->new_val);

				/* Clear the "external storage" flag */
				dfield_set_len(&ufield->new_val, ufield_len);

				/* Do not allow a NOT NULL column to be
				updated as NULL */

				if (dfield_is_null(&ufield->new_val)
				    && (col->prtype & DATA_NOT_NULL)) {

					return(ULINT_UNDEFINED);
				}

				/* If the new value would not fit in the
				column, do not allow the update */

				if (!dfield_is_null(&ufield->new_val)
				    && dtype_get_at_most_n_mbchars(
					col->prtype, col->mbminmaxlen,
					col->len,
					ufield_len,
					static_cast<char*>(
						dfield_get_data(
							&ufield->new_val)))
				    < ufield_len) {

					return(ULINT_UNDEFINED);
				}

				/* If the parent column type has a different
				length than the child column type, we may
				need to pad with spaces the new value of the
				child column */

				min_size = dict_col_get_min_size(col);

				/* Because UNIV_SQL_NULL (the marker
				of SQL NULL values) exceeds all possible
				values of min_size, the test below will
				not hold for SQL NULL columns. */

				if (min_size > ufield_len) {

					byte*	pad;
					ulint	pad_len;
					byte*	padded_data;
					ulint	mbminlen;

					padded_data = static_cast<byte*>(
						mem_heap_alloc(
							heap, min_size));

					pad = padded_data + ufield_len;
					pad_len = min_size - ufield_len;

					memcpy(padded_data,
					       dfield_get_data(&ufield
							       ->new_val),
					       ufield_len);

					mbminlen = dict_col_get_mbminlen(col);

					ut_ad(!(ufield_len % mbminlen));
					ut_ad(!(min_size % mbminlen));

					if (mbminlen == 1
					    && dtype_get_charset_coll(
						    col->prtype)
					    == DATA_MYSQL_BINARY_CHARSET_COLL) {
						/* Do not pad BINARY columns */
						return(ULINT_UNDEFINED);
					}

					row_mysql_pad_col(mbminlen,
							  pad, pad_len);
					dfield_set_data(&ufield->new_val,
							padded_data, min_size);
				}

				/* Check whether the current column has
				FTS index on it */
				if (table->fts
				    && dict_table_is_fts_column(
					table->fts->indexes,
					dict_col_get_no(col),
					dict_col_is_virtual(col))
				    != ULINT_UNDEFINED) {
					*fts_col_affected = TRUE;
				}

				/* If Doc ID is updated, check whether the
				Doc ID is valid */
				if (table->fts
				    && ufield->field_no == doc_id_pos) {
					doc_id_t	n_doc_id;

					n_doc_id =
						table->fts->cache->next_doc_id;

					new_doc_id = fts_read_doc_id(
						static_cast<const byte*>(
							dfield_get_data(
							&ufield->new_val)));

					if (new_doc_id <= 0) {
						ib::error() << "FTS Doc ID"
							" must be larger than"
							" 0";
						return(ULINT_UNDEFINED);
					}

					if (new_doc_id < n_doc_id) {
						ib::error() << "FTS Doc ID"
							" must be larger than "
							<< n_doc_id - 1
							<< " for table "
							<< table->name;

						return(ULINT_UNDEFINED);
					}

					*fts_col_affected = TRUE;
					doc_id_updated = TRUE;
				}

				n_fields_updated++;
			}
		}
	}

	/* Generate a new Doc ID if FTS index columns get updated */
	if (table->fts && *fts_col_affected) {
		if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) {
			doc_id_t	doc_id;
			doc_id_t*	next_doc_id;
			upd_field_t*	ufield;

			next_doc_id = static_cast<doc_id_t*>(mem_heap_alloc(
				heap, sizeof(doc_id_t)));

			ut_ad(!doc_id_updated);
			ufield = update->fields + n_fields_updated;
			fts_get_next_doc_id(table, next_doc_id);
			doc_id = fts_update_doc_id(table, ufield, next_doc_id);
			n_fields_updated++;
			cascade->fts_next_doc_id = doc_id;
		} else {
			if (doc_id_updated) {
				ut_ad(new_doc_id);
				cascade->fts_next_doc_id = new_doc_id;
			} else {
				cascade->fts_next_doc_id = FTS_NULL_DOC_ID;
				ib::error() << "FTS Doc ID must be updated"
					" along with FTS indexed column for"
					" table " << table->name;
				return(ULINT_UNDEFINED);
			}
		}
	}

	update->n_fields = n_fields_updated;

	return(n_fields_updated);
}

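/* Worked example (illustrative only): if the referenced parent column is
CHAR(8) and the child column is CHAR(10) in a single-byte character set,
dict_col_get_min_size() of the child column is 10, so a cascaded new value
of 8 bytes is copied into a 10-byte buffer and row_mysql_pad_col() appends
two trailing spaces, keeping the fixed length that the child column type
expects. */
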
/*********************************************************************//**
Set detailed error message associated with foreign key errors for
the given transaction. */
static
void
row_ins_set_detailed(
/*=================*/
	trx_t*		trx,		/*!< in: transaction */
	dict_foreign_t*	foreign)	/*!< in: foreign key constraint */
{
	ut_ad(!srv_read_only_mode);

	mutex_enter(&srv_misc_tmpfile_mutex);
	rewind(srv_misc_tmpfile);

	if (os_file_set_eof(srv_misc_tmpfile)) {
		ut_print_name(srv_misc_tmpfile, trx,
			      foreign->foreign_table_name);
		std::string fk_str = dict_print_info_on_foreign_key_in_create_format(
			trx, foreign, FALSE);
		fputs(fk_str.c_str(), srv_misc_tmpfile);
		trx_set_detailed_error_from_file(trx, srv_misc_tmpfile);
	} else {
		trx_set_detailed_error(trx, "temp file operation failed");
	}

	mutex_exit(&srv_misc_tmpfile_mutex);
}

/*********************************************************************//**
Acquires dict_foreign_err_mutex, rewinds dict_foreign_err_file
and displays information about the given transaction.
The caller must release dict_foreign_err_mutex. */
static
void
row_ins_foreign_trx_print(
/*======================*/
	trx_t*	trx)	/*!< in: transaction */
{
	ulint	n_rec_locks;
	ulint	n_trx_locks;
	ulint	heap_size;

	if (srv_read_only_mode) {
		return;
	}

	lock_mutex_enter();
	n_rec_locks = lock_number_of_rows_locked(&trx->lock);
	n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
	heap_size = mem_heap_get_size(trx->lock.lock_heap);
	lock_mutex_exit();

	trx_sys_mutex_enter();

	mutex_enter(&dict_foreign_err_mutex);
	rewind(dict_foreign_err_file);
	ut_print_timestamp(dict_foreign_err_file);
	fputs(" Transaction:\n", dict_foreign_err_file);

	trx_print_low(dict_foreign_err_file, trx, 600,
		      n_rec_locks, n_trx_locks, heap_size);

	trx_sys_mutex_exit();

	ut_ad(mutex_own(&dict_foreign_err_mutex));
}

/*********************************************************************//**
Reports a foreign key error associated with an update or a delete of a
parent table index entry. */
static
void
row_ins_foreign_report_err(
/*=======================*/
	const char*	errstr,		/*!< in: error string from the viewpoint
					of the parent table */
	que_thr_t*	thr,		/*!< in: query thread whose run_node
					is an update node */
	dict_foreign_t*	foreign,	/*!< in: foreign key constraint */
	const rec_t*	rec,		/*!< in: a matching index record in the
					child table */
	const dtuple_t*	entry)		/*!< in: index entry in the parent
					table */
{
	std::string fk_str;

	if (srv_read_only_mode) {
		return;
	}

	FILE*	ef = dict_foreign_err_file;
	trx_t*	trx = thr_get_trx(thr);

	row_ins_set_detailed(trx, foreign);

	row_ins_foreign_trx_print(trx);

	fputs("Foreign key constraint fails for table ", ef);
	ut_print_name(ef, trx, foreign->foreign_table_name);
	fputs(":\n", ef);
	fk_str = dict_print_info_on_foreign_key_in_create_format(trx, foreign,
								 TRUE);
	fputs(fk_str.c_str(), ef);
	putc('\n', ef);
	fputs(errstr, ef);
	fprintf(ef, " in parent table, in index %s",
		foreign->referenced_index->name());
	if (entry) {
		fputs(" tuple:\n", ef);
		dtuple_print(ef, entry);
	}
	fputs("\nBut in child table ", ef);
	ut_print_name(ef, trx, foreign->foreign_table_name);
	fprintf(ef, ", in index %s", foreign->foreign_index->name());
	if (rec) {
		fputs(", there is a record:\n", ef);
		rec_print(ef, rec, foreign->foreign_index);
	} else {
		fputs(", the record is not available\n", ef);
	}
	putc('\n', ef);

	mutex_exit(&dict_foreign_err_mutex);
}

/*********************************************************************//**
Reports a foreign key error to dict_foreign_err_file when we are trying
to add an index entry to a child table. Note that the adding may be the result
of an update, too. */
static
void
row_ins_foreign_report_add_err(
/*===========================*/
	trx_t*		trx,		/*!< in: transaction */
	dict_foreign_t*	foreign,	/*!< in: foreign key constraint */
	const rec_t*	rec,		/*!< in: a record in the parent table:
					it does not match entry because we
					have an error! */
	const dtuple_t*	entry)		/*!< in: index entry to insert in the
					child table */
{
	std::string fk_str;

	if (srv_read_only_mode) {
		return;
	}

	FILE*	ef = dict_foreign_err_file;

	row_ins_set_detailed(trx, foreign);

	row_ins_foreign_trx_print(trx);

	fputs("Foreign key constraint fails for table ", ef);
	ut_print_name(ef, trx, foreign->foreign_table_name);
	fputs(":\n", ef);
	fk_str = dict_print_info_on_foreign_key_in_create_format(trx, foreign,
								 TRUE);
	fputs(fk_str.c_str(), ef);
	fprintf(ef, " in parent table, in index %s",
		foreign->foreign_index->name());
	if (entry) {
		fputs(" tuple:\n", ef);
		/* TODO: DB_TRX_ID and DB_ROLL_PTR may be uninitialized.
		It would be better to only display the user columns. */
		dtuple_print(ef, entry);
	}
	fputs("\nBut in parent table ", ef);
	ut_print_name(ef, trx, foreign->referenced_table_name);
	fprintf(ef, ", in index %s,\n"
		"the closest match we can find is record:\n",
		foreign->referenced_index->name());
	if (rec && page_rec_is_supremum(rec)) {
		/* If the cursor ended on a supremum record, it is better
		to report the previous record in the error message, so that
		the user gets a more descriptive error message. */
		rec = page_rec_get_prev_const(rec);
	}

	if (rec) {
		rec_print(ef, rec, foreign->referenced_index);
	}
	putc('\n', ef);

	mutex_exit(&dict_foreign_err_mutex);
}

/*********************************************************************//**
Invalidate the query cache for the given table. */
static
void
row_ins_invalidate_query_cache(
/*===========================*/
	que_thr_t*	thr,	/*!< in: query thread whose run_node
				is an update node */
	const char*	name)	/*!< in: table name prefixed with
				database name and a '/' character */
{
	ulint	len = strlen(name) + 1;
	innobase_invalidate_query_cache(thr_get_trx(thr), name, len);
}

/** Fill virtual column information in cascade node for the child table.
@param[out]	cascade		child update node
@param[in]	rec		clustered rec of child table
@param[in]	index		clustered index of child table
@param[in]	node		parent update node
@param[in]	foreign		foreign key information
@param[out]	err		error code. */
static
void
row_ins_foreign_fill_virtual(
	upd_node_t*	cascade,
	const rec_t*	rec,
	dict_index_t*	index,
	upd_node_t*	node,
	dict_foreign_t*	foreign,
	dberr_t*	err)
{
	THD*		thd = current_thd;
	row_ext_t*	ext;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	rec_offs_init(offsets_);
	const ulint*	offsets =
		rec_get_offsets(rec, index, offsets_,
				ULINT_UNDEFINED, &cascade->heap);
	mem_heap_t*	v_heap = NULL;
	upd_t*		update = cascade->update;
	ulint		n_v_fld = index->table->n_v_def;
	ulint		n_diff;
	upd_field_t*	upd_field;
	dict_vcol_set*	v_cols = foreign->v_cols;
	update->old_vrow = row_build(
		ROW_COPY_POINTERS, index, rec,
		offsets, index->table, NULL, NULL,
		&ext, cascade->heap);
	n_diff = update->n_fields;

	update->n_fields += n_v_fld;

	if (index->table->vc_templ == NULL) {
		/** This can occur when there is a cascading
		delete or update after restart. */
		innobase_init_vc_templ(index->table);
	}

	for (ulint i = 0; i < n_v_fld; i++) {

		dict_v_col_t*	col = dict_table_get_nth_v_col(
			index->table, i);

		dict_vcol_set::iterator it = v_cols->find(col);

		if (it == v_cols->end()) {
			continue;
		}

		dfield_t*	vfield = innobase_get_computed_value(
			update->old_vrow, col, index,
			&v_heap, update->heap, NULL, thd, NULL,
			NULL, NULL, NULL);

		if (vfield == NULL) {
			*err = DB_COMPUTE_VALUE_FAILED;
			goto func_exit;
		}

		upd_field = upd_get_nth_field(update, n_diff);

		upd_field->old_v_val = static_cast<dfield_t*>(
			mem_heap_alloc(cascade->heap,
				       sizeof *upd_field->old_v_val));

		dfield_copy(upd_field->old_v_val, vfield);

		upd_field_set_v_field_no(upd_field, i, index);

		if (node->is_delete
		    ? (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL)
		    : (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL)) {

			dfield_set_null(&upd_field->new_val);
		}

		if (!node->is_delete
		    && (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) {

			dfield_t*	new_vfield = innobase_get_computed_value(
				update->old_vrow, col, index,
				&v_heap, update->heap, NULL, thd,
				NULL, NULL, node->update, foreign);

			if (new_vfield == NULL) {
				*err = DB_COMPUTE_VALUE_FAILED;
				goto func_exit;
			}

			dfield_copy(&(upd_field->new_val), new_vfield);
		}

		n_diff++;
	}

	update->n_fields = n_diff;
	*err = DB_SUCCESS;

func_exit:
	if (v_heap) {
		mem_heap_free(v_heap);
	}
}

#ifdef WITH_WSREP
dberr_t wsrep_append_foreign_key(trx_t *trx,
			dict_foreign_t*	foreign,
			const rec_t*	clust_rec,
			dict_index_t*	clust_index,
			ibool		referenced,
			ibool		shared);
#endif /* WITH_WSREP */

/*********************************************************************//**
Perform referential actions or checks when a parent row is deleted or updated
and the constraint had an ON DELETE or ON UPDATE condition which was not
RESTRICT.
@return DB_SUCCESS, DB_LOCK_WAIT, or error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
row_ins_foreign_check_on_constraint(
/*================================*/
	que_thr_t*	thr,		/*!< in: query thread whose run_node
					is an update node */
	dict_foreign_t*	foreign,	/*!< in: foreign key constraint whose
					type is != 0 */
	btr_pcur_t*	pcur,		/*!< in: cursor placed on a matching
					index record in the child table */
	dtuple_t*	entry,		/*!< in: index entry in the parent
					table */
	mtr_t*		mtr)		/*!< in: mtr holding the latch of pcur
					page */
{
	upd_node_t*	node;
	upd_node_t*	cascade;
	dict_table_t*	table = foreign->foreign_table;
	dict_index_t*	index;
	dict_index_t*	clust_index;
	dtuple_t*	ref;
	const rec_t*	rec;
	const rec_t*	clust_rec;
	const buf_block_t* clust_block;
	upd_t*		update;
	ulint		n_to_update;
	dberr_t		err;
	ulint		i;
	trx_t*		trx;
	mem_heap_t*	tmp_heap = NULL;
	doc_id_t	doc_id = FTS_NULL_DOC_ID;
	ibool		fts_col_affacted = FALSE;

	DBUG_ENTER("row_ins_foreign_check_on_constraint");
	ut_a(thr);
	ut_a(foreign);
	ut_a(pcur);
	ut_a(mtr);

	trx = thr_get_trx(thr);

	/* Since we are going to delete or update a row, we have to invalidate
	the MySQL query cache for the table. A deadlock of threads is not
	possible here because the caller of this function does not hold any
	latches with the mutex rank above the lock_sys_t::mutex. The query
	cache mutex has a rank just above the lock_sys_t::mutex. */

	row_ins_invalidate_query_cache(thr, table->name.m_name);

	node = static_cast<upd_node_t*>(thr->run_node);

	if (node->is_delete && 0 == (foreign->type
				     & (DICT_FOREIGN_ON_DELETE_CASCADE
					| DICT_FOREIGN_ON_DELETE_SET_NULL))) {

		row_ins_foreign_report_err("Trying to delete",
					   thr, foreign,
					   btr_pcur_get_rec(pcur), entry);

		DBUG_RETURN(DB_ROW_IS_REFERENCED);
	}

	if (!node->is_delete && 0 == (foreign->type
				      & (DICT_FOREIGN_ON_UPDATE_CASCADE
					 | DICT_FOREIGN_ON_UPDATE_SET_NULL))) {

		/* This is an UPDATE */

		row_ins_foreign_report_err("Trying to update",
					   thr, foreign,
					   btr_pcur_get_rec(pcur), entry);

		DBUG_RETURN(DB_ROW_IS_REFERENCED);
	}

	cascade = row_create_update_node_for_mysql(table, node->cascade_heap);
	que_node_set_parent(cascade, node);

	/* For the cascaded operation, all the update nodes are allocated in
	the same heap. All the update nodes will point to the same heap.
	This heap is owned by the first update node. And it must be freed
	only in the first update node */
	cascade->cascade_heap = node->cascade_heap;
	cascade->cascade_upd_nodes = node->cascade_upd_nodes;
	cascade->new_upd_nodes = node->new_upd_nodes;
	cascade->processed_cascades = node->processed_cascades;

	cascade->table = table;

	cascade->foreign = foreign;

	if (node->is_delete
	    && (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE)) {
		cascade->is_delete = TRUE;
	} else {
		cascade->is_delete = FALSE;

		if (foreign->n_fields > cascade->update_n_fields) {
			/* We have to make the update vector longer */

			cascade->update = upd_create(foreign->n_fields,
						     node->cascade_heap);
			cascade->update_n_fields = foreign->n_fields;
		}
	}

	/* We do not allow cyclic cascaded updating (DELETE is allowed,
	but not UPDATE) of the same table, as this can lead to an infinite
	cycle. Check that we are not updating the same table which is
	already being modified in this cascade chain. We have to check
	this also because the modification of the indexes of a 'parent'
	table may still be incomplete, and we must avoid seeing the indexes
	of the parent table in an inconsistent state! */

	if (!cascade->is_delete
	    && row_ins_cascade_ancestor_updates_table(cascade, table)) {

		/* We do not know if this would break foreign key
		constraints, but play safe and return an error */

		err = DB_ROW_IS_REFERENCED;

		row_ins_foreign_report_err(
			"Trying an update, possibly causing a cyclic"
			" cascaded update\n"
			"in the child table,", thr, foreign,
			btr_pcur_get_rec(pcur), entry);

		goto nonstandard_exit_func;
	}

	if (row_ins_cascade_n_ancestors(cascade) >= FK_MAX_CASCADE_DEL) {
		err = DB_FOREIGN_EXCEED_MAX_CASCADE;

		row_ins_foreign_report_err(
			"Trying a too deep cascaded delete or update\n",
			thr, foreign, btr_pcur_get_rec(pcur), entry);

		goto nonstandard_exit_func;
	}

	index = btr_pcur_get_btr_cur(pcur)->index;

	ut_a(index == foreign->foreign_index);

	rec = btr_pcur_get_rec(pcur);

	tmp_heap = mem_heap_create(256);

	if (dict_index_is_clust(index)) {
		/* pcur is already positioned in the clustered index of
		the child table */

		clust_index = index;
		clust_rec = rec;
		clust_block = btr_pcur_get_block(pcur);
	} else {
		/* We have to look for the record in the clustered index
		in the child table */

		clust_index = dict_table_get_first_index(table);

		ref = row_build_row_ref(ROW_COPY_POINTERS, index, rec,
					tmp_heap);
		btr_pcur_open_with_no_init(clust_index, ref,
					   PAGE_CUR_LE, BTR_SEARCH_LEAF,
					   cascade->pcur, 0, mtr);

		clust_rec = btr_pcur_get_rec(cascade->pcur);
		clust_block = btr_pcur_get_block(cascade->pcur);

		if (!page_rec_is_user_rec(clust_rec)
		    || btr_pcur_get_low_match(cascade->pcur)
		    < dict_index_get_n_unique(clust_index)) {

			ib::error() << "In cascade of a foreign key op index "
				<< index->name
				<< " of table " << index->table->name;

			fputs("InnoDB: record ", stderr);
			rec_print(stderr, rec, index);
			fputs("\n"
			      "InnoDB: clustered record ", stderr);
			rec_print(stderr, clust_rec, clust_index);
			fputs("\n"
			      "InnoDB: Submit a detailed bug report to"
			      " http://bugs.mysql.com\n", stderr);
			ut_ad(0);
			err = DB_SUCCESS;

			goto nonstandard_exit_func;
		}
	}

	/* Set an X-lock on the row to delete or update in the child table */

	err = lock_table(0, table, LOCK_IX, thr);

	if (err == DB_SUCCESS) {
		/* Here it suffices to use a LOCK_REC_NOT_GAP type lock;
		we already have a normal shared lock on the appropriate
		gap if the search criterion was not unique */

		err = lock_clust_rec_read_check_and_lock_alt(
			0, clust_block, clust_rec, clust_index,
			LOCK_X, LOCK_REC_NOT_GAP, thr);
	}

	if (err != DB_SUCCESS) {

		goto nonstandard_exit_func;
	}

	if (rec_get_deleted_flag(clust_rec, dict_table_is_comp(table))) {
		/* This can happen if there is a circular reference of
		rows such that cascading delete comes to delete a row
		already in the process of being delete marked */
		err = DB_SUCCESS;

		goto nonstandard_exit_func;
	}

	if (table->fts) {
		doc_id = fts_get_doc_id_from_rec(table, clust_rec,
						 clust_index, tmp_heap);
	}

	if (node->is_delete
	    ? (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL)
	    : (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL)) {

		/* Build the appropriate update vector which sets
		foreign->n_fields first fields in rec to SQL NULL */
		if (table->fts) {

			/* For the clause ON DELETE SET NULL, the cascade
			operation is actually an update operation with the new
			values being null. For FTS, this means that the old
			values are deleted and no new values are added. */
			cascade->fts_next_doc_id = FTS_NULL_DOC_ID;
		}

		update = cascade->update;

		update->info_bits = 0;
		update->n_fields = foreign->n_fields;
		UNIV_MEM_INVALID(update->fields,
				 update->n_fields * sizeof *update->fields);

		for (i = 0; i < foreign->n_fields; i++) {
			upd_field_t*	ufield = &update->fields[i];
			ulint		col_no = dict_index_get_nth_col_no(
						index, i);
			ulint		prefix_col;

			ufield->field_no = dict_table_get_nth_col_pos(
				table, col_no, &prefix_col);
			dict_col_t*	col = dict_table_get_nth_col(
				table, col_no);
			dict_col_copy_type(col, dfield_get_type(&ufield->new_val));

			ufield->orig_len = 0;
			ufield->exp = NULL;
			dfield_set_null(&ufield->new_val);

			if (table->fts && dict_table_is_fts_column(
				    table->fts->indexes,
				    dict_index_get_nth_col_no(index, i),
				    dict_col_is_virtual(
					    dict_index_get_nth_col(index, i)))
			    != ULINT_UNDEFINED) {
				fts_col_affacted = TRUE;
			}
		}

		if (fts_col_affacted) {
			cascade->fts_doc_id = doc_id;
		}

		if (foreign->v_cols != NULL
		    && foreign->v_cols->size() > 0) {
			row_ins_foreign_fill_virtual(
				cascade, clust_rec, clust_index,
				node, foreign, &err);

			if (err != DB_SUCCESS) {
				goto nonstandard_exit_func;
			}
		}
	} else if (table->fts && cascade->is_delete) {
		/* DICT_FOREIGN_ON_DELETE_CASCADE case */
		for (i = 0; i < foreign->n_fields; i++) {
			if (table->fts && dict_table_is_fts_column(
				    table->fts->indexes,
				    dict_index_get_nth_col_no(index, i),
				    dict_col_is_virtual(
					    dict_index_get_nth_col(index, i)))
			    != ULINT_UNDEFINED) {
				fts_col_affacted = TRUE;
			}
		}

		if (fts_col_affacted) {
			cascade->fts_doc_id = doc_id;
		}
	}

	if (!node->is_delete
	    && (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) {

		/* Build the appropriate update vector which sets changing
		foreign->n_fields first fields in rec to new values */

		n_to_update = row_ins_cascade_calc_update_vec(
			node, foreign, cascade->cascade_heap,
			trx, &fts_col_affacted, cascade);

		if (foreign->v_cols != NULL
		    && foreign->v_cols->size() > 0) {
			row_ins_foreign_fill_virtual(
				cascade, clust_rec, clust_index,
				node, foreign, &err);

			if (err != DB_SUCCESS) {
				goto nonstandard_exit_func;
			}
		}

		if (n_to_update == ULINT_UNDEFINED) {
			err = DB_ROW_IS_REFERENCED;

			row_ins_foreign_report_err(
				"Trying a cascaded update where the"
				" updated value in the child\n"
				"table would not fit in the length"
				" of the column, or the value would\n"
				"be NULL and the column is"
				" declared as not NULL in the child table,",
				thr, foreign, btr_pcur_get_rec(pcur), entry);

			goto nonstandard_exit_func;
		}

		if (cascade->update->n_fields == 0) {

			/* The update does not change any columns referred
			to in this foreign key constraint: no need to do
			anything */

			err = DB_SUCCESS;

			goto nonstandard_exit_func;
		}

		/* Mark the old Doc ID as deleted */
		if (fts_col_affacted) {
			ut_ad(table->fts);
			cascade->fts_doc_id = doc_id;
		}
	}

	/* Store pcur position and initialize or store the cascade node
	pcur stored position */

	btr_pcur_store_position(pcur, mtr);

	if (index == clust_index) {
		btr_pcur_copy_stored_position(cascade->pcur, pcur);
	} else {
		btr_pcur_store_position(cascade->pcur, mtr);
	}

	mtr_commit(mtr);

	ut_a(cascade->pcur->rel_pos == BTR_PCUR_ON);

	cascade->state = UPD_NODE_UPDATE_CLUSTERED;

#ifdef WITH_WSREP
	err = wsrep_append_foreign_key(
		thr_get_trx(thr),
		foreign,
		clust_rec,
		clust_index,
		FALSE, FALSE);
	if (err != DB_SUCCESS) {
		fprintf(stderr,
			"WSREP: foreign key append failed: %d\n", err);
	} else
#endif /* WITH_WSREP */
	node->new_upd_nodes->push_back(cascade);

	my_atomic_addlint(&table->n_foreign_key_checks_running, 1);

	ut_ad(foreign->foreign_table->n_foreign_key_checks_running > 0);

	/* Release the data dictionary latch for a while, so that we do not
	starve other threads from doing CREATE TABLE etc. if we have a huge
	cascaded operation running. The counter n_foreign_key_checks_running
	will prevent other users from dropping or ALTERing the table when we
	release the latch. */

	row_mysql_unfreeze_data_dictionary(thr_get_trx(thr));

	DEBUG_SYNC_C("innodb_dml_cascade_dict_unfreeze");

	row_mysql_freeze_data_dictionary(thr_get_trx(thr));

	mtr_start(mtr);

	/* Restore pcur position */

	btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr);

	if (tmp_heap) {
		mem_heap_free(tmp_heap);
	}

	DBUG_RETURN(err);

nonstandard_exit_func:
	que_graph_free_recursive(cascade);

	if (tmp_heap) {
		mem_heap_free(tmp_heap);
	}

	btr_pcur_store_position(pcur, mtr);

	mtr_commit(mtr);
	mtr_start(mtr);

	btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr);

	DBUG_RETURN(err);
}

/*********************************************************************//**
Sets a shared lock on a record. Used in locking possible duplicate key
records and also in checking foreign key constraints.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */
static
dberr_t
row_ins_set_shared_rec_lock(
/*========================*/
	ulint			type,	/*!< in: LOCK_ORDINARY, LOCK_GAP, or
					LOCK_REC_NOT_GAP type lock */
	const buf_block_t*	block,	/*!< in: buffer block of rec */
	const rec_t*		rec,	/*!< in: record */
	dict_index_t*		index,	/*!< in: index */
	const ulint*		offsets,/*!< in: rec_get_offsets(rec, index) */
	que_thr_t*		thr)	/*!< in: query thread */
{
	dberr_t	err;

	ut_ad(rec_offs_validate(rec, index, offsets));

	if (dict_index_is_clust(index)) {
		err = lock_clust_rec_read_check_and_lock(
			0, block, rec, index, offsets, LOCK_S, type, thr);
	} else {
		err = lock_sec_rec_read_check_and_lock(
			0, block, rec, index, offsets, LOCK_S, type, thr);
	}

	return(err);
}

/*********************************************************************//**
|
|
|
|
Sets an exclusive lock on a record. Used in locking possible duplicate key
|
|
|
|
records.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */
|
2014-02-26 19:11:54 +01:00
|
|
|
static
|
|
|
|
dberr_t
|
|
|
|
row_ins_set_exclusive_rec_lock(
|
|
|
|
/*===========================*/
|
|
|
|
ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or
|
|
|
|
LOCK_REC_NOT_GAP type lock */
|
|
|
|
const buf_block_t* block, /*!< in: buffer block of rec */
|
|
|
|
const rec_t* rec, /*!< in: record */
|
|
|
|
dict_index_t* index, /*!< in: index */
|
|
|
|
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
|
|
|
|
que_thr_t* thr) /*!< in: query thread */
|
|
|
|
{
|
|
|
|
dberr_t err;
|
|
|
|
|
|
|
|
ut_ad(rec_offs_validate(rec, index, offsets));
|
|
|
|
|
|
|
|
if (dict_index_is_clust(index)) {
|
|
|
|
err = lock_clust_rec_read_check_and_lock(
|
|
|
|
0, block, rec, index, offsets, LOCK_X, type, thr);
|
|
|
|
} else {
|
|
|
|
err = lock_sec_rec_read_check_and_lock(
|
|
|
|
0, block, rec, index, offsets, LOCK_X, type, thr);
|
|
|
|
}
|
|
|
|
|
|
|
|
return(err);
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
/* Decrement a counter in the destructor. */
|
|
|
|
class ib_dec_in_dtor {
|
|
|
|
public:
|
|
|
|
ib_dec_in_dtor(ulint& c): counter(c) {}
|
|
|
|
~ib_dec_in_dtor() {
|
2016-09-09 15:05:59 +04:00
|
|
|
my_atomic_addlint(&counter, -1);
|
2016-08-12 11:17:45 +03:00
|
|
|
}
|
|
|
|
private:
|
|
|
|
ulint& counter;
|
|
|
|
};
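/* Added note (illustrative, not from the original source): ib_dec_in_dtor
is an RAII guard, so a hypothetical use looks like

	{
		ib_dec_in_dtor	dec(check_table->n_foreign_key_checks_running);
		my_atomic_addlint(&check_table->n_foreign_key_checks_running, 1);
		... code that may return early or wait for a lock ...
	}	// the counter is atomically decremented on every exit path

which mirrors how row_ins_check_foreign_constraint() below keeps the
counter balanced around a lock wait. */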
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
/***************************************************************//**
|
|
|
|
Checks if foreign key constraint fails for an index entry. Sets shared locks
|
|
|
|
which lock either the success or the failure of the constraint. NOTE that
|
|
|
|
the caller must have a shared latch on dict_operation_lock.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return DB_SUCCESS, DB_NO_REFERENCED_ROW, or DB_ROW_IS_REFERENCED */
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_check_foreign_constraint(
|
|
|
|
/*=============================*/
|
|
|
|
ibool check_ref,/*!< in: TRUE if we want to check that
|
|
|
|
the referenced table is ok, FALSE if we
|
|
|
|
want to check the foreign key table */
|
|
|
|
dict_foreign_t* foreign,/*!< in: foreign constraint; NOTE that the
|
|
|
|
tables mentioned in it must be in the
|
|
|
|
dictionary cache if they exist at all */
|
|
|
|
dict_table_t* table, /*!< in: if check_ref is TRUE, then the foreign
|
|
|
|
table, else the referenced table */
|
|
|
|
dtuple_t* entry, /*!< in: index entry for index */
|
|
|
|
que_thr_t* thr) /*!< in: query thread */
|
|
|
|
{
|
|
|
|
dberr_t err;
|
|
|
|
upd_node_t* upd_node;
|
|
|
|
dict_table_t* check_table;
|
|
|
|
dict_index_t* check_index;
|
|
|
|
ulint n_fields_cmp;
|
|
|
|
btr_pcur_t pcur;
|
|
|
|
int cmp;
|
|
|
|
mtr_t mtr;
|
|
|
|
trx_t* trx = thr_get_trx(thr);
|
|
|
|
mem_heap_t* heap = NULL;
|
|
|
|
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
|
|
|
ulint* offsets = offsets_;
|
2016-08-12 11:17:45 +03:00
|
|
|
|
2016-11-13 10:31:35 +05:30
|
|
|
bool skip_gap_lock;
|
|
|
|
|
|
|
|
skip_gap_lock = (trx->isolation_level <= TRX_ISO_READ_COMMITTED);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_ENTER("row_ins_check_foreign_constraint");
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
rec_offs_init(offsets_);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_S));
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
err = DB_SUCCESS;
|
|
|
|
|
|
|
|
if (trx->check_foreigns == FALSE) {
|
|
|
|
/* The user has suppressed foreign key checks currently for
|
|
|
|
this session */
|
|
|
|
goto exit_func;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If any of the foreign key fields in entry is SQL NULL, we
|
|
|
|
suppress the foreign key check: this is compatible with Oracle,
|
|
|
|
for example */
|
2016-08-12 11:17:45 +03:00
|
|
|
for (ulint i = 0; i < foreign->n_fields; i++) {
|
|
|
|
if (dfield_is_null(dtuple_get_nth_field(entry, i))) {
|
2014-02-26 19:11:54 +01:00
|
|
|
goto exit_func;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (que_node_get_type(thr->run_node) == QUE_NODE_UPDATE) {
|
|
|
|
upd_node = static_cast<upd_node_t*>(thr->run_node);
|
|
|
|
|
|
|
|
if (!(upd_node->is_delete) && upd_node->foreign == foreign) {
|
|
|
|
/* If a cascaded update is done as defined by a
|
|
|
|
foreign key constraint, do not check that
|
|
|
|
constraint for the child row. In ON UPDATE CASCADE
|
|
|
|
the update of the parent row is only half done when
|
|
|
|
we come here: if we would check the constraint here
|
|
|
|
for the child row it would fail.
|
|
|
|
|
|
|
|
A QUESTION remains: if in the child table there are
|
|
|
|
several constraints which refer to the same parent
|
|
|
|
table, we should merge all updates to the child as
|
|
|
|
one update? And the updates can be contradictory!
|
|
|
|
Currently we just perform the update associated
|
|
|
|
with each foreign key constraint, one after
|
|
|
|
another, and the user has problems predicting in
|
|
|
|
which order they are performed. */
|
|
|
|
|
|
|
|
goto exit_func;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (check_ref) {
|
|
|
|
check_table = foreign->referenced_table;
|
|
|
|
check_index = foreign->referenced_index;
|
|
|
|
} else {
|
|
|
|
check_table = foreign->foreign_table;
|
|
|
|
check_index = foreign->foreign_index;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (check_table == NULL
|
2017-05-06 15:54:31 +03:00
|
|
|
|| !check_table->is_readable()
|
2014-02-26 19:11:54 +01:00
|
|
|
|| check_index == NULL) {
|
|
|
|
|
|
|
|
if (!srv_read_only_mode && check_ref) {
|
|
|
|
FILE* ef = dict_foreign_err_file;
|
2015-12-14 14:34:32 +02:00
|
|
|
std::string fk_str;
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
row_ins_set_detailed(trx, foreign);
|
|
|
|
|
|
|
|
row_ins_foreign_trx_print(trx);
|
|
|
|
|
|
|
|
fputs("Foreign key constraint fails for table ", ef);
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_print_name(ef, trx,
|
2014-02-26 19:11:54 +01:00
|
|
|
foreign->foreign_table_name);
|
|
|
|
fputs(":\n", ef);
|
2015-12-14 14:34:32 +02:00
|
|
|
fk_str = dict_print_info_on_foreign_key_in_create_format(
|
|
|
|
trx, foreign, TRUE);
|
|
|
|
fputs(fk_str.c_str(), ef);
|
2016-08-12 11:17:45 +03:00
|
|
|
fprintf(ef, "\nTrying to add to index %s tuple:\n",
|
|
|
|
foreign->foreign_index->name());
|
2014-02-26 19:11:54 +01:00
|
|
|
dtuple_print(ef, entry);
|
|
|
|
fputs("\nBut the parent table ", ef);
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_print_name(ef, trx,
|
2014-02-26 19:11:54 +01:00
|
|
|
foreign->referenced_table_name);
|
|
|
|
fputs("\nor its .ibd file does"
|
|
|
|
" not currently exist!\n", ef);
|
|
|
|
mutex_exit(&dict_foreign_err_mutex);
|
|
|
|
|
|
|
|
err = DB_NO_REFERENCED_ROW;
|
|
|
|
}
|
|
|
|
|
|
|
|
goto exit_func;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (check_table != table) {
|
|
|
|
/* We already have a LOCK_IX on table, but not necessarily
|
|
|
|
on check_table */
|
|
|
|
|
|
|
|
err = lock_table(0, check_table, LOCK_IS, thr);
|
|
|
|
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
|
|
|
|
goto do_possible_lock_wait;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
mtr_start(&mtr);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
/* Store old value on n_fields_cmp */
|
|
|
|
|
|
|
|
n_fields_cmp = dtuple_get_n_fields_cmp(entry);
|
|
|
|
|
|
|
|
dtuple_set_n_fields_cmp(entry, foreign->n_fields);
|
|
|
|
|
|
|
|
btr_pcur_open(check_index, entry, PAGE_CUR_GE,
|
|
|
|
BTR_SEARCH_LEAF, &pcur, &mtr);
|
|
|
|
|
|
|
|
/* Scan index records and check if there is a matching record */
|
|
|
|
|
|
|
|
do {
|
|
|
|
const rec_t* rec = btr_pcur_get_rec(&pcur);
|
|
|
|
const buf_block_t* block = btr_pcur_get_block(&pcur);
|
|
|
|
|
|
|
|
if (page_rec_is_infimum(rec)) {
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
offsets = rec_get_offsets(rec, check_index,
|
|
|
|
offsets, ULINT_UNDEFINED, &heap);
|
|
|
|
|
|
|
|
if (page_rec_is_supremum(rec)) {
|
|
|
|
|
2016-11-13 10:31:35 +05:30
|
|
|
if (skip_gap_lock) {
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
err = row_ins_set_shared_rec_lock(LOCK_ORDINARY, block,
|
|
|
|
rec, check_index,
|
|
|
|
offsets, thr);
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
case DB_SUCCESS:
|
|
|
|
continue;
|
|
|
|
default:
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cmp = cmp_dtuple_rec(entry, rec, offsets);
|
|
|
|
|
|
|
|
if (cmp == 0) {
|
2016-11-13 10:31:35 +05:30
|
|
|
|
|
|
|
ulint lock_type;
|
|
|
|
|
|
|
|
lock_type = skip_gap_lock
|
|
|
|
? LOCK_REC_NOT_GAP
|
|
|
|
: LOCK_ORDINARY;
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
if (rec_get_deleted_flag(rec,
|
|
|
|
rec_offs_comp(offsets))) {
|
|
|
|
err = row_ins_set_shared_rec_lock(
|
2016-11-13 10:31:35 +05:30
|
|
|
lock_type, block,
|
2014-02-26 19:11:54 +01:00
|
|
|
rec, check_index, offsets, thr);
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* Found a matching record. Lock only
|
|
|
|
a record because we can allow inserts
|
|
|
|
into gaps */
|
|
|
|
|
|
|
|
err = row_ins_set_shared_rec_lock(
|
|
|
|
LOCK_REC_NOT_GAP, block,
|
|
|
|
rec, check_index, offsets, thr);
|
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (check_ref) {
|
|
|
|
err = DB_SUCCESS;
|
2014-08-06 15:39:15 +03:00
|
|
|
#ifdef WITH_WSREP
|
|
|
|
err = wsrep_append_foreign_key(
|
|
|
|
thr_get_trx(thr),
|
|
|
|
foreign,
|
|
|
|
rec,
|
|
|
|
check_index,
|
|
|
|
check_ref, TRUE);
|
|
|
|
#endif /* WITH_WSREP */
|
2014-02-26 19:11:54 +01:00
|
|
|
goto end_scan;
|
|
|
|
} else if (foreign->type != 0) {
|
|
|
|
/* There is an ON UPDATE or ON DELETE
|
|
|
|
condition: check them in a separate
|
|
|
|
function */
|
|
|
|
|
|
|
|
err = row_ins_foreign_check_on_constraint(
|
|
|
|
thr, foreign, &pcur, entry,
|
|
|
|
&mtr);
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
/* Since reporting a plain
|
|
|
|
"duplicate key" error
|
|
|
|
message to the user in
|
|
|
|
cases where a long CASCADE
|
|
|
|
operation would lead to a
|
|
|
|
duplicate key in some
|
|
|
|
other table is very
|
|
|
|
confusing, map duplicate
|
|
|
|
key errors resulting from
|
|
|
|
FK constraints to a
|
|
|
|
separate error code. */
|
|
|
|
|
|
|
|
if (err == DB_DUPLICATE_KEY) {
|
|
|
|
err = DB_FOREIGN_DUPLICATE_KEY;
|
|
|
|
}
|
|
|
|
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* row_ins_foreign_check_on_constraint
|
|
|
|
may have repositioned pcur on a
|
|
|
|
different block */
|
|
|
|
block = btr_pcur_get_block(&pcur);
|
|
|
|
} else {
|
|
|
|
row_ins_foreign_report_err(
|
|
|
|
"Trying to delete or update",
|
|
|
|
thr, foreign, rec, entry);
|
|
|
|
|
|
|
|
err = DB_ROW_IS_REFERENCED;
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ut_a(cmp < 0);
|
|
|
|
|
2016-11-13 10:31:35 +05:30
|
|
|
err = DB_SUCCESS;
|
|
|
|
|
|
|
|
if (!skip_gap_lock) {
|
|
|
|
err = row_ins_set_shared_rec_lock(
|
|
|
|
LOCK_GAP, block,
|
|
|
|
rec, check_index, offsets, thr);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
case DB_SUCCESS:
|
|
|
|
if (check_ref) {
|
|
|
|
err = DB_NO_REFERENCED_ROW;
|
|
|
|
row_ins_foreign_report_add_err(
|
|
|
|
trx, foreign, rec, entry);
|
|
|
|
} else {
|
|
|
|
err = DB_SUCCESS;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
} while (btr_pcur_move_to_next(&pcur, &mtr));
|
|
|
|
|
|
|
|
if (check_ref) {
|
|
|
|
row_ins_foreign_report_add_err(
|
|
|
|
trx, foreign, btr_pcur_get_rec(&pcur), entry);
|
|
|
|
err = DB_NO_REFERENCED_ROW;
|
|
|
|
} else {
|
|
|
|
err = DB_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
end_scan:
|
|
|
|
btr_pcur_close(&pcur);
|
|
|
|
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
|
|
|
|
/* Restore old value */
|
|
|
|
dtuple_set_n_fields_cmp(entry, n_fields_cmp);
|
|
|
|
|
|
|
|
do_possible_lock_wait:
|
|
|
|
if (err == DB_LOCK_WAIT) {
|
2016-08-12 11:17:45 +03:00
|
|
|
/* An object that will correctly decrement the FK check counter
|
|
|
|
when it goes out of this scope. */
|
|
|
|
ib_dec_in_dtor dec(check_table->n_foreign_key_checks_running);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
trx->error_state = err;
|
|
|
|
|
|
|
|
que_thr_stop_for_mysql(thr);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
thr->lock_state = QUE_THR_LOCK_ROW;
|
|
|
|
|
|
|
|
/* To avoid check_table being dropped, increment counter */
|
2016-09-09 15:05:59 +04:00
|
|
|
my_atomic_addlint(
|
2016-08-12 11:17:45 +03:00
|
|
|
&check_table->n_foreign_key_checks_running, 1);
|
|
|
|
|
2016-08-10 14:47:36 +05:30
|
|
|
trx_kill_blocking(trx);
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
lock_wait_suspend_thread(thr);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
thr->lock_state = QUE_THR_LOCK_NOLOCK;
|
|
|
|
|
|
|
|
DBUG_PRINT("to_be_dropped",
|
|
|
|
("table: %s", check_table->name.m_name));
|
2014-02-26 19:11:54 +01:00
|
|
|
if (check_table->to_be_dropped) {
|
|
|
|
/* The table is being dropped. We shall timeout
|
|
|
|
this operation */
|
|
|
|
err = DB_LOCK_WAIT_TIMEOUT;
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
goto exit_func;
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
exit_func:
|
2016-08-12 11:17:45 +03:00
|
|
|
if (heap != NULL) {
|
2014-02-26 19:11:54 +01:00
|
|
|
mem_heap_free(heap);
|
|
|
|
}
|
2016-08-12 11:17:45 +03:00
|
|
|
|
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
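/* Added note: on DB_LOCK_WAIT the function above suspends the calling
thread itself (lock_wait_suspend_thread()) and keeps
check_table->n_foreign_key_checks_running elevated for the duration of the
wait; the ib_dec_in_dtor guard decrements the counter again when the scope
is left, so the referenced table cannot be dropped while the check is
pending. */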
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Checks if foreign key constraints fail for an index entry. If index
|
|
|
|
is not mentioned in any constraint, this function does nothing.
|
|
|
|
Otherwise it searches the indexes of the referenced tables and
|
|
|
|
sets shared locks which lock either the success or the failure of
|
|
|
|
a constraint.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return DB_SUCCESS or error code */
|
2016-06-21 14:21:03 +02:00
|
|
|
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_check_foreign_constraints(
|
|
|
|
/*==============================*/
|
|
|
|
dict_table_t* table, /*!< in: table */
|
|
|
|
dict_index_t* index, /*!< in: index */
|
|
|
|
dtuple_t* entry, /*!< in: index entry for index */
|
|
|
|
que_thr_t* thr) /*!< in: query thread */
|
|
|
|
{
|
|
|
|
dict_foreign_t* foreign;
|
|
|
|
dberr_t err;
|
|
|
|
trx_t* trx;
|
|
|
|
ibool got_s_lock = FALSE;
|
|
|
|
|
|
|
|
trx = thr_get_trx(thr);
|
|
|
|
|
|
|
|
DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
|
|
|
|
"foreign_constraint_check_for_ins");
|
|
|
|
|
2014-09-11 10:13:35 +02:00
|
|
|
for (dict_foreign_set::iterator it = table->foreign_set.begin();
|
|
|
|
it != table->foreign_set.end();
|
|
|
|
++it) {
|
|
|
|
|
|
|
|
foreign = *it;
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
if (foreign->foreign_index == index) {
|
|
|
|
dict_table_t* ref_table = NULL;
|
|
|
|
dict_table_t* referenced_table
|
|
|
|
= foreign->referenced_table;
|
|
|
|
|
|
|
|
if (referenced_table == NULL) {
|
|
|
|
|
|
|
|
ref_table = dict_table_open_on_name(
|
|
|
|
foreign->referenced_table_name_lookup,
|
|
|
|
FALSE, FALSE, DICT_ERR_IGNORE_NONE);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (0 == trx->dict_operation_lock_mode) {
|
|
|
|
got_s_lock = TRUE;
|
|
|
|
|
|
|
|
row_mysql_freeze_data_dictionary(trx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* NOTE that if the thread ends up waiting for a lock
|
|
|
|
we will release dict_operation_lock temporarily!
|
|
|
|
But the counter on the table protects the referenced
|
|
|
|
table from being dropped while the check is running. */
|
|
|
|
|
|
|
|
err = row_ins_check_foreign_constraint(
|
|
|
|
TRUE, foreign, table, entry, thr);
|
|
|
|
|
|
|
|
if (got_s_lock) {
|
|
|
|
row_mysql_unfreeze_data_dictionary(trx);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ref_table != NULL) {
|
|
|
|
dict_table_close(ref_table, FALSE, FALSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
|
|
|
|
return(err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return(DB_SUCCESS);
|
|
|
|
}
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Checks if a unique key violation with rec would occur at the index entry
|
|
|
|
insert.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return TRUE if error */
|
2014-02-26 19:11:54 +01:00
|
|
|
static
|
|
|
|
ibool
|
|
|
|
row_ins_dupl_error_with_rec(
|
|
|
|
/*========================*/
|
|
|
|
const rec_t* rec, /*!< in: user record; NOTE that we assume
|
|
|
|
that the caller already has a record lock on
|
|
|
|
the record! */
|
|
|
|
const dtuple_t* entry, /*!< in: entry to insert */
|
|
|
|
dict_index_t* index, /*!< in: index */
|
|
|
|
const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
|
|
|
|
{
|
|
|
|
ulint matched_fields;
|
|
|
|
ulint n_unique;
|
|
|
|
ulint i;
|
|
|
|
|
|
|
|
ut_ad(rec_offs_validate(rec, index, offsets));
|
|
|
|
|
|
|
|
n_unique = dict_index_get_n_unique(index);
|
|
|
|
|
|
|
|
matched_fields = 0;
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
cmp_dtuple_rec_with_match(entry, rec, offsets, &matched_fields);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (matched_fields < n_unique) {
|
|
|
|
|
|
|
|
return(FALSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* In a unique secondary index we allow equal key values if they
|
|
|
|
contain SQL NULLs */
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (!dict_index_is_clust(index) && !index->nulls_equal) {
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
for (i = 0; i < n_unique; i++) {
|
|
|
|
if (dfield_is_null(dtuple_get_nth_field(entry, i))) {
|
|
|
|
|
|
|
|
return(FALSE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
|
|
|
|
}
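/* Added note (illustrative example): for a secondary UNIQUE KEY(a) with
index->nulls_equal == false, two entries with a = NULL are never reported
as duplicates here, because any NULL unique field makes this function
return FALSE; likewise a delete-marked matching record is not reported as
a duplicate. */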
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Scans a unique non-clustered index at a given index entry to determine
|
|
|
|
whether a uniqueness violation has occurred for the key value of the entry.
|
|
|
|
Sets shared locks on possible duplicate records.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return DB_SUCCESS, DB_DUPLICATE_KEY, or DB_LOCK_WAIT */
|
2016-06-21 14:21:03 +02:00
|
|
|
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_scan_sec_index_for_duplicate(
|
|
|
|
/*=================================*/
|
|
|
|
ulint flags, /*!< in: undo logging and locking flags */
|
|
|
|
dict_index_t* index, /*!< in: non-clustered unique index */
|
|
|
|
dtuple_t* entry, /*!< in: index entry */
|
|
|
|
que_thr_t* thr, /*!< in: query thread */
|
|
|
|
bool s_latch,/*!< in: whether index->lock is being held */
|
|
|
|
mtr_t* mtr, /*!< in/out: mini-transaction */
|
|
|
|
mem_heap_t* offsets_heap)
|
|
|
|
/*!< in/out: memory heap that can be emptied */
|
|
|
|
{
|
|
|
|
ulint n_unique;
|
|
|
|
int cmp;
|
|
|
|
ulint n_fields_cmp;
|
|
|
|
btr_pcur_t pcur;
|
|
|
|
dberr_t err = DB_SUCCESS;
|
|
|
|
ulint allow_duplicates;
|
|
|
|
ulint* offsets = NULL;
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_ENTER("row_ins_scan_sec_index_for_duplicate");
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
|
|
|
|
ut_ad(s_latch == rw_lock_own_flagged(
|
|
|
|
&index->lock, RW_LOCK_FLAG_S | RW_LOCK_FLAG_SX));
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
n_unique = dict_index_get_n_unique(index);
|
|
|
|
|
|
|
|
/* If the secondary index is unique, but one of the fields in the
|
|
|
|
n_unique first fields is NULL, a unique key violation cannot occur,
|
|
|
|
since we define NULL != NULL in this case */
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (!index->nulls_equal) {
|
|
|
|
for (ulint i = 0; i < n_unique; i++) {
|
|
|
|
if (UNIV_SQL_NULL == dfield_get_len(
|
|
|
|
dtuple_get_nth_field(entry, i))) {
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_RETURN(DB_SUCCESS);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Store old value on n_fields_cmp */
|
|
|
|
|
|
|
|
n_fields_cmp = dtuple_get_n_fields_cmp(entry);
|
|
|
|
|
|
|
|
dtuple_set_n_fields_cmp(entry, n_unique);
|
|
|
|
|
|
|
|
btr_pcur_open(index, entry, PAGE_CUR_GE,
|
|
|
|
s_latch
|
2017-03-09 10:30:36 +02:00
|
|
|
? BTR_SEARCH_LEAF_ALREADY_S_LATCHED
|
2014-02-26 19:11:54 +01:00
|
|
|
: BTR_SEARCH_LEAF,
|
|
|
|
&pcur, mtr);
|
|
|
|
|
|
|
|
allow_duplicates = thr_get_trx(thr)->duplicates;
|
|
|
|
|
|
|
|
/* Scan index records and check if there is a duplicate */
|
|
|
|
|
|
|
|
do {
|
|
|
|
const rec_t* rec = btr_pcur_get_rec(&pcur);
|
|
|
|
const buf_block_t* block = btr_pcur_get_block(&pcur);
|
2014-11-18 17:41:12 +01:00
|
|
|
const ulint lock_type = LOCK_ORDINARY;
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (page_rec_is_infimum(rec)) {
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
offsets = rec_get_offsets(rec, index, offsets,
|
|
|
|
ULINT_UNDEFINED, &offsets_heap);
|
|
|
|
|
|
|
|
if (flags & BTR_NO_LOCKING_FLAG) {
|
|
|
|
/* Set no locks when applying log
|
|
|
|
in online table rebuild. */
|
|
|
|
} else if (allow_duplicates) {
|
|
|
|
|
|
|
|
/* If the SQL-query will update or replace
|
|
|
|
duplicate key we will take X-lock for
|
|
|
|
duplicates (REPLACE, LOAD DATA INFILE REPLACE,
|
|
|
|
INSERT ON DUPLICATE KEY UPDATE). */
|
|
|
|
|
|
|
|
err = row_ins_set_exclusive_rec_lock(
|
|
|
|
lock_type, block, rec, index, offsets, thr);
|
|
|
|
} else {
|
|
|
|
|
|
|
|
err = row_ins_set_shared_rec_lock(
|
|
|
|
lock_type, block, rec, index, offsets, thr);
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
err = DB_SUCCESS;
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (page_rec_is_supremum(rec)) {
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
cmp = cmp_dtuple_rec(entry, rec, offsets);
|
|
|
|
|
2016-12-05 21:04:30 +02:00
|
|
|
if (cmp == 0) {
|
2014-02-26 19:11:54 +01:00
|
|
|
if (row_ins_dupl_error_with_rec(rec, entry,
|
|
|
|
index, offsets)) {
|
|
|
|
err = DB_DUPLICATE_KEY;
|
|
|
|
|
|
|
|
thr_get_trx(thr)->error_info = index;
|
|
|
|
|
|
|
|
/* If the duplicate is on hidden FTS_DOC_ID,
|
|
|
|
state so in the error log */
|
2016-08-12 11:17:45 +03:00
|
|
|
if (index == index->table->fts_doc_id_index
|
|
|
|
&& DICT_TF2_FLAG_IS_SET(
|
2014-02-26 19:11:54 +01:00
|
|
|
index->table,
|
2016-08-12 11:17:45 +03:00
|
|
|
DICT_TF2_FTS_HAS_DOC_ID)) {
|
|
|
|
|
|
|
|
ib::error() << "Duplicate FTS_DOC_ID"
|
|
|
|
" value on table "
|
|
|
|
<< index->table->name;
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
} else {
|
2016-12-05 21:04:30 +02:00
|
|
|
ut_a(cmp < 0);
|
2014-02-26 19:11:54 +01:00
|
|
|
goto end_scan;
|
|
|
|
}
|
|
|
|
} while (btr_pcur_move_to_next(&pcur, mtr));
|
|
|
|
|
|
|
|
end_scan:
|
|
|
|
/* Restore old value */
|
|
|
|
dtuple_set_n_fields_cmp(entry, n_fields_cmp);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
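/* Added note: the scan above always requests next-key (LOCK_ORDINARY)
locks; an exclusive lock is taken when the statement may overwrite the
duplicate (trx->duplicates is set for REPLACE and
INSERT ... ON DUPLICATE KEY UPDATE), a shared lock otherwise, and no locks
at all when applying the online table-rebuild log (BTR_NO_LOCKING_FLAG). */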
|
|
|
|
|
|
|
|
/** Checks for a duplicate when the table is being rebuilt online.
|
2016-08-12 11:17:45 +03:00
|
|
|
@retval DB_SUCCESS when no duplicate is detected
|
|
|
|
@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
|
2014-02-26 19:11:54 +01:00
|
|
|
a newer version of entry (the entry should not be inserted)
|
2016-08-12 11:17:45 +03:00
|
|
|
@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
|
2016-06-21 14:21:03 +02:00
|
|
|
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_duplicate_online(
|
|
|
|
/*=====================*/
|
|
|
|
ulint n_uniq, /*!< in: offset of DB_TRX_ID */
|
|
|
|
const dtuple_t* entry, /*!< in: entry that is being inserted */
|
|
|
|
const rec_t* rec, /*!< in: clustered index record */
|
|
|
|
ulint* offsets)/*!< in/out: rec_get_offsets(rec) */
|
|
|
|
{
|
|
|
|
ulint fields = 0;
|
|
|
|
|
|
|
|
/* During rebuild, there should not be any delete-marked rows
|
|
|
|
in the new table. */
|
|
|
|
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
|
|
|
|
ut_ad(dtuple_get_n_fields_cmp(entry) == n_uniq);
|
|
|
|
|
|
|
|
/* Compare the PRIMARY KEY fields and the
|
|
|
|
DB_TRX_ID, DB_ROLL_PTR. */
|
|
|
|
cmp_dtuple_rec_with_match_low(
|
2016-08-12 11:17:45 +03:00
|
|
|
entry, rec, offsets, n_uniq + 2, &fields);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (fields < n_uniq) {
|
|
|
|
/* Not a duplicate. */
|
|
|
|
return(DB_SUCCESS);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fields == n_uniq + 2) {
|
|
|
|
/* rec is an exact match of entry. */
|
|
|
|
return(DB_SUCCESS_LOCKED_REC);
|
|
|
|
}
|
|
|
|
|
|
|
|
return(DB_DUPLICATE_KEY);
|
|
|
|
}
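/* Added summary of the return values above, given n_uniq PRIMARY KEY
fields plus DB_TRX_ID and DB_ROLL_PTR:
   fields <  n_uniq      -> no duplicate                (DB_SUCCESS)
   fields == n_uniq + 2  -> rec is an exact match of entry, including
                            the system columns          (DB_SUCCESS_LOCKED_REC)
   otherwise             -> same key, different row version
                                                        (DB_DUPLICATE_KEY) */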
|
|
|
|
|
|
|
|
/** Checks for a duplicate when the table is being rebuilt online.
|
2016-08-12 11:17:45 +03:00
|
|
|
@retval DB_SUCCESS when no duplicate is detected
|
|
|
|
@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
|
2014-02-26 19:11:54 +01:00
|
|
|
a newer version of entry (the entry should not be inserted)
|
2016-08-12 11:17:45 +03:00
|
|
|
@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
|
2016-06-21 14:21:03 +02:00
|
|
|
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_duplicate_error_in_clust_online(
|
|
|
|
/*====================================*/
|
|
|
|
ulint n_uniq, /*!< in: offset of DB_TRX_ID */
|
|
|
|
const dtuple_t* entry, /*!< in: entry that is being inserted */
|
|
|
|
const btr_cur_t*cursor, /*!< in: cursor on insert position */
|
|
|
|
ulint** offsets,/*!< in/out: rec_get_offsets(rec) */
|
|
|
|
mem_heap_t** heap) /*!< in/out: heap for offsets */
|
|
|
|
{
|
|
|
|
dberr_t err = DB_SUCCESS;
|
|
|
|
const rec_t* rec = btr_cur_get_rec(cursor);
|
|
|
|
|
|
|
|
if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) {
|
|
|
|
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
|
|
|
|
ULINT_UNDEFINED, heap);
|
|
|
|
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
return(err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
rec = page_rec_get_next_const(btr_cur_get_rec(cursor));
|
|
|
|
|
|
|
|
if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) {
|
|
|
|
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
|
|
|
|
ULINT_UNDEFINED, heap);
|
|
|
|
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
|
|
|
|
}
|
|
|
|
|
|
|
|
return(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Checks if a unique key violation error would occur at an index entry
|
|
|
|
insert. Sets shared locks on possible duplicate records. Works only
|
|
|
|
for a clustered index!
|
|
|
|
@retval DB_SUCCESS if no error
|
|
|
|
@retval DB_DUPLICATE_KEY if error,
|
|
|
|
@retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate
|
2017-05-17 14:08:08 +03:00
|
|
|
record */
|
2016-06-21 14:21:03 +02:00
|
|
|
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_duplicate_error_in_clust(
|
|
|
|
ulint flags, /*!< in: undo logging and locking flags */
|
|
|
|
btr_cur_t* cursor, /*!< in: B-tree cursor */
|
|
|
|
const dtuple_t* entry, /*!< in: entry to insert */
|
2017-05-22 09:20:20 +03:00
|
|
|
que_thr_t* thr) /*!< in: query thread */
|
2014-02-26 19:11:54 +01:00
|
|
|
{
|
|
|
|
dberr_t err;
|
|
|
|
rec_t* rec;
|
|
|
|
ulint n_unique;
|
|
|
|
trx_t* trx = thr_get_trx(thr);
|
|
|
|
mem_heap_t*heap = NULL;
|
|
|
|
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
|
|
|
ulint* offsets = offsets_;
|
|
|
|
rec_offs_init(offsets_);
|
|
|
|
|
|
|
|
ut_ad(dict_index_is_clust(cursor->index));
|
|
|
|
|
|
|
|
/* NOTE: For unique non-clustered indexes there may be any number
|
|
|
|
of delete marked records with the same value for the non-clustered
|
|
|
|
index key (remember multiversioning), and which differ only in
|
|
|
|
the row reference part of the index record, containing the
|
|
|
|
clustered index key fields. For such a secondary index record,
|
|
|
|
to avoid race condition, we must FIRST do the insertion and after
|
|
|
|
that check that the uniqueness condition is not breached! */
|
|
|
|
|
|
|
|
/* NOTE: A problem is that in the B-tree node pointers on an
|
|
|
|
upper level may match more to the entry than the actual existing
|
|
|
|
user records on the leaf level. So, even if low_match would suggest
|
|
|
|
that a duplicate key violation may occur, this may not be the case. */
|
|
|
|
|
|
|
|
n_unique = dict_index_get_n_unique(cursor->index);
|
|
|
|
|
|
|
|
if (cursor->low_match >= n_unique) {
|
|
|
|
|
|
|
|
rec = btr_cur_get_rec(cursor);
|
|
|
|
|
|
|
|
if (!page_rec_is_infimum(rec)) {
|
|
|
|
offsets = rec_get_offsets(rec, cursor->index, offsets,
|
|
|
|
ULINT_UNDEFINED, &heap);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
ulint lock_type;
|
|
|
|
|
|
|
|
lock_type =
|
|
|
|
trx->isolation_level <= TRX_ISO_READ_COMMITTED
|
|
|
|
? LOCK_REC_NOT_GAP : LOCK_ORDINARY;
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
/* We set a lock on the possible duplicate: this
|
|
|
|
is needed in logical logging of MySQL to make
|
|
|
|
sure that in roll-forward we get the same duplicate
|
|
|
|
errors as in original execution */
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (flags & BTR_NO_LOCKING_FLAG) {
|
|
|
|
/* Do nothing if no-locking is set */
|
|
|
|
err = DB_SUCCESS;
|
|
|
|
} else if (trx->duplicates) {
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
/* If the SQL-query will update or replace
|
|
|
|
duplicate key we will take X-lock for
|
|
|
|
duplicates (REPLACE, LOAD DATA INFILE REPLACE,
|
|
|
|
INSERT ON DUPLICATE KEY UPDATE). */
|
|
|
|
|
|
|
|
err = row_ins_set_exclusive_rec_lock(
|
2016-08-12 11:17:45 +03:00
|
|
|
lock_type,
|
2014-02-26 19:11:54 +01:00
|
|
|
btr_cur_get_block(cursor),
|
|
|
|
rec, cursor->index, offsets, thr);
|
|
|
|
} else {
|
|
|
|
|
|
|
|
err = row_ins_set_shared_rec_lock(
|
2016-08-12 11:17:45 +03:00
|
|
|
lock_type,
|
2014-02-26 19:11:54 +01:00
|
|
|
btr_cur_get_block(cursor), rec,
|
|
|
|
cursor->index, offsets, thr);
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (row_ins_dupl_error_with_rec(
|
|
|
|
rec, entry, cursor->index, offsets)) {
|
|
|
|
duplicate:
|
|
|
|
trx->error_info = cursor->index;
|
|
|
|
err = DB_DUPLICATE_KEY;
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cursor->up_match >= n_unique) {
|
|
|
|
|
|
|
|
rec = page_rec_get_next(btr_cur_get_rec(cursor));
|
|
|
|
|
|
|
|
if (!page_rec_is_supremum(rec)) {
|
|
|
|
offsets = rec_get_offsets(rec, cursor->index, offsets,
|
|
|
|
ULINT_UNDEFINED, &heap);
|
|
|
|
|
|
|
|
if (trx->duplicates) {
|
|
|
|
|
|
|
|
/* If the SQL-query will update or replace
|
|
|
|
duplicate key we will take X-lock for
|
|
|
|
duplicates (REPLACE, LOAD DATA INFILE REPLACE,
|
|
|
|
INSERT ON DUPLICATE KEY UPDATE). */
|
|
|
|
|
|
|
|
err = row_ins_set_exclusive_rec_lock(
|
|
|
|
LOCK_REC_NOT_GAP,
|
|
|
|
btr_cur_get_block(cursor),
|
|
|
|
rec, cursor->index, offsets, thr);
|
|
|
|
} else {
|
|
|
|
|
|
|
|
err = row_ins_set_shared_rec_lock(
|
|
|
|
LOCK_REC_NOT_GAP,
|
|
|
|
btr_cur_get_block(cursor),
|
|
|
|
rec, cursor->index, offsets, thr);
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (row_ins_dupl_error_with_rec(
|
|
|
|
rec, entry, cursor->index, offsets)) {
|
|
|
|
goto duplicate;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This should never happen */
|
|
|
|
ut_error;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = DB_SUCCESS;
|
|
|
|
func_exit:
|
|
|
|
if (UNIV_LIKELY_NULL(heap)) {
|
|
|
|
mem_heap_free(heap);
|
|
|
|
}
|
|
|
|
return(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Checks if an index entry has long enough common prefix with an
|
|
|
|
existing record so that the intended insert of the entry must be
|
|
|
|
changed to a modify of the existing record. In the case of a clustered
|
|
|
|
index, the prefix must be n_unique fields long. In the case of a
|
|
|
|
secondary index, all fields must be equal. InnoDB never updates
|
|
|
|
secondary index records in place, other than clearing or setting the
|
|
|
|
delete-mark flag. We could update the non-unique fields
|
|
|
|
of a unique secondary index record by checking the cursor->up_match,
|
|
|
|
but we do not do so, because it could have some locking implications.
|
|
|
|
@return TRUE if the existing record should be updated; FALSE if not */
|
|
|
|
UNIV_INLINE
|
|
|
|
ibool
|
|
|
|
row_ins_must_modify_rec(
|
|
|
|
/*====================*/
|
|
|
|
const btr_cur_t* cursor) /*!< in: B-tree cursor */
|
|
|
|
{
|
|
|
|
/* NOTE: (compare to the note in row_ins_duplicate_error_in_clust)
|
|
|
|
Because node pointers on upper levels of the B-tree may match more
|
|
|
|
to entry than to actual user records on the leaf level, we
|
|
|
|
have to check if the candidate record is actually a user record.
|
|
|
|
A clustered index node pointer contains index->n_unique first fields,
|
|
|
|
and a secondary index node pointer contains all index fields. */
|
|
|
|
|
|
|
|
return(cursor->low_match
|
|
|
|
>= dict_index_get_n_unique_in_tree(cursor->index)
|
|
|
|
&& !page_rec_is_infimum(btr_cur_get_rec(cursor)));
|
|
|
|
}
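/* Added note (illustrative): in a clustered index with n_unique == 1,
low_match >= 1 on an actual user record means the PRIMARY KEY value is
already present, so the caller must change the insert into a modify of
that record (typically of a delete-marked old version). */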
|
|
|
|
|
2015-05-26 10:01:12 +03:00
|
|
|
/** Insert the externally stored fields (off-page columns)
|
|
|
|
of a clustered index entry.
|
|
|
|
@param[in] entry index entry to insert
|
|
|
|
@param[in] big_rec externally stored fields
|
|
|
|
@param[in,out] offsets rec_get_offsets()
|
|
|
|
@param[in,out] heap memory heap
|
|
|
|
@param[in] thd client connection, or NULL
|
|
|
|
@param[in] index clustered index
|
|
|
|
@return error code
|
|
|
|
@retval DB_SUCCESS
|
|
|
|
@retval DB_OUT_OF_FILE_SPACE */
|
|
|
|
static
|
|
|
|
dberr_t
|
|
|
|
row_ins_index_entry_big_rec(
|
|
|
|
const dtuple_t* entry,
|
|
|
|
const big_rec_t* big_rec,
|
|
|
|
ulint* offsets,
|
|
|
|
mem_heap_t** heap,
|
|
|
|
#ifndef DBUG_OFF
|
|
|
|
const void* thd,
|
|
|
|
#endif /* DBUG_OFF */
|
|
|
|
dict_index_t* index)
|
|
|
|
{
|
|
|
|
mtr_t mtr;
|
|
|
|
btr_pcur_t pcur;
|
|
|
|
rec_t* rec;
|
|
|
|
dberr_t error;
|
|
|
|
|
|
|
|
ut_ad(dict_index_is_clust(index));
|
|
|
|
|
|
|
|
DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern_latch");
|
|
|
|
|
|
|
|
mtr_start(&mtr);
|
|
|
|
mtr.set_named_space(index->space);
|
|
|
|
dict_disable_redo_if_temporary(index->table, &mtr);
|
|
|
|
|
|
|
|
btr_pcur_open(index, entry, PAGE_CUR_LE, BTR_MODIFY_TREE,
|
|
|
|
&pcur, &mtr);
|
|
|
|
rec = btr_pcur_get_rec(&pcur);
|
|
|
|
offsets = rec_get_offsets(rec, index, offsets,
|
|
|
|
ULINT_UNDEFINED, heap);
|
|
|
|
|
|
|
|
DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern");
|
|
|
|
error = btr_store_big_rec_extern_fields(
|
|
|
|
&pcur, 0, offsets, big_rec, &mtr, BTR_STORE_INSERT);
|
|
|
|
DEBUG_SYNC_C_IF_THD(thd, "after_row_ins_extern");
|
|
|
|
|
|
|
|
if (error == DB_SUCCESS
|
|
|
|
&& dict_index_is_online_ddl(index)) {
|
|
|
|
row_log_table_insert(btr_pcur_get_rec(&pcur), entry,
|
|
|
|
index, offsets);
|
|
|
|
}
|
|
|
|
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
|
|
|
|
btr_pcur_close(&pcur);
|
|
|
|
|
|
|
|
return(error);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef DBUG_OFF
|
|
|
|
# define row_ins_index_entry_big_rec(e,big,ofs,heap,index,thd) \
|
|
|
|
row_ins_index_entry_big_rec(e,big,ofs,heap,index)
|
|
|
|
#else /* DBUG_OFF */
|
|
|
|
# define row_ins_index_entry_big_rec(e,big,ofs,heap,index,thd) \
|
|
|
|
row_ins_index_entry_big_rec(e,big,ofs,heap,thd,index)
|
|
|
|
#endif /* DBUG_OFF */
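/* Added note: the wrapper macro above lets callers always write
row_ins_index_entry_big_rec(entry, big_rec, offsets, heap, index, thd);
in non-debug builds the thd argument is simply dropped, and in debug
builds it is reordered to match the parameter order of the function
definition. */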
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
/***************************************************************//**
|
|
|
|
Tries to insert an entry into a clustered index, ignoring foreign key
|
|
|
|
constraints. If a record with the same unique key is found, the other
|
|
|
|
record is necessarily marked deleted by a committed transaction, or a
|
|
|
|
unique key violation error occurs. The delete marked record is then
|
|
|
|
updated to an existing record, and we must write an undo log record on
|
|
|
|
the delete marked record.
|
|
|
|
@retval DB_SUCCESS on success
|
|
|
|
@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
|
|
|
|
@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
|
|
|
|
@return error code */
|
|
|
|
dberr_t
|
|
|
|
row_ins_clust_index_entry_low(
|
|
|
|
/*==========================*/
|
|
|
|
ulint flags, /*!< in: undo logging and locking flags */
|
|
|
|
ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
|
|
|
|
depending on whether we wish optimistic or
|
|
|
|
pessimistic descent down the index tree */
|
|
|
|
dict_index_t* index, /*!< in: clustered index */
|
|
|
|
ulint n_uniq, /*!< in: 0 or index->n_uniq */
|
|
|
|
dtuple_t* entry, /*!< in/out: index entry to insert */
|
|
|
|
ulint n_ext, /*!< in: number of externally stored columns */
|
2016-08-12 11:17:45 +03:00
|
|
|
que_thr_t* thr, /*!< in: query thread */
|
|
|
|
bool dup_chk_only)
|
|
|
|
/*!< in: if true, just do duplicate check
|
|
|
|
and return. don't execute actual insert. */
|
2014-02-26 19:11:54 +01:00
|
|
|
{
|
2016-08-12 11:17:45 +03:00
|
|
|
btr_pcur_t pcur;
|
|
|
|
btr_cur_t* cursor;
|
2015-08-31 19:47:14 +03:00
|
|
|
dberr_t err = DB_SUCCESS;
|
2014-02-26 19:11:54 +01:00
|
|
|
big_rec_t* big_rec = NULL;
|
|
|
|
mtr_t mtr;
|
2016-12-14 19:56:39 +02:00
|
|
|
ib_uint64_t auto_inc = 0;
|
2014-02-26 19:11:54 +01:00
|
|
|
mem_heap_t* offsets_heap = NULL;
|
2016-08-12 11:17:45 +03:00
|
|
|
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
|
|
|
ulint* offsets = offsets_;
|
|
|
|
rec_offs_init(offsets_);
|
|
|
|
|
|
|
|
DBUG_ENTER("row_ins_clust_index_entry_low");
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
ut_ad(dict_index_is_clust(index));
|
|
|
|
ut_ad(!dict_index_is_unique(index)
|
|
|
|
|| n_uniq == dict_index_get_n_unique(index));
|
|
|
|
ut_ad(!n_uniq || n_uniq == dict_index_get_n_unique(index));
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_ad(!thr_get_trx(thr)->in_rollback);
|
|
|
|
|
|
|
|
mtr_start(&mtr);
|
|
|
|
|
|
|
|
if (dict_table_is_temporary(index->table)) {
|
|
|
|
/* Disable REDO logging as the lifetime of temp-tables is
|
|
|
|
limited to server or connection lifetime and so REDO
|
|
|
|
information is not needed on restart for recovery.
|
|
|
|
Disable locking as temp-tables are local to a connection. */
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_ad(flags & BTR_NO_LOCKING_FLAG);
|
2016-12-14 19:56:39 +02:00
|
|
|
ut_ad(!dict_index_is_online_ddl(index));
|
|
|
|
ut_ad(!index->table->persistent_autoinc);
|
2016-08-12 11:17:45 +03:00
|
|
|
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
2016-12-14 19:56:39 +02:00
|
|
|
} else {
|
|
|
|
mtr.set_named_space(index->space);
|
|
|
|
|
|
|
|
if (mode == BTR_MODIFY_LEAF
|
|
|
|
&& dict_index_is_online_ddl(index)) {
|
2017-03-09 10:30:36 +02:00
|
|
|
mode = BTR_MODIFY_LEAF_ALREADY_S_LATCHED;
|
2016-12-14 19:56:39 +02:00
|
|
|
mtr_s_lock(dict_index_get_lock(index), &mtr);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-12-14 19:56:39 +02:00
|
|
|
if (unsigned ai = index->table->persistent_autoinc) {
|
|
|
|
/* Prepare to persist the AUTO_INCREMENT value
|
|
|
|
from the index entry to PAGE_ROOT_AUTO_INC. */
|
|
|
|
const dfield_t* dfield = dtuple_get_nth_field(
|
|
|
|
entry, ai - 1);
|
|
|
|
auto_inc = dfield_is_null(dfield)
|
|
|
|
? 0
|
|
|
|
: row_parse_int(static_cast<const byte*>(
|
|
|
|
dfield->data),
|
|
|
|
dfield->len,
|
|
|
|
dfield->type.mtype,
|
|
|
|
dfield->type.prtype
|
|
|
|
& DATA_UNSIGNED);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Note that we use PAGE_CUR_LE as the search mode, because then
|
|
|
|
the function will return sensible values in both low_match and
|
|
|
|
up_match of the cursor. */
|
2017-05-05 10:25:29 +03:00
|
|
|
err = btr_pcur_open_low(index, 0, entry, PAGE_CUR_LE, mode, &pcur,
|
2016-12-14 19:56:39 +02:00
|
|
|
__FILE__, __LINE__, auto_inc, &mtr);
|
2015-08-31 19:47:14 +03:00
|
|
|
if (err != DB_SUCCESS) {
|
MDEV-12253: Buffer pool blocks are accessed after they have been freed
The problem was that bpage was referenced after it had already been freed
from the LRU. Fixed by adding a new variable, encrypted, that is
passed down to buf_page_check_corrupt() and used in
buf_page_get_gen() to stop processing the page read.
This patch should also address the following test failures and
bugs:
MDEV-12419: IMPORT should not look up tablespace in
PageConverter::validate(). This is now removed.
MDEV-10099: encryption.innodb_onlinealter_encryption fails
sporadically in buildbot
MDEV-11420: encryption.innodb_encryption-page-compression
failed in buildbot
MDEV-11222: encryption.encrypt_and_grep failed in buildbot on P8
Removed dict_table_t::is_encrypted and dict_table_t::ibd_file_missing
and replaced them with dict_table_t::file_unreadable. The table's
ibd file is missing if fil_get_space(space_id) returns NULL,
and the table is encrypted if it does not. Removed the dict_table_t::is_corrupted field.
Ported the FilSpace class from 10.2 and use it in buf_page_check_corrupt(),
buf_page_decrypt_after_read(), buf_page_encrypt_before_write(),
buf_dblwr_process(), buf_read_page(), dict_stats_save_defrag_stats().
Added test cases where an encrypted page could be read while doing
redo log crash recovery. Also added a test case for row-compressed
blobs.
btr_cur_open_at_index_side_func(),
btr_cur_open_at_rnd_pos_func(): Avoid referencing block that is
NULL.
buf_page_get_zip(): Issue error if page read fails.
buf_page_get_gen(): Use dberr_t for error detection and
do not reference bpage after we have freed it.
buf_mark_space_corrupt(): remove bpage from LRU also when
it is encrypted.
buf_page_check_corrupt(): @return DB_SUCCESS if the page has
been read and is not corrupted,
DB_PAGE_CORRUPTED if the page is corrupted based on the checksum check,
DB_DECRYPTION_FAILED if the post-encryption checksum matches but
the normal page checksum does not match after decryption. In the read
case only DB_SUCCESS is possible.
buf_page_io_complete(): use dberr_t for error handling.
buf_flush_write_block_low(),
buf_read_ahead_random(),
buf_read_page_async(),
buf_read_ahead_linear(),
buf_read_ibuf_merge_pages(),
buf_read_recv_pages(),
fil_aio_wait():
Issue error if page read fails.
btr_pcur_move_to_next_page(): Do not reference page if it is
NULL.
Introduced dict_table_t::is_readable() and dict_index_t::is_readable()
that will return true if tablespace exists and pages read from
tablespace are not corrupted or page decryption failed.
Removed buf_page_t::key_version. After page decryption the
key version is not removed from page frame. For unencrypted
pages, old key_version is removed at buf_page_encrypt_before_write()
dict_stats_update_transient_for_index(),
dict_stats_update_transient()
Do not continue if table decryption failed or table
is corrupted.
dict0stats.cc: Introduced a dict_stats_report_error function
to avoid code duplication.
fil_parse_write_crypt_data():
Check that key read from redo log entry is found from
encryption plugin and if it is not, refuse to start.
PageConverter::validate(): Removed access to fil_space_t as
tablespace is not available during import.
Fixed error code on innodb.innodb test.
Merged test cases innodb-bad-key-change5 and innodb-bad-key-shutdown
into innodb-bad-key-change2. Removed the innodb-bad-key-change5 test.
Reduced unnecessary complexity in some long-running tests.
Removed fil_inc_pending_ops(), fil_decr_pending_ops(),
fil_get_first_space(), fil_get_next_space(),
fil_get_first_space_safe(), fil_get_next_space_safe()
functions.
fil_space_verify_crypt_checksum(): Fixed bug found using ASAN
where FIL_PAGE_END_LSN_OLD_CHECKSUM field was incorrectly
accessed from row compressed tables. Fixed out of page frame
bug for row compressed tables in
fil_space_verify_crypt_checksum() found using ASAN. Incorrect
function was called for compressed table.
Added new tests for discard, rename table and drop (we should allow them
even when page decryption fails). Alter table rename is not allowed.
Added a test for restart with innodb-force-recovery=1 when a page read
during redo recovery cannot be decrypted. Added a test for a corrupted table where
both the page data and FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION are corrupted.
Adjusted the test case innodb_bug14147491 so that it no longer
expects a crash. Instead the table is just mostly unusable.
fil0fil.h: fil_space_acquire_low is not a visible function;
fil_space_acquire and fil_space_acquire_silent are
inline functions. The FilSpace class uses fil_space_acquire_low
directly.
recv_apply_hashed_log_recs() does not return anything.
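A minimal sketch of the dberr_t-based outcome handling described above;
this is an illustration only, not the actual buf_page_check_corrupt() or
buf_page_io_complete() code, and the helper name page_read_outcome_sketch
and its parameters are hypothetical.

/* Sketch: how a read-completion path could branch on the three
outcomes listed above. */
static bool
page_read_outcome_sketch(
	dberr_t		check_result,	/*!< in: outcome of the corruption
					check after the page was read */
	dict_table_t*	table)		/*!< in/out: table the page belongs to */
{
	switch (check_result) {
	case DB_SUCCESS:
		/* The page was read and passed the checksum check. */
		return(true);
	case DB_DECRYPTION_FAILED:
		/* The post-encryption checksum matched, but the page did not
		check out after decryption: mark the table unreadable instead
		of referencing the block any further. */
		table->file_unreadable = true;
		return(false);
	case DB_PAGE_CORRUPTED:
	default:
		/* The checksum check failed: treat the page as corrupted. */
		return(false);
	}
}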
2017-04-26 15:19:16 +03:00
|
|
|
index->table->file_unreadable = true;
|
2017-05-05 10:25:29 +03:00
|
|
|
mtr.commit();
|
2015-08-31 19:47:14 +03:00
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
cursor = btr_pcur_get_btr_cur(&pcur);
|
2016-12-14 19:56:39 +02:00
|
|
|
cursor->thr = thr;
|
2015-08-31 19:47:14 +03:00
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
#ifdef UNIV_DEBUG
|
|
|
|
{
|
2016-08-12 11:17:45 +03:00
|
|
|
page_t* page = btr_cur_get_page(cursor);
|
2014-02-26 19:11:54 +01:00
|
|
|
rec_t* first_rec = page_rec_get_next(
|
|
|
|
page_get_infimum_rec(page));
|
|
|
|
|
|
|
|
ut_ad(page_rec_is_supremum(first_rec)
|
2016-08-12 11:17:45 +03:00
|
|
|
|| rec_n_fields_is_sane(index, first_rec, entry));
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
2016-08-12 11:17:45 +03:00
|
|
|
#endif /* UNIV_DEBUG */
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-12-05 21:04:30 +02:00
|
|
|
if (n_uniq
|
2016-08-12 11:17:45 +03:00
|
|
|
&& (cursor->up_match >= n_uniq || cursor->low_match >= n_uniq)) {
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (flags
|
|
|
|
== (BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
|
|
|
|
| BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) {
|
|
|
|
/* Set no locks when applying log
|
|
|
|
in online table rebuild. Only check for duplicates. */
|
|
|
|
err = row_ins_duplicate_error_in_clust_online(
|
2016-08-12 11:17:45 +03:00
|
|
|
n_uniq, entry, cursor,
|
2014-02-26 19:11:54 +01:00
|
|
|
&offsets, &offsets_heap);
|
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ut_ad(0);
|
|
|
|
/* fall through */
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
case DB_DUPLICATE_KEY:
|
2016-08-12 11:17:45 +03:00
|
|
|
thr_get_trx(thr)->error_info = cursor->index;
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* Note that the following may return also
|
|
|
|
DB_LOCK_WAIT */
|
|
|
|
|
|
|
|
err = row_ins_duplicate_error_in_clust(
|
2017-05-22 09:20:20 +03:00
|
|
|
flags, cursor, entry, thr);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
err_exit:
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (dup_chk_only) {
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Note: Allowing duplicates would qualify for modification of
|
2016-12-05 21:04:30 +02:00
|
|
|
an existing record, as the new entry is exactly the same as the old entry. */
|
|
|
|
if (row_ins_must_modify_rec(cursor)) {
|
2014-02-26 19:11:54 +01:00
|
|
|
/* There is already an index entry with a long enough common
|
|
|
|
prefix, we must convert the insert into a modify of an
|
|
|
|
existing record */
|
|
|
|
mem_heap_t* entry_heap = mem_heap_create(1024);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
err = row_ins_clust_index_entry_by_modify(
|
|
|
|
&pcur, flags, mode, &offsets, &offsets_heap,
|
|
|
|
entry_heap, entry, thr, &mtr);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) {
|
2016-08-12 11:17:45 +03:00
|
|
|
row_log_table_insert(btr_cur_get_rec(cursor), entry,
|
|
|
|
index, offsets);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
mem_heap_free(entry_heap);
|
|
|
|
} else {
|
|
|
|
rec_t* insert_rec;
|
|
|
|
|
|
|
|
if (mode != BTR_MODIFY_TREE) {
|
|
|
|
ut_ad((mode & ~BTR_ALREADY_S_LATCHED)
|
|
|
|
== BTR_MODIFY_LEAF);
|
|
|
|
err = btr_cur_optimistic_insert(
|
2016-08-12 11:17:45 +03:00
|
|
|
flags, cursor, &offsets, &offsets_heap,
|
2014-02-26 19:11:54 +01:00
|
|
|
entry, &insert_rec, &big_rec,
|
|
|
|
n_ext, thr, &mtr);
|
|
|
|
} else {
|
|
|
|
if (buf_LRU_buf_pool_running_out()) {
|
|
|
|
|
|
|
|
err = DB_LOCK_TABLE_FULL;
|
|
|
|
goto err_exit;
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DEBUG_SYNC_C("before_insert_pessimitic_row_ins_clust");
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
err = btr_cur_optimistic_insert(
|
2016-08-12 11:17:45 +03:00
|
|
|
flags, cursor,
|
2014-02-26 19:11:54 +01:00
|
|
|
&offsets, &offsets_heap,
|
|
|
|
entry, &insert_rec, &big_rec,
|
|
|
|
n_ext, thr, &mtr);
|
|
|
|
|
|
|
|
if (err == DB_FAIL) {
|
|
|
|
err = btr_cur_pessimistic_insert(
|
2016-08-12 11:17:45 +03:00
|
|
|
flags, cursor,
|
2014-02-26 19:11:54 +01:00
|
|
|
&offsets, &offsets_heap,
|
|
|
|
entry, &insert_rec, &big_rec,
|
|
|
|
n_ext, thr, &mtr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (big_rec != NULL) {
|
2014-02-26 19:11:54 +01:00
|
|
|
mtr_commit(&mtr);
|
|
|
|
|
|
|
|
/* Online table rebuild could read (and
|
|
|
|
ignore) the incomplete record at this point.
|
|
|
|
If online rebuild is in progress, the
|
|
|
|
row_ins_index_entry_big_rec() will write log. */
|
|
|
|
|
|
|
|
DBUG_EXECUTE_IF(
|
|
|
|
"row_ins_extern_checkpoint",
|
|
|
|
log_make_checkpoint_at(
|
|
|
|
LSN_MAX, TRUE););
|
|
|
|
err = row_ins_index_entry_big_rec(
|
|
|
|
entry, big_rec, offsets, &offsets_heap, index,
|
2015-05-26 10:01:12 +03:00
|
|
|
thr_get_trx(thr)->mysql_thd);
|
2014-02-26 19:11:54 +01:00
|
|
|
dtuple_convert_back_big_rec(index, entry, big_rec);
|
|
|
|
} else {
|
|
|
|
if (err == DB_SUCCESS
|
|
|
|
&& dict_index_is_online_ddl(index)) {
|
|
|
|
row_log_table_insert(
|
2016-08-12 11:17:45 +03:00
|
|
|
insert_rec, entry, index, offsets);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func_exit:
|
2016-08-12 11:17:45 +03:00
|
|
|
if (offsets_heap != NULL) {
|
2014-02-26 19:11:54 +01:00
|
|
|
mem_heap_free(offsets_heap);
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
btr_pcur_close(&pcur);
|
|
|
|
|
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
/** Start a mini-transaction and check if the index will be dropped.
|
|
|
|
@param[in,out] mtr mini-transaction
|
|
|
|
@param[in,out] index secondary index
|
|
|
|
@param[in] check whether to check
|
|
|
|
@param[in] search_mode flags
|
2014-02-26 19:11:54 +01:00
|
|
|
@return true if the index is to be dropped */
|
2016-09-06 09:43:16 +03:00
|
|
|
static MY_ATTRIBUTE((warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
bool
|
2016-08-12 11:17:45 +03:00
|
|
|
row_ins_sec_mtr_start_and_check_if_aborted(
|
|
|
|
mtr_t* mtr,
|
|
|
|
dict_index_t* index,
|
|
|
|
bool check,
|
2014-02-26 19:11:54 +01:00
|
|
|
ulint search_mode)
|
|
|
|
{
|
|
|
|
ut_ad(!dict_index_is_clust(index));
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_ad(mtr->is_named_space(index->space));
|
|
|
|
|
|
|
|
const mtr_log_t log_mode = mtr->get_log_mode();
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
mtr_start(mtr);
|
|
|
|
mtr->set_named_space(index->space);
|
|
|
|
mtr->set_log_mode(log_mode);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (!check) {
|
|
|
|
return(false);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (search_mode & BTR_ALREADY_S_LATCHED) {
|
|
|
|
mtr_s_lock(dict_index_get_lock(index), mtr);
|
|
|
|
} else {
|
2016-08-12 11:17:45 +03:00
|
|
|
mtr_sx_lock(dict_index_get_lock(index), mtr);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (index->online_status) {
|
|
|
|
case ONLINE_INDEX_ABORTED:
|
|
|
|
case ONLINE_INDEX_ABORTED_DROPPED:
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_ad(!index->is_committed());
|
2014-02-26 19:11:54 +01:00
|
|
|
return(true);
|
|
|
|
case ONLINE_INDEX_COMPLETE:
|
|
|
|
return(false);
|
|
|
|
case ONLINE_INDEX_CREATION:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ut_error;
|
|
|
|
return(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Tries to insert an entry into a secondary index. If a record with exactly the
|
|
|
|
same fields is found, the other record is necessarily marked deleted.
|
|
|
|
It is then unmarked. Otherwise, the entry is just inserted to the index.
|
|
|
|
@retval DB_SUCCESS on success
|
|
|
|
@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
|
|
|
|
@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
|
|
|
|
@return error code */
|
|
|
|
dberr_t
|
|
|
|
row_ins_sec_index_entry_low(
|
|
|
|
/*========================*/
|
|
|
|
ulint flags, /*!< in: undo logging and locking flags */
|
|
|
|
ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
|
|
|
|
depending on whether we wish optimistic or
|
|
|
|
pessimistic descent down the index tree */
|
|
|
|
dict_index_t* index, /*!< in: secondary index */
|
|
|
|
mem_heap_t* offsets_heap,
|
|
|
|
/*!< in/out: memory heap that can be emptied */
|
|
|
|
mem_heap_t* heap, /*!< in/out: memory heap */
|
|
|
|
dtuple_t* entry, /*!< in/out: index entry to insert */
|
|
|
|
trx_id_t trx_id, /*!< in: PAGE_MAX_TRX_ID during
|
|
|
|
row_log_table_apply(), or 0 */
|
2016-08-12 11:17:45 +03:00
|
|
|
que_thr_t* thr, /*!< in: query thread */
|
|
|
|
bool dup_chk_only)
|
|
|
|
/*!< in: if true, just do duplicate check
|
|
|
|
and return. don't execute actual insert. */
|
2014-02-26 19:11:54 +01:00
|
|
|
{
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_ENTER("row_ins_sec_index_entry_low");
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
btr_cur_t cursor;
|
2016-08-12 11:17:45 +03:00
|
|
|
ulint search_mode = mode;
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t err = DB_SUCCESS;
|
|
|
|
ulint n_unique;
|
|
|
|
mtr_t mtr;
|
2016-08-12 11:17:45 +03:00
|
|
|
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
|
|
|
ulint* offsets = offsets_;
|
|
|
|
rec_offs_init(offsets_);
|
|
|
|
rtr_info_t rtr_info;
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
ut_ad(!dict_index_is_clust(index));
|
|
|
|
ut_ad(mode == BTR_MODIFY_LEAF || mode == BTR_MODIFY_TREE);
|
|
|
|
|
|
|
|
cursor.thr = thr;
|
2016-08-12 11:17:45 +03:00
|
|
|
cursor.rtr_info = NULL;
|
2016-12-05 21:04:30 +02:00
|
|
|
ut_ad(thr_get_trx(thr)->id != 0);
|
2016-08-12 11:17:45 +03:00
|
|
|
|
|
|
|
mtr_start(&mtr);
|
|
|
|
mtr.set_named_space(index->space);
|
|
|
|
|
|
|
|
if (dict_table_is_temporary(index->table)) {
|
|
|
|
/* Disable REDO logging as the lifetime of temp-tables is
|
|
|
|
limited to server or connection lifetime and so REDO
|
|
|
|
information is not needed on restart for recovery.
|
|
|
|
Disable locking as temp-tables are local to a connection. */
|
|
|
|
|
|
|
|
ut_ad(flags & BTR_NO_LOCKING_FLAG);
|
|
|
|
mtr.set_log_mode(MTR_LOG_NO_REDO);
|
|
|
|
} else if (!dict_index_is_spatial(index)) {
|
|
|
|
/* Enable insert buffering if it's neither temp-table
|
|
|
|
nor spatial index. */
|
|
|
|
search_mode |= BTR_INSERT;
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
/* Ensure that we acquire index->lock when inserting into an
|
|
|
|
index with index->online_status == ONLINE_INDEX_COMPLETE, but
|
|
|
|
could still be subject to rollback_inplace_alter_table().
|
|
|
|
This prevents a concurrent change of index->online_status.
|
|
|
|
The memory object cannot be freed as long as we have an open
|
|
|
|
reference to the table, or index->table->n_ref_count > 0. */
|
2016-08-12 11:17:45 +03:00
|
|
|
const bool check = !index->is_committed();
|
2014-02-26 19:11:54 +01:00
|
|
|
if (check) {
|
|
|
|
DEBUG_SYNC_C("row_ins_sec_index_enter");
|
|
|
|
if (mode == BTR_MODIFY_LEAF) {
|
|
|
|
search_mode |= BTR_ALREADY_S_LATCHED;
|
|
|
|
mtr_s_lock(dict_index_get_lock(index), &mtr);
|
|
|
|
} else {
|
2016-08-12 11:17:45 +03:00
|
|
|
mtr_sx_lock(dict_index_get_lock(index), &mtr);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (row_log_online_op_try(
|
|
|
|
index, entry, thr_get_trx(thr)->id)) {
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Note that we use PAGE_CUR_LE as the search mode, because then
|
|
|
|
the function will return sensible values in both low_match and
|
|
|
|
up_match of the cursor. */
|
|
|
|
|
|
|
|
if (!thr_get_trx(thr)->check_unique_secondary) {
|
|
|
|
search_mode |= BTR_IGNORE_SEC_UNIQUE;
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (dict_index_is_spatial(index)) {
|
|
|
|
cursor.index = index;
|
|
|
|
rtr_init_rtr_info(&rtr_info, false, &cursor, index, false);
|
|
|
|
rtr_info_update_btr(&cursor, &rtr_info);
|
|
|
|
|
2016-09-06 09:43:16 +03:00
|
|
|
err = btr_cur_search_to_nth_level(
|
2016-08-12 11:17:45 +03:00
|
|
|
index, 0, entry, PAGE_CUR_RTREE_INSERT,
|
|
|
|
search_mode,
|
|
|
|
&cursor, 0, __FILE__, __LINE__, &mtr);
|
|
|
|
|
|
|
|
if (mode == BTR_MODIFY_LEAF && rtr_info.mbr_adj) {
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
rtr_clean_rtr_info(&rtr_info, true);
|
|
|
|
rtr_init_rtr_info(&rtr_info, false, &cursor,
|
|
|
|
index, false);
|
|
|
|
rtr_info_update_btr(&cursor, &rtr_info);
|
|
|
|
mtr_start(&mtr);
|
|
|
|
mtr.set_named_space(index->space);
|
2017-03-01 08:27:39 +02:00
|
|
|
search_mode &= ulint(~BTR_MODIFY_LEAF);
|
2016-08-12 11:17:45 +03:00
|
|
|
search_mode |= BTR_MODIFY_TREE;
|
2016-09-06 09:43:16 +03:00
|
|
|
err = btr_cur_search_to_nth_level(
|
2016-08-12 11:17:45 +03:00
|
|
|
index, 0, entry, PAGE_CUR_RTREE_INSERT,
|
|
|
|
search_mode,
|
|
|
|
&cursor, 0, __FILE__, __LINE__, &mtr);
|
|
|
|
mode = BTR_MODIFY_TREE;
|
|
|
|
}
|
|
|
|
|
|
|
|
DBUG_EXECUTE_IF(
|
|
|
|
"rtree_test_check_count", {
|
|
|
|
goto func_exit;});
|
|
|
|
|
|
|
|
} else {
|
2016-12-05 21:04:30 +02:00
|
|
|
err = btr_cur_search_to_nth_level(
|
|
|
|
index, 0, entry, PAGE_CUR_LE,
|
|
|
|
search_mode,
|
|
|
|
&cursor, 0, __FILE__, __LINE__, &mtr);
|
2016-08-12 11:17:45 +03:00
|
|
|
}
|
2015-08-31 19:47:14 +03:00
|
|
|
|
|
|
|
if (err != DB_SUCCESS) {
|
2016-08-12 11:17:45 +03:00
|
|
|
trx_t* trx = thr_get_trx(thr);
|
|
|
|
|
2015-09-14 08:27:14 +03:00
|
|
|
if (err == DB_DECRYPTION_FAILED) {
|
2015-08-31 19:47:14 +03:00
|
|
|
ib_push_warning(trx->mysql_thd,
|
2015-09-14 08:27:14 +03:00
|
|
|
DB_DECRYPTION_FAILED,
|
2015-08-31 19:47:14 +03:00
|
|
|
"Table %s is encrypted but encryption service or"
|
|
|
|
" used key_id is not available. "
|
|
|
|
" Can't continue reading table.",
|
|
|
|
index->table->name);
|
2017-04-26 15:19:16 +03:00
|
|
|
index->table->file_unreadable = true;
|
2015-08-31 19:47:14 +03:00
|
|
|
}
|
|
|
|
goto func_exit;
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (cursor.flag == BTR_CUR_INSERT_TO_IBUF) {
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_ad(!dict_index_is_spatial(index));
|
2014-02-26 19:11:54 +01:00
|
|
|
/* The insert was buffered during the search: we are done */
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef UNIV_DEBUG
|
|
|
|
{
|
|
|
|
page_t* page = btr_cur_get_page(&cursor);
|
|
|
|
rec_t* first_rec = page_rec_get_next(
|
|
|
|
page_get_infimum_rec(page));
|
|
|
|
|
|
|
|
ut_ad(page_rec_is_supremum(first_rec)
|
2016-08-12 11:17:45 +03:00
|
|
|
|| rec_n_fields_is_sane(index, first_rec, entry));
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
2016-08-12 11:17:45 +03:00
|
|
|
#endif /* UNIV_DEBUG */
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
n_unique = dict_index_get_n_unique(index);
|
|
|
|
|
|
|
|
if (dict_index_is_unique(index)
|
|
|
|
&& (cursor.low_match >= n_unique || cursor.up_match >= n_unique)) {
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
|
|
|
|
DEBUG_SYNC_C("row_ins_sec_index_unique");
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (row_ins_sec_mtr_start_and_check_if_aborted(
|
|
|
|
&mtr, index, check, search_mode)) {
|
2014-02-26 19:11:54 +01:00
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = row_ins_scan_sec_index_for_duplicate(
|
|
|
|
flags, index, entry, thr, check, &mtr, offsets_heap);
|
|
|
|
|
|
|
|
mtr_commit(&mtr);
|
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
case DB_DUPLICATE_KEY:
|
2016-08-12 11:17:45 +03:00
|
|
|
if (!index->is_committed()) {
|
2014-02-26 19:11:54 +01:00
|
|
|
ut_ad(!thr_get_trx(thr)
|
|
|
|
->dict_operation_lock_mode);
|
|
|
|
mutex_enter(&dict_sys->mutex);
|
2016-08-12 11:17:45 +03:00
|
|
|
dict_set_corrupted_index_cache_only(index);
|
2014-02-26 19:11:54 +01:00
|
|
|
mutex_exit(&dict_sys->mutex);
|
|
|
|
/* Do not return any error to the
|
|
|
|
caller. The duplicate will be reported
|
|
|
|
by ALTER TABLE or CREATE UNIQUE INDEX.
|
|
|
|
Unfortunately we cannot report the
|
|
|
|
duplicate key value to the DDL thread,
|
|
|
|
because the altered_table object is
|
|
|
|
private to its call stack. */
|
|
|
|
err = DB_SUCCESS;
|
|
|
|
}
|
|
|
|
/* fall through */
|
|
|
|
default:
|
2016-08-12 11:17:45 +03:00
|
|
|
if (dict_index_is_spatial(index)) {
|
|
|
|
rtr_clean_rtr_info(&rtr_info, true);
|
|
|
|
}
|
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (row_ins_sec_mtr_start_and_check_if_aborted(
|
|
|
|
&mtr, index, check, search_mode)) {
|
2014-02-26 19:11:54 +01:00
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
2015-10-09 17:21:46 +02:00
|
|
|
DEBUG_SYNC_C("row_ins_sec_index_entry_dup_locks_created");
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
/* We did not find a duplicate and we have now
|
|
|
|
locked with s-locks the necessary records to
|
|
|
|
prevent any insertion of a duplicate by another
|
|
|
|
transaction. Let us now reposition the cursor and
|
|
|
|
continue the insertion. */
|
2016-12-05 21:04:30 +02:00
|
|
|
btr_cur_search_to_nth_level(
|
|
|
|
index, 0, entry, PAGE_CUR_LE,
|
|
|
|
(search_mode
|
|
|
|
& ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE)),
|
|
|
|
&cursor, 0, __FILE__, __LINE__, &mtr);
|
2016-08-12 11:17:45 +03:00
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
if (!(flags & BTR_NO_LOCKING_FLAG)
|
|
|
|
&& dict_index_is_unique(index)
|
|
|
|
&& thr_get_trx(thr)->duplicates
|
|
|
|
&& thr_get_trx(thr)->isolation_level >= TRX_ISO_REPEATABLE_READ) {
|
|
|
|
|
|
|
|
/* When using the REPLACE statement or ON DUPLICATE clause, a
|
|
|
|
gap lock is taken on the position of the to-be-inserted record,
|
|
|
|
to prevent other concurrent transactions from inserting the same
|
|
|
|
record. */
|
|
|
|
|
|
|
|
dberr_t err;
|
|
|
|
const rec_t* rec = page_rec_get_next_const(
|
|
|
|
btr_cur_get_rec(&cursor));
|
|
|
|
|
|
|
|
ut_ad(!page_rec_is_infimum(rec));
|
|
|
|
|
|
|
|
offsets = rec_get_offsets(rec, index, offsets,
|
|
|
|
ULINT_UNDEFINED, &offsets_heap);
|
|
|
|
|
|
|
|
err = row_ins_set_exclusive_rec_lock(
|
|
|
|
LOCK_GAP, btr_cur_get_block(&cursor), rec,
|
|
|
|
index, offsets, thr);
|
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS:
|
|
|
|
case DB_SUCCESS_LOCKED_REC:
|
|
|
|
if (thr_get_trx(thr)->error_state != DB_DUPLICATE_KEY) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* Fall through (skip actual insert) after we have
|
|
|
|
successfully acquired the gap lock. */
|
|
|
|
default:
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ut_ad(thr_get_trx(thr)->error_state == DB_SUCCESS);
|
|
|
|
|
|
|
|
if (dup_chk_only) {
|
|
|
|
goto func_exit;
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (row_ins_must_modify_rec(&cursor)) {
|
|
|
|
/* There is already an index entry with a long enough common
|
|
|
|
prefix, we must convert the insert into a modify of an
|
|
|
|
existing record */
|
|
|
|
offsets = rec_get_offsets(
|
|
|
|
btr_cur_get_rec(&cursor), index, offsets,
|
|
|
|
ULINT_UNDEFINED, &offsets_heap);
|
|
|
|
|
|
|
|
err = row_ins_sec_index_entry_by_modify(
|
|
|
|
flags, mode, &cursor, &offsets,
|
|
|
|
offsets_heap, heap, entry, thr, &mtr);
|
2016-08-12 11:17:45 +03:00
|
|
|
|
|
|
|
if (err == DB_SUCCESS && dict_index_is_spatial(index)
|
|
|
|
&& rtr_info.mbr_adj) {
|
|
|
|
err = rtr_ins_enlarge_mbr(&cursor, thr, &mtr);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
} else {
|
|
|
|
rec_t* insert_rec;
|
|
|
|
big_rec_t* big_rec;
|
|
|
|
|
|
|
|
if (mode == BTR_MODIFY_LEAF) {
|
|
|
|
err = btr_cur_optimistic_insert(
|
|
|
|
flags, &cursor, &offsets, &offsets_heap,
|
|
|
|
entry, &insert_rec,
|
|
|
|
&big_rec, 0, thr, &mtr);
|
2016-08-12 11:17:45 +03:00
|
|
|
if (err == DB_SUCCESS
|
|
|
|
&& dict_index_is_spatial(index)
|
|
|
|
&& rtr_info.mbr_adj) {
|
|
|
|
err = rtr_ins_enlarge_mbr(&cursor, thr, &mtr);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
} else {
|
|
|
|
ut_ad(mode == BTR_MODIFY_TREE);
|
|
|
|
if (buf_LRU_buf_pool_running_out()) {
|
|
|
|
|
|
|
|
err = DB_LOCK_TABLE_FULL;
|
|
|
|
goto func_exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = btr_cur_optimistic_insert(
|
|
|
|
flags, &cursor,
|
|
|
|
&offsets, &offsets_heap,
|
|
|
|
entry, &insert_rec,
|
|
|
|
&big_rec, 0, thr, &mtr);
|
|
|
|
if (err == DB_FAIL) {
|
|
|
|
err = btr_cur_pessimistic_insert(
|
|
|
|
flags, &cursor,
|
|
|
|
&offsets, &offsets_heap,
|
|
|
|
entry, &insert_rec,
|
|
|
|
&big_rec, 0, thr, &mtr);
|
|
|
|
}
|
2016-08-12 11:17:45 +03:00
|
|
|
if (err == DB_SUCCESS
|
|
|
|
&& dict_index_is_spatial(index)
|
|
|
|
&& rtr_info.mbr_adj) {
|
|
|
|
err = rtr_ins_enlarge_mbr(&cursor, thr, &mtr);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (err == DB_SUCCESS && trx_id) {
|
|
|
|
page_update_max_trx_id(
|
|
|
|
btr_cur_get_block(&cursor),
|
|
|
|
btr_cur_get_page_zip(&cursor),
|
|
|
|
trx_id, &mtr);
|
|
|
|
}
|
|
|
|
|
|
|
|
ut_ad(!big_rec);
|
|
|
|
}
|
|
|
|
|
|
|
|
func_exit:
|
2016-08-12 11:17:45 +03:00
|
|
|
if (dict_index_is_spatial(index)) {
|
|
|
|
rtr_clean_rtr_info(&rtr_info, true);
|
|
|
|
}
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
mtr_commit(&mtr);
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Inserts an entry into a clustered index. Tries first optimistic,
|
|
|
|
then pessimistic descent down the tree. If the entry matches enough
|
|
|
|
to a delete marked record, performs the insert by updating or delete
|
|
|
|
unmarking the delete marked record.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_clust_index_entry(
|
|
|
|
/*======================*/
|
|
|
|
dict_index_t* index, /*!< in: clustered index */
|
|
|
|
dtuple_t* entry, /*!< in/out: index entry to insert */
|
|
|
|
que_thr_t* thr, /*!< in: query thread */
|
2016-08-12 11:17:45 +03:00
|
|
|
ulint n_ext, /*!< in: number of externally stored columns */
|
|
|
|
bool dup_chk_only)
|
|
|
|
/*!< in: if true, just do duplicate check
|
|
|
|
and return. don't execute actual insert. */
|
2014-02-26 19:11:54 +01:00
|
|
|
{
|
|
|
|
dberr_t err;
|
|
|
|
ulint n_uniq;
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_ENTER("row_ins_clust_index_entry");
|
|
|
|
|
2014-09-11 10:13:35 +02:00
|
|
|
if (!index->table->foreign_set.empty()) {
|
2014-02-26 19:11:54 +01:00
|
|
|
err = row_ins_check_foreign_constraints(
|
|
|
|
index->table, index, entry, thr);
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0;
|
|
|
|
|
|
|
|
/* Try first optimistic descent to the B-tree */
|
2016-12-05 21:04:30 +02:00
|
|
|
log_free_check();
|
|
|
|
const ulint flags = dict_table_is_temporary(index->table)
|
|
|
|
? BTR_NO_LOCKING_FLAG
|
|
|
|
: 0;
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-12-05 21:04:30 +02:00
|
|
|
err = row_ins_clust_index_entry_low(
|
|
|
|
flags, BTR_MODIFY_LEAF, index, n_uniq, entry,
|
|
|
|
n_ext, thr, dup_chk_only);
|
2016-08-12 11:17:45 +03:00
|
|
|
|
|
|
|
|
|
|
|
DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
|
|
|
|
"after_row_ins_clust_index_entry_leaf");
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
if (err != DB_FAIL) {
|
|
|
|
DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Try then pessimistic descent to the B-tree */
|
2016-12-05 21:04:30 +02:00
|
|
|
log_free_check();
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-12-05 21:04:30 +02:00
|
|
|
err = row_ins_clust_index_entry_low(
|
|
|
|
flags, BTR_MODIFY_TREE, index, n_uniq, entry,
|
|
|
|
n_ext, thr, dup_chk_only);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Inserts an entry into a secondary index. Tries first optimistic,
|
|
|
|
then pessimistic descent down the tree. If the entry matches enough
|
|
|
|
to a delete marked record, performs the insert by updating or delete
|
|
|
|
unmarking the delete marked record.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_sec_index_entry(
|
|
|
|
/*====================*/
|
|
|
|
dict_index_t* index, /*!< in: secondary index */
|
|
|
|
dtuple_t* entry, /*!< in/out: index entry to insert */
|
2016-08-12 11:17:45 +03:00
|
|
|
que_thr_t* thr, /*!< in: query thread */
|
|
|
|
bool dup_chk_only)
|
|
|
|
/*!< in: if true, just do duplicate check
|
|
|
|
and return. don't execute actual insert. */
|
2014-02-26 19:11:54 +01:00
|
|
|
{
|
|
|
|
dberr_t err;
|
|
|
|
mem_heap_t* offsets_heap;
|
|
|
|
mem_heap_t* heap;
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_EXECUTE_IF("row_ins_sec_index_entry_timeout", {
|
|
|
|
DBUG_SET("-d,row_ins_sec_index_entry_timeout");
|
|
|
|
return(DB_LOCK_WAIT);});
|
|
|
|
|
2014-09-11 10:13:35 +02:00
|
|
|
if (!index->table->foreign_set.empty()) {
|
2014-02-26 19:11:54 +01:00
|
|
|
err = row_ins_check_foreign_constraints(index->table, index,
|
|
|
|
entry, thr);
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
|
|
|
|
return(err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-05 21:04:30 +02:00
|
|
|
ut_ad(thr_get_trx(thr)->id != 0);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
offsets_heap = mem_heap_create(1024);
|
|
|
|
heap = mem_heap_create(1024);
|
|
|
|
|
|
|
|
/* Try first optimistic descent to the B-tree */
|
|
|
|
|
2016-12-05 21:04:30 +02:00
|
|
|
log_free_check();
|
|
|
|
const ulint flags = dict_table_is_temporary(index->table)
|
|
|
|
? BTR_NO_LOCKING_FLAG
|
|
|
|
: 0;
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
err = row_ins_sec_index_entry_low(
|
2016-08-12 11:17:45 +03:00
|
|
|
flags, BTR_MODIFY_LEAF, index, offsets_heap, heap, entry,
|
|
|
|
0, thr, dup_chk_only);
|
2014-02-26 19:11:54 +01:00
|
|
|
if (err == DB_FAIL) {
|
|
|
|
mem_heap_empty(heap);
|
|
|
|
|
|
|
|
/* Try then pessimistic descent to the B-tree */
|
2016-12-05 21:04:30 +02:00
|
|
|
log_free_check();
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
err = row_ins_sec_index_entry_low(
|
2016-08-12 11:17:45 +03:00
|
|
|
flags, BTR_MODIFY_TREE, index,
|
|
|
|
offsets_heap, heap, entry, 0, thr,
|
|
|
|
dup_chk_only);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
mem_heap_free(heap);
|
|
|
|
mem_heap_free(offsets_heap);
|
|
|
|
return(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
/***************************************************************//**
|
|
|
|
Inserts an index entry to index. Tries first optimistic, then pessimistic
|
|
|
|
descent down the tree. If the entry matches enough to a delete marked record,
|
|
|
|
performs the insert by updating or delete unmarking the delete marked
|
|
|
|
record.
|
2016-08-12 11:17:45 +03:00
|
|
|
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
|
2014-02-26 19:11:54 +01:00
|
|
|
static
|
|
|
|
dberr_t
|
|
|
|
row_ins_index_entry(
|
|
|
|
/*================*/
|
|
|
|
dict_index_t* index, /*!< in: index */
|
|
|
|
dtuple_t* entry, /*!< in/out: index entry to insert */
|
|
|
|
que_thr_t* thr) /*!< in: query thread */
|
|
|
|
{
|
2016-08-12 11:17:45 +03:00
|
|
|
ut_ad(thr_get_trx(thr)->id != 0);
|
|
|
|
|
2014-05-06 21:13:16 +02:00
|
|
|
DBUG_EXECUTE_IF("row_ins_index_entry_timeout", {
|
|
|
|
DBUG_SET("-d,row_ins_index_entry_timeout");
|
|
|
|
return(DB_LOCK_WAIT);});
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
if (dict_index_is_clust(index)) {
|
2016-08-12 11:17:45 +03:00
|
|
|
return(row_ins_clust_index_entry(index, entry, thr, 0, false));
|
2014-02-26 19:11:54 +01:00
|
|
|
} else {
|
2016-08-12 11:17:45 +03:00
|
|
|
return(row_ins_sec_index_entry(index, entry, thr, false));
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
|
|
|
|
/*****************************************************************//**
|
|
|
|
This function generates the MBR (Minimum Bounding Box) for spatial objects
|
|
|
|
and sets it in the spatial index field. */
|
|
|
|
static
|
2014-02-26 19:11:54 +01:00
|
|
|
void
|
2016-08-12 11:17:45 +03:00
|
|
|
row_ins_spatial_index_entry_set_mbr_field(
|
|
|
|
/*======================================*/
|
|
|
|
dfield_t* field, /*!< in/out: mbr field */
|
|
|
|
const dfield_t* row_field) /*!< in: row field */
|
|
|
|
{
|
|
|
|
uchar* dptr = NULL;
|
|
|
|
ulint dlen = 0;
|
|
|
|
double mbr[SPDIMS * 2];
|
|
|
|
|
|
|
|
/* This must be a GEOMETRY datatype */
|
|
|
|
ut_ad(DATA_GEOMETRY_MTYPE(field->type.mtype));
|
|
|
|
|
|
|
|
dptr = static_cast<uchar*>(dfield_get_data(row_field));
|
|
|
|
dlen = dfield_get_len(row_field);
|
|
|
|
|
|
|
|
/* obtain the MBR */
|
|
|
|
rtree_mbr_from_wkb(dptr + GEO_DATA_HEADER_SIZE,
|
|
|
|
static_cast<uint>(dlen - GEO_DATA_HEADER_SIZE),
|
|
|
|
SPDIMS, mbr);
|
|
|
|
|
|
|
|
/* Set mbr as index entry data */
|
|
|
|
dfield_write_mbr(field, mbr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Sets the values of the dtuple fields in entry from the values of appropriate
|
|
|
|
columns in row.
|
|
|
|
@param[in] index index handler
|
|
|
|
@param[out] entry index entry to make
|
|
|
|
@param[in] row row
|
|
|
|
|
|
|
|
@return DB_SUCCESS if the set is successful */
|
|
|
|
dberr_t
|
2014-02-26 19:11:54 +01:00
|
|
|
row_ins_index_entry_set_vals(
|
2016-08-12 11:17:45 +03:00
|
|
|
const dict_index_t* index,
|
|
|
|
dtuple_t* entry,
|
|
|
|
const dtuple_t* row)
|
2014-02-26 19:11:54 +01:00
|
|
|
{
|
|
|
|
ulint n_fields;
|
|
|
|
ulint i;
|
2016-08-12 11:17:45 +03:00
|
|
|
ulint num_v = dtuple_get_n_v_fields(entry);
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
n_fields = dtuple_get_n_fields(entry);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
for (i = 0; i < n_fields + num_v; i++) {
|
|
|
|
dict_field_t* ind_field = NULL;
|
2014-02-26 19:11:54 +01:00
|
|
|
dfield_t* field;
|
|
|
|
const dfield_t* row_field;
|
|
|
|
ulint len;
|
2016-08-12 11:17:45 +03:00
|
|
|
dict_col_t* col;
|
|
|
|
|
|
|
|
if (i >= n_fields) {
|
|
|
|
/* This is virtual field */
|
|
|
|
field = dtuple_get_nth_v_field(entry, i - n_fields);
|
|
|
|
col = &dict_table_get_nth_v_col(
|
|
|
|
index->table, i - n_fields)->m_col;
|
|
|
|
} else {
|
|
|
|
field = dtuple_get_nth_field(entry, i);
|
|
|
|
ind_field = dict_index_get_nth_field(index, i);
|
|
|
|
col = ind_field->col;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dict_col_is_virtual(col)) {
|
|
|
|
const dict_v_col_t* v_col
|
|
|
|
= reinterpret_cast<const dict_v_col_t*>(col);
|
|
|
|
ut_ad(dtuple_get_n_fields(row)
|
|
|
|
== dict_table_get_n_cols(index->table));
|
|
|
|
row_field = dtuple_get_nth_v_field(row, v_col->v_pos);
|
|
|
|
} else {
|
|
|
|
row_field = dtuple_get_nth_field(
|
|
|
|
row, ind_field->col->ind);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
len = dfield_get_len(row_field);
|
|
|
|
|
|
|
|
/* Check column prefix indexes */
|
2016-08-12 11:17:45 +03:00
|
|
|
if (ind_field != NULL && ind_field->prefix_len > 0
|
2014-02-26 19:11:54 +01:00
|
|
|
&& dfield_get_len(row_field) != UNIV_SQL_NULL) {
|
|
|
|
|
|
|
|
const dict_col_t* col
|
|
|
|
= dict_field_get_col(ind_field);
|
|
|
|
|
|
|
|
len = dtype_get_at_most_n_mbchars(
|
|
|
|
col->prtype, col->mbminmaxlen,
|
|
|
|
ind_field->prefix_len,
|
|
|
|
len,
|
|
|
|
static_cast<const char*>(
|
|
|
|
dfield_get_data(row_field)));
|
|
|
|
|
|
|
|
ut_ad(!dfield_is_ext(row_field));
|
|
|
|
}
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
/* Handle spatial index. For the first field, replace
|
|
|
|
the data with its MBR (Minimum Bounding Box). */
|
|
|
|
if ((i == 0) && dict_index_is_spatial(index)) {
|
|
|
|
if (!row_field->data
|
|
|
|
|| row_field->len < GEO_DATA_HEADER_SIZE) {
|
|
|
|
return(DB_CANT_CREATE_GEOMETRY_OBJECT);
|
|
|
|
}
|
|
|
|
row_ins_spatial_index_entry_set_mbr_field(
|
|
|
|
field, row_field);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
dfield_set_data(field, dfield_get_data(row_field), len);
|
|
|
|
if (dfield_is_ext(row_field)) {
|
|
|
|
ut_ad(dict_index_is_clust(index));
|
|
|
|
dfield_set_ext(field);
|
|
|
|
}
|
|
|
|
}
|
2016-08-12 11:17:45 +03:00
|
|
|
|
|
|
|
return(DB_SUCCESS);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/***********************************************************//**
|
|
|
|
Inserts a single index entry to the table.
|
|
|
|
@return DB_SUCCESS if operation successfully completed, else error
|
|
|
|
code or DB_LOCK_WAIT */
|
2016-06-21 14:21:03 +02:00
|
|
|
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins_index_entry_step(
|
|
|
|
/*=====================*/
|
|
|
|
ins_node_t* node, /*!< in: row insert node */
|
|
|
|
que_thr_t* thr) /*!< in: query thread */
|
|
|
|
{
|
|
|
|
dberr_t err;
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_ENTER("row_ins_index_entry_step");
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
ut_ad(dtuple_check_typed(node->row));
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
err = row_ins_index_entry_set_vals(node->index, node->entry, node->row);
|
|
|
|
|
|
|
|
if (err != DB_SUCCESS) {
|
|
|
|
DBUG_RETURN(err);
|
|
|
|
}
|
2014-02-26 19:11:54 +01:00
|
|
|
|
|
|
|
ut_ad(dtuple_check_typed(node->entry));
|
|
|
|
|
|
|
|
err = row_ins_index_entry(node->index, node->entry, thr);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
|
|
|
|
"after_row_ins_index_entry_step");
|
2014-02-26 19:11:54 +01:00
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_RETURN(err);
|
2014-02-26 19:11:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/***********************************************************//**
|
|
|
|
Allocates a row id for row and inits the node->index field. */
|
|
|
|
UNIV_INLINE
|
|
|
|
void
|
|
|
|
row_ins_alloc_row_id_step(
|
|
|
|
/*======================*/
|
|
|
|
ins_node_t* node) /*!< in: row insert node */
|
|
|
|
{
|
|
|
|
row_id_t row_id;
|
|
|
|
|
|
|
|
ut_ad(node->state == INS_NODE_ALLOC_ROW_ID);
|
|
|
|
|
|
|
|
if (dict_index_is_unique(dict_table_get_first_index(node->table))) {
|
|
|
|
|
|
|
|
/* No row id is stored if the clustered index is unique */
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fill in row id value to row */
|
|
|
|
|
|
|
|
row_id = dict_sys_get_new_row_id();
|
|
|
|
|
|
|
|
dict_sys_write_row_id(node->row_id_buf, row_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
/***********************************************************//**
|
|
|
|
Gets a row to insert from the values list. */
|
|
|
|
UNIV_INLINE
|
|
|
|
void
|
|
|
|
row_ins_get_row_from_values(
|
|
|
|
/*========================*/
|
|
|
|
ins_node_t* node) /*!< in: row insert node */
|
|
|
|
{
|
|
|
|
que_node_t* list_node;
|
|
|
|
dfield_t* dfield;
|
|
|
|
dtuple_t* row;
|
|
|
|
ulint i;
|
|
|
|
|
|
|
|
/* The field values are copied in the buffers of the select node and
|
|
|
|
it is safe to use them until we fetch from select again: therefore
|
|
|
|
we can just copy the pointers */
|
|
|
|
|
|
|
|
row = node->row;
|
|
|
|
|
|
|
|
i = 0;
|
|
|
|
list_node = node->values_list;
|
|
|
|
|
|
|
|
while (list_node) {
|
|
|
|
eval_exp(list_node);
|
|
|
|
|
|
|
|
dfield = dtuple_get_nth_field(row, i);
|
|
|
|
dfield_copy_data(dfield, que_node_get_val(list_node));
|
|
|
|
|
|
|
|
i++;
|
|
|
|
list_node = que_node_get_next(list_node);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/***********************************************************//**
|
|
|
|
Gets a row to insert from the select list. */
|
|
|
|
UNIV_INLINE
|
|
|
|
void
|
|
|
|
row_ins_get_row_from_select(
|
|
|
|
/*========================*/
|
|
|
|
ins_node_t* node) /*!< in: row insert node */
|
|
|
|
{
|
|
|
|
que_node_t* list_node;
|
|
|
|
dfield_t* dfield;
|
|
|
|
dtuple_t* row;
|
|
|
|
ulint i;
|
|
|
|
|
|
|
|
/* The field values are copied in the buffers of the select node and
|
|
|
|
it is safe to use them until we fetch from select again: therefore
|
|
|
|
we can just copy the pointers */
|
|
|
|
|
|
|
|
row = node->row;
|
|
|
|
|
|
|
|
i = 0;
|
|
|
|
list_node = node->select->select_list;
|
|
|
|
|
|
|
|
while (list_node) {
|
|
|
|
dfield = dtuple_get_nth_field(row, i);
|
|
|
|
dfield_copy_data(dfield, que_node_get_val(list_node));
|
|
|
|
|
|
|
|
i++;
|
|
|
|
list_node = que_node_get_next(list_node);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/***********************************************************//**
|
|
|
|
Inserts a row to a table.
|
|
|
|
@return DB_SUCCESS if operation successfully completed, else error
|
|
|
|
code or DB_LOCK_WAIT */
|
2016-06-21 14:21:03 +02:00
|
|
|
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
2014-02-26 19:11:54 +01:00
|
|
|
dberr_t
|
|
|
|
row_ins(
|
|
|
|
/*====*/
|
|
|
|
ins_node_t* node, /*!< in: row insert node */
|
|
|
|
que_thr_t* thr) /*!< in: query thread */
|
|
|
|
{
|
|
|
|
dberr_t err;
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
DBUG_ENTER("row_ins");
|
|
|
|
|
|
|
|
DBUG_PRINT("row_ins", ("table: %s", node->table->name.m_name));
|
|
|
|
|
|
|
|
if (node->duplicate) {
|
|
|
|
thr_get_trx(thr)->error_state = DB_DUPLICATE_KEY;
|
|
|
|
}
|
|
|
|
|
2014-02-26 19:11:54 +01:00
|
|
|
if (node->state == INS_NODE_ALLOC_ROW_ID) {
|
|
|
|
|
|
|
|
row_ins_alloc_row_id_step(node);
|
|
|
|
|
|
|
|
node->index = dict_table_get_first_index(node->table);
|
|
|
|
node->entry = UT_LIST_GET_FIRST(node->entry_list);
|
|
|
|
|
|
|
|
if (node->ins_type == INS_SEARCHED) {
|
|
|
|
|
|
|
|
row_ins_get_row_from_select(node);
|
|
|
|
|
|
|
|
} else if (node->ins_type == INS_VALUES) {
|
|
|
|
|
|
|
|
row_ins_get_row_from_values(node);
|
|
|
|
}
|
|
|
|
|
|
|
|
node->state = INS_NODE_INSERT_ENTRIES;
|
|
|
|
}
|
|
|
|
|
|
|
|
ut_ad(node->state == INS_NODE_INSERT_ENTRIES);
|
|
|
|
|
|
|
|
while (node->index != NULL) {
|
|
|
|
if (node->index->type != DICT_FTS) {
|
|
|
|
err = row_ins_index_entry_step(node, thr);
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
switch (err) {
|
|
|
|
case DB_SUCCESS:
|
|
|
|
break;
|
|
|
|
case DB_DUPLICATE_KEY:
|
|
|
|
ut_ad(dict_index_is_unique(node->index));
|
|
|
|
|
|
|
|
if (thr_get_trx(thr)->isolation_level
|
|
|
|
>= TRX_ISO_REPEATABLE_READ
|
|
|
|
&& thr_get_trx(thr)->duplicates) {
|
|
|
|
|
|
|
|
/* When we are in REPLACE statement or
|
|
|
|
INSERT ... ON DUPLICATE KEY UPDATE
|
|
|
|
statement, we process all the
|
|
|
|
unique secondary indexes, even after we
|
|
|
|
encounter a duplicate error. This is
|
|
|
|
done to take necessary gap locks in
|
|
|
|
secondary indexes to block concurrent
|
|
|
|
transactions from inserting the
|
|
|
|
searched records. */
					if (!node->duplicate) {
						/* Save the first duplicate
						error and ignore any
						subsequent duplicate errors. */
						node->duplicate = node->index;
						thr_get_trx(thr)->error_state
							= DB_DUPLICATE_KEY;
					}
					break;
				}
				// fall through
			default:
				DBUG_RETURN(err);
			}
		}

		if (node->duplicate && dict_table_is_temporary(node->table)) {
			ut_ad(thr_get_trx(thr)->error_state
			      == DB_DUPLICATE_KEY);
			/* For a TEMPORARY TABLE we will not lock anything,
			so we can simply break here instead of taking
			gap locks on the other unique secondary indexes,
			pretending that we have consumed all indexes. */
			node->index = NULL;
			node->entry = NULL;
			break;
		}

		node->index = dict_table_get_next_index(node->index);
		node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry);

		DBUG_EXECUTE_IF(
			"row_ins_skip_sec",
			node->index = NULL; node->entry = NULL; break;);

		/* Skip a corrupted secondary index and its entry. */
		while (node->index && dict_index_is_corrupted(node->index)) {

			node->index = dict_table_get_next_index(node->index);
			node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry);
		}

		/* After encountering a duplicate key error, we process the
		remaining indexes just to place gap locks; no actual
		insertion will take place. These gap locks are needed
		only for unique indexes, so non-unique indexes are skipped. */
		if (node->duplicate) {
			while (node->index
			       && !dict_index_is_unique(node->index)) {

				node->index = dict_table_get_next_index(
					node->index);
				node->entry = UT_LIST_GET_NEXT(tuple_list,
							       node->entry);
			}
			thr_get_trx(thr)->error_state = DB_DUPLICATE_KEY;
		}
	}

	ut_ad(node->entry == NULL);

	thr_get_trx(thr)->error_info = node->duplicate;

	node->state = INS_NODE_ALLOC_ROW_ID;
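
	/* The node is now ready for the next row. If a duplicate was seen
	on any unique index, it is reported only now, after the remaining
	unique indexes have been gap locked. */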
	DBUG_RETURN(node->duplicate ? DB_DUPLICATE_KEY : DB_SUCCESS);
}

/***********************************************************//**
Inserts a row into a table. This is a high-level function used in SQL
execution graphs.
@return query thread to run next or NULL */
que_thr_t*
row_ins_step(
/*=========*/
	que_thr_t*	thr)	/*!< in: query thread */
{
	ins_node_t*	node;
	que_node_t*	parent;
	sel_node_t*	sel_node;
	trx_t*		trx;
	dberr_t		err;

	ut_ad(thr);

	DEBUG_SYNC_C("innodb_row_ins_step_enter");

	trx = thr_get_trx(thr);
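
	/* Start a transaction for this session if one is not already
	active; the insert is a read-write operation. */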
	trx_start_if_not_started_xa(trx, true);

	node = static_cast<ins_node_t*>(thr->run_node);

	ut_ad(que_node_get_type(node) == QUE_NODE_INSERT);

	parent = que_node_get_parent(node);
	sel_node = node->select;

	if (thr->prev_node == parent) {
		node->state = INS_NODE_SET_IX_LOCK;
	}

	/* If this is the first time this node is executed (or when
	execution resumes after a wait for the table IX lock), set an
	IX lock on the table and reset the possible select node. MySQL's
	partitioned table code may also call an insert within the same
	SQL statement AFTER it has used this table handle to do a search.
	This happens, for example, when a row update moves the row to
	another partition. In that case, we have already set the IX lock
	on the table during the search operation, and there is no need to
	set it again here. But we must write trx->id to node->trx_id_buf. */

	memset(node->trx_id_buf, 0, DATA_TRX_ID_LEN);
	trx_write_trx_id(node->trx_id_buf, trx->id);

	if (node->state == INS_NODE_SET_IX_LOCK) {

		node->state = INS_NODE_ALLOC_ROW_ID;

		/* It may be that the current session has not yet started
		its transaction, or it has been committed: */

		if (trx->id == node->trx_id) {
			/* No need to do IX-locking */

			goto same_trx;
		}

		err = lock_table(0, node->table, LOCK_IX, thr);

		DBUG_EXECUTE_IF("ib_row_ins_ix_lock_wait",
				err = DB_LOCK_WAIT;);

		if (err != DB_SUCCESS) {

			goto error_handling;
		}

		node->trx_id = trx->id;
same_trx:
		if (node->ins_type == INS_SEARCHED) {
			/* Reset the cursor */
			sel_node->state = SEL_NODE_OPEN;

			/* Fetch a row to insert */

			thr->run_node = sel_node;

			return(thr);
		}
	}

	if ((node->ins_type == INS_SEARCHED)
	    && (sel_node->state != SEL_NODE_FETCH)) {

		ut_ad(sel_node->state == SEL_NODE_NO_MORE_ROWS);

		/* No more rows to insert */
		thr->run_node = parent;

		return(thr);
	}

	/* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */

	err = row_ins(node, thr);

error_handling:
	trx->error_state = err;

	if (err != DB_SUCCESS) {
		/* err == DB_LOCK_WAIT or SQL error detected */
		return(NULL);
	}

	/* DO THE TRIGGER ACTIONS HERE */
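
	/* For INSERT ... SELECT, pass control back to the select node so
	that the next row can be fetched; for an insert of explicit values,
	the row has been inserted and control returns to the parent node. */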
	if (node->ins_type == INS_SEARCHED) {
		/* Fetch a row to insert */

		thr->run_node = sel_node;
	} else {
		thr->run_node = que_node_get_parent(node);
	}

	return(thr);
}