/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/row0row.h
General row routines

Created 4/20/1996 Heikki Tuuri
*******************************************************/

#ifndef row0row_h
#define row0row_h

#include "que0types.h"
#include "ibuf0ibuf.h"
#include "trx0types.h"
#include "mtr0mtr.h"
#include "rem0types.h"
#include "row0types.h"
#include "btr0types.h"

/*********************************************************************//**
Gets the offset of the DB_TRX_ID field, in bytes relative to the origin of
a clustered index record.
@return offset of DATA_TRX_ID */
UNIV_INLINE
ulint
row_get_trx_id_offset(
/*==================*/
	const dict_index_t*	index,	/*!< in: clustered index */
	const rec_offs*		offsets)/*!< in: record offsets */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Reads the trx id field from a clustered index record.
@return value of the field */
UNIV_INLINE
trx_id_t
row_get_rec_trx_id(
/*===============*/
	const rec_t*		rec,	/*!< in: record */
	const dict_index_t*	index,	/*!< in: clustered index */
	const rec_offs*		offsets)/*!< in: rec_get_offsets(rec, index) */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Reads the roll pointer field from a clustered index record.
@return value of the field */
UNIV_INLINE
roll_ptr_t
row_get_rec_roll_ptr(
/*=================*/
	const rec_t*		rec,	/*!< in: record */
	const dict_index_t*	index,	/*!< in: clustered index */
	const rec_offs*		offsets)/*!< in: rec_get_offsets(rec, index) */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
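
/* Illustrative usage sketch (not part of the API; variable names are
hypothetical): given a clustered index record rec and offsets computed
by the caller, e.g. via rec_get_offsets(rec, clust_index, ...):

	trx_id_t	trx_id	 = row_get_rec_trx_id(rec, clust_index, offsets);
	roll_ptr_t	roll_ptr = row_get_rec_roll_ptr(rec, clust_index, offsets);

Both helpers locate DB_TRX_ID via row_get_trx_id_offset(); DB_ROLL_PTR is
stored immediately after DB_TRX_ID in the clustered index record. */
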
/* Flags for row build type. */
#define ROW_BUILD_NORMAL	0	/*!< build index row */
#define ROW_BUILD_FOR_PURGE	1	/*!< build row for purge. */
#define ROW_BUILD_FOR_UNDO	2	/*!< build row for undo. */
#define ROW_BUILD_FOR_INSERT	3	/*!< build row for insert. */
/*****************************************************************//**
When an insert or purge to a table is performed, this function builds
the entry to be inserted into or purged from an index on the table.
@return index entry which should be inserted or purged
@retval NULL if the externally stored columns in the clustered index record
are unavailable and ext != NULL, or row is missing some needed columns. */
dtuple_t*
row_build_index_entry_low(
/*======================*/
	const dtuple_t*		row,	/*!< in: row which should be
					inserted or purged */
	const row_ext_t*	ext,	/*!< in: externally stored column
					prefixes, or NULL */
	const dict_index_t*	index,	/*!< in: index on the table */
	mem_heap_t*		heap,	/*!< in,out: memory heap from which
					the memory for the index entry
					is allocated */
	ulint			flag)	/*!< in: ROW_BUILD_NORMAL,
					ROW_BUILD_FOR_PURGE
					or ROW_BUILD_FOR_UNDO */
	MY_ATTRIBUTE((warn_unused_result, nonnull(1,3,4)));
/*****************************************************************//**
When an insert or purge to a table is performed, this function builds
the entry to be inserted into or purged from an index on the table.
@return index entry which should be inserted or purged, or NULL if the
externally stored columns in the clustered index record are
unavailable and ext != NULL */
UNIV_INLINE
dtuple_t*
row_build_index_entry(
/*==================*/
	const dtuple_t*		row,	/*!< in: row which should be
					inserted or purged */
	const row_ext_t*	ext,	/*!< in: externally stored column
					prefixes, or NULL */
	const dict_index_t*	index,	/*!< in: index on the table */
	mem_heap_t*		heap)	/*!< in,out: memory heap from which
					the memory for the index entry
					is allocated */
	MY_ATTRIBUTE((warn_unused_result, nonnull(1,3,4)));
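
/* Illustrative usage sketch (hypothetical caller and variable names):
during an insert, the tuple for each secondary index is derived from the
clustered-index row, e.g. in the insert code of row0ins.cc:

	dtuple_t*	entry = row_build_index_entry(row, ext, sec_index,
						      heap);

A NULL return means that an externally stored column needed by the index
was unavailable (and ext != NULL). */
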
/*******************************************************************//**
An inverse function to row_build_index_entry. Builds a row from a
record in a clustered index.
@return own: row built; see the NOTE below! */
dtuple_t*
row_build(
/*======*/
	ulint			type,	/*!< in: ROW_COPY_POINTERS or
					ROW_COPY_DATA; the latter
					copies also the data fields to
					heap while the first only
					places pointers to data fields
					on the index page, and thus is
					more efficient */
	const dict_index_t*	index,	/*!< in: clustered index */
	const rec_t*		rec,	/*!< in: record in the clustered
					index; NOTE: in the case
					ROW_COPY_POINTERS the data
					fields in the row will point
					directly into this record,
					therefore, the buffer page of
					this record must be at least
					s-latched and the latch held
					as long as the row dtuple is used! */
	const rec_offs*		offsets,/*!< in: rec_get_offsets(rec,index)
					or NULL, in which case this function
					will invoke rec_get_offsets() */
	const dict_table_t*	col_table,
					/*!< in: table, to check which
					externally stored columns
					occur in the ordering columns
					of an index, or NULL if
					index->table should be
					consulted instead; the user
					columns in this table should be
					the same columns as in index->table */
	const dtuple_t*		defaults,
					/*!< in: default values of
					added, changed columns, or NULL */
	const ulint*		col_map,/*!< in: mapping of old column
					numbers to new ones, or NULL */
	row_ext_t**		ext,	/*!< out, own: cache of
					externally stored column
					prefixes, or NULL */
	mem_heap_t*		heap);	/*!< in: memory heap from which
					the memory needed is allocated */
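
/* Illustrative usage sketch (hypothetical caller and variable names):
rebuilding a row from a clustered index record without copying the data
fields; the buffer page holding rec must stay at least s-latched while
the returned dtuple is in use:

	row_ext_t*	ext;
	dtuple_t*	row = row_build(ROW_COPY_POINTERS, clust_index, rec,
					offsets, NULL, NULL, NULL, &ext, heap);

Passing NULL for offsets makes row_build() invoke rec_get_offsets()
itself. */
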
/** An inverse function to row_build_index_entry. Builds a row from a
record in a clustered index, with possible indexing on ongoing
addition of new virtual columns.
@param[in]	type		ROW_COPY_POINTERS or ROW_COPY_DATA;
@param[in]	index		clustered index
@param[in]	rec		record in the clustered index
@param[in]	offsets		rec_get_offsets(rec,index) or NULL
@param[in]	col_table	table, to check which
				externally stored columns
				occur in the ordering columns
				of an index, or NULL if
				index->table should be
				consulted instead
@param[in]	defaults	default values of added, changed columns, or NULL
@param[in]	add_v		new virtual columns added
				along with new indexes
@param[in]	col_map		mapping of old column
				numbers to new ones, or NULL
@param[in]	ext		cache of externally stored column
				prefixes, or NULL
@param[in]	heap		memory heap from which
				the memory needed is allocated
@return own: row built */
dtuple_t*
row_build_w_add_vcol(
	ulint			type,
	const dict_index_t*	index,
	const rec_t*		rec,
	const rec_offs*		offsets,
	const dict_table_t*	col_table,
	const dtuple_t*		defaults,
	const dict_add_v_col_t*	add_v,
	const ulint*		col_map,
	row_ext_t**		ext,
	mem_heap_t*		heap);

/*******************************************************************//**
Converts an index record to a typed data tuple.
@return index entry built; does not set info_bits, and the data fields
in the entry will point directly to rec */
dtuple_t*
row_rec_to_index_entry_low(
/*=======================*/
	const rec_t*		rec,	/*!< in: record in the index */
	const dict_index_t*	index,	/*!< in: index */
	const rec_offs*		offsets,/*!< in: rec_get_offsets(rec, index) */
	mem_heap_t*		heap)	/*!< in: memory heap from which
					the memory needed is allocated */
	MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Converts an index record to a typed data tuple. NOTE that externally
stored (often big) fields are NOT copied to heap.
@return own: index entry built */
dtuple_t*
row_rec_to_index_entry(
/*===================*/
	const rec_t*		rec,	/*!< in: record in the index */
	const dict_index_t*	index,	/*!< in: index */
	const rec_offs*		offsets,/*!< in/out: rec_get_offsets(rec) */
	mem_heap_t*		heap)	/*!< in: memory heap from which
					the memory needed is allocated */
	MY_ATTRIBUTE((warn_unused_result));
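
/* Illustrative usage sketch (hypothetical caller and variable names):
converting a physical index record back into a typed data tuple, for
example before re-inserting the entry elsewhere:

	dtuple_t*	entry = row_rec_to_index_entry(rec, index, offsets,
						       heap);

Externally stored fields are referenced rather than copied, so rec must
remain accessible while entry is used. */
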
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record.
@return own: row reference built; see the NOTE below! */
dtuple_t*
row_build_row_ref(
/*==============*/
	ulint		type,	/*!< in: ROW_COPY_DATA, or ROW_COPY_POINTERS:
				the former copies also the data fields to
				heap, whereas the latter only places pointers
				to data fields on the index page */
	dict_index_t*	index,	/*!< in: secondary index */
	const rec_t*	rec,	/*!< in: record in the index;
				NOTE: in the case ROW_COPY_POINTERS
				the data fields in the row will point
				directly into this record, therefore,
				the buffer page of this record must be
				at least s-latched and the latch held
				as long as the row reference is used! */
	mem_heap_t*	heap)	/*!< in: memory heap from which the memory
				needed is allocated */
	MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
void
row_build_row_ref_in_tuple(
/*=======================*/
	dtuple_t*		ref,	/*!< in/out: row reference built;
					see the NOTE below! */
	const rec_t*		rec,	/*!< in: record in the index;
					NOTE: the data fields in ref
					will point directly into this
					record, therefore, the buffer
					page of this record must be at
					least s-latched and the latch
					held as long as the row
					reference is used! */
	const dict_index_t*	index,	/*!< in: secondary index */
	rec_offs*		offsets)/*!< in: rec_get_offsets(rec, index)
					or NULL */
	MY_ATTRIBUTE((nonnull(1,2,3)));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
UNIV_INLINE
void
row_build_row_ref_fast(
/*===================*/
	dtuple_t*	ref,	/*!< in/out: typed data tuple where the
				reference is built */
	const ulint*	map,	/*!< in: array of field numbers in rec
				telling how ref should be built from
				the fields of rec */
	const rec_t*	rec,	/*!< in: secondary index record;
				must be preserved while ref is used, as we do
				not copy field values to heap */
	const rec_offs*	offsets);/*!< in: array returned by rec_get_offsets() */
/***************************************************************//**
Searches the clustered index record for a row, if we have the row
reference.
@return TRUE if found */
ibool
row_search_on_row_ref(
/*==================*/
	btr_pcur_t*		pcur,	/*!< out: persistent cursor, which must
					be closed by the caller */
	ulint			mode,	/*!< in: BTR_MODIFY_LEAF, ... */
	const dict_table_t*	table,	/*!< in: table */
	const dtuple_t*		ref,	/*!< in: row reference */
	mtr_t*			mtr)	/*!< in/out: mtr */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
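
/* Illustrative usage sketch (hypothetical caller and variable names):
positioning a persistent cursor on the clustered index record that
corresponds to a secondary index record sec_rec:

	dtuple_t*	ref = row_build_row_ref(ROW_COPY_POINTERS, sec_index,
						sec_rec, heap);
	btr_pcur_t	pcur;
	ibool		found = row_search_on_row_ref(&pcur, BTR_SEARCH_LEAF,
						      table, ref, &mtr);

On success, btr_pcur_get_rec(&pcur) is the clustered index record.
BTR_SEARCH_LEAF is one possible latch mode; the caller must close pcur. */
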
/*********************************************************************//**
Fetches the clustered index record for a secondary index record. The latches
on the secondary index record are preserved.
@return record or NULL, if no record found */
rec_t*
row_get_clust_rec(
/*==============*/
	ulint		mode,	/*!< in: BTR_MODIFY_LEAF, ... */
	const rec_t*	rec,	/*!< in: record in a secondary index */
	dict_index_t*	index,	/*!< in: secondary index */
	dict_index_t**	clust_index,/*!< out: clustered index */
	mtr_t*		mtr)	/*!< in: mtr */
	MY_ATTRIBUTE((nonnull, warn_unused_result));

/** Parse the integer data from specified data, which could be
DATA_INT, DATA_FLOAT or DATA_DOUBLE. If the value is less than 0
and the type is not unsigned then we reset the value to 0
@param[in]	data		data to read
@param[in]	len		length of data
@param[in]	mtype		mtype of data
@param[in]	unsigned_type	if the data is unsigned
@return the integer value from the data */
inline
ib_uint64_t
row_parse_int(
	const byte*	data,
	ulint		len,
	ulint		mtype,
	bool		unsigned_type);
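
/* Illustrative usage sketch (hypothetical caller; dfield is assumed to
hold the value of a DATA_INT AUTO_INCREMENT column):

	ib_uint64_t	value = row_parse_int(
		static_cast<const byte*>(dfield_get_data(dfield)),
		dfield_get_len(dfield), DATA_INT, true);

For signed types, a negative value is reset to 0 by row_parse_int(). */
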
/** Result of row_search_index_entry */
enum row_search_result {
	ROW_FOUND = 0,	/*!< the record was found */
	ROW_NOT_FOUND,	/*!< record not found */
	ROW_BUFFERED,	/*!< one of BTR_INSERT, BTR_DELETE, or
			BTR_DELETE_MARK was specified, the
			secondary index leaf page was not in
			the buffer pool, and the operation was
			enqueued in the insert/delete buffer */
	ROW_NOT_DELETED_REF	/*!< BTR_DELETE was specified, and
			row_purge_poss_sec() failed */
};

/***************************************************************//**
Searches an index record.
@return whether the record was found or buffered */
enum row_search_result
row_search_index_entry(
/*===================*/
	dict_index_t*	index,	/*!< in: index */
	const dtuple_t*	entry,	/*!< in: index entry */
	ulint		mode,	/*!< in: BTR_MODIFY_LEAF, ... */
	btr_pcur_t*	pcur,	/*!< in/out: persistent cursor, which must
				be closed by the caller */
	mtr_t*		mtr)	/*!< in: mtr */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
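
/* Illustrative usage sketch (hypothetical caller and variable names):

	enum row_search_result	result = row_search_index_entry(
		index, entry, BTR_MODIFY_LEAF, &pcur, &mtr);

ROW_FOUND means that btr_pcur_get_rec(&pcur) is positioned on the matching
record; ROW_BUFFERED is only possible when BTR_INSERT, BTR_DELETE or
BTR_DELETE_MARK was included in the search mode. */
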
#define ROW_COPY_DATA		1
#define ROW_COPY_POINTERS	2

/* The allowed latching order of index records is the following:
(1) a secondary index record ->
(2) the clustered index record ->
(3) rollback segment data for the clustered index record. */

/*******************************************************************//**
Formats the raw data in "data" (in InnoDB on-disk format) using
"dict_field" and writes the result to "buf".
Not more than "buf_size" bytes are written to "buf".
The result is always NUL-terminated (provided buf_size is positive) and the
number of bytes that were written to "buf" is returned (including the
terminating NUL).
@return number of bytes that were written */
ulint
row_raw_format(
/*===========*/
	const char*		data,		/*!< in: raw data */
	ulint			data_len,	/*!< in: raw data length
						in bytes */
	const dict_field_t*	dict_field,	/*!< in: index field */
	char*			buf,		/*!< out: output buffer */
	ulint			buf_size)	/*!< in: output buffer size
						in bytes */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
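
/* Illustrative usage sketch (hypothetical caller and variable names):
formatting a column value for diagnostic output, e.g. when reporting a
record in an error message:

	char	buf[128];
	ulint	len = row_raw_format(field_data, field_len, dict_field,
				     buf, sizeof buf);

The output is always NUL-terminated, and len includes the terminating
NUL byte. */
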
/** Prepare to start a mini-transaction to modify an index.
@param[in,out]	mtr		mini-transaction
@param[in,out]	index		possibly secondary index
@param[in]	pessimistic	whether this is a pessimistic operation */
inline
void
row_mtr_start(mtr_t* mtr, dict_index_t* index, bool pessimistic)
{
	mtr->start();

	switch (index->table->space_id) {
	case IBUF_SPACE_ID:
		if (pessimistic
		    && !(index->type & (DICT_UNIQUE | DICT_SPATIAL))) {
			ibuf_free_excess_pages();
		}
		break;
	case SRV_TMP_SPACE_ID:
		mtr->set_log_mode(MTR_LOG_NO_REDO);
		break;
	default:
		index->set_modified(*mtr);
		break;
	}

	log_free_check();
}
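
/* Illustrative usage sketch (hypothetical caller, e.g. an undo or purge
routine modifying a secondary index):

	mtr_t	mtr;
	row_mtr_start(&mtr, index, true);
	...	position a cursor on the index and modify it
	mtr.commit();

For a pessimistic operation on a non-unique, non-spatial secondary index
in the system tablespace, row_mtr_start() also calls
ibuf_free_excess_pages() before any page latches are acquired. */
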
#include "row0row.ic"

#endif