Commit 0b47c126e3 (mirror of https://github.com/MariaDB/server.git)
The approach to handling corruption that was chosen by Oracle in
commit 177d8b0c12
is not really useful. Not only did it fail to prevent InnoDB from
crashing; it also made things worse by blocking attempts to rescue
data from, or to rebuild, a partially readable table.
We will try to prevent crashes in a different way: by propagating
errors up the call stack. We will never mark the clustered index
persistently corrupted, so that data recovery may be attempted by
reading from the table, or by rebuilding the table.
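For illustration, a minimal sketch of this error-propagation style
(the enum and function names below are hypothetical, not actual
server code):

  enum sketch_dberr { SKETCH_SUCCESS, SKETCH_CORRUPTION };

  static sketch_dberr sketch_read_page(bool page_ok)
  {
    /* Previously, an inconsistency here could hit an assertion and crash. */
    return page_ok ? SKETCH_SUCCESS : SKETCH_CORRUPTION;
  }

  static sketch_dberr sketch_update_index(bool page_ok)
  {
    sketch_dberr err= sketch_read_page(page_ok);
    if (err != SKETCH_SUCCESS)
      return err;       /* propagate the failure up the call stack */
    /* ... perform the update ... */
    return SKETCH_SUCCESS;
  }
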
This should also fix MDEV-13680 (crash on btr_page_alloc() failure);
it was extensively tested with innodb_file_per_table=0 and a
non-autoextend system tablespace.
We should now avoid crashes in many cases, such as when a page
cannot be read or allocated, or an inconsistency is detected when
attempting to update multiple pages. We will not crash on a double-free,
such as during the recovery of DDL operations in the system tablespace
in case something was corrupted.
Crashes on corrupted data are still possible. The fault injection mechanism
that is introduced in the subsequent commit may help catch more of them.
buf_page_import_corrupt_failure: Remove the fault injection, and instead
corrupt some pages using Perl code in the tests.
btr_cur_pessimistic_insert(): Always reserve extents (except for the
change buffer), in order to prevent a subsequent allocation failure.
btr_pcur_open_at_rnd_pos(): Merged into its only caller, ibuf_merge_pages().
btr_assert_not_corrupted(), btr_corruption_report(): Remove.
Similar checks are already part of btr_block_get().
FSEG_MAGIC_N_BYTES: Replaces FSEG_MAGIC_N_VALUE.
dict_hdr_get(), trx_rsegf_get_new(), trx_undo_page_get(),
trx_undo_page_get_s_latched(): Replaced with error-checking calls.
trx_rseg_t::get(mtr_t*): Replaces trx_rsegf_get().
trx_rseg_header_create(): Let the caller update the TRX_SYS page if needed.
trx_sys_create_sys_pages(): Merged with trx_sysf_create().
dict_check_tablespaces_and_store_max_id(): Do not access
DICT_HDR_MAX_SPACE_ID, because it was already recovered in dict_boot().
Merge dict_check_sys_tables() with this function.
dir_pathname(): Replaces os_file_make_new_pathname().
row_undo_ins_remove_sec(): Do not modify the undo page by adding
a terminating NUL byte to the record.
btr_decryption_failed(): Report decryption failures.
dict_set_corrupted_by_space(), dict_set_encrypted_by_space(),
dict_set_corrupted_index_cache_only(): Remove.
dict_set_corrupted(): Remove the constant parameter dict_locked=false.
Never flag the clustered index corrupted in SYS_INDEXES, because
that would deny further access to the table. It might be possible to
repair the table by executing ALTER TABLE or OPTIMIZE TABLE, in case
no B-tree leaf page is corrupted.
dict_table_skip_corrupt_index(), dict_table_next_uncorrupted_index(),
row_purge_skip_uncommitted_virtual_index(): Remove, and refactor
the callers to read dict_index_t::type only once.
dict_table_is_corrupted(): Remove.
dict_index_t::is_btree(): Determine if the index is a valid B-tree.
BUF_GET_NO_LATCH, BUF_EVICT_IF_IN_POOL: Remove.
UNIV_BTR_DEBUG: Remove. Any inconsistency will no longer trigger an
assertion failure; instead, an error code will be returned.
buf_corrupt_page_release(): Replaced with a direct call to
buf_pool.corrupted_evict().
fil_invalid_page_access_msg(): Never crash on an invalid read;
let the caller of buf_page_get_gen() decide.
btr_pcur_t::restore_position(): Propagate failure status to the caller
by returning CORRUPTED.
opt_search_plan_for_table(): Simplify the code.
row_purge_del_mark(), row_purge_upd_exist_or_extern_func(),
row_undo_ins_remove_sec_rec(), row_undo_mod_upd_del_sec(),
row_undo_mod_del_mark_sec(): Avoid mem_heap_create()/mem_heap_free()
when no secondary indexes exist.
row_undo_mod_upd_exist_sec(): Simplify the code.
row_upd_clust_step(), dict_load_table_one(): Return DB_TABLE_CORRUPT
if the clustered index (and therefore the table) is corrupted, similar
to what we do in row_insert_for_mysql().
fut_get_ptr(): Replace with buf_page_get_gen() calls.
buf_page_get_gen(): Return nullptr and *err=DB_CORRUPTION
if the page is marked as freed. For modes other than
BUF_GET_POSSIBLY_FREED or BUF_PEEK_IF_IN_POOL, this will
trigger a debug assertion failure. For BUF_GET_POSSIBLY_FREED,
we will return nullptr for freed pages, so that the callers
can be simplified. The purge of transaction history will be
a new user of BUF_GET_POSSIBLY_FREED, to avoid crashes on
corrupted data.
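As an example of the resulting caller pattern, flst_add_last() in
fut/fut0lst.cc below requests the last list node with
BUF_GET_POSSIBLY_FREED and simply hands any failure back to its own
caller (adapted excerpt):

  dberr_t err;
  buf_block_t *cur=
    buf_page_get_gen(page_id_t{add->page.id().space(), addr.page},
                     add->zip_size(), RW_SX_LATCH, nullptr,
                     BUF_GET_POSSIBLY_FREED, mtr, &err);
  if (!cur)
    return err;   /* nullptr: the page could not be read, or was freed */
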
buf_page_get_low(): Never crash on a corrupted page, but simply
return nullptr.
fseg_page_is_allocated(): Replaces fseg_page_is_free().
fts_drop_common_tables(): Return an error if the transaction
was rolled back.
fil_space_t::set_corrupted(): Report a tablespace as corrupted if
it was not reported already.
fil_space_t::io(): Invoke fil_space_t::set_corrupted() to report
out-of-bounds page access or other errors.
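A minimal sketch of such report-once semantics (illustrative type and
member names only, not the actual fil_space_t interface):

  #include <atomic>
  #include <cstdio>

  struct sketch_space
  {
    unsigned id;
    std::atomic<bool> corruption_reported{false};

    void set_corrupted()
    {
      /* Only the first caller emits the message. */
      if (!corruption_reported.exchange(true, std::memory_order_relaxed))
        std::fprintf(stderr, "InnoDB: tablespace %u is corrupted\n", id);
    }
  };
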
Clean up mtr_t::page_lock().
buf_page_get_low(): Validate the page identifier (to check for
recently read corrupted pages) after acquiring the page latch.
buf_page_t::read_complete(): Flag uninitialized (all-zero) pages
with DB_FAIL. Return DB_PAGE_CORRUPTED on page number mismatch.
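A simplified illustration of that classification (a hypothetical
helper, not the actual buf_page_t::read_complete(); it only relies on
the page number being stored big-endian in the 4 bytes at
FIL_PAGE_OFFSET, i.e. byte offset 4 of the page frame):

  #include <cstddef>
  #include <cstdint>

  enum sketch_read_result { SKETCH_OK, SKETCH_FAIL, SKETCH_PAGE_CORRUPTED };

  static sketch_read_result sketch_classify_read(const uint8_t *frame,
                                                 size_t size,
                                                 uint32_t expected_page_no)
  {
    bool all_zero= true;
    for (size_t i= 0; i < size; i++)
      if (frame[i]) { all_zero= false; break; }
    if (all_zero)
      return SKETCH_FAIL;               /* uninitialized (all-zero) page */

    /* the stored page number must match the expected one */
    const uint32_t stored= uint32_t(frame[4]) << 24 | uint32_t(frame[5]) << 16 |
                           uint32_t(frame[6]) << 8 | uint32_t(frame[7]);
    return stored == expected_page_no ? SKETCH_OK : SKETCH_PAGE_CORRUPTED;
  }
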
mtr_t::defer_drop_ahi(): Renamed from mtr_defer_drop_ahi().
recv_sys_t::free_corrupted_page(): Only set_corrupt_fs()
if any log records exist for the page. We do not mind if read-ahead
produces corrupted (or all-zero) pages that were not actually needed
during recovery.
recv_recover_page(): Return whether the operation succeeded.
recv_sys_t::recover_low(): Simplify the logic. Check for recovery error.
Thanks to Matthias Leich for testing this extensively and to the
authors of https://rr-project.org for making it easy to diagnose
and fix any failures that were found during the testing.
fut/fut0lst.cc · 416 lines · 17 KiB · C++
/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2019, 2022, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/******************************************************************//**
@file fut/fut0lst.cc
File-based list utilities

Created 11/28/1995 Heikki Tuuri
***********************************************************************/

#include "fut0lst.h"
#include "buf0buf.h"
#include "page0page.h"


/** Write a file address.
@param[in]      block   file page
@param[in,out]  faddr   file address location
@param[in]      page    page number
@param[in]      boffset byte offset
@param[in,out]  mtr     mini-transaction */
static void flst_write_addr(const buf_block_t& block, byte *faddr,
                            uint32_t page, uint16_t boffset, mtr_t* mtr)
{
  ut_ad(mtr->memo_contains_page_flagged(faddr, MTR_MEMO_PAGE_X_FIX |
                                        MTR_MEMO_PAGE_SX_FIX));
  ut_a(page == FIL_NULL || boffset >= FIL_PAGE_DATA);
  ut_a(ut_align_offset(faddr, srv_page_size) >= FIL_PAGE_DATA);

  static_assert(FIL_ADDR_PAGE == 0, "compatibility");
  static_assert(FIL_ADDR_BYTE == 4, "compatibility");
  static_assert(FIL_ADDR_SIZE == 6, "compatibility");

  const bool same_page= mach_read_from_4(faddr + FIL_ADDR_PAGE) == page;
  const bool same_offset= mach_read_from_2(faddr + FIL_ADDR_BYTE) == boffset;
  if (same_page)
  {
    if (!same_offset)
      mtr->write<2>(block, faddr + FIL_ADDR_BYTE, boffset);
    return;
  }
  if (same_offset)
    mtr->write<4>(block, faddr + FIL_ADDR_PAGE, page);
  else
  {
    alignas(4) byte fil_addr[6];
    mach_write_to_4(fil_addr + FIL_ADDR_PAGE, page);
    mach_write_to_2(fil_addr + FIL_ADDR_BYTE, boffset);
    mtr->memcpy(block, faddr + FIL_ADDR_PAGE, fil_addr, 6);
  }
}

/** Write 2 null file addresses.
@param[in]      b       file page
@param[in,out]  addr    file address to be zeroed out
@param[in,out]  mtr     mini-transaction */
static void flst_zero_both(const buf_block_t& b, byte *addr, mtr_t *mtr)
{
  if (mach_read_from_4(addr + FIL_ADDR_PAGE) != FIL_NULL)
    mtr->memset(&b, ulint(addr - b.page.frame) + FIL_ADDR_PAGE, 4, 0xff);
  mtr->write<2,mtr_t::MAYBE_NOP>(b, addr + FIL_ADDR_BYTE, 0U);
  /* Initialize the other address by (MEMMOVE|0x80,offset,FIL_ADDR_SIZE,source)
  which is 4 bytes, or less than FIL_ADDR_SIZE. */
  memcpy(addr + FIL_ADDR_SIZE, addr, FIL_ADDR_SIZE);
  const uint16_t boffset= page_offset(addr);
  mtr->memmove(b, boffset + FIL_ADDR_SIZE, boffset, FIL_ADDR_SIZE);
}

/** Add a node to an empty list. */
static void flst_add_to_empty(buf_block_t *base, uint16_t boffset,
                              buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
  ut_ad(base != add || boffset != aoffset);
  ut_ad(boffset < base->physical_size());
  ut_ad(aoffset < add->physical_size());
  ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));

  ut_ad(!mach_read_from_4(base->page.frame + boffset + FLST_LEN));
  mtr->write<1>(*base, base->page.frame + boffset + (FLST_LEN + 3), 1U);
  /* Update first and last fields of base node */
  flst_write_addr(*base, base->page.frame + boffset + FLST_FIRST,
                  add->page.id().page_no(), aoffset, mtr);
  memcpy(base->page.frame + boffset + FLST_LAST,
         base->page.frame + boffset + FLST_FIRST,
         FIL_ADDR_SIZE);
  /* Initialize FLST_LAST by (MEMMOVE|0x80,offset,FIL_ADDR_SIZE,source)
  which is 4 bytes, or less than FIL_ADDR_SIZE. */
  mtr->memmove(*base, boffset + FLST_LAST, boffset + FLST_FIRST,
               FIL_ADDR_SIZE);

  /* Set prev and next fields of node to add */
  static_assert(FLST_NEXT == FLST_PREV + FIL_ADDR_SIZE, "compatibility");
  flst_zero_both(*add, add->page.frame + aoffset + FLST_PREV, mtr);
}

/** Insert a node after another one.
@param[in,out]  base    base node block
@param[in]      boffset byte offset of the base node
@param[in,out]  cur     insert position block
@param[in]      coffset byte offset of the insert position
@param[in,out]  add     block to be added
@param[in]      aoffset byte offset of the block to be added
@param[in,out]  mtr     mini-transaction */
static dberr_t flst_insert_after(buf_block_t *base, uint16_t boffset,
                                 buf_block_t *cur, uint16_t coffset,
                                 buf_block_t *add, uint16_t aoffset,
                                 mtr_t *mtr)
{
  ut_ad(base != cur || boffset != coffset);
  ut_ad(base != add || boffset != aoffset);
  ut_ad(cur != add || coffset != aoffset);
  ut_ad(boffset < base->physical_size());
  ut_ad(coffset < cur->physical_size());
  ut_ad(aoffset < add->physical_size());
  ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(cur, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));

  fil_addr_t next_addr= flst_get_next_addr(cur->page.frame + coffset);

  flst_write_addr(*add, add->page.frame + aoffset + FLST_PREV,
                  cur->page.id().page_no(), coffset, mtr);
  flst_write_addr(*add, add->page.frame + aoffset + FLST_NEXT,
                  next_addr.page, next_addr.boffset, mtr);

  dberr_t err= DB_SUCCESS;

  if (next_addr.page == FIL_NULL)
    flst_write_addr(*base, base->page.frame + boffset + FLST_LAST,
                    add->page.id().page_no(), aoffset, mtr);
  else if (buf_block_t *block=
           buf_page_get_gen(page_id_t{add->page.id().space(), next_addr.page},
                            add->zip_size(), RW_SX_LATCH, nullptr,
                            BUF_GET_POSSIBLY_FREED, mtr, &err))
    flst_write_addr(*block, block->page.frame +
                    next_addr.boffset + FLST_PREV,
                    add->page.id().page_no(), aoffset, mtr);

  flst_write_addr(*cur, cur->page.frame + coffset + FLST_NEXT,
                  add->page.id().page_no(), aoffset, mtr);

  byte *len= &base->page.frame[boffset + FLST_LEN];
  mtr->write<4>(*base, len, mach_read_from_4(len) + 1);
  return err;
}

/** Insert a node before another one.
@param[in,out]  base    base node block
@param[in]      boffset byte offset of the base node
@param[in,out]  cur     insert position block
@param[in]      coffset byte offset of the insert position
@param[in,out]  add     block to be added
@param[in]      aoffset byte offset of the block to be added
@param[in,out]  mtr     mini-transaction
@return error code */
static dberr_t flst_insert_before(buf_block_t *base, uint16_t boffset,
                                  buf_block_t *cur, uint16_t coffset,
                                  buf_block_t *add, uint16_t aoffset,
                                  mtr_t *mtr)
{
  ut_ad(base != cur || boffset != coffset);
  ut_ad(base != add || boffset != aoffset);
  ut_ad(cur != add || coffset != aoffset);
  ut_ad(boffset < base->physical_size());
  ut_ad(coffset < cur->physical_size());
  ut_ad(aoffset < add->physical_size());
  ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(cur, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));

  fil_addr_t prev_addr= flst_get_prev_addr(cur->page.frame + coffset);

  flst_write_addr(*add, add->page.frame + aoffset + FLST_PREV,
                  prev_addr.page, prev_addr.boffset, mtr);
  flst_write_addr(*add, add->page.frame + aoffset + FLST_NEXT,
                  cur->page.id().page_no(), coffset, mtr);

  dberr_t err= DB_SUCCESS;

  if (prev_addr.page == FIL_NULL)
    flst_write_addr(*base, base->page.frame + boffset + FLST_FIRST,
                    add->page.id().page_no(), aoffset, mtr);
  else if (buf_block_t *block=
           buf_page_get_gen(page_id_t{add->page.id().space(), prev_addr.page},
                            add->zip_size(), RW_SX_LATCH, nullptr,
                            BUF_GET_POSSIBLY_FREED, mtr, &err))
    flst_write_addr(*block, block->page.frame +
                    prev_addr.boffset + FLST_NEXT,
                    add->page.id().page_no(), aoffset, mtr);

  flst_write_addr(*cur, cur->page.frame + coffset + FLST_PREV,
                  add->page.id().page_no(), aoffset, mtr);

  byte *len= &base->page.frame[boffset + FLST_LEN];
  mtr->write<4>(*base, len, mach_read_from_4(len) + 1);
  return err;
}

/** Initialize a list base node.
@param[in]      block   file page
@param[in,out]  base    base node
@param[in,out]  mtr     mini-transaction */
void flst_init(const buf_block_t& block, byte *base, mtr_t *mtr)
{
  ut_ad(mtr->memo_contains_page_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                        MTR_MEMO_PAGE_SX_FIX));
  mtr->write<4,mtr_t::MAYBE_NOP>(block, base + FLST_LEN, 0U);
  static_assert(FLST_LAST == FLST_FIRST + FIL_ADDR_SIZE, "compatibility");
  flst_zero_both(block, base + FLST_FIRST, mtr);
}

/** Append a file list node to a list.
@param[in,out]  base    base node block
@param[in]      boffset byte offset of the base node
@param[in,out]  add     block to be added
@param[in]      aoffset byte offset of the node to be added
@param[in,out]  mtr     mini-transaction */
dberr_t flst_add_last(buf_block_t *base, uint16_t boffset,
                      buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
  ut_ad(base != add || boffset != aoffset);
  ut_ad(boffset < base->physical_size());
  ut_ad(aoffset < add->physical_size());
  ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  if (!flst_get_len(base->page.frame + boffset))
  {
    flst_add_to_empty(base, boffset, add, aoffset, mtr);
    return DB_SUCCESS;
  }
  else
  {
    fil_addr_t addr= flst_get_last(base->page.frame + boffset);
    buf_block_t *cur= add;
    dberr_t err;
    if (addr.page != add->page.id().page_no() &&
        !(cur= buf_page_get_gen(page_id_t{add->page.id().space(), addr.page},
                                add->zip_size(), RW_SX_LATCH, nullptr,
                                BUF_GET_POSSIBLY_FREED, mtr, &err)))
      return err;
    return flst_insert_after(base, boffset, cur, addr.boffset,
                             add, aoffset, mtr);
  }
}

/** Prepend a file list node to a list.
@param[in,out]  base    base node block
@param[in]      boffset byte offset of the base node
@param[in,out]  add     block to be added
@param[in]      aoffset byte offset of the node to be added
@param[in,out]  mtr     mini-transaction
@return error code */
dberr_t flst_add_first(buf_block_t *base, uint16_t boffset,
                       buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
  ut_ad(base != add || boffset != aoffset);
  ut_ad(boffset < base->physical_size());
  ut_ad(aoffset < add->physical_size());
  ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));

  if (!flst_get_len(base->page.frame + boffset))
  {
    flst_add_to_empty(base, boffset, add, aoffset, mtr);
    return DB_SUCCESS;
  }
  else
  {
    fil_addr_t addr= flst_get_first(base->page.frame + boffset);
    buf_block_t *cur= add;
    dberr_t err;
    if (addr.page != add->page.id().page_no() &&
        !(cur= buf_page_get_gen(page_id_t{add->page.id().space(), addr.page},
                                add->zip_size(), RW_SX_LATCH, nullptr,
                                BUF_GET_POSSIBLY_FREED, mtr, &err)))
      return err;
    return flst_insert_before(base, boffset, cur, addr.boffset,
                              add, aoffset, mtr);
  }
}

/** Remove a file list node.
@param[in,out]  base    base node block
@param[in]      boffset byte offset of the base node
@param[in,out]  cur     block to be removed
@param[in]      coffset byte offset of the current record to be removed
@param[in,out]  mtr     mini-transaction
@return error code */
dberr_t flst_remove(buf_block_t *base, uint16_t boffset,
                    buf_block_t *cur, uint16_t coffset, mtr_t *mtr)
{
  ut_ad(boffset < base->physical_size());
  ut_ad(coffset < cur->physical_size());
  ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));
  ut_ad(mtr->memo_contains_flagged(cur, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));

  const fil_addr_t prev_addr= flst_get_prev_addr(cur->page.frame + coffset);
  const fil_addr_t next_addr= flst_get_next_addr(cur->page.frame + coffset);
  dberr_t err= DB_SUCCESS;

  if (prev_addr.page == FIL_NULL)
    flst_write_addr(*base, base->page.frame + boffset + FLST_FIRST,
                    next_addr.page, next_addr.boffset, mtr);
  else
  {
    buf_block_t *b= cur;
    if (prev_addr.page == b->page.id().page_no() ||
        (b= buf_page_get_gen(page_id_t(b->page.id().space(), prev_addr.page),
                             b->zip_size(), RW_SX_LATCH, nullptr,
                             BUF_GET_POSSIBLY_FREED, mtr, &err)))
      flst_write_addr(*b, b->page.frame + prev_addr.boffset + FLST_NEXT,
                      next_addr.page, next_addr.boffset, mtr);
  }

  if (next_addr.page == FIL_NULL)
    flst_write_addr(*base, base->page.frame + boffset + FLST_LAST,
                    prev_addr.page, prev_addr.boffset, mtr);
  else
  {
    dberr_t err2;
    if (next_addr.page == cur->page.id().page_no() ||
        (cur= buf_page_get_gen(page_id_t(cur->page.id().space(),
                                         next_addr.page),
                               cur->zip_size(), RW_SX_LATCH, nullptr,
                               BUF_GET_POSSIBLY_FREED, mtr, &err2)))
      flst_write_addr(*cur, cur->page.frame + next_addr.boffset + FLST_PREV,
                      prev_addr.page, prev_addr.boffset, mtr);
    else if (err == DB_SUCCESS)
      err= err2;
  }

  byte *len= &base->page.frame[boffset + FLST_LEN];
  if (UNIV_UNLIKELY(!mach_read_from_4(len)))
    return DB_CORRUPTION;
  mtr->write<4>(*base, len, mach_read_from_4(len) - 1);
  return err;
}

#ifdef UNIV_DEBUG
/** Validate a file-based list. */
void flst_validate(const buf_block_t *base, uint16_t boffset, mtr_t *mtr)
{
  ut_ad(boffset < base->physical_size());
  ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
                                   MTR_MEMO_PAGE_SX_FIX));

  /* We use two mini-transaction handles: the first is used to lock
  the base node, and prevent other threads from modifying the list.
  The second is used to traverse the list. We cannot run the second
  mtr without committing it at times, because if the list is long,
  the x-locked pages could fill the buffer, resulting in a deadlock. */
  mtr_t mtr2;

  const uint32_t len= flst_get_len(base->page.frame + boffset);
  fil_addr_t addr= flst_get_first(base->page.frame + boffset);

  for (uint32_t i= len; i--; )
  {
    mtr2.start();
    const buf_block_t *b=
      buf_page_get_gen(page_id_t(base->page.id().space(), addr.page),
                       base->zip_size(), RW_SX_LATCH, nullptr, BUF_GET, mtr);
    ut_ad(b);
    addr= flst_get_next_addr(b->page.frame + addr.boffset);
    mtr2.commit();
  }

  ut_ad(addr.page == FIL_NULL);

  addr= flst_get_last(base->page.frame + boffset);

  for (uint32_t i= len; i--; )
  {
    mtr2.start();
    const buf_block_t *b=
      buf_page_get_gen(page_id_t(base->page.id().space(), addr.page),
                       base->zip_size(), RW_SX_LATCH, nullptr, BUF_GET, mtr);
    ut_ad(b);
    addr= flst_get_prev_addr(b->page.frame + addr.boffset);
    mtr2.commit();
  }

  ut_ad(addr.page == FIL_NULL);
}
#endif