MDEV-32175: Reduce page_align(), page_offset() calls

When srv_page_size and innodb_page_size were introduced,
the functions page_align() and page_offset() got more expensive.
Let us try to replace such calls with simpler pointer arithmetic
with respect to the buffer page frame.
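For illustration, a minimal sketch of the difference (block and rec are
hypothetical locals; page_align() and page_offset() are defined as in one
of the header hunks below):

/* Deriving the frame from a record pointer must mask with the
runtime-configurable srv_page_size: */
const page_t *page= page_align(rec);   /* ut_align_down(rec, srv_page_size) */
uint16_t offs= page_offset(rec);       /* ut_align_offset(rec, srv_page_size) */
/* When the buf_block_t is already at hand, plain pointer arithmetic
against the frame suffices: */
const page_t *frame= block->page.frame;
uintptr_t o= uintptr_t(rec - frame);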

page_rec_get_next_non_del_marked(): Add the page frame as a parameter,
and turn the function into a template<bool comp>.
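In dict_stats_scan_page(), the two instantiations are selected once via a
function pointer; a condensed sketch based on the hunk below (n_core and
page are locals of that function):

const rec_t *(*get_next)(const page_t*, const rec_t*)=
  !n_core || srv_stats_include_delete_marked
  ? (page_is_comp(page) ? page_rec_next_get<true> : page_rec_next_get<false>)
  : page_is_comp(page)
  ? page_rec_get_next_non_del_marked<true>
  : page_rec_get_next_non_del_marked<false>;
rec= get_next(page, page_get_infimum_rec(page));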

page_rec_next_get(): A more efficient variant of page_rec_get_next(),
with template<bool comp> and const page_t* parameters.
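Because the template argument must be a compile-time constant, callers
branch once on page_is_comp() and then stay on the chosen code path; the
recurring pattern in this commit looks like this (sketch):

const page_t *page= block->page.frame;
const rec_t *rec= page_is_comp(page)
  ? page_rec_next_get<true>(page, page + PAGE_NEW_INFIMUM)
  : page_rec_next_get<false>(page, page + PAGE_OLD_INFIMUM);
if (UNIV_UNLIKELY(!rec))
  return nullptr; /* the next-record link is corrupted */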

lock_get_heap_no(): Replaces page_rec_get_heap_no() outside debug checks.
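Outside debug code the replacement is mechanical, for example in
lock_rec_restore_from_page_infimum():

/* before */ const ulint heap_no= page_rec_get_heap_no(rec);
/* after */  const ulint heap_no= lock_get_heap_no(block, rec);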

fseg_free_step(), fseg_free_step_not_header(): Take the header block
and the byte offset of the segment header within it as parameters.
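Callers pass the block together with the byte offset of the segment header
within its frame, as in btr_free_root() below:

/* Free the entire segment in small steps. */
while (!fseg_free_step(block, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr));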

Reviewed by: Vladislav Lesin
Marko Mäkelä 2024-11-21 11:01:30 +02:00
commit 895cd553a3
35 changed files with 744 additions and 566 deletions


@ -908,26 +908,31 @@ btr_page_get_father_block(
mtr_t* mtr, /*!< in: mtr */
btr_cur_t* cursor) /*!< out: cursor on node pointer record,
its page x-latched */
noexcept
{
rec_t *rec=
page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame));
const page_t *page= btr_cur_get_page(cursor);
const rec_t *rec= page_is_comp(page)
? page_rec_next_get<true>(page, page + PAGE_NEW_INFIMUM)
: page_rec_next_get<false>(page, page + PAGE_OLD_INFIMUM);
if (UNIV_UNLIKELY(!rec))
return nullptr;
cursor->page_cur.rec= rec;
cursor->page_cur.rec= const_cast<rec_t*>(rec);
return btr_page_get_parent(offsets, heap, cursor, mtr);
}
/** Seek to the parent page of a B-tree page.
@param[in,out] mtr mini-transaction
@param[in,out] cursor cursor pointing to the x-latched parent page
@param mtr mini-transaction
@param cursor cursor pointing to the x-latched parent page
@return whether the cursor was successfully positioned */
bool btr_page_get_father(mtr_t* mtr, btr_cur_t* cursor)
bool btr_page_get_father(mtr_t *mtr, btr_cur_t *cursor) noexcept
{
rec_t *rec=
page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame));
page_t *page= btr_cur_get_page(cursor);
const rec_t *rec= page_is_comp(page)
? page_rec_next_get<true>(page, page + PAGE_NEW_INFIMUM)
: page_rec_next_get<false>(page, page + PAGE_OLD_INFIMUM);
if (UNIV_UNLIKELY(!rec))
return false;
cursor->page_cur.rec= rec;
cursor->page_cur.rec= const_cast<rec_t*>(rec);
mem_heap_t *heap= mem_heap_create(100);
const bool got= btr_page_get_parent(nullptr, heap, cursor, mtr);
mem_heap_free(heap);
@ -957,8 +962,7 @@ static void btr_free_root(buf_block_t *block, const fil_space_t &space,
{
/* Free the entire segment in small steps. */
ut_d(mtr->freeing_tree());
while (!fseg_free_step(PAGE_HEADER + PAGE_BTR_SEG_TOP +
block->page.frame, mtr));
while (!fseg_free_step(block, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr));
}
}
@ -1164,8 +1168,8 @@ leaf_loop:
/* NOTE: page hash indexes are dropped when a page is freed inside
fsp0fsp. */
bool finished = fseg_free_step(PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ block->page.frame, &mtr
bool finished = fseg_free_step(block, PAGE_HEADER + PAGE_BTR_SEG_LEAF,
&mtr
#ifdef BTR_CUR_HASH_ADAPT
, ahi
#endif /* BTR_CUR_HASH_ADAPT */
@ -1183,8 +1187,9 @@ top_loop:
finished = !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
*block, *space)
|| fseg_free_step_not_header(PAGE_HEADER + PAGE_BTR_SEG_TOP
+ block->page.frame, &mtr
|| fseg_free_step_not_header(block,
PAGE_HEADER + PAGE_BTR_SEG_TOP,
&mtr
#ifdef BTR_CUR_HASH_ADAPT
,ahi
#endif /* BTR_CUR_HASH_ADAPT */
@ -1925,7 +1930,6 @@ btr_root_raise_and_insert(
dberr_t* err) /*!< out: error code */
{
dict_index_t* index;
rec_t* rec;
dtuple_t* node_ptr;
ulint level;
rec_t* node_ptr_rec;
@ -2082,7 +2086,13 @@ btr_root_raise_and_insert(
}
const uint32_t new_page_no = new_block->page.id().page_no();
rec = page_rec_get_next(page_get_infimum_rec(new_block->page.frame));
const rec_t* rec= page_is_comp(new_block->page.frame)
? page_rec_next_get<true>(new_block->page.frame,
new_block->page.frame
+ PAGE_NEW_INFIMUM)
: page_rec_next_get<false>(new_block->page.frame,
new_block->page.frame
+ PAGE_OLD_INFIMUM);
ut_ad(rec); /* We just created the page. */
/* Build the node pointer (= node key and page address) for the
@ -2157,90 +2167,109 @@ btr_root_raise_and_insert(
/** Decide if the page should be split at the convergence point of inserts
converging to the left.
@param[in] cursor insert position
@param cursor insert position
@return the first record to be moved to the right half page
@retval NULL if no split is recommended */
rec_t* btr_page_get_split_rec_to_left(const btr_cur_t* cursor)
@retval nullptr if no split is recommended */
rec_t *btr_page_get_split_rec_to_left(const btr_cur_t *cursor) noexcept
{
rec_t* split_rec = btr_cur_get_rec(cursor);
const page_t* page = page_align(split_rec);
const rec_t *split_rec= btr_cur_get_rec(cursor);
const page_t *page= btr_cur_get_page(cursor);
const rec_t *const last= page + page_header_get_offs(page, PAGE_LAST_INSERT);
if (page_header_get_ptr(page, PAGE_LAST_INSERT)
!= page_rec_get_next(split_rec)) {
return NULL;
}
if (page_is_comp(page))
{
if (last != page_rec_next_get<true>(page, split_rec))
return nullptr;
/* The metadata record must be present in the leftmost leaf page
of the clustered index, if and only if index->is_instant().
However, during innobase_instant_try(), index->is_instant() would
already hold when row_ins_clust_index_entry_low() is being invoked
to insert the metadata record. So, we can only assert that
when the metadata record exists, index->is_instant() must hold. */
const rec_t *const infimum= page + PAGE_NEW_INFIMUM;
ut_ad(!page_is_leaf(page) || page_has_prev(page) ||
cursor->index()->is_instant() ||
!(rec_get_info_bits(page_rec_next_get<true>(page, infimum), true) &
REC_INFO_MIN_REC_FLAG));
/* If the convergence is in the middle of a page, include also the
record immediately before the new insert to the upper page.
Otherwise, we could repeatedly move from page to page lots of
records smaller than the convergence point. */
if (split_rec == infimum ||
split_rec == page_rec_next_get<true>(page, infimum))
split_rec= page_rec_next_get<true>(page, split_rec);
}
else
{
if (last != page_rec_next_get<false>(page, split_rec))
return nullptr;
const rec_t *const infimum= page + PAGE_OLD_INFIMUM;
ut_ad(!page_is_leaf(page) || page_has_prev(page) ||
cursor->index()->is_instant() ||
!(rec_get_info_bits(page_rec_next_get<false>(page, infimum), false) &
REC_INFO_MIN_REC_FLAG));
if (split_rec == infimum ||
split_rec == page_rec_next_get<false>(page, infimum))
split_rec= page_rec_next_get<false>(page, split_rec);
}
/* The metadata record must be present in the leftmost leaf page
of the clustered index, if and only if index->is_instant().
However, during innobase_instant_try(), index->is_instant()
would already hold when row_ins_clust_index_entry_low()
is being invoked to insert the metadata record.
So, we can only assert that when the metadata record exists,
index->is_instant() must hold. */
ut_ad(!page_is_leaf(page) || page_has_prev(page)
|| cursor->index()->is_instant()
|| !(rec_get_info_bits(page_rec_get_next_const(
page_get_infimum_rec(page)),
cursor->index()->table->not_redundant())
& REC_INFO_MIN_REC_FLAG));
const rec_t* infimum = page_get_infimum_rec(page);
/* If the convergence is in the middle of a page, include also
the record immediately before the new insert to the upper
page. Otherwise, we could repeatedly move from page to page
lots of records smaller than the convergence point. */
if (split_rec == infimum
|| split_rec == page_rec_get_next_const(infimum)) {
split_rec = page_rec_get_next(split_rec);
}
return split_rec;
return const_cast<rec_t*>(split_rec);
}
/** Decide if the page should be split at the convergence point of inserts
converging to the right.
@param[in] cursor insert position
@param[out] split_rec if split recommended, the first record
on the right half page, or
NULL if the to-be-inserted record
should be first
@param cursor insert position
@param split_rec if split recommended, the first record on the right
half page, or nullptr if the to-be-inserted record should be first
@return whether split is recommended */
bool
btr_page_get_split_rec_to_right(const btr_cur_t* cursor, rec_t** split_rec)
btr_page_get_split_rec_to_right(const btr_cur_t *cursor, rec_t **split_rec)
noexcept
{
rec_t* insert_point = btr_cur_get_rec(cursor);
const page_t* page = page_align(insert_point);
const rec_t *insert_point= btr_cur_get_rec(cursor);
const page_t *page= btr_cur_get_page(cursor);
/* We use eager heuristics: if the new insert would be right after
the previous insert on the same page, we assume that there is a
pattern of sequential inserts here. */
/* We use eager heuristics: if the new insert would be right after
the previous insert on the same page, we assume that there is a
pattern of sequential inserts here. */
if (page + page_header_get_offs(page, PAGE_LAST_INSERT) != insert_point)
return false;
if (page_header_get_ptr(page, PAGE_LAST_INSERT) != insert_point) {
return false;
}
if (page_is_comp(page))
{
const rec_t *const supremum= page + PAGE_NEW_SUPREMUM;
insert_point= page_rec_next_get<true>(page, insert_point);
if (!insert_point);
else if (insert_point == supremum)
insert_point= nullptr;
else
{
insert_point= page_rec_next_get<true>(page, insert_point);
if (insert_point == supremum)
insert_point= nullptr;
/* If there are >= 2 user records up from the insert point,
split all but 1 off. We want to keep one because then sequential
inserts can do the necessary checks of the right search position
just by looking at the records on this page. */
}
}
else
{
const rec_t *const supremum= page + PAGE_OLD_SUPREMUM;
insert_point= page_rec_next_get<false>(page, insert_point);
if (!insert_point);
else if (insert_point == supremum)
insert_point= nullptr;
else
{
insert_point= page_rec_next_get<false>(page, insert_point);
if (insert_point == supremum)
insert_point= nullptr;
}
}
insert_point = page_rec_get_next(insert_point);
if (!insert_point || page_rec_is_supremum(insert_point)) {
insert_point = NULL;
} else {
insert_point = page_rec_get_next(insert_point);
if (page_rec_is_supremum(insert_point)) {
insert_point = NULL;
}
/* If there are >= 2 user records up from the insert
point, split all but 1 off. We want to keep one because
then sequential inserts can use the adaptive hash
index, as they can do the necessary checks of the right
search position just by looking at the records on this
page. */
}
*split_rec = insert_point;
return true;
*split_rec= const_cast<rec_t*>(insert_point);
return true;
}
/*************************************************************//**
@ -4695,31 +4724,31 @@ btr_index_rec_validate_report(
<< " of table " << index->table->name
<< ", page " << page_id_t(page_get_space_id(page),
page_get_page_no(page))
<< ", at offset " << page_offset(rec);
<< ", at offset " << rec - page;
}
/************************************************************//**
Checks the size and number of fields in a record based on the definition of
the index.
@return TRUE if ok */
ibool
bool
btr_index_rec_validate(
/*===================*/
const rec_t* rec, /*!< in: index record */
const page_cur_t& cur, /*!< in: cursor to index record */
const dict_index_t* index, /*!< in: index */
ibool dump_on_error) /*!< in: TRUE if the function
bool dump_on_error) /*!< in: true if the function
should print hex dump of record
and page on error */
noexcept
{
ulint len;
const page_t* page;
const rec_t* rec = page_cur_get_rec(&cur);
const page_t* page = cur.block->page.frame;
mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
rec_offs_init(offsets_);
page = page_align(rec);
ut_ad(index->n_core_fields);
if (index->is_ibuf()) {
@ -4900,7 +4929,7 @@ btr_index_page_validate(
return true;
}
if (!btr_index_rec_validate(cur.rec, index, TRUE)) {
if (!btr_index_rec_validate(cur, index, TRUE)) {
break;
}


@ -158,7 +158,8 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets)
const ulint rec_size= rec_offs_size(offsets);
const ulint extra_size= rec_offs_extra_size(offsets);
ut_ad(page_align(m_heap_top + rec_size) == m_page);
ut_d(const bool is_leaf= page_rec_is_leaf(m_cur_rec));
ut_ad(page_align(m_cur_rec) == m_page);
ut_d(const bool is_leaf= page_is_leaf(m_page));
#ifdef UNIV_DEBUG
/* Check whether records are in order. */
@ -180,8 +181,8 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets)
/* Insert the record in the linked list. */
if (fmt != REDUNDANT)
{
const rec_t *next_rec= m_page +
page_offset(m_cur_rec + mach_read_from_2(m_cur_rec - REC_NEXT));
const rec_t *next_rec=
m_cur_rec + int16_t(mach_read_from_2(m_cur_rec - REC_NEXT));
if (fmt != COMPRESSED)
m_mtr.write<2>(*m_block, m_cur_rec - REC_NEXT,
static_cast<uint16_t>(insert_rec - m_cur_rec));
@ -204,7 +205,8 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets)
else
{
memcpy(const_cast<rec_t*>(rec) - REC_NEXT, m_cur_rec - REC_NEXT, 2);
m_mtr.write<2>(*m_block, m_cur_rec - REC_NEXT, page_offset(insert_rec));
m_mtr.write<2>(*m_block, m_cur_rec - REC_NEXT,
uintptr_t(insert_rec - m_page));
rec_set_bit_field_1(const_cast<rec_t*>(rec), 0,
REC_OLD_N_OWNED, REC_N_OWNED_MASK, REC_N_OWNED_SHIFT);
rec_set_bit_field_2(const_cast<rec_t*>(rec),
@ -214,7 +216,7 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets)
if (fmt == COMPRESSED)
/* We already wrote the record. Log is written in PageBulk::compress(). */;
else if (page_offset(m_cur_rec) ==
else if (m_cur_rec - m_page ==
(fmt == REDUNDANT ? PAGE_OLD_INFIMUM : PAGE_NEW_INFIMUM))
m_mtr.memcpy(*m_block, m_heap_top, rec - extra_size, rec_size);
else
@ -246,7 +248,7 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets)
if (len > 2)
{
memcpy(b, c, len);
m_mtr.memmove(*m_block, page_offset(b), page_offset(c), len);
m_mtr.memmove(*m_block, b - m_page, c - m_page, len);
c= cm;
b= bm;
r= rm;
@ -285,7 +287,7 @@ no_data:
{
m_mtr.memcpy<mtr_t::FORCED>(*m_block, b, r, m_cur_rec - c);
memcpy(bd, cd, len);
m_mtr.memmove(*m_block, page_offset(bd), page_offset(cd), len);
m_mtr.memmove(*m_block, bd - m_page, cd - m_page, len);
c= cdm;
b= rdm - rd + bd;
r= rdm;
@ -430,7 +432,7 @@ inline void PageBulk::finishPage()
if (count == (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2)
{
slot-= PAGE_DIR_SLOT_SIZE;
mach_write_to_2(slot, page_offset(insert_rec));
mach_write_to_2(slot, insert_rec - m_page);
page_rec_set_n_owned<false>(m_block, insert_rec, count, false, &m_mtr);
count= 0;
}
@ -469,7 +471,7 @@ inline void PageBulk::finishPage()
m_mtr.memcpy(*m_block, PAGE_HEADER + m_page, page_header,
sizeof page_header);
m_mtr.write<2>(*m_block, PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no);
m_mtr.memcpy(*m_block, page_offset(slot), slot0 - slot);
m_mtr.memcpy(*m_block, slot - m_page, slot0 - slot);
}
else
{
@ -701,7 +703,7 @@ PageBulk::copyOut(
ULINT_UNDEFINED, &m_heap);
mach_write_to_2(rec - REC_NEXT, m_is_comp
? static_cast<uint16_t>
(PAGE_NEW_SUPREMUM - page_offset(rec))
(PAGE_NEW_SUPREMUM - (rec - m_page))
: PAGE_OLD_SUPREMUM);
/* Set related members */


@ -237,7 +237,7 @@ unreadable:
}
ut_ad(page_cur_is_before_first(&cur.page_cur));
ut_ad(page_is_leaf(cur.page_cur.block->page.frame));
ut_ad(page_is_leaf(btr_cur_get_page(&cur)));
const rec_t* rec = page_cur_move_to_next(&cur.page_cur);
const ulint comp = dict_table_is_comp(index->table);
@ -591,14 +591,17 @@ btr_intention_t btr_cur_get_and_clear_intention(btr_latch_mode *latch_mode)
/** @return whether the distance between two records is at most the
specified value */
template<bool comp>
static bool
page_rec_distance_is_at_most(const rec_t *left, const rec_t *right, ulint val)
page_rec_distance_is_at_most(const page_t *page, const rec_t *left,
const rec_t *right, ulint val)
noexcept
{
do
{
if (left == right)
return true;
left= page_rec_get_next_const(left);
left= page_rec_next_get<comp>(page, left);
}
while (left && val--);
return false;
@ -669,25 +672,52 @@ btr_cur_will_modify_tree(
}
/* check delete will cause. (BTR_INTENTION_BOTH
or BTR_INTENTION_DELETE) */
if (n_recs <= max_nodes_deleted * 2
|| page_rec_is_first(rec, page)) {
if (n_recs <= max_nodes_deleted * 2) {
/* The cursor record can be the left most record
in this page. */
return true;
}
if (page_has_prev(page)
&& page_rec_distance_is_at_most(
page_get_infimum_rec(page), rec,
max_nodes_deleted)) {
return true;
}
if (page_has_next(page)
&& page_rec_distance_is_at_most(
rec, page_get_supremum_rec(page),
max_nodes_deleted)) {
return true;
if (page_is_comp(page)) {
const rec_t *const infimum
= page + PAGE_NEW_INFIMUM;
if (page_rec_next_get<true>(page, infimum)
== rec) {
return true;
}
if (page_has_prev(page)
&& page_rec_distance_is_at_most<true>(
page, infimum, rec,
max_nodes_deleted)) {
return true;
}
if (page_has_next(page)
&& page_rec_distance_is_at_most<true>(
page, rec,
page + PAGE_NEW_SUPREMUM,
max_nodes_deleted)) {
return true;
}
} else {
const rec_t *const infimum
= page + PAGE_OLD_INFIMUM;
if (page_rec_next_get<false>(page, infimum)
== rec) {
return true;
}
if (page_has_prev(page)
&& page_rec_distance_is_at_most<false>(
page, infimum, rec,
max_nodes_deleted)) {
return true;
}
if (page_has_next(page)
&& page_rec_distance_is_at_most<false>(
page, rec,
page + PAGE_OLD_SUPREMUM,
max_nodes_deleted)) {
return true;
}
}
/* Delete at leftmost record in a page causes delete
@ -2308,7 +2338,7 @@ btr_cur_ins_lock_and_undo(
}
}
if (!index->is_primary() || !page_is_leaf(page_align(rec))) {
if (!index->is_primary() || !page_is_leaf(btr_cur_get_page(cursor))) {
return DB_SUCCESS;
}
@ -2997,7 +3027,8 @@ static dberr_t btr_cur_upd_rec_sys(buf_block_t *block, rec_t *rec,
To save space, we must have d>6, that is, the complete DB_TRX_ID and
the first byte(s) of DB_ROLL_PTR must match the previous record. */
memcpy(dest, src, d);
mtr->memmove(*block, page_offset(dest), page_offset(src), d);
mtr->memmove(*block, dest - block->page.frame, src - block->page.frame,
d);
dest+= d;
len-= d;
/* DB_TRX_ID,DB_ROLL_PTR must be unique in each record when
@ -3185,8 +3216,8 @@ void btr_cur_upd_rec_in_place(rec_t *rec, const dict_index_t *index,
default:
mtr->memset(
block,
page_offset(rec_get_field_start_offs(
rec, n) + rec),
rec_get_field_start_offs(rec, n) + rec
- block->page.frame,
size, 0);
}
ulint l = rec_get_1byte_offs_flag(rec)
@ -3936,7 +3967,9 @@ btr_cur_pess_upd_restore_supremum(
lock_rec_reset_and_inherit_gap_locks(*prev_block, block_id,
PAGE_HEAP_NO_SUPREMUM,
page_rec_get_heap_no(rec));
page_is_comp(page)
? rec_get_heap_no_new(rec)
: rec_get_heap_no_old(rec));
return DB_SUCCESS;
}
@ -4418,7 +4451,7 @@ return_after_reservations:
template<bool flag>
void btr_rec_set_deleted(buf_block_t *block, rec_t *rec, mtr_t *mtr)
{
if (page_rec_is_comp(rec))
if (UNIV_LIKELY(page_is_comp(block->page.frame) != 0))
{
byte *b= &rec[-REC_NEW_INFO_BITS];
const byte v= flag
@ -4661,7 +4694,7 @@ btr_cur_optimistic_delete(
page_t* page = buf_block_get_frame(block);
page_zip_des_t* page_zip= buf_block_get_page_zip(block);
if (UNIV_UNLIKELY(rec_get_info_bits(rec, page_rec_is_comp(rec))
if (UNIV_UNLIKELY(rec_get_info_bits(rec, page_is_comp(page))
& REC_INFO_MIN_REC_FLAG)) {
/* This should be rolling back instant ADD COLUMN.
If this is a recovered transaction, then
@ -4829,7 +4862,7 @@ btr_cur_pessimistic_delete(
if (page_is_leaf(page)) {
const bool is_metadata = rec_is_metadata(
rec, page_rec_is_comp(rec));
rec, page_is_comp(block->page.frame));
if (UNIV_UNLIKELY(is_metadata)) {
/* This should be rolling back instant ALTER TABLE.
If this is a recovered transaction, then
@ -5972,7 +6005,8 @@ struct btr_blob_log_check_t {
uint32_t page_no = FIL_NULL;
if (UNIV_UNLIKELY(m_op == BTR_STORE_INSERT_BULK)) {
offs = page_offset(*m_rec);
offs = *m_rec - (*m_block)->page.frame;
ut_ad(offs == page_offset(*m_rec));
page_no = (*m_block)->page.id().page_no();
(*m_block)->page.fix();
ut_ad(page_no != FIL_NULL);
@ -6081,7 +6115,7 @@ btr_store_big_rec_extern_fields(
ut_ad(buf_block_get_frame(rec_block) == page_align(rec));
ut_a(dict_index_is_clust(index));
if (!fil_page_index_page_check(page_align(rec))) {
if (!fil_page_index_page_check(btr_pcur_get_page(pcur))) {
if (op != BTR_STORE_INSERT_BULK) {
return DB_PAGE_CORRUPTED;
}


@ -684,7 +684,7 @@ btr_search_update_hash_ref(
ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
ut_ad(block->page.lock.have_x() || block->page.lock.have_s());
ut_ad(page_align(btr_cur_get_rec(cursor)) == block->page.frame);
ut_ad(btr_cur_get_page(cursor) == block->page.frame);
ut_ad(page_is_leaf(block->page.frame));
assert_block_ahi_valid(block);
@ -1278,21 +1278,30 @@ retry:
/* Calculate and cache fold values into an array for fast deletion
from the hash index */
rec = page_get_infimum_rec(page);
rec = page_rec_get_next_low(rec, page_is_comp(page));
const auto comp = page_is_comp(page);
ulint* folds;
ulint n_cached = 0;
ulint prev_fold = 0;
if (rec && rec_is_metadata(rec, *index)) {
rec = page_rec_get_next_low(rec, page_is_comp(page));
if (!--n_recs) {
/* The page only contains the hidden metadata record
for instant ALTER TABLE that the adaptive hash index
never points to. */
folds = nullptr;
goto all_deleted;
if (UNIV_LIKELY(comp != 0)) {
rec = page_rec_next_get<true>(page, page + PAGE_NEW_INFIMUM);
if (rec && rec_is_metadata(rec, TRUE)) {
rec = page_rec_next_get<true>(page, rec);
skipped_metadata:
if (!--n_recs) {
/* The page only contains the hidden
metadata record for instant ALTER
TABLE that the adaptive hash index
never points to. */
folds = nullptr;
goto all_deleted;
}
}
} else {
rec = page_rec_next_get<false>(page, page + PAGE_OLD_INFIMUM);
if (rec && rec_is_metadata(rec, FALSE)) {
rec = page_rec_next_get<false>(page, rec);
goto skipped_metadata;
}
}
@ -1323,9 +1332,16 @@ retry:
folds[n_cached++] = fold;
next_rec:
rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
if (!rec || page_rec_is_supremum(rec)) {
break;
if (comp) {
rec = page_rec_next_get<true>(page, rec);
if (!rec || rec == page + PAGE_NEW_SUPREMUM) {
break;
}
} else {
rec = page_rec_next_get<false>(page, rec);
if (!rec || rec == page + PAGE_OLD_SUPREMUM) {
break;
}
}
prev_fold = fold;
}


@ -898,7 +898,8 @@ rec_corrupted:
static_assert(FIL_NULL == 0xffffffff, "compatibility");
static_assert(DICT_FLD__SYS_INDEXES__PAGE_NO ==
DICT_FLD__SYS_INDEXES__SPACE + 1, "compatibility");
mtr->memset(btr_pcur_get_block(pcur), page_offset(p + 4), 4, 0xff);
mtr->memset(btr_pcur_get_block(pcur), p + 4 - btr_pcur_get_page(pcur),
4, 0xff);
btr_free_if_exists(s, root_page_no, mach_read_from_8(rec + 8), mtr);
}
s->release();


@ -974,7 +974,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index,
rec = page_rec_get_next(cursor.page_cur.rec);
const ulint n_core = index->n_core_fields;
if (rec && !page_rec_is_supremum(rec)) {
if (rec && rec != page_get_supremum_rec(page)) {
not_empty_flag = 1;
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
n_core,
@ -986,10 +986,11 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index,
}
}
while (!page_rec_is_supremum(rec)) {
while (rec != page_get_supremum_rec(page)) {
ulint matched_fields;
rec_t* next_rec = page_rec_get_next(rec);
if (!next_rec || page_rec_is_supremum(next_rec)) {
if (!next_rec
|| next_rec == page_get_supremum_rec(page)) {
total_external_size +=
btr_rec_get_externally_stored_len(
rec, offsets_rec);
@ -1510,14 +1511,12 @@ dict_stats_analyze_index_level(
/* If rec and prev_rec are on different pages, then prev_rec
must have been copied, because we hold latch only on the page
where rec resides. */
if (prev_rec != NULL
&& page_align(rec) != page_align(prev_rec)) {
ut_ad(!prev_rec
|| btr_pcur_get_page(&pcur) == page_align(prev_rec)
|| prev_rec_is_copied);
ut_a(prev_rec_is_copied);
}
rec_is_last_on_page =
page_rec_is_supremum(page_rec_get_next_const(rec));
rec_is_last_on_page = page_rec_get_next_const(rec)
== page_get_supremum_rec(btr_pcur_get_page(&pcur));
/* increment the pages counter at the end of each page */
if (rec_is_last_on_page) {
@ -1534,7 +1533,8 @@ dict_stats_analyze_index_level(
if (level == 0
&& !srv_stats_include_delete_marked
&& rec_get_deleted_flag(rec, page_rec_is_comp(rec))) {
&& rec_get_deleted_flag(
rec, page_is_comp(btr_pcur_get_page(&pcur)))) {
if (rec_is_last_on_page
&& !prev_rec_is_copied
&& prev_rec != NULL) {
@ -1699,34 +1699,23 @@ func_exit:
mem_heap_free(heap);
}
/************************************************************//**
Gets the pointer to the next non delete-marked record on the page.
If all subsequent records are delete-marked, then this function
will return the supremum record.
@return pointer to next non delete-marked record or pointer to supremum */
template<bool comp>
static
const rec_t*
page_rec_get_next_non_del_marked(
/*=============================*/
const rec_t* rec) /*!< in: pointer to record */
page_rec_get_next_non_del_marked(const page_t *page, const rec_t *rec)
{
const page_t *const page= page_align(rec);
ut_ad(!!page_is_comp(page) == comp);
ut_ad(page_align(rec) == page);
if (page_is_comp(page))
{
for (rec= page_rec_get_next_low(rec, TRUE);
rec && rec_get_deleted_flag(rec, TRUE);
rec= page_rec_get_next_low(rec, TRUE));
return rec ? rec : page + PAGE_NEW_SUPREMUM;
}
else
{
for (rec= page_rec_get_next_low(rec, FALSE);
rec && rec_get_deleted_flag(rec, FALSE);
rec= page_rec_get_next_low(rec, FALSE));
return rec ? rec : page + PAGE_OLD_SUPREMUM;
}
for (rec= page_rec_next_get<comp>(page, rec);
rec && rec_get_deleted_flag(rec, comp);
rec= page_rec_next_get<comp>(page, rec));
return rec ? rec : page + (comp ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM);
}
/** Scan a page, reading records from left to right and counting the number
@ -1773,10 +1762,13 @@ dict_stats_scan_page(
this memory heap should never be used. */
mem_heap_t* heap = NULL;
ut_ad(!!n_core == page_is_leaf(page));
const rec_t* (*get_next)(const rec_t*)
const rec_t* (*get_next)(const page_t*, const rec_t*)
= !n_core || srv_stats_include_delete_marked
? page_rec_get_next_const
: page_rec_get_next_non_del_marked;
? (page_is_comp(page)
? page_rec_next_get<true> : page_rec_next_get<false>)
: page_is_comp(page)
? page_rec_get_next_non_del_marked<true>
: page_rec_get_next_non_del_marked<false>;
const bool should_count_external_pages = n_external_pages != NULL;
@ -1784,9 +1776,9 @@ dict_stats_scan_page(
*n_external_pages = 0;
}
rec = get_next(page_get_infimum_rec(page));
rec = get_next(page, page_get_infimum_rec(page));
if (!rec || page_rec_is_supremum(rec)) {
if (!rec || rec == page_get_supremum_rec(page)) {
/* the page is empty or contains only delete-marked records */
*n_diff = 0;
*out_rec = NULL;
@ -1801,11 +1793,11 @@ dict_stats_scan_page(
rec, offsets_rec);
}
next_rec = get_next(rec);
next_rec = get_next(page, rec);
*n_diff = 1;
while (next_rec && !page_rec_is_supremum(next_rec)) {
while (next_rec && next_rec != page_get_supremum_rec(page)) {
ulint matched_fields;
@ -1846,7 +1838,7 @@ dict_stats_scan_page(
rec, offsets_rec);
}
next_rec = get_next(next_rec);
next_rec = get_next(page, next_rec);
}
/* offsets1,offsets2 should have been big enough */
@ -1908,8 +1900,8 @@ dict_stats_analyze_index_below_cur(
rec_offs_set_n_alloc(offsets2, size);
rec = btr_cur_get_rec(cur);
page = page_align(rec);
ut_ad(!page_rec_is_leaf(rec));
page = btr_cur_get_page(cur);
ut_ad(!page_is_leaf(page));
offsets_rec = rec_get_offsets(rec, index, offsets1, 0,
ULINT_UNDEFINED, &heap);

View file

@ -446,8 +446,9 @@ Returns page offset of the first page in extent described by a descriptor.
static uint32_t xdes_get_offset(const xdes_t *descr)
{
ut_ad(descr);
return page_get_page_no(page_align(descr)) +
uint32_t(((page_offset(descr) - XDES_ARR_OFFSET) / XDES_SIZE) *
const page_t *page= page_align(descr);
return page_get_page_no(page) +
uint32_t(((descr - page - XDES_ARR_OFFSET) / XDES_SIZE) *
FSP_EXTENT_SIZE);
}
@ -1507,7 +1508,8 @@ static void fsp_free_seg_inode(fil_space_t *space, fseg_inode_t *inode,
return;
}
mtr->memset(iblock, page_offset(inode) + FSEG_ID, FSEG_INODE_SIZE, 0);
mtr->memset(iblock, inode - iblock->page.frame + FSEG_ID,
FSEG_INODE_SIZE, 0);
if (ULINT_UNDEFINED != fsp_seg_inode_page_find_used(iblock->page.frame,
physical_size))
@ -1778,7 +1780,8 @@ page_alloc:
}
mtr->write<2>(*block, byte_offset + FSEG_HDR_OFFSET
+ block->page.frame, page_offset(inode));
+ block->page.frame,
uintptr_t(inode - iblock->page.frame));
mtr->write<4>(*block, byte_offset + FSEG_HDR_PAGE_NO
+ block->page.frame, iblock->page.id().page_no());
@ -1920,11 +1923,12 @@ fseg_alloc_free_extent(
mtr_t* mtr,
dberr_t* err)
{
ut_ad(!((page_offset(inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_ad(iblock->page.frame == page_align(inode));
ut_ad(!((inode - iblock->page.frame - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_ad(!memcmp(FSEG_MAGIC_N_BYTES, FSEG_MAGIC_N + inode, 4));
ut_d(space->modify_check(*mtr));
if (UNIV_UNLIKELY(page_offset(inode) < FSEG_ARR_OFFSET))
if (UNIV_UNLIKELY(uintptr_t(inode - iblock->page.frame) < FSEG_ARR_OFFSET))
{
corrupted:
*err= DB_CORRUPTION;
@ -2813,33 +2817,18 @@ remove:
return DB_SUCCESS;
}
/** Frees part of a segment. This function can be used to free
a segment by repeatedly calling this function in different
mini-transactions. Doing the freeing in a single mini-transaction
might result in too big a mini-transaction.
@param header segment header; NOTE: if the header resides on first
page of the frag list of the segment, this pointer
becomes obsolete after the last freeing step
@param mtr mini-transaction
@param ahi Drop the adaptive hash index
@return whether the freeing was completed */
bool
fseg_free_step(
fseg_header_t* header,
mtr_t* mtr
bool fseg_free_step(buf_block_t *block, size_t header, mtr_t *mtr
#ifdef BTR_CUR_HASH_ADAPT
,bool ahi
, bool ahi
#endif /* BTR_CUR_HASH_ADAPT */
)
) noexcept
{
ulint n;
fseg_inode_t* inode;
const uint32_t space_id = page_get_space_id(page_align(header));
const uint32_t header_page = page_get_page_no(page_align(header));
fil_space_t* space = mtr->x_lock_space(space_id);
xdes_t* descr = xdes_get_descriptor(space, header_page, mtr);
const page_id_t header_id{block->page.id()};
fil_space_t* space = mtr->x_lock_space(header_id.space());
xdes_t* descr = xdes_get_descriptor(space, header_id.page_no(), mtr);
if (!descr) {
return true;
@ -2849,14 +2838,17 @@ fseg_free_step(
freed yet */
if (UNIV_UNLIKELY(xdes_is_free(descr,
header_page & (FSP_EXTENT_SIZE - 1)))) {
header_id.page_no()
& (FSP_EXTENT_SIZE - 1)))) {
/* Some corruption was detected: stop the freeing
in order to prevent a crash. */
return true;
}
buf_block_t* iblock;
const ulint zip_size = space->zip_size();
inode = fseg_inode_try_get(header, space_id, zip_size, mtr, &iblock);
inode = fseg_inode_try_get(block->page.frame + header,
header_id.space(), zip_size,
mtr, &iblock);
if (!inode || space->is_stopping()) {
return true;
}
@ -2915,33 +2907,31 @@ fseg_free_step(
return false;
}
bool
fseg_free_step_not_header(
fseg_header_t* header,
mtr_t* mtr
bool fseg_free_step_not_header(buf_block_t *block, size_t header, mtr_t *mtr
#ifdef BTR_CUR_HASH_ADAPT
,bool ahi
, bool ahi
#endif /* BTR_CUR_HASH_ADAPT */
)
) noexcept
{
fseg_inode_t* inode;
fseg_inode_t* inode;
const page_id_t header_id{block->page.id()};
ut_ad(mtr->is_named_space(header_id.space()));
const uint32_t space_id = page_get_space_id(page_align(header));
ut_ad(mtr->is_named_space(space_id));
fil_space_t* space = mtr->x_lock_space(header_id.space());
buf_block_t* iblock;
fil_space_t* space = mtr->x_lock_space(space_id);
buf_block_t* iblock;
inode = fseg_inode_try_get(header, space_id, space->zip_size(),
inode = fseg_inode_try_get(block->page.frame + header,
header_id.space(), space->zip_size(),
mtr, &iblock);
if (space->is_stopping()) {
return true;
}
if (!inode) {
ib::warn() << "Double free of "
<< page_id_t(space_id,
page_get_page_no(page_align(header)));
if (UNIV_UNLIKELY(!inode)) {
sql_print_warning("InnoDB: Double free of page " UINT32PF
" in file %s",
header_id.page_no(),
space->chain.start->name);
return true;
}
@ -2973,7 +2963,7 @@ fseg_free_step_not_header(
uint32_t page_no = fseg_get_nth_frag_page_no(inode, n);
if (page_no == page_get_page_no(page_align(header))) {
if (page_no == header_id.page_no()) {
return true;
}
@ -3052,8 +3042,9 @@ static void fseg_print_low(const fseg_inode_t *inode)
ulint page_no;
ib_id_t seg_id;
space = page_get_space_id(page_align(inode));
page_no = page_get_page_no(page_align(inode));
const page_t* inode_page = page_align(inode);
space = page_get_space_id(inode_page);
page_no = page_get_page_no(inode_page);
reserved = fseg_n_reserved_pages_low(inode, &used);

View file

@ -3190,7 +3190,7 @@ fts_fetch_doc_from_rec(
parser = get_doc->index_cache->index->parser;
clust_rec = btr_pcur_get_rec(pcur);
ut_ad(!page_rec_is_comp(clust_rec)
ut_ad(!page_is_comp(btr_pcur_get_page(pcur))
|| rec_get_status(clust_rec) == REC_STATUS_ORDINARY);
for (ulint i = 0; i < index->n_fields; i++) {


@ -72,13 +72,13 @@ static void flst_write_addr(const buf_block_t& block, byte *faddr,
@param[in,out] mtr mini-transaction */
static void flst_zero_both(const buf_block_t& b, byte *addr, mtr_t *mtr)
{
const ulint boffset= ulint(addr - b.page.frame);
if (mach_read_from_4(addr + FIL_ADDR_PAGE) != FIL_NULL)
mtr->memset(&b, ulint(addr - b.page.frame) + FIL_ADDR_PAGE, 4, 0xff);
mtr->memset(&b, boffset + FIL_ADDR_PAGE, 4, 0xff);
mtr->write<2,mtr_t::MAYBE_NOP>(b, addr + FIL_ADDR_BYTE, 0U);
/* Initialize the other address by (MEMMOVE|0x80,offset,FIL_ADDR_SIZE,source)
which is 4 bytes, or less than FIL_ADDR_SIZE. */
memcpy(addr + FIL_ADDR_SIZE, addr, FIL_ADDR_SIZE);
const uint16_t boffset= page_offset(addr);
mtr->memmove(b, boffset + FIL_ADDR_SIZE, boffset, FIL_ADDR_SIZE);
}


@ -221,7 +221,7 @@ rtr_update_mbr_field(
rec_offs* offsets2;
rec = btr_cur_get_rec(cursor);
page = page_align(rec);
page = btr_cur_get_page(cursor);
rec_info = rec_get_info_bits(rec, rec_offs_comp(offsets));
@ -1318,7 +1318,7 @@ rtr_page_copy_rec_list_end_no_locks(
return DB_CORRUPTION;
}
ut_a(page_is_comp(new_page) == page_rec_is_comp(rec));
ut_a(page_is_comp(new_page) == page_is_comp(block->page.frame));
ut_a(mach_read_from_2(new_page + srv_page_size - 10) == (ulint)
(page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM));


@ -1284,11 +1284,13 @@ rtr_page_get_father_block(
btr_cur_t* cursor) /*!< out: cursor on node pointer record,
its page x-latched */
{
rec_t *rec=
page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame));
const page_t *const page= cursor->block()->page.frame;
const rec_t *rec= page_is_comp(page)
? page_rec_next_get<true>(page, page + PAGE_NEW_INFIMUM)
: page_rec_next_get<false>(page, page + PAGE_OLD_INFIMUM);
if (!rec)
return nullptr;
cursor->page_cur.rec= rec;
cursor->page_cur.rec= const_cast<rec_t*>(rec);
return rtr_page_get_father_node_ptr(offsets, heap, sea_cur, cursor, mtr);
}


@ -2010,7 +2010,7 @@ static
ulint
ibuf_get_merge_page_nos_func(
/*=========================*/
const rec_t* rec, /*!< in: insert buffer record */
const btr_cur_t& cur, /*!< in: insert buffer cursor */
#ifdef UNIV_DEBUG
mtr_t* mtr, /*!< in: mini-transaction holding rec */
#endif /* UNIV_DEBUG */
@ -2032,6 +2032,8 @@ ibuf_get_merge_page_nos_func(
ulint rec_volume;
ulint limit;
ulint n_pages;
const rec_t* rec= btr_cur_get_rec(&cur);
const page_t* page= btr_cur_get_page(&cur);
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
@ -2039,7 +2041,7 @@ ibuf_get_merge_page_nos_func(
*n_stored = 0;
if (page_rec_is_supremum(rec)) {
if (page_rec_is_supremum_low(rec - page)) {
rec = page_rec_get_prev_const(rec);
if (UNIV_UNLIKELY(!rec)) {
@ -2049,9 +2051,9 @@ corruption:
}
}
if (page_rec_is_infimum(rec)) {
rec = page_rec_get_next_const(rec);
if (!rec || page_rec_is_supremum(rec)) {
if (page_rec_is_infimum_low(rec - page)) {
rec = page_rec_next_get<false>(page, rec);
if (!rec || page_rec_is_supremum_low(rec - page)) {
return 0;
}
}
@ -2069,7 +2071,8 @@ corruption:
'merge area', or the page start or the limit of storeable pages is
reached */
while (!page_rec_is_infimum(rec) && UNIV_LIKELY(n_pages < limit)) {
while (!page_rec_is_infimum_low(rec - page)
&& UNIV_LIKELY(n_pages < limit)) {
rec_page_no = ibuf_rec_get_page_no(mtr, rec);
rec_space_id = ibuf_rec_get_space(mtr, rec);
@ -2094,7 +2097,7 @@ corruption:
}
}
rec = page_rec_get_next_const(rec);
rec = page_rec_next_get<false>(page, rec);
/* At the loop start there is no prev page; we mark this with a pair
of space id, page no (0, 0) for which there can never be entries in
@ -2106,7 +2109,7 @@ corruption:
volume_for_page = 0;
while (*n_stored < limit && rec) {
if (page_rec_is_supremum(rec)) {
if (page_rec_is_supremum_low(rec - page)) {
/* When no more records available, mark this with
another 'impossible' pair of space id, page no */
rec_page_no = 1;
@ -2168,7 +2171,7 @@ corruption:
prev_page_no = rec_page_no;
prev_space_id = rec_space_id;
rec = page_rec_get_next_const(rec);
rec = page_rec_next_get<false>(page, rec);
}
#ifdef UNIV_IBUF_DEBUG
@ -2434,7 +2437,7 @@ ATTRIBUTE_COLD ulint ibuf_contract()
}
ulint n_pages = 0;
sum_sizes = ibuf_get_merge_page_nos(btr_cur_get_rec(&cur), &mtr,
sum_sizes = ibuf_get_merge_page_nos(cur, &mtr,
space_ids, page_nos, &n_pages);
ibuf_mtr_commit(&mtr);
@ -2726,10 +2729,10 @@ ibuf_get_volume_buffered(
}
rec = btr_pcur_get_rec(pcur);
page = page_align(rec);
page = btr_pcur_get_page(pcur);
ut_ad(page_validate(page, ibuf.index));
if (page_rec_is_supremum(rec)
if (rec == page + PAGE_OLD_SUPREMUM
&& UNIV_UNLIKELY(!(rec = page_rec_get_prev_const(rec)))) {
corruption:
ut_ad("corrupted page" == 0);
@ -2738,7 +2741,7 @@ corruption:
uint32_t prev_page_no;
for (; !page_rec_is_infimum(rec); ) {
while (rec != page + PAGE_OLD_INFIMUM) {
ut_ad(page_align(rec) == page);
if (page_no != ibuf_rec_get_page_no(mtr, rec)
@ -2818,12 +2821,12 @@ corruption:
count_later:
rec = btr_pcur_get_rec(pcur);
if (!page_rec_is_supremum(rec)) {
rec = page_rec_get_next_const(rec);
if (rec != page + PAGE_OLD_SUPREMUM) {
rec = page_rec_next_get<false>(page, rec);
}
for (; !page_rec_is_supremum(rec);
rec = page_rec_get_next_const(rec)) {
for (; rec != page + PAGE_OLD_SUPREMUM;
rec = page_rec_next_get<false>(page, rec)) {
if (UNIV_UNLIKELY(!rec)) {
return srv_page_size;
}
@ -2864,11 +2867,11 @@ count_later:
return 0;
}
rec = page_get_infimum_rec(next_page);
rec = page_rec_get_next_const(rec);
rec = page_rec_next_get<false>(next_page,
next_page + PAGE_OLD_INFIMUM);
for (; ; rec = page_rec_get_next_const(rec)) {
if (!rec || page_rec_is_supremum(rec)) {
for (;; rec = page_rec_next_get<false>(next_page, rec)) {
if (!rec || rec == next_page + PAGE_OLD_SUPREMUM) {
/* We give up */
return(srv_page_size);
}
@ -3591,16 +3594,26 @@ ibuf_insert_to_index_page(
assert_block_ahi_empty(block);
#endif /* BTR_CUR_HASH_ADAPT */
ut_ad(mtr->is_named_space(block->page.id().space()));
const auto comp = page_is_comp(page);
if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
!= (ibool)!!page_is_comp(page))) {
if (UNIV_UNLIKELY(index->table->not_redundant() != !!comp)) {
return DB_CORRUPTION;
}
rec = page_rec_get_next(page_get_infimum_rec(page));
if (!rec || page_rec_is_supremum(rec)) {
return DB_CORRUPTION;
if (comp) {
rec = const_cast<rec_t*>(
page_rec_next_get<true>(page,
page + PAGE_NEW_INFIMUM));
if (!rec || rec == page + PAGE_NEW_SUPREMUM) {
return DB_CORRUPTION;
}
} else {
rec = const_cast<rec_t*>(
page_rec_next_get<false>(page,
page + PAGE_OLD_INFIMUM));
if (!rec || rec == page + PAGE_OLD_SUPREMUM) {
return DB_CORRUPTION;
}
}
if (!rec_n_fields_is_sane(index, rec, entry)) {
@ -4214,7 +4227,8 @@ loop:
dict_index_t* dummy_index;
ibuf_op_t op = ibuf_rec_get_op_type(&mtr, rec);
max_trx_id = page_get_max_trx_id(page_align(rec));
max_trx_id =
page_get_max_trx_id(btr_pcur_get_page(&pcur));
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
max_trx_id, &mtr);


@ -269,20 +269,19 @@ dberr_t btr_page_reorganize(page_cur_t *cursor, mtr_t *mtr)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/** Decide if the page should be split at the convergence point of inserts
converging to the left.
@param[in] cursor insert position
@param cursor insert position
@return the first record to be moved to the right half page
@retval NULL if no split is recommended */
rec_t* btr_page_get_split_rec_to_left(const btr_cur_t* cursor);
@retval nullptr if no split is recommended */
rec_t *btr_page_get_split_rec_to_left(const btr_cur_t *cursor) noexcept;
/** Decide if the page should be split at the convergence point of inserts
converging to the right.
@param[in] cursor insert position
@param[out] split_rec if split recommended, the first record
on the right half page, or
NULL if the to-be-inserted record
should be first
@param cursor insert position
@param split_rec if split recommended, the first record on the right
half page, or nullptr if the to-be-inserted record should be first
@return whether split is recommended */
bool
btr_page_get_split_rec_to_right(const btr_cur_t* cursor, rec_t** split_rec);
btr_page_get_split_rec_to_right(const btr_cur_t *cursor, rec_t **split_rec)
noexcept;
/*************************************************************//**
Splits an index page to halves and inserts the tuple. It is assumed
@ -333,7 +332,7 @@ inline void btr_set_min_rec_mark(rec_t *rec, const buf_block_t &block,
ut_ad(!page_is_leaf(block.page.frame));
ut_ad(has_prev == page_has_prev(block.page.frame));
rec-= page_rec_is_comp(rec) ? REC_NEW_INFO_BITS : REC_OLD_INFO_BITS;
rec-= page_is_comp(block.page.frame) ? REC_NEW_INFO_BITS : REC_OLD_INFO_BITS;
if (block.page.zip.data)
/* This flag is computed from other contents on a ROW_FORMAT=COMPRESSED
@ -344,11 +343,11 @@ inline void btr_set_min_rec_mark(rec_t *rec, const buf_block_t &block,
}
/** Seek to the parent page of a B-tree page.
@param[in,out] mtr mini-transaction
@param[in,out] cursor cursor pointing to the x-latched parent page
@param mtr mini-transaction
@param cursor cursor pointing to the x-latched parent page
@return whether the cursor was successfully positioned */
bool btr_page_get_father(mtr_t* mtr, btr_cur_t* cursor)
MY_ATTRIBUTE((nonnull,warn_unused_result));
bool btr_page_get_father(mtr_t *mtr, btr_cur_t *cursor) noexcept
MY_ATTRIBUTE((nonnull,warn_unused_result));
#ifdef UNIV_DEBUG
/************************************************************//**
Checks that the node pointer to a page is appropriate.
@ -502,15 +501,15 @@ btr_print_index(
Checks the size and number of fields in a record based on the definition of
the index.
@return TRUE if ok */
ibool
bool
btr_index_rec_validate(
/*===================*/
const rec_t* rec, /*!< in: index record */
const page_cur_t& cur, /*!< in: cursor to index record */
const dict_index_t* index, /*!< in: index */
ibool dump_on_error) /*!< in: TRUE if the function
bool dump_on_error) /*!< in: true if the function
should print hex dump of record
and page on error */
MY_ATTRIBUTE((warn_unused_result));
noexcept MY_ATTRIBUTE((warn_unused_result));
/**************************************************************//**
Checks the consistency of an index tree.
@return DB_SUCCESS if ok, error code if not */


@ -460,42 +460,36 @@ fseg_free_page(
dberr_t fseg_page_is_allocated(fil_space_t *space, unsigned page)
MY_ATTRIBUTE((nonnull, warn_unused_result));
MY_ATTRIBUTE((nonnull, warn_unused_result))
/** Frees part of a segment. This function can be used to free
a segment by repeatedly calling this function in different
mini-transactions. Doing the freeing in a single mini-transaction
might result in too big a mini-transaction.
@param header segment header; NOTE: if the header resides on first
page of the frag list of the segment, this pointer
becomes obsolete after the last freeing step
@param mtr mini-transaction
@param ahi Drop the adaptive hash index
@param block segment header block
@param header segment header offset in the block;
NOTE: if the header resides on first page of the frag list of the segment,
this pointer becomes obsolete after the last freeing step
@param mtr mini-transaction
@return whether the freeing was completed */
bool
fseg_free_step(
fseg_header_t* header,
mtr_t* mtr
bool fseg_free_step(buf_block_t *block, size_t header, mtr_t *mtr
#ifdef BTR_CUR_HASH_ADAPT
,bool ahi=false
, bool ahi=false /*!< whether to drop the AHI */
#endif /* BTR_CUR_HASH_ADAPT */
)
MY_ATTRIBUTE((warn_unused_result));
) noexcept;
MY_ATTRIBUTE((nonnull, warn_unused_result))
/** Frees part of a segment. Differs from fseg_free_step because
this function leaves the header page unfreed.
@param header segment header which must reside on the first
fragment page of the segment
@param mtr mini-transaction
@param ahi drop the adaptive hash index
@param block segment header block; must reside on the first
fragment page of the segment
@param header segment header offset in the block
@param mtr mini-transaction
@return whether the freeing was completed, except for the header page */
bool
fseg_free_step_not_header(
fseg_header_t* header,
mtr_t* mtr
bool fseg_free_step_not_header(buf_block_t *block, size_t header, mtr_t *mtr
#ifdef BTR_CUR_HASH_ADAPT
,bool ahi=false
, bool ahi=false /*!< whether to drop the AHI */
#endif /* BTR_CUR_HASH_ADAPT */
)
MY_ATTRIBUTE((warn_unused_result));
) noexcept;
/** Reset the page type.
Data files created before MySQL 5.1.48 may contain garbage in FIL_PAGE_TYPE.


@ -460,7 +460,7 @@ lock_rec_unlock(
/*============*/
trx_t* trx, /*!< in/out: transaction that has
set a record lock */
const page_id_t id, /*!< in: page containing rec */
const buf_block_t& block, /*!< in: page containing rec */
const rec_t* rec, /*!< in: record */
lock_mode lock_mode);/*!< in: LOCK_S or LOCK_X */


@ -210,8 +210,7 @@ inline bool mtr_t::write(const buf_block_t &block, void *ptr, V val)
p--;
}
::memcpy(ptr, buf, l);
memcpy_low(block, static_cast<uint16_t>
(ut_align_offset(p, srv_page_size)), p, end - p);
memcpy_low(block, uint16_t(p - block.page.frame), p, end - p);
return true;
}
@ -490,12 +489,12 @@ inline void mtr_t::memcpy(const buf_block_t &b, void *dest, const void *str,
ulint len)
{
ut_ad(ut_align_down(dest, srv_page_size) == b.page.frame);
char *d= static_cast<char*>(dest);
byte *d= static_cast<byte*>(dest);
const char *s= static_cast<const char*>(str);
if (w != FORCED && is_logged())
{
ut_ad(len);
const char *const end= d + len;
const byte *const end= d + len;
while (*d++ == *s++)
{
if (d == end)
@ -509,7 +508,7 @@ inline void mtr_t::memcpy(const buf_block_t &b, void *dest, const void *str,
len= static_cast<ulint>(end - d);
}
::memcpy(d, s, len);
memcpy(b, ut_align_offset(d, srv_page_size), len);
memcpy(b, d - b.page.frame, len);
}
/** Write an EXTENDED log record.


@ -196,13 +196,14 @@ extern my_bool srv_immediate_scrub_data_uncompressed;
/** Get the start of a page frame.
@param[in] ptr pointer within a page frame
@return start of the page frame */
MY_ATTRIBUTE((const))
inline page_t* page_align(void *ptr)
MY_ATTRIBUTE((const,nonnull))
inline page_t *page_align(void *ptr) noexcept
{
return my_assume_aligned<UNIV_PAGE_SIZE_MIN>
(reinterpret_cast<page_t*>(ut_align_down(ptr, srv_page_size)));
}
inline const page_t *page_align(const void *ptr)
inline const page_t *page_align(const void *ptr) noexcept
{
return page_align(const_cast<void*>(ptr));
}
@ -210,8 +211,8 @@ inline const page_t *page_align(const void *ptr)
/** Gets the byte offset within a page frame.
@param[in] ptr pointer within a page frame
@return offset from the start of the page */
MY_ATTRIBUTE((const))
inline uint16_t page_offset(const void* ptr)
MY_ATTRIBUTE((const,nonnull))
inline uint16_t page_offset(const void *ptr) noexcept
{
return static_cast<uint16_t>(ut_align_offset(ptr, srv_page_size));
}
@ -687,6 +688,7 @@ page_dir_find_owner_slot(
/*=====================*/
const rec_t* rec); /*!< in: the physical record */
#ifdef UNIV_DEBUG
/***************************************************************//**
Returns the heap number of a record.
@return heap number */
@ -695,6 +697,7 @@ ulint
page_rec_get_heap_no(
/*=================*/
const rec_t* rec); /*!< in: the physical record */
#endif
/** Determine whether a page has any siblings.
@param[in] page page frame
@return true if the page has any siblings */
@ -738,15 +741,28 @@ inline uint64_t page_get_autoinc(const page_t *page)
return mach_read_from_8(p);
}
/************************************************************//**
Gets the pointer to the next record on the page.
@return pointer to next record */
UNIV_INLINE
const rec_t*
page_rec_get_next_low(
/*==================*/
const rec_t* rec, /*!< in: pointer to record */
ulint comp); /*!< in: nonzero=compact page layout */
/** Get the pointer to the next record on the page.
@tparam comp whether ROW_FORMAT is not REDUNDANT
@param page index page
@param rec index record
@return successor of rec in the page
@retval nullptr on corruption */
template<bool comp>
inline const rec_t *page_rec_next_get(const page_t *page, const rec_t *rec)
{
ut_ad(!!page_is_comp(page) == comp);
ut_ad(page_align(rec) == page);
ulint offs= rec_get_next_offs(rec, comp);
if (UNIV_UNLIKELY(offs < (comp ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM)))
return nullptr;
if (UNIV_UNLIKELY(offs > page_header_get_field(page, PAGE_HEAP_TOP)))
return nullptr;
ut_ad(page_rec_is_infimum(rec) ||
(!page_is_leaf(page) && !page_has_prev(page)) ||
!(rec_get_info_bits(page + offs, comp) & REC_INFO_MIN_REC_FLAG));
return page + offs;
}
/************************************************************//**
Gets the pointer to the next record on the page.
@return pointer to next record */
@ -755,6 +771,7 @@ rec_t*
page_rec_get_next(
/*==============*/
rec_t* rec); /*!< in: pointer to record */
/************************************************************//**
Gets the pointer to the next record on the page.
@return pointer to next record */


@ -128,6 +128,7 @@ inline void page_header_reset_last_insert(buf_block_t *block, mtr_t *mtr)
memset_aligned<2>(&block->page.zip.data[field], 0, 2);
}
#ifdef UNIV_DEBUG
/***************************************************************//**
Returns the heap number of a record.
@return heap number */
@ -143,6 +144,7 @@ page_rec_get_heap_no(
return(rec_get_heap_no_old(rec));
}
}
#endif
/** Determine whether an index page record is a user record.
@param[in] rec record in an index page
@ -235,7 +237,9 @@ page_get_page_no(
/*=============*/
const page_t* page) /*!< in: page */
{
ut_ad(page == page_align((page_t*) page));
#ifndef UNIV_INNOCHECKSUM
ut_ad(page == page_align(page));
#endif /* !UNIV_INNOCHECKSUM */
return mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_OFFSET));
}
@ -249,7 +253,7 @@ page_get_space_id(
/*==============*/
const page_t* page) /*!< in: page */
{
ut_ad(page == page_align((page_t*) page));
ut_ad(page == page_align(page));
return mach_read_from_4(my_assume_aligned<2>
(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
}
@ -357,8 +361,6 @@ page_rec_get_next_low(
const page_t *page= page_align(rec);
ut_ad(page_rec_check(rec));
ulint offs= rec_get_next_offs(rec, comp);
if (!offs)
return nullptr;
if (UNIV_UNLIKELY(offs < (comp ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM)))
return nullptr;
if (UNIV_UNLIKELY(offs > page_header_get_field(page, PAGE_HEAP_TOP)))


@ -326,7 +326,7 @@ public:
page_id_t get_page_id() const { return page_id; }
/** Handle the DML undo log and apply it on online indexes */
inline void apply_undo_rec(const trx_undo_rec_t *rec);
inline void apply_undo_rec(const trx_undo_rec_t *rec, uint16_t offset);
~UndorecApplier()
{


@ -2772,8 +2772,8 @@ lock_move_reorganize_page(
old_heap_no= rec_get_heap_no_new(rec2);
new_heap_no= rec_get_heap_no_new(rec1);
rec1= page_rec_get_next_low(rec1, TRUE);
rec2= page_rec_get_next_low(rec2, TRUE);
rec1= page_rec_next_get<true>(block->page.frame, rec1);
rec2= page_rec_next_get<true>(oblock->page.frame, rec2);
}
else
{
@ -2781,8 +2781,8 @@ lock_move_reorganize_page(
new_heap_no= rec_get_heap_no_old(rec1);
ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2)));
rec1= page_rec_get_next_low(rec1, FALSE);
rec2= page_rec_get_next_low(rec2, FALSE);
rec1= page_rec_next_get<false>(block->page.frame, rec1);
rec2= page_rec_next_get<false>(oblock->page.frame, rec2);
}
trx_t *lock_trx= lock->trx;
@ -2838,9 +2838,10 @@ lock_move_rec_list_end(
const rec_t* rec) /*!< in: record on page: this
is the first record moved */
{
const ulint comp= page_rec_is_comp(rec);
ut_ad(block->page.frame == page_align(rec));
const page_t *const page= block->page.frame;
const page_t *const new_page= new_block->page.frame;
const ulint comp= page_is_comp(page);
ut_ad(page == page_align(rec));
ut_ad(comp == page_is_comp(new_block->page.frame));
const page_id_t id{block->page.id()};
@ -2863,17 +2864,15 @@ lock_move_rec_list_end(
if (comp)
{
if (page_offset(rec1) == PAGE_NEW_INFIMUM)
rec1= page_rec_get_next_low(rec1, TRUE);
rec2= page_rec_get_next_low(new_block->page.frame + PAGE_NEW_INFIMUM,
TRUE);
if (rec1 - page == PAGE_NEW_INFIMUM)
rec1= page_rec_next_get<true>(page, rec1);
rec2= page_rec_next_get<true>(new_page, PAGE_NEW_INFIMUM + new_page);
}
else
{
if (page_offset(rec1) == PAGE_OLD_INFIMUM)
rec1= page_rec_get_next_low(rec1, FALSE);
rec2= page_rec_get_next_low(new_block->page.frame + PAGE_OLD_INFIMUM,
FALSE);
if (rec1 - page == PAGE_OLD_INFIMUM)
rec1= page_rec_next_get<false>(page, rec1);
rec2= page_rec_next_get<false>(new_page, PAGE_OLD_INFIMUM + new_page);
}
if (UNIV_UNLIKELY(!rec1 || !rec2))
@ -2895,19 +2894,19 @@ lock_move_rec_list_end(
if (comp)
{
rec1_heap_no= rec_get_heap_no_new(rec1);
if (!(rec1= page_rec_get_next_low(rec1, TRUE)))
if (!(rec1= page_rec_next_get<true>(page, rec1)))
{
ut_ad(rec1_heap_no == PAGE_HEAP_NO_SUPREMUM);
break;
}
rec2_heap_no= rec_get_heap_no_new(rec2);
rec2= page_rec_get_next_low(rec2, TRUE);
rec2= page_rec_next_get<true>(new_page, rec2);
}
else
{
ut_d(const rec_t *old1= rec1);
rec1_heap_no= rec_get_heap_no_old(rec1);
if (!(rec1= page_rec_get_next_low(rec1, FALSE)))
if (!(rec1= page_rec_next_get<false>(page, rec1)))
{
ut_ad(rec1_heap_no == PAGE_HEAP_NO_SUPREMUM);
break;
@ -2917,7 +2916,7 @@ lock_move_rec_list_end(
ut_ad(!memcmp(old1, rec2, rec_get_data_size_old(old1)));
rec2_heap_no= rec_get_heap_no_old(rec2);
rec2= page_rec_get_next_low(rec2, FALSE);
rec2= page_rec_next_get<false>(new_page, rec2);
}
if (UNIV_UNLIKELY(!rec2))
@ -2941,7 +2940,7 @@ lock_move_rec_list_end(
}
lock_rec_add_to_queue(type_mode, g.cell2(), new_id,
new_block->page.frame,
new_page,
rec2_heap_no, lock->index, lock_trx, true);
}
@ -2980,7 +2979,7 @@ lock_move_rec_list_start(
before the records
were copied */
{
const ulint comp= page_rec_is_comp(rec);
const ulint comp= page_is_comp(block->page.frame);
ut_ad(block->page.frame == page_align(rec));
ut_ad(comp == page_is_comp(new_block->page.frame));
@ -3002,15 +3001,15 @@ lock_move_rec_list_start(
if (comp)
{
rec1= page_rec_get_next_low(block->page.frame + PAGE_NEW_INFIMUM,
TRUE);
rec2= page_rec_get_next_low(old_end, TRUE);
rec1= page_rec_next_get<true>(block->page.frame,
block->page.frame + PAGE_NEW_INFIMUM);
rec2= page_rec_next_get<true>(new_block->page.frame, old_end);
}
else
{
rec1= page_rec_get_next_low(block->page.frame + PAGE_OLD_INFIMUM,
FALSE);
rec2= page_rec_get_next_low(old_end, FALSE);
rec1= page_rec_next_get<false>(block->page.frame,
block->page.frame + PAGE_OLD_INFIMUM);
rec2= page_rec_next_get<false>(new_block->page.frame, old_end);
}
/* Copy lock requests on user records to new page and
@ -3035,8 +3034,8 @@ lock_move_rec_list_start(
rec1_heap_no= rec_get_heap_no_new(rec1);
rec2_heap_no= rec_get_heap_no_new(rec2);
rec1= page_rec_get_next_low(rec1, TRUE);
rec2= page_rec_get_next_low(rec2, TRUE);
rec1= page_rec_next_get<true>(block->page.frame, rec1);
rec2= page_rec_next_get<true>(new_block->page.frame, rec2);
}
else
{
@ -3045,8 +3044,8 @@ lock_move_rec_list_start(
ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2)));
rec1= page_rec_get_next_low(rec1, FALSE);
rec2= page_rec_get_next_low(rec2, FALSE);
rec1= page_rec_next_get<false>(block->page.frame, rec1);
rec2= page_rec_next_get<false>(new_block->page.frame, rec2);
}
trx_t *lock_trx= lock->trx;
@ -3101,8 +3100,7 @@ lock_rtr_move_rec_list(
if (!num_move)
return;
const ulint comp= page_rec_is_comp(rec_move[0].old_rec);
const ulint comp= page_is_comp(block->page.frame);
ut_ad(block->page.frame == page_align(rec_move[0].old_rec));
ut_ad(new_block->page.frame == page_align(rec_move[0].new_rec));
ut_ad(comp == page_rec_is_comp(rec_move[0].new_rec));
@ -3233,6 +3231,17 @@ static void lock_assert_no_spatial(const page_id_t id)
}
#endif
/** Determine the heap number of an index record
@param block index page
@param rec index record
@return the heap number of the record */
static ulint lock_get_heap_no(const buf_block_t &block, const rec_t *rec)
{
ut_ad(page_align(rec) == block.page.frame);
return page_is_comp(block.page.frame)
? rec_get_heap_no_new(rec) : rec_get_heap_no_old(rec);
}
/*************************************************************//**
Updates the lock table when a page is merged to the right. */
void
@ -3252,6 +3261,7 @@ lock_update_merge_right(
const page_id_t l{left_block->page.id()};
const page_id_t r{right_block->page.id()};
const ulint h= lock_get_heap_no(*right_block, orig_succ);
/* This would likely be too large for a memory transaction. */
LockMultiGuard g{lock_sys.rec_hash, l, r};
@@ -3259,8 +3269,7 @@
original successor of infimum on the right page, to which the left
page was merged */
lock_rec_inherit_to_gap(g.cell2(), r, g.cell1(), l, right_block->page.frame,
page_rec_get_heap_no(orig_succ),
PAGE_HEAP_NO_SUPREMUM);
h, PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page, releasing
waiting transactions */
@@ -3329,21 +3338,38 @@ void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred,
ut_ad(left.page.frame == page_align(orig_pred));
const page_id_t l{left.page.id()};
const rec_t *left_next_rec= page_rec_get_next_const(orig_pred);
if (UNIV_UNLIKELY(!left_next_rec))
const auto comp= page_is_comp(left.page.frame);
const rec_t *left_next_rec;
ulint heap_no;
if (comp)
{
ut_ad("corrupted page" == 0);
return;
left_next_rec= page_rec_next_get<true>(left.page.frame, orig_pred);
if (UNIV_UNLIKELY(!left_next_rec))
{
ut_ad("corrupted page" == 0);
return;
}
heap_no= rec_get_heap_no_new(left_next_rec);
}
else
{
left_next_rec= page_rec_next_get<false>(left.page.frame, orig_pred);
if (UNIV_UNLIKELY(!left_next_rec))
{
ut_ad("corrupted page" == 0);
return;
}
heap_no= rec_get_heap_no_old(left_next_rec);
}
/* This would likely be too large for a memory transaction. */
LockMultiGuard g{lock_sys.rec_hash, l, right};
if (!page_rec_is_supremum(left_next_rec))
if (heap_no != PAGE_HEAP_NO_SUPREMUM)
{
/* Inherit the locks on the supremum of the left page to the
first record which was moved from the right page */
lock_rec_inherit_to_gap(g.cell1(), l, g.cell1(), l, left.page.frame,
page_rec_get_heap_no(left_next_rec),
heap_no,
PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page,
@@ -3479,16 +3505,16 @@ lock_update_insert(
/* Inherit the gap-locking locks for rec, in gap mode, from the next
record */
if (page_rec_is_comp(rec)) {
if (page_is_comp(block->page.frame)) {
receiver_heap_no = rec_get_heap_no_new(rec);
rec = page_rec_get_next_low(rec, TRUE);
rec = page_rec_next_get<true>(block->page.frame, rec);
if (UNIV_UNLIKELY(!rec)) {
return;
}
donator_heap_no = rec_get_heap_no_new(rec);
} else {
receiver_heap_no = rec_get_heap_no_old(rec);
rec = page_rec_get_next_low(rec, FALSE);
rec = page_rec_next_get<false>(block->page.frame, rec);
if (UNIV_UNLIKELY(!rec)) {
return;
}
@@ -3555,9 +3581,7 @@ lock_rec_store_on_page_infimum(
bits are reset on the
record */
{
const ulint heap_no= page_rec_get_heap_no(rec);
ut_ad(block->page.frame == page_align(rec));
const ulint heap_no= lock_get_heap_no(*block, rec);
const page_id_t id{block->page.id()};
#ifdef ENABLED_DEBUG_SYNC
SCOPE_EXIT([]() { DEBUG_SYNC_C("lock_rec_store_on_page_infimum_end"); });
@@ -3577,7 +3601,7 @@ whose infimum stored the lock state; lock bits are reset on the infimum */
void lock_rec_restore_from_page_infimum(const buf_block_t &block,
const rec_t *rec, page_id_t donator)
{
const ulint heap_no= page_rec_get_heap_no(rec);
const ulint heap_no= lock_get_heap_no(block, rec);
const page_id_t id{block.page.id()};
LockMultiGuard g{lock_sys.rec_hash, id, donator};
lock_rec_move(g.cell1(), block, id, g.cell2(), donator, heap_no,
@@ -4262,21 +4286,22 @@ lock_rec_unlock(
/*============*/
trx_t* trx, /*!< in/out: transaction that has
set a record lock */
const page_id_t id, /*!< in: page containing rec */
const buf_block_t& block, /*!< in: page containing rec */
const rec_t* rec, /*!< in: record */
lock_mode lock_mode)/*!< in: LOCK_S or LOCK_X */
{
lock_t* first_lock;
lock_t* lock;
ulint heap_no;
ut_ad(trx);
ut_ad(rec);
ut_ad(!trx->lock.wait_lock);
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
ut_ad(page_rec_is_leaf(rec));
ut_ad(!page_rec_is_metadata(rec));
heap_no = page_rec_get_heap_no(rec);
const ulint heap_no = lock_get_heap_no(block, rec);
const page_id_t id{block.page.id()};
LockGuard g{lock_sys.rec_hash, id};
@@ -5752,14 +5777,26 @@ lock_rec_insert_check_and_lock(
ut_ad(page_is_leaf(block->page.frame));
ut_ad(!index->table->is_temporary());
const rec_t *next_rec= page_rec_get_next_const(rec);
if (UNIV_UNLIKELY(!next_rec || rec_is_metadata(next_rec, *index)))
return DB_CORRUPTION;
const auto comp= page_is_comp(block->page.frame);
const rec_t *next_rec;
if (UNIV_LIKELY(comp != 0))
{
next_rec= page_rec_next_get<true>(block->page.frame, rec);
if (UNIV_UNLIKELY(!next_rec || rec_is_metadata(next_rec, TRUE)))
return DB_CORRUPTION;
}
else
{
next_rec= page_rec_next_get<false>(block->page.frame, rec);
if (UNIV_UNLIKELY(!next_rec || rec_is_metadata(next_rec, FALSE)))
return DB_CORRUPTION;
}
dberr_t err= DB_SUCCESS;
bool inherit_in= *inherit;
trx_t *trx= thr_get_trx(thr);
ulint heap_no= page_rec_get_heap_no(next_rec);
const ulint heap_no= comp
? rec_get_heap_no_new(next_rec) : rec_get_heap_no_old(next_rec);
const page_id_t id{block->page.id()};
{
@@ -5842,12 +5879,12 @@ lock_rec_insert_check_and_lock(
/** Create an explicit record lock for a transaction that currently only
has an implicit lock on the record.
@param trx referenced, active transaction, or nullptr
@param id page identifier
@param block index leaf page
@param rec record in the page
@param index the index B-tree that the record belongs to
@return trx, with the reference released */
static trx_t *lock_rec_convert_impl_to_expl_for_trx(trx_t *trx,
const page_id_t id,
const buf_block_t &block,
const rec_t *rec,
dict_index_t *index)
{
@@ -5857,7 +5894,8 @@ static trx_t *lock_rec_convert_impl_to_expl_for_trx(trx_t *trx,
ut_ad(page_rec_is_leaf(rec));
ut_ad(!rec_is_metadata(rec, *index));
ulint heap_no= page_rec_get_heap_no(rec);
const ulint heap_no= lock_get_heap_no(block, rec);
const page_id_t id{block.page.id()};
{
LockGuard g{lock_sys.rec_hash, id};
@@ -5962,7 +6000,7 @@ should be created.
@tparam is_primary whether the index is the primary key
@param[in,out] caller_trx current transaction
@param[in] id index tree leaf page identifier
@param[in] block index tree leaf page
@param[in] rec record on the leaf page
@param[in] index the index of the record
@param[in] offsets rec_get_offsets(rec,index)
@@ -5973,7 +6011,7 @@ static
const trx_t *
lock_rec_convert_impl_to_expl(
trx_t* caller_trx,
page_id_t id,
const buf_block_t& block,
const rec_t* rec,
dict_index_t* index,
const rec_offs* offsets)
@@ -6011,10 +6049,11 @@ lock_rec_convert_impl_to_expl(
return trx;
}
ut_d(lock_rec_other_trx_holds_expl(caller_trx, trx, rec, id));
ut_d(lock_rec_other_trx_holds_expl(caller_trx, trx, rec,
block.page.id()));
}
return lock_rec_convert_impl_to_expl_for_trx(trx, id, rec, index);
return lock_rec_convert_impl_to_expl_for_trx(trx, block, rec, index);
}
/*********************************************************************//**
@@ -6055,7 +6094,7 @@ lock_clust_rec_modify_check_and_lock(
trx_t *trx = thr_get_trx(thr);
if (const trx_t *owner =
lock_rec_convert_impl_to_expl<true>(trx, block->page.id(),
lock_rec_convert_impl_to_expl<true>(trx, *block,
rec, index, offsets)) {
if (owner == trx) {
/* We already hold an exclusive lock. */
@@ -6105,7 +6144,6 @@ lock_sec_rec_modify_check_and_lock(
ut_ad(!dict_index_is_clust(index));
ut_ad(!dict_index_is_online_ddl(index) || (flags & BTR_CREATE_FLAG));
ut_ad(block->page.frame == page_align(rec));
ut_ad(mtr->is_named_space(index->table->space));
ut_ad(page_rec_is_leaf(rec));
ut_ad(!rec_is_metadata(rec, *index));
@@ -6116,7 +6154,7 @@ lock_sec_rec_modify_check_and_lock(
}
ut_ad(!index->table->is_temporary());
heap_no = page_rec_get_heap_no(rec);
heap_no = lock_get_heap_no(*block, rec);
#ifdef WITH_WSREP
trx_t *trx= thr_get_trx(thr);
@@ -6229,7 +6267,7 @@ lock_sec_rec_read_check_and_lock(
if (page_rec_is_supremum(rec)) {
} else if (const trx_t *owner =
lock_rec_convert_impl_to_expl<false>(trx, block->page.id(),
lock_rec_convert_impl_to_expl<false>(trx, *block,
rec, index, offsets)) {
if (owner == trx) {
if (gap_mode == LOCK_REC_NOT_GAP) {
@@ -6255,7 +6293,7 @@ lock_sec_rec_read_check_and_lock(
#endif /* WITH_WSREP */
err = lock_rec_lock(false, gap_mode | mode,
block, page_rec_get_heap_no(rec), index, thr);
block, lock_get_heap_no(*block, rec), index, thr);
#ifdef WITH_WSREP
if (trx->wsrep == 3) trx->wsrep = 1;
@@ -6314,15 +6352,13 @@ lock_clust_rec_read_check_and_lock(
return(DB_SUCCESS);
}
const page_id_t id{block->page.id()};
ulint heap_no = page_rec_get_heap_no(rec);
const ulint heap_no = lock_get_heap_no(*block, rec);
trx_t *trx = thr_get_trx(thr);
if (lock_table_has(trx, index->table, LOCK_X)
|| heap_no == PAGE_HEAP_NO_SUPREMUM) {
} else if (const trx_t *owner =
lock_rec_convert_impl_to_expl<true>(trx, id,
lock_rec_convert_impl_to_expl<true>(trx, *block,
rec, index, offsets)) {
if (owner == trx) {
if (gap_mode == LOCK_REC_NOT_GAP) {
@@ -6346,7 +6382,8 @@ lock_clust_rec_read_check_and_lock(
dberr_t err = lock_rec_lock(false, gap_mode | mode,
block, heap_no, index, thr);
ut_ad(lock_rec_queue_validate(false, id, rec, index, offsets));
ut_ad(lock_rec_queue_validate(false, block->page.id(),
rec, index, offsets));
DEBUG_SYNC_C("after_lock_clust_rec_read_check_and_lock");
@@ -7234,16 +7271,42 @@ void lock_update_split_and_merge(
ut_ad(page_is_leaf(left_block->page.frame));
ut_ad(page_is_leaf(right_block->page.frame));
ut_ad(page_align(orig_pred) == left_block->page.frame);
const auto comp= page_is_comp(left_block->page.frame);
const page_id_t l{left_block->page.id()};
const page_id_t r{right_block->page.id()};
const rec_t *left_next_rec= page_rec_get_next_const(orig_pred);
if (UNIV_UNLIKELY(!left_next_rec))
const rec_t *left_next_rec;
ulint left_heap_no, right_heap_no;
if (UNIV_LIKELY(comp != 0))
{
ut_ad("corrupted page" == 0);
return;
left_next_rec= page_rec_next_get<true>(left_block->page.frame, orig_pred);
if (UNIV_UNLIKELY(!left_next_rec))
{
ut_ad("corrupted page" == 0);
return;
}
ut_ad(!rec_is_metadata(left_next_rec, comp));
left_heap_no= rec_get_heap_no_new(left_next_rec);
right_heap_no=
rec_get_heap_no_new(right_block->page.frame +
rec_get_next_offs(right_block->page.frame +
PAGE_NEW_INFIMUM, TRUE));
}
else
{
left_next_rec= page_rec_next_get<false>(left_block->page.frame, orig_pred);
if (UNIV_UNLIKELY(!left_next_rec))
{
ut_ad("corrupted page" == 0);
return;
}
ut_ad(!rec_is_metadata(left_next_rec, comp));
left_heap_no= rec_get_heap_no_old(left_next_rec);
right_heap_no=
rec_get_heap_no_old(right_block->page.frame +
rec_get_next_offs(right_block->page.frame +
PAGE_OLD_INFIMUM, FALSE));
}
ut_ad(!page_rec_is_metadata(left_next_rec));
/* This would likely be too large for a memory transaction. */
LockMultiGuard g{lock_sys.rec_hash, l, r};
@@ -7251,8 +7314,7 @@ void lock_update_split_and_merge(
/* Inherit the locks on the supremum of the left page to the
first record which was moved from the right page */
lock_rec_inherit_to_gap(g.cell1(), l, g.cell1(), l, left_block->page.frame,
page_rec_get_heap_no(left_next_rec),
PAGE_HEAP_NO_SUPREMUM);
left_heap_no, PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page,
releasing waiting transactions */
@@ -7261,6 +7323,5 @@ void lock_update_split_and_merge(
/* Inherit the locks to the supremum of the left page from the
successor of the infimum on the right page */
lock_rec_inherit_to_gap(g.cell1(), l, g.cell2(), r, left_block->page.frame,
PAGE_HEAP_NO_SUPREMUM,
lock_get_min_heap_no(right_block));
PAGE_HEAP_NO_SUPREMUM, right_heap_no);
}
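
The lock-system hunks above all follow one shape: the caller already holds the buf_block_t, so the page frame travels down the call chain (lock_get_heap_no(), lock_rec_unlock(), lock_rec_convert_impl_to_expl()) and page_align() survives only inside ut_ad() assertions. A minimal standalone sketch of that shape, with stand-in names rather than the real InnoDB API:

#include <cassert>
#include <cstddef>
#include <cstdint>

static unsigned page_size_shift = 14;  // run-time page size, e.g. 16KiB

static const unsigned char *page_align_(const void *ptr)
{
  // mask with the run-time page size; this load and mask is the cost
  // that the rewrite keeps off the hot paths
  return reinterpret_cast<const unsigned char*>
    (reinterpret_cast<uintptr_t>(ptr) &
     ~((uintptr_t{1} << page_size_shift) - 1));
}

// before: the frame is re-derived from rec on every call
static size_t page_offset_slow(const unsigned char *rec)
{ return size_t(rec - page_align_(rec)); }

// after: the caller passes the frame; realignment is debug-only
static size_t page_offset_fast(const unsigned char *frame,
                               const unsigned char *rec)
{
  assert(page_align_(rec) == frame);
  return size_t(rec - frame);
}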

View file

@@ -1516,7 +1516,6 @@ void recv_sys_t::debug_free()
inline void recv_sys_t::free(const void *data)
{
ut_ad(!ut_align_offset(data, ALIGNMENT));
data= page_align(data);
mysql_mutex_assert_owner(&mutex);
/* MDEV-14481 FIXME: To prevent race condition with buf_pool.resize(),
@@ -1533,7 +1532,7 @@ inline void recv_sys_t::free(const void *data)
if (offs >= chunk->size)
continue;
buf_block_t *block= &chunk->blocks[offs];
ut_ad(block->page.frame == data);
ut_ad(block->page.frame == page_align(data));
ut_ad(block->page.state() == buf_page_t::MEMORY);
ut_ad(static_cast<uint16_t>(block->page.access_time - 1) <
srv_page_size);

View file

@@ -925,7 +925,7 @@ static void page_zip_dir_split_slot(buf_block_t *block, ulint s, mtr_t* mtr)
/* Log changes to the compressed page header and the dense page directory. */
memcpy_aligned<2>(&block->page.zip.data[n_slots_f], n_slots_p, 2);
mach_write_to_2(slot, page_offset(rec));
mach_write_to_2(slot, rec - block->page.frame);
page_rec_set_n_owned<true>(block, page_dir_slot_get_rec(slot), half_owned,
true, mtr);
page_rec_set_n_owned<true>(block,
@@ -990,7 +990,8 @@ static void page_zip_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr)
/* Transfer one record to the underfilled slot */
page_rec_set_n_owned<true>(block, slot_rec, 0, true, mtr);
const rec_t* new_rec = page_rec_get_next_low(slot_rec, TRUE);
const rec_t* new_rec = page_rec_next_get<true>(block->page.frame,
slot_rec);
/* We do not try to prevent crash on corruption here.
For ROW_FORMAT=COMPRESSED pages, the next-record links should
be validated in page_zip_decompress(). Corruption should only
@@ -998,7 +999,7 @@ static void page_zip_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr)
page_rec_set_n_owned<true>(block, const_cast<rec_t*>(new_rec),
PAGE_DIR_SLOT_MIN_N_OWNED,
true, mtr);
mach_write_to_2(slot, page_offset(new_rec));
mach_write_to_2(slot, new_rec - block->page.frame);
page_rec_set_n_owned(up_rec, up_n_owned - 1, true);
}
@@ -1060,7 +1061,8 @@ static void page_dir_balance_slot(const buf_block_t &block, ulint s)
if (comp) {
if (UNIV_UNLIKELY(!(new_rec =
page_rec_get_next_low(slot_rec, true)))) {
page_rec_next_get<true>(block.page.frame,
slot_rec)))) {
ut_ad("corrupted page" == 0);
return;
}
@@ -1070,7 +1072,8 @@ static void page_dir_balance_slot(const buf_block_t &block, ulint s)
page_rec_set_n_owned(up_rec, up_n_owned - 1, true);
} else {
if (UNIV_UNLIKELY(!(new_rec =
page_rec_get_next_low(slot_rec, false)))) {
page_rec_next_get<false>(block.page.frame,
slot_rec)))) {
ut_ad("corrupted page" == 0);
return;
}
@@ -1080,7 +1083,7 @@ static void page_dir_balance_slot(const buf_block_t &block, ulint s)
page_rec_set_n_owned(up_rec, up_n_owned - 1, false);
}
mach_write_to_2(slot, page_offset(new_rec));
mach_write_to_2(slot, new_rec - block.page.frame);
}
/** Allocate space for inserting an index record.
@@ -1476,7 +1479,8 @@ use_heap:
rec_get_node_ptr_flag(rec));
/* Write PAGE_LAST_INSERT */
mach_write_to_2(page_last_insert, page_offset(insert_buf + extra_size));
mach_write_to_2(page_last_insert,
insert_buf + extra_size - block->page.frame);
/* Update PAGE_DIRECTION_B, PAGE_N_DIRECTION if needed */
if (block->page.frame[FIL_PAGE_TYPE + 1] != byte(FIL_PAGE_RTREE))
@@ -1609,7 +1613,7 @@ inc_dir:
rec_set_bit_field_2(insert_rec, heap_no,
REC_OLD_HEAP_NO, REC_HEAP_NO_MASK, REC_HEAP_NO_SHIFT);
memcpy(insert_rec - REC_NEXT, cur->rec - REC_NEXT, 2);
mach_write_to_2(cur->rec - REC_NEXT, page_offset(insert_rec));
mach_write_to_2(cur->rec - REC_NEXT, insert_rec - block->page.frame);
while (!(n_owned= rec_get_n_owned_old(next_rec)))
{
next_rec= block->page.frame + rec_get_next_offs(next_rec, false);
@@ -2049,7 +2053,7 @@ use_heap:
}
/* next record after current before the insertion */
const rec_t *next_rec = page_rec_get_next_low(cursor->rec, TRUE);
const rec_t *next_rec = page_rec_next_get<true>(page, cursor->rec);
if (UNIV_UNLIKELY(!next_rec ||
rec_get_status(next_rec) == REC_STATUS_INFIMUM ||
rec_get_status(cursor->rec) > REC_STATUS_INFIMUM))
@@ -2088,7 +2092,7 @@ use_heap:
ut_ad(!last_insert_rec ||
rec_get_node_ptr_flag(page + last_insert_rec) ==
rec_get_node_ptr_flag(insert_rec));
mach_write_to_2(last_insert, page_offset(insert_rec));
mach_write_to_2(last_insert, insert_rec - page);
if (!index->is_spatial())
{
@@ -2108,7 +2112,7 @@ no_direction:
inc_dir:
mach_write_to_2(n, mach_read_from_2(n) + 1);
}
else if (*dir != PAGE_RIGHT && page_rec_get_next(insert_rec) ==
else if (*dir != PAGE_RIGHT && page_rec_next_get<true>(page, insert_rec) ==
page + last_insert_rec)
{
*dir= PAGE_LEFT;
@@ -2129,7 +2133,7 @@ inc_dir:
ulint n_owned;
while (!(n_owned= rec_get_n_owned_new(next_rec)))
if (!(next_rec= page_rec_get_next_low(next_rec, true)))
if (!(next_rec= page_rec_next_get<true>(page, next_rec)))
return nullptr;
rec_set_bit_field_1(const_cast<rec_t*>(next_rec), n_owned + 1,
@@ -2179,12 +2183,13 @@ static void page_mem_free(const buf_block_t &block, rec_t *rec,
byte *page_heap_top= my_assume_aligned<2>(PAGE_HEAP_TOP + PAGE_HEADER +
block.page.frame);
const uint16_t heap_top= mach_read_from_2(page_heap_top);
const size_t extra_savings= heap_top - page_offset(rec + data_size);
const size_t extra_savings= heap_top -
(rec + data_size - block.page.frame);
ut_ad(extra_savings < heap_top);
/* When deleting the last record, do not add it to the PAGE_FREE list.
Instead, decrement PAGE_HEAP_TOP and PAGE_N_HEAP. */
mach_write_to_2(page_heap_top, page_offset(rec - extra_size));
mach_write_to_2(page_heap_top, rec - extra_size - block.page.frame);
mach_write_to_2(my_assume_aligned<2>(page_heap_top + 2), n_heap);
static_assert(PAGE_N_HEAP == PAGE_HEAP_TOP + 2, "compatibility");
if (extra_savings)
@@ -2202,7 +2207,7 @@ static void page_mem_free(const buf_block_t &block, rec_t *rec,
block.page.frame);
byte *page_garbage= my_assume_aligned<2>(PAGE_GARBAGE + PAGE_HEADER +
block.page.frame);
mach_write_to_2(page_free, page_offset(rec));
mach_write_to_2(page_free, rec - block.page.frame);
mach_write_to_2(page_garbage, mach_read_from_2(page_garbage) +
extra_size + data_size);
}
@@ -2346,7 +2351,8 @@ page_cur_delete_rec(
page_zip_rec_set_owned(block, prev_rec, 1, mtr);
page_zip_rec_set_owned(block, slot_rec, 0, mtr);
slot_rec = prev_rec;
mach_write_to_2(cur_dir_slot, page_offset(slot_rec));
mach_write_to_2(cur_dir_slot,
slot_rec - block->page.frame);
} else if (cur_n_owned == 1
&& !page_rec_is_supremum(slot_rec)) {
page_zip_rec_set_owned(block, slot_rec, 0, mtr);
@@ -2371,14 +2377,14 @@ page_cur_delete_rec(
if (current_rec == slot_rec) {
slot_rec = prev_rec;
mach_write_to_2(cur_dir_slot, page_offset(slot_rec));
mach_write_to_2(cur_dir_slot, slot_rec - block->page.frame);
}
const size_t data_size = rec_offs_data_size(offsets);
const size_t extra_size = rec_offs_extra_size(offsets);
if (page_is_comp(block->page.frame)) {
mtr->page_delete(*block, page_offset(prev_rec)
mtr->page_delete(*block, prev_rec - block->page.frame
- PAGE_NEW_INFIMUM,
extra_size - REC_N_NEW_EXTRA_BYTES,
data_size);
@@ -2388,7 +2394,7 @@ page_cur_delete_rec(
(slot_rec[-REC_NEW_N_OWNED] & ~REC_N_OWNED_MASK)
| (cur_n_owned - 1) << REC_N_OWNED_SHIFT);
} else {
mtr->page_delete(*block, page_offset(prev_rec)
mtr->page_delete(*block, prev_rec - block->page.frame
- PAGE_OLD_INFIMUM);
memcpy(prev_rec - REC_NEXT, current_rec - REC_NEXT, 2);
slot_rec[-REC_OLD_N_OWNED] = static_cast<byte>(
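
A recurring change in the hunks above (page_zip_dir_split_slot(), page_mem_free(), page_cur_delete_rec()) is storing a two-byte in-page offset as rec - frame instead of page_offset(rec). A standalone sketch of that store; write2() is only a stand-in for mach_write_to_2():

#include <cstdint>

static void write2(unsigned char *dst, uint16_t v)
{
  dst[0] = static_cast<unsigned char>(v >> 8);  // big-endian, high byte first
  dst[1] = static_cast<unsigned char>(v);
}

static void slot_set_rec(unsigned char *slot, const unsigned char *frame,
                         const unsigned char *rec)
{
  // rec - frame is the value page_offset(rec) would compute, minus the
  // masking of rec with the run-time page size
  write2(slot, static_cast<uint16_t>(rec - frame));
}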

View file

@@ -99,7 +99,7 @@ page_dir_find_owner_slot(
if (page_is_comp(page)) {
while (rec_get_n_owned_new(r) == 0) {
r = page_rec_get_next_low(r, true);
r = page_rec_next_get<true>(page, r);
if (UNIV_UNLIKELY(r < page + PAGE_NEW_SUPREMUM
|| r >= slot)) {
return ULINT_UNDEFINED;
@@ -107,7 +107,7 @@ page_dir_find_owner_slot(
}
} else {
while (rec_get_n_owned_old(r) == 0) {
r = page_rec_get_next_low(r, false);
r = page_rec_next_get<false>(page, r);
if (UNIV_UNLIKELY(r < page + PAGE_OLD_SUPREMUM
|| r >= slot)) {
return ULINT_UNDEFINED;
@@ -465,7 +465,8 @@ page_copy_rec_list_end_no_locks(
return DB_CORRUPTION;
}
if (UNIV_UNLIKELY(page_is_comp(new_page) != page_rec_is_comp(rec)
if (UNIV_UNLIKELY(page_is_comp(new_page)
!= page_is_comp(block->page.frame)
|| mach_read_from_2(new_page + srv_page_size - 10)
!= ulint(page_is_comp(new_page)
? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM))) {
@@ -893,11 +894,12 @@ page_delete_rec_list_end(
return DB_SUCCESS;
}
if (page_rec_is_infimum(rec) ||
n_recs == page_get_n_recs(page) ||
rec == (page_is_comp(page)
? page_rec_get_next_low(page + PAGE_NEW_INFIMUM, 1)
: page_rec_get_next_low(page + PAGE_OLD_INFIMUM, 0)))
if (n_recs == page_get_n_recs(page) ||
(page_is_comp(page)
? (rec == page + PAGE_NEW_INFIMUM ||
rec == page_rec_next_get<true>(page, page + PAGE_NEW_INFIMUM))
: (rec == page + PAGE_OLD_INFIMUM ||
rec == page_rec_next_get<false>(page, page + PAGE_OLD_INFIMUM))))
{
/* We are deleting all records. */
page_create_empty(block, index, mtr);
@@ -933,13 +935,13 @@ page_delete_rec_list_end(
cur.index= index;
offsets= rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
rec= const_cast<rec_t*>(page_rec_get_next_low(rec, true));
rec= const_cast<rec_t*>(page_rec_next_get<true>(page, rec));
#ifdef UNIV_ZIP_DEBUG
ut_a(page_zip_validate(&block->page.zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&cur, offsets, mtr);
}
while (page_offset(rec) != PAGE_NEW_SUPREMUM);
while (rec - page != PAGE_NEW_SUPREMUM);
if (UNIV_LIKELY_NULL(heap))
mem_heap_free(heap);
@@ -976,7 +978,7 @@ page_delete_rec_list_end(
n_recs++;
if (scrub)
mtr->memset(block, page_offset(rec2), rec_offs_data_size(offsets), 0);
mtr->memset(block, rec2 - page, rec_offs_data_size(offsets), 0);
rec2= page_rec_get_next(rec2);
}
@@ -1000,14 +1002,14 @@ page_delete_rec_list_end(
while (!(n_owned= rec_get_n_owned_new(owner_rec)))
{
count++;
if (!(owner_rec= page_rec_get_next_low(owner_rec, true)))
if (!(owner_rec= page_rec_next_get<true>(page, owner_rec)))
return DB_CORRUPTION;
}
else
while (!(n_owned= rec_get_n_owned_old(owner_rec)))
{
count++;
if (!(owner_rec= page_rec_get_next_low(owner_rec, false)))
if (!(owner_rec= page_rec_next_get<false>(page, owner_rec)))
return DB_CORRUPTION;
}
@@ -1030,7 +1032,7 @@ page_delete_rec_list_end(
const uint16_t free= page_header_get_field(page, PAGE_FREE);
static_assert(PAGE_FREE + 2 == PAGE_GARBAGE, "compatibility");
mach_write_to_2(page_header, page_offset(rec));
mach_write_to_2(page_header, rec - page);
mach_write_to_2(my_assume_aligned<2>(page_header + 2),
mach_read_from_2(my_assume_aligned<2>(page_free + 2)) +
size);
@@ -1060,19 +1062,17 @@ page_delete_rec_list_end(
PAGE_N_RECS + 2 - PAGE_N_DIR_SLOTS);
// TODO: the equivalent of page_zip_dir_delete() for all records
mach_write_to_2(prev_rec - REC_NEXT, static_cast<uint16_t>
(PAGE_NEW_SUPREMUM - page_offset(prev_rec)));
(PAGE_NEW_SUPREMUM - (prev_rec - page)));
mach_write_to_2(last_rec - REC_NEXT, free
? static_cast<uint16_t>(free - page_offset(last_rec))
: 0U);
? uint16_t(free - (last_rec - block->page.frame)) : 0U);
return DB_SUCCESS;
}
#endif
mtr->write<1,mtr_t::MAYBE_NOP>(*block, owned, new_owned);
mtr->write<2>(*block, prev_rec - REC_NEXT, static_cast<uint16_t>
(PAGE_NEW_SUPREMUM - page_offset(prev_rec)));
(PAGE_NEW_SUPREMUM - (prev_rec - block->page.frame)));
mtr->write<2>(*block, last_rec - REC_NEXT, free
? static_cast<uint16_t>(free - page_offset(last_rec))
: 0U);
? uint16_t(free - (last_rec - block->page.frame)) : 0U);
}
else
{
@@ -1196,11 +1196,11 @@ page_rec_get_nth_const(
if (page_is_comp(page)) {
do {
rec = page_rec_get_next_low(rec, TRUE);
rec = page_rec_next_get<true>(page, rec);
} while (rec && nth--);
} else {
do {
rec = page_rec_get_next_low(rec, FALSE);
rec = page_rec_next_get<false>(page, rec);
} while (rec && nth--);
}
@@ -1300,7 +1300,7 @@ ulint page_rec_get_n_recs_before(const rec_t *rec)
if (page_is_comp(page))
{
for (; rec_get_n_owned_new(rec) == 0; n--)
if (UNIV_UNLIKELY(!(rec= page_rec_get_next_low(rec, true))))
if (UNIV_UNLIKELY(!(rec= page_rec_next_get<true>(page, rec))))
return ULINT_UNDEFINED;
do
@@ -1318,7 +1318,7 @@ ulint page_rec_get_n_recs_before(const rec_t *rec)
else
{
for (; rec_get_n_owned_old(rec) == 0; n--)
if (UNIV_UNLIKELY(!(rec= page_rec_get_next_low(rec, false))))
if (UNIV_UNLIKELY(!(rec= page_rec_next_get<false>(page, rec))))
return ULINT_UNDEFINED;
do
@@ -1383,9 +1383,8 @@ page_dir_print(
fprintf(stderr, "--------------------------------\n"
"PAGE DIRECTORY\n"
"Page address %p\n"
"Directory stack top at offs: %lu; number of slots: %lu\n",
page, (ulong) page_offset(page_dir_get_nth_slot(page, n - 1)),
(ulong) n);
"Directory stack top at offs: %zu; number of slots: %zu\n",
page, page_dir_get_nth_slot(page, n - 1) - page, n);
for (i = 0; i < n; i++) {
slot = page_dir_get_nth_slot(page, i);
if ((i == pr_n) && (i < n - pr_n)) {
@@ -1393,17 +1392,16 @@ page_dir_print(
}
if ((i < pr_n) || (i >= n - pr_n)) {
fprintf(stderr,
"Contents of slot: %lu: n_owned: %lu,"
" rec offs: %lu\n",
(ulong) i,
(ulong) page_dir_slot_get_n_owned(slot),
(ulong)
page_offset(page_dir_slot_get_rec(slot)));
"Contents of slot: %zu: n_owned: %zu,"
" rec offs: %zu\n",
i,
page_dir_slot_get_n_owned(slot),
page_dir_slot_get_rec(slot) - page);
}
}
fprintf(stderr, "Total of %lu records\n"
fprintf(stderr, "Total of %zu records\n"
"--------------------------------\n",
(ulong) (PAGE_HEAP_NO_USER_LOW + page_get_n_recs(page)));
PAGE_HEAP_NO_USER_LOW + page_get_n_recs(page));
}
/***************************************************************//**
@@ -1546,7 +1544,7 @@ page_rec_validate(
page_rec_check(rec);
rec_validate(rec, offsets);
if (page_rec_is_comp(rec)) {
if (page_is_comp(page)) {
n_owned = rec_get_n_owned_new(rec);
heap_no = rec_get_heap_no_new(rec);
} else {
@@ -1555,13 +1553,13 @@ page_rec_validate(
}
if (UNIV_UNLIKELY(!(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED))) {
ib::warn() << "Dir slot of rec " << page_offset(rec)
ib::warn() << "Dir slot of rec " << rec - page
<< ", n owned too big " << n_owned;
return(FALSE);
}
if (UNIV_UNLIKELY(!(heap_no < page_dir_get_n_heap(page)))) {
ib::warn() << "Heap no of rec " << page_offset(rec)
ib::warn() << "Heap no of rec " << rec - page
<< " too big " << heap_no << " "
<< page_dir_get_n_heap(page);
return(FALSE);
@@ -1643,8 +1641,7 @@ page_simple_validate_old(
<< "Record heap and dir overlap on a page, heap top "
<< page_header_get_field(page, PAGE_HEAP_TOP)
<< ", dir "
<< page_offset(page_dir_get_nth_slot(page,
n_slots - 1));
<< page_dir_get_nth_slot(page, n_slots - 1) - page;
goto func_exit;
}
@@ -1721,7 +1718,7 @@ page_simple_validate_old(
goto func_exit;
}
rec = page_rec_get_next_const(rec);
rec = page_rec_next_get<false>(page, rec);
own_count++;
}
@@ -1841,9 +1838,9 @@ page_simple_validate_new(
ib::error() << "Record heap and dir overlap on a page,"
" heap top "
<< page_header_get_field(page, PAGE_HEAP_TOP)
<< ", dir " << page_offset(
page_dir_get_nth_slot(page, n_slots - 1));
<< page_header_get_field(page, PAGE_HEAP_TOP)
<< ", dir "
<< page_dir_get_nth_slot(page, n_slots - 1) - page;
goto func_exit;
}
@@ -1861,9 +1858,9 @@ page_simple_validate_new(
for (;;) {
if (UNIV_UNLIKELY(rec < page + PAGE_NEW_INFIMUM
|| rec > rec_heap_top)) {
ib::error() << "Record " << page_offset(rec)
ib::error() << "Record " << rec - page
<< " is out of bounds: "
<< page_offset(rec_heap_top);
<< rec_heap_top - page;
goto func_exit;
}
@@ -1875,7 +1872,7 @@ page_simple_validate_new(
ib::error() << "Wrong owned count "
<< rec_get_n_owned_new(rec) << ", "
<< own_count << ", rec "
<< page_offset(rec);
<< rec - page;
goto func_exit;
}
@@ -1883,7 +1880,7 @@ page_simple_validate_new(
if (UNIV_UNLIKELY
(page_dir_slot_get_rec(slot) != rec)) {
ib::error() << "Dir slot does not point"
" to right rec " << page_offset(rec);
" to right rec " << rec - page;
goto func_exit;
}
@@ -1907,7 +1904,7 @@ page_simple_validate_new(
ib::error() << "Next record offset nonsensical "
<< rec_get_next_offs(rec, TRUE)
<< " for rec " << page_offset(rec);
<< " for rec " << rec - page;
goto func_exit;
}
@@ -1920,7 +1917,7 @@ page_simple_validate_new(
goto func_exit;
}
rec = page_rec_get_next_const(rec);
rec = page_rec_next_get<true>(page, rec);
own_count++;
}
@@ -1954,15 +1951,15 @@ page_simple_validate_new(
|| rec >= page + srv_page_size)) {
ib::error() << "Free list record has"
" a nonsensical offset " << page_offset(rec);
" a nonsensical offset " << rec - page;
goto func_exit;
}
if (UNIV_UNLIKELY(rec > rec_heap_top)) {
ib::error() << "Free list record " << page_offset(rec)
ib::error() << "Free list record " << rec - page
<< " is above rec heap top "
<< page_offset(rec_heap_top);
<< rec_heap_top - page;
goto func_exit;
}
@@ -2272,7 +2269,7 @@ wrong_page_type:
#endif /* UNIV_GIS_DEBUG */
}
offs = page_offset(rec_get_start(rec, offsets));
offs = rec_get_start(rec, offsets) - page;
i = rec_offs_size(offsets);
if (UNIV_UNLIKELY(offs + i >= srv_page_size)) {
ib::error() << "Record offset out of bounds: "
@@ -2402,7 +2399,7 @@ next_free:
}
count++;
offs = page_offset(rec_get_start(rec, offsets));
offs = rec_get_start(rec, offsets) - page;
i = rec_offs_size(offsets);
if (UNIV_UNLIKELY(offs + i >= srv_page_size)) {
ib::error() << "Free record offset out of bounds: "
@@ -2501,7 +2498,7 @@ const rec_t *page_find_rec_max_not_deleted(const page_t *page)
if (!(rec[-REC_NEW_INFO_BITS] &
(REC_INFO_DELETED_FLAG | REC_INFO_MIN_REC_FLAG)))
prev_rec= rec;
if (!(rec= page_rec_get_next_low(rec, true)))
if (!(rec= page_rec_next_get<true>(page, rec)))
return page + PAGE_NEW_INFIMUM;
} while (rec != page + PAGE_NEW_SUPREMUM);
return prev_rec;
@@ -2515,7 +2512,7 @@ const rec_t *page_find_rec_max_not_deleted(const page_t *page)
if (!(rec[-REC_OLD_INFO_BITS] &
(REC_INFO_DELETED_FLAG | REC_INFO_MIN_REC_FLAG)))
prev_rec= rec;
if (!(rec= page_rec_get_next_low(rec, false)))
if (!(rec= page_rec_next_get<false>(page, rec)))
return page + PAGE_OLD_INFIMUM;
} while (rec != page + PAGE_OLD_SUPREMUM);
return prev_rec;
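
The record walks above (page_dir_find_owner_slot(), page_delete_rec_list_end(), page_find_rec_max_not_deleted()) funnel through page_rec_next_get<comp>(). The sketch below imitates such a templated getter under an assumed, simplified link encoding — a two-byte link just before each record, relative on comp pages and an absolute in-page offset otherwise — so it illustrates the dispatch, not the real record header:

#include <cstddef>
#include <cstdint>

static const size_t PAGE_SIZE = 16384;  // assumed fixed for the sketch

static uint16_t read2(const unsigned char *p)
{ return static_cast<uint16_t>(p[0] << 8 | p[1]); }

template<bool comp>
static const unsigned char *rec_next(const unsigned char *page,
                                     const unsigned char *rec)
{
  const uint16_t link = read2(rec - 2);  // assumed link position
  if (!link)
    return nullptr;                      // 0 terminates the list
  const unsigned char *next = comp
    ? rec + static_cast<int16_t>(link)   // relative to the current record
    : page + link;                       // absolute offset within the page
  // report corruption as nullptr instead of chasing a stray link
  return next > page && static_cast<size_t>(next - page) < PAGE_SIZE
    ? next : nullptr;
}

A caller evaluates page_is_comp() once and then loops on a single instantiation, for example rec = rec_next<true>(page, rec), so the per-record format test disappears.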

View file

@@ -3405,17 +3405,16 @@ page_zip_validate_low(
goto func_exit;
}
rec = page_rec_get_next_low(rec, TRUE);
trec = page_rec_get_next_low(trec, TRUE);
rec = page_rec_next_get<true>(page, rec);
trec = page_rec_next_get<true>(temp_page, trec);
}
/* Compare the records. */
heap = NULL;
offsets = NULL;
rec = page_rec_get_next_low(
page + PAGE_NEW_INFIMUM, TRUE);
trec = page_rec_get_next_low(
temp_page + PAGE_NEW_INFIMUM, TRUE);
rec = page_rec_next_get<true>(page, page + PAGE_NEW_INFIMUM);
trec = page_rec_next_get<true>(temp_page,
temp_page + PAGE_NEW_INFIMUM);
const ulint n_core = (index && page_is_leaf(page))
? index->n_fields : 0;
@@ -3447,8 +3446,8 @@ page_zip_validate_low(
}
}
rec = page_rec_get_next_low(rec, TRUE);
trec = page_rec_get_next_low(trec, TRUE);
rec = page_rec_next_get<true>(page, rec);
trec = page_rec_next_get<true>(temp_page, trec);
} while (rec || trec);
if (heap) {

View file

@@ -1635,7 +1635,7 @@ row_ins_check_foreign_constraint(
const rec_t* rec = btr_pcur_get_rec(&pcur);
const buf_block_t* block = btr_pcur_get_block(&pcur);
if (page_rec_is_infimum(rec)) {
if (page_rec_is_infimum_low(rec - block->page.frame)) {
continue;
}
@@ -1644,7 +1644,7 @@ row_ins_check_foreign_constraint(
check_index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (page_rec_is_supremum(rec)) {
if (page_rec_is_supremum_low(rec - block->page.frame)) {
if (skip_gap_lock) {
@@ -2125,7 +2125,7 @@ row_ins_scan_sec_index_for_duplicate(
const buf_block_t* block = btr_pcur_get_block(&pcur);
const ulint lock_type = LOCK_ORDINARY;
if (page_rec_is_infimum(rec)) {
if (page_rec_is_infimum_low(rec - block->page.frame)) {
continue;
}
@@ -2161,7 +2161,7 @@ row_ins_scan_sec_index_for_duplicate(
goto end_scan;
}
if (page_rec_is_supremum(rec)) {
if (page_rec_is_supremum_low(rec - block->page.frame)) {
continue;
}
@@ -2276,7 +2276,8 @@ row_ins_duplicate_error_in_clust_online(
ut_ad(!cursor->index()->is_instant());
if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) {
if (cursor->low_match >= n_uniq
&& !page_rec_is_infimum_low(rec - btr_cur_get_page(cursor))) {
*offsets = rec_get_offsets(rec, cursor->index(), *offsets,
cursor->index()->n_fields,
ULINT_UNDEFINED, heap);
@@ -2286,7 +2287,7 @@ row_ins_duplicate_error_in_clust_online(
}
}
if (!(rec = page_rec_get_next_const(btr_cur_get_rec(cursor)))) {
if (!(rec = page_rec_get_next_const(rec))) {
return DB_CORRUPTION;
}
@@ -2346,7 +2347,7 @@ row_ins_duplicate_error_in_clust(
rec = btr_cur_get_rec(cursor);
if (!page_rec_is_infimum(rec)) {
if (!page_rec_is_infimum_low(rec - btr_cur_get_page(cursor))) {
offsets = rec_get_offsets(rec, cursor->index(),
offsets,
cursor->index()
@@ -2759,7 +2760,8 @@ skip_bulk_insert:
if (UNIV_UNLIKELY(entry->info_bits != 0)) {
const rec_t* rec = btr_pcur_get_rec(&pcur);
if (rec_get_info_bits(rec, page_rec_is_comp(rec))
if (rec_get_info_bits(rec,
page_is_comp(btr_pcur_get_page(&pcur)))
& REC_INFO_MIN_REC_FLAG) {
trx->error_info = index;
err = DB_DUPLICATE_KEY;
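
The *_low() predicates used above work because the infimum and supremum pseudo-records sit at fixed offsets in every page, so rec - block->page.frame alone classifies them. A sketch with offset values assumed to mirror InnoDB's PAGE_NEW_*/PAGE_OLD_* constants (verify against the page header definitions before relying on them):

#include <cstddef>

static const size_t NEW_INFIMUM = 99,  NEW_SUPREMUM = 112;  // assumed values
static const size_t OLD_INFIMUM = 101, OLD_SUPREMUM = 116;  // assumed values

static bool is_infimum_low(size_t offs)
{ return offs == NEW_INFIMUM || offs == OLD_INFIMUM; }

static bool is_supremum_low(size_t offs)
{ return offs == NEW_SUPREMUM || offs == OLD_SUPREMUM; }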

View file

@@ -1821,23 +1821,29 @@ err_exit:
trx->error_key_num = 0;
goto func_exit;
} else {
rec_t* rec = page_rec_get_next(btr_pcur_get_rec(&pcur));
const page_t* const page = btr_pcur_get_page(&pcur);
const auto comp = page_is_comp(page);
const rec_t* const rec = comp
? page_rec_next_get<true>(page,
btr_pcur_get_rec(&pcur))
: page_rec_next_get<false>(page,
btr_pcur_get_rec(&pcur));
if (!rec) {
corrupted_metadata:
err = DB_CORRUPTION;
goto err_exit;
}
if (rec_get_info_bits(rec, page_rec_is_comp(rec))
& REC_INFO_MIN_REC_FLAG) {
if (rec_get_info_bits(rec, comp) & REC_INFO_MIN_REC_FLAG) {
if (!clust_index->is_instant()) {
goto corrupted_metadata;
}
if (page_rec_is_comp(rec)
if (comp
&& rec_get_status(rec) != REC_STATUS_INSTANT) {
goto corrupted_metadata;
}
/* Skip the metadata pseudo-record. */
btr_pcur_get_page_cur(&pcur)->rec = rec;
btr_pcur_get_page_cur(&pcur)->rec =
const_cast<rec_t*>(rec);
} else if (clust_index->is_instant()) {
goto corrupted_metadata;
}
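
The rewritten branch above reads page_is_comp() once and then reuses the cached flag for rec_get_info_bits(), instead of letting page_rec_is_comp(rec) realign the record pointer on every call. A sketch of that caching; the two decoders are hypothetical stand-ins for the per-format readers:

typedef unsigned char rec_byte;

// hypothetical readers: the real accessors differ only in where the
// info-bits byte sits relative to the record origin
static unsigned info_bits_new(const rec_byte *rec) { return rec[-5] & 0xf0U; }
static unsigned info_bits_old(const rec_byte *rec) { return rec[-6] & 0xf0U; }

static unsigned info_bits(const rec_byte *rec, bool comp)
{ return comp ? info_bits_new(rec) : info_bits_old(rec); }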

View file

@@ -1811,7 +1811,7 @@ row_unlock_for_mysql(
lock_rec_unlock(
trx,
btr_pcur_get_block(pcur)->page.id(),
*btr_pcur_get_block(pcur),
rec,
static_cast<enum lock_mode>(
prebuilt->select_lock_type));

View file

@@ -1098,7 +1098,7 @@ static void row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr)
byte* ptr = rec_get_nth_field(
rec, offsets, trx_id_pos, &len);
ut_ad(len == DATA_TRX_ID_LEN);
size_t offs = page_offset(ptr);
size_t offs = ptr - block->page.frame;
mtr->memset(block, offs, DATA_TRX_ID_LEN, 0);
offs += DATA_TRX_ID_LEN;
mtr->write<1,mtr_t::MAYBE_NOP>(

View file

@@ -1620,7 +1620,8 @@ row_sel_try_search_shortcut(
return SEL_RETRY;
}
} else if (!srv_read_only_mode) {
trx_id_t trx_id = page_get_max_trx_id(page_align(rec));
trx_id_t trx_id =
page_get_max_trx_id(btr_pcur_get_page(&plan->pcur));
ut_ad(trx_id);
if (!node->read_view->sees(trx_id)) {
return SEL_RETRY;
@@ -2041,7 +2042,8 @@ skip_lock:
rec = old_vers;
}
} else if (!srv_read_only_mode) {
trx_id_t trx_id = page_get_max_trx_id(page_align(rec));
trx_id_t trx_id = page_get_max_trx_id(
btr_pcur_get_page(&plan->pcur));
ut_ad(trx_id);
if (!node->read_view->sees(trx_id)) {
cons_read_requires_clust_rec = TRUE;
@@ -3411,8 +3413,9 @@ Row_sel_get_clust_rec_for_mysql::operator()(
page and verify that */
if (dict_index_is_spatial(sec_index)
&& btr_cur->rtr_info->matches
&& (page_align(rec)
== btr_cur->rtr_info->matches->block->page.frame
&& (!(ulint(rec
- btr_cur->rtr_info->matches->block->page.frame)
>> srv_page_size_shift)
|| rec != btr_pcur_get_rec(prebuilt->pcur))) {
#ifdef UNIV_DEBUG
rtr_info_t* rtr_info = btr_cur->rtr_info;
@@ -3539,7 +3542,7 @@ Row_sel_get_clust_rec_for_mysql::operator()(
prebuilt->clust_pcur)->page;
const lsn_t lsn = mach_read_from_8(
page_align(clust_rec) + FIL_PAGE_LSN);
bpage.frame + FIL_PAGE_LSN);
if (lsn != cached_lsn
|| bpage.id() != cached_page_id
@@ -5008,7 +5011,8 @@ wrong_offs:
.buf_fix_count();
ib::error() << "Index corruption: rec offs "
<< page_offset(rec) << " next offs "
<< rec - btr_pcur_get_page(pcur)
<< " next offs "
<< next_offs
<< btr_pcur_get_block(pcur)->page.id()
<< ", index " << index->name
@@ -5025,7 +5029,8 @@ wrong_offs:
over the corruption to recover as much as possible. */
ib::info() << "Index corruption: rec offs "
<< page_offset(rec) << " next offs "
<< rec - btr_pcur_get_page(pcur)
<< " next offs "
<< next_offs
<< btr_pcur_get_block(pcur)->page.id()
<< ", index " << index->name
@@ -5050,10 +5055,12 @@ wrong_offs:
if (UNIV_UNLIKELY(srv_force_recovery > 0)) {
if (!rec_validate(rec, offsets)
|| !btr_index_rec_validate(rec, index, FALSE)) {
|| !btr_index_rec_validate(pcur->btr_cur.page_cur,
index, FALSE)) {
ib::error() << "Index corruption: rec offs "
<< page_offset(rec) << " next offs "
<< rec - btr_pcur_get_page(pcur)
<< " next offs "
<< next_offs
<< btr_pcur_get_block(pcur)->page.id()
<< ", index " << index->name
@@ -5416,7 +5423,7 @@ no_gap_lock:
if (!srv_read_only_mode) {
trx_id_t trx_id = page_get_max_trx_id(
page_align(rec));
btr_pcur_get_page(pcur));
ut_ad(trx_id);
if (trx->read_view.sees(trx_id)) {
goto locks_ok;
@@ -6399,7 +6406,8 @@ rec_loop:
goto count_or_not;
}
else if (const trx_id_t page_trx_id= page_get_max_trx_id(page_align(rec)))
else if (const trx_id_t page_trx_id=
page_get_max_trx_id(btr_pcur_get_page(prebuilt->pcur)))
{
if (page_trx_id >= trx_sys.get_max_trx_id())
goto invalid_PAGE_MAX_TRX_ID;
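
The spatial-index branch above replaces a page_align() comparison with an unsigned shift: for a power-of-two page size, rec lies inside frame exactly when (rec - frame) >> srv_page_size_shift is zero. A standalone sketch of the test:

#include <cstdint>

static bool rec_in_frame(const unsigned char *rec,
                         const unsigned char *frame,
                         unsigned page_size_shift)
{
  // a negative difference wraps to a huge unsigned value, so one shift
  // covers both bounds: 0 <= rec - frame < (1 << page_size_shift)
  return !(static_cast<uintptr_t>(rec - frame) >> page_size_shift);
}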

View file

@@ -445,7 +445,8 @@ row_undo_mod_clust(
0, 1ULL << ROLL_PTR_INSERT_FLAG_POS,
&mtr);
} else {
size_t offs = page_offset(rec + trx_id_offset);
size_t offs = rec + trx_id_offset
- block->page.frame;
mtr.memset(block, offs, DATA_TRX_ID_LEN, 0);
offs += DATA_TRX_ID_LEN;
mtr.write<1,mtr_t::MAYBE_NOP>(*block,
@@ -489,15 +490,14 @@ static bool row_undo_mod_sec_is_unsafe(const rec_t *rec, dict_index_t *index,
mem_heap_t* heap2;
dtuple_t* row;
const dtuple_t* entry;
ulint comp;
dtuple_t* vrow = NULL;
mem_heap_t* v_heap = NULL;
dtuple_t* cur_vrow = NULL;
const bool comp = index->table->not_redundant();
clust_index = dict_table_get_first_index(index->table);
comp = page_rec_is_comp(rec);
ut_ad(!dict_table_is_comp(index->table) == !comp);
ut_ad(!!page_rec_is_comp(rec) == comp);
heap = mem_heap_create(1024);
clust_offsets = rec_get_offsets(rec, clust_index, NULL,
clust_index->n_core_fields,

View file

@@ -331,7 +331,9 @@ static buf_block_t* row_undo_rec_get(undo_node_t* node)
}
undo->top_page_no = prev_page->page.id().page_no();
undo->top_offset = page_offset(prev_rec);
undo->top_offset = uint16_t(prev_rec - prev_page->page.frame);
ut_ad(prev_rec - prev_page->page.frame
== page_offset(prev_rec));
undo->top_undo_no = trx_undo_rec_get_undo_no(prev_rec);
ut_ad(!undo->empty());
} else {

View file

@@ -149,9 +149,9 @@ row_vers_impl_x_locked_low(
}
}
const ulint comp = page_rec_is_comp(rec);
const bool comp = index->table->not_redundant();
ut_ad(!!page_rec_is_comp(rec) == comp);
ut_ad(index->table == clust_index->table);
ut_ad(!!comp == dict_table_is_comp(index->table));
ut_ad(!comp == !page_rec_is_comp(clust_rec));
const ulint rec_del = rec_get_deleted_flag(rec, comp);

View file

@@ -188,7 +188,8 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
/* This function is invoked during transaction commit, which is not
allowed to fail. If we get a corrupted undo header, we will crash here. */
ut_a(undo_page);
trx_ulogf_t *undo_header= undo_page->page.frame + undo->hdr_offset;
const uint16_t undo_header_offset= undo->hdr_offset;
trx_ulogf_t *undo_header= undo_page->page.frame + undo_header_offset;
ut_ad(mach_read_from_2(undo_header + TRX_UNDO_NEEDS_PURGE) <= 1);
ut_ad(rseg->needs_purge > trx->id);
@@ -265,9 +266,8 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
than to intentionally violate ACID by committing something
that is known to be corrupted. */
ut_a(flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY, undo_page,
uint16_t(page_offset(undo_header) +
TRX_UNDO_HISTORY_NODE), rseg->space->free_limit,
mtr) == DB_SUCCESS);
uint16_t(undo_header_offset + TRX_UNDO_HISTORY_NODE),
rseg->space->free_limit, mtr) == DB_SUCCESS);
mtr->write<2>(*undo_page, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE +
undo_page->page.frame, undo_state);
@@ -287,8 +287,9 @@ static void trx_purge_free_segment(buf_block_t *rseg_hdr, buf_block_t *block,
ut_ad(mtr.memo_contains_flagged(rseg_hdr, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr.memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
while (!fseg_free_step_not_header(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER +
block->page.frame, &mtr))
while (!fseg_free_step_not_header(block,
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
&mtr))
{
rseg_hdr->fix();
block->fix();
@@ -311,8 +312,8 @@ static void trx_purge_free_segment(buf_block_t *rseg_hdr, buf_block_t *block,
mtr.memo_push(block, MTR_MEMO_PAGE_X_FIX);
}
while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER +
block->page.frame, &mtr));
while (!fseg_free_step(block, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
&mtr));
}
void purge_sys_t::rseg_enable(trx_rseg_t &rseg)
@ -926,7 +927,8 @@ bool purge_sys_t::choose_next_log()
goto purge_nothing;
}
offset= page_offset(undo_rec);
offset= uint16_t(undo_rec - b->page.frame);
ut_ad(undo_rec - b->page.frame == page_offset(undo_rec));
tail.undo_no= trx_undo_rec_get_undo_no(undo_rec);
page_no= id.page_no();
}
@@ -968,12 +970,14 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
return {nullptr, 0};
}
buf_block_t *rec2_page= b;
if (const trx_undo_rec_t *rec2=
trx_undo_page_get_next_rec(b, offset, hdr_page_no, hdr_offset))
{
got_rec:
ut_ad(page_no == page_id.page_no());
offset= page_offset(rec2);
ut_ad(page_offset(rec2) == rec2 - rec2_page->page.frame);
offset= uint16_t(rec2 - rec2_page->page.frame);
tail.undo_no= trx_undo_rec_get_undo_no(rec2);
}
else if (hdr_page_no != page_no ||
@@ -989,6 +993,7 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
rec2= trx_undo_page_get_first_rec(next_page, hdr_page_no, hdr_offset);
if (rec2)
{
rec2_page= next_page;
page_no= next;
goto got_rec;
}
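
trx_purge_free_segment() above shows the reworked freeing loop: fseg_free_step() and fseg_free_step_not_header() now take the block plus the byte offset of the segment header inside it, rather than a raw pointer into the frame. A hedged usage sketch with stub types; none of these declarations are the real ones:

#include <cstddef>

struct buf_block_stub {};  // stand-in for buf_block_t
struct mtr_stub {};        // stand-in for mtr_t

// stub: the real call frees one batch of pages and reports whether
// the whole segment is gone
static bool fseg_free_step_stub(buf_block_stub*, size_t /*header offset*/,
                                mtr_stub*)
{ return true; }

static void free_segment(buf_block_stub *block, size_t header_offset,
                         mtr_stub *mtr)
{
  while (!fseg_free_step_stub(block, header_offset, mtr)) {}
}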

View file

@@ -588,9 +588,8 @@ static dberr_t trx_resurrect_table_locks(trx_t *trx, const trx_undo_t &undo)
{
buf_page_make_young_if_needed(&block->page);
buf_block_t *undo_block= block;
const trx_undo_rec_t *undo_rec= block->page.frame + undo.top_offset;
do
uint16_t undo_rec_offset= undo.top_offset;
for (const trx_undo_rec_t *undo_rec= block->page.frame + undo_rec_offset;;)
{
byte type;
byte cmpl_info;
@@ -606,11 +605,14 @@ static dberr_t trx_resurrect_table_locks(trx_t *trx, const trx_undo_t &undo)
trx_undo_rec_get_pars(undo_rec, &type, &cmpl_info,
&updated_extern, &undo_no, &table_id);
tables.emplace(table_id, type == TRX_UNDO_EMPTY);
undo_rec= trx_undo_get_prev_rec(block, page_offset(undo_rec),
ut_ad(page_offset(undo_rec) == undo_rec_offset);
undo_rec= trx_undo_get_prev_rec(block, undo_rec_offset,
undo.hdr_page_no, undo.hdr_offset,
true, &mtr);
if (!undo_rec)
break;
undo_rec_offset= uint16_t(undo_rec - block->page.frame);
}
while (undo_rec);
}
mtr.commit();
@@ -1050,13 +1052,13 @@ void trx_t::commit_empty(mtr_t *mtr)
{
mtr->memcpy(*u, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START +
u->page.frame, prev + TRX_UNDO_LOG_START, 2);
const ulint free= page_offset(last);
const ulint free= last - u->page.frame;
mtr->write<2>(*u, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE +
u->page.frame, free);
mtr->write<2>(*u, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + u->page.frame,
TRX_UNDO_CACHED);
mtr->write<2>(*u, TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG + u->page.frame,
page_offset(prev));
uintptr_t(prev - u->page.frame));
mtr->write<2>(*u, prev + TRX_UNDO_NEXT_LOG, 0U);
mtr->memset(u, free, srv_page_size - FIL_PAGE_DATA_END - free, 0);
@@ -1352,10 +1354,8 @@ ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo)
buf_page_get(page_id_t(SRV_TMP_SPACE_ID, undo->hdr_page_no), 0,
RW_X_LATCH, &mtr))
{
fseg_header_t *file_seg= TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER +
block->page.frame;
finished= fseg_free_step(file_seg, &mtr);
finished= fseg_free_step(block, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
&mtr);
if (!finished);
else if (buf_block_t *rseg_header= rseg->get(&mtr, nullptr))

View file

@@ -292,12 +292,12 @@ trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
mtr);
}
inline void UndorecApplier::apply_undo_rec(const trx_undo_rec_t *rec)
inline
void UndorecApplier::apply_undo_rec(const trx_undo_rec_t *rec, uint16_t offset)
{
undo_rec= rec;
if (!undo_rec)
return;
offset= page_offset(undo_rec);
ut_ad(page_offset(undo_rec) == offset);
this->offset= offset;
bool updated_extern= false;
undo_no_t undo_no= 0;
@@ -365,10 +365,11 @@ ATTRIBUTE_COLD void trx_t::apply_log()
undo->hdr_offset);
while (rec)
{
const uint16_t offset= uint16_t(rec - block->page.frame);
/* Since we are the only thread who could write to this undo page,
it is safe to dereference rec while only holding a buffer-fix. */
log_applier.apply_undo_rec(rec);
rec= trx_undo_page_get_next_rec(block, page_offset(rec),
log_applier.apply_undo_rec(rec, offset);
rec= trx_undo_page_get_next_rec(block, offset,
page_id.page_no(), undo->hdr_offset);
}