MDEV-21174: Replace mlog_write_ulint() with mtr_t::write()

mtr_t::write(): Replaces mlog_write_ulint(), mlog_write_ull().
Optimize away writes if the page contents do not change,
except when a dummy write has been explicitly requested.

Because the member function template takes a block descriptor as a
parameter, it is possible to introduce better consistency checks.
Due to this, the code for handling file-based lists, undo logs
and user transactions was refactored to pass around buf_block_t.
This commit is contained in:
Marko Mäkelä 2019-12-03 10:19:45 +02:00
parent 504823bcce
commit 56f6dab1d0
53 changed files with 2735 additions and 3531 deletions

File diff suppressed because it is too large. Load diff

View file

@ -92,6 +92,8 @@ PageBulk::init()
new_page_zip = buf_block_get_page_zip(new_block);
new_page_no = page_get_page_no(new_page);
byte* index_id = PAGE_HEADER + PAGE_INDEX_ID + new_page;
if (new_page_zip) {
page_create_zip(new_block, m_index, m_level, 0,
&m_mtr);
@ -100,11 +102,9 @@ PageBulk::init()
page_zip_write_header(new_page_zip,
FIL_PAGE_PREV + new_page,
8, &m_mtr);
mach_write_to_8(PAGE_HEADER + PAGE_INDEX_ID + new_page,
m_index->id);
page_zip_write_header(new_page_zip,
PAGE_HEADER + PAGE_INDEX_ID
+ new_page, 8, &m_mtr);
mach_write_to_8(index_id, m_index->id);
page_zip_write_header(new_page_zip, index_id,
8, &m_mtr);
} else {
ut_ad(!dict_index_is_spatial(m_index));
page_create(new_block, &m_mtr,
@ -114,10 +114,10 @@ PageBulk::init()
== FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(new_block, FIL_PAGE_PREV, 8, 0xff, &m_mtr);
mlog_write_ulint(PAGE_HEADER + PAGE_LEVEL + new_page,
m_level, MLOG_2BYTES, &m_mtr);
mlog_write_ull(PAGE_HEADER + PAGE_INDEX_ID + new_page,
m_index->id, &m_mtr);
m_mtr.write<2,mtr_t::OPT>(*new_block,
PAGE_HEADER + PAGE_LEVEL
+ new_page, m_level);
m_mtr.write<8>(*new_block, index_id, m_index->id);
}
} else {
new_block = btr_block_get(*m_index, m_page_no, RW_X_LATCH,
@ -130,7 +130,7 @@ PageBulk::init()
ut_ad(page_dir_get_n_heap(new_page) == PAGE_HEAP_NO_USER_LOW);
btr_page_set_level(new_page, new_page_zip, m_level, &m_mtr);
btr_page_set_level(new_block, m_level, &m_mtr);
}
if (!m_level && dict_index_is_sec_or_ibuf(m_index)) {
@ -169,13 +169,14 @@ PageBulk::init()
}
/** Insert a record in the page.
@tparam fmt the page format
@param[in] rec record
@param[in] offsets record offsets */
void
PageBulk::insert(
const rec_t* rec,
ulint* offsets)
template<PageBulk::format fmt>
inline void PageBulk::insertPage(const rec_t *rec, ulint *offsets)
{
ut_ad((m_page_zip != nullptr) == (fmt == COMPRESSED));
ut_ad((fmt != REDUNDANT) == m_is_comp);
ulint rec_size;
ut_ad(m_heap != NULL);
@ -210,7 +211,7 @@ PageBulk::insert(
/* 3. Set the n_owned field in the inserted record to zero,
and set the heap_no field. */
if (m_is_comp) {
if (fmt != REDUNDANT) {
rec_set_n_owned_new(insert_rec, NULL, 0);
rec_set_heap_no_new(insert_rec,
PAGE_HEAP_NO_USER_LOW + m_rec_no);
@ -242,15 +243,30 @@ PageBulk::insert(
m_cur_rec = insert_rec;
}
/** Insert a record in the page.
@param[in] rec record
@param[in] offsets record offsets */
inline void PageBulk::insert(const rec_t *rec, ulint *offsets)
{
/* Dispatch to the format-specialized implementation.  A non-null
m_page_zip descriptor identifies a ROW_FORMAT=COMPRESSED page
(insertPage<COMPRESSED>() asserts this); otherwise m_is_comp
distinguishes the "compact" record format (DYNAMIC) from the
old-style REDUNDANT format. */
if (UNIV_LIKELY_NULL(m_page_zip))
insertPage<COMPRESSED>(rec, offsets);
else if (m_is_comp)
insertPage<DYNAMIC>(rec, offsets);
else
insertPage<REDUNDANT>(rec, offsets);
}
/** Mark end of insertion to the page. Scan all records to set page dirs,
and set page header members.
Note: we refer to page_copy_rec_list_end_to_created_page. */
void
PageBulk::finish()
@tparam fmt the page format */
template<PageBulk::format fmt>
inline void PageBulk::finishPage()
{
ut_ad(m_rec_no > 0);
ut_ad((m_page_zip != nullptr) == (fmt == COMPRESSED));
ut_ad((fmt != REDUNDANT) == m_is_comp);
ut_ad(m_total_data + page_dir_calc_reserved_space(m_rec_no)
<= page_get_free_space_of_empty(m_is_comp));
<= page_get_free_space_of_empty(fmt != REDUNDANT));
/* See page_copy_rec_list_end_to_created_page() */
ut_d(page_dir_set_n_slots(m_page, NULL, srv_page_size / 2));
@ -304,26 +320,26 @@ PageBulk::finish()
ut_ad(!dict_index_is_spatial(m_index));
ut_ad(!page_get_instant(m_page));
ut_ad(!mach_read_from_2(PAGE_HEADER + PAGE_N_DIRECTION + m_page));
if (!m_flush_observer && !m_page_zip) {
mlog_write_ulint(PAGE_HEADER + PAGE_N_DIR_SLOTS + m_page,
2 + slot_index, MLOG_2BYTES, &m_mtr);
mlog_write_ulint(PAGE_HEADER + PAGE_HEAP_TOP + m_page,
ulint(m_heap_top - m_page),
MLOG_2BYTES, &m_mtr);
mlog_write_ulint(PAGE_HEADER + PAGE_N_HEAP + m_page,
(PAGE_HEAP_NO_USER_LOW + m_rec_no)
| ulint(m_is_comp) << 15,
MLOG_2BYTES, &m_mtr);
mlog_write_ulint(PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no,
MLOG_2BYTES, &m_mtr);
mlog_write_ulint(PAGE_HEADER + PAGE_LAST_INSERT + m_page,
ulint(m_cur_rec - m_page),
MLOG_2BYTES, &m_mtr);
mlog_write_ulint(PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
PAGE_RIGHT, MLOG_2BYTES, &m_mtr);
mlog_write_ulint(PAGE_HEADER + PAGE_N_DIRECTION + m_page, 0,
MLOG_2BYTES, &m_mtr);
if (fmt != COMPRESSED && !m_flush_observer) {
m_mtr.write<2,mtr_t::OPT>(*m_block,
PAGE_HEADER + PAGE_N_DIR_SLOTS
+ m_page, 2 + slot_index);
m_mtr.write<2>(*m_block, PAGE_HEADER + PAGE_HEAP_TOP + m_page,
ulint(m_heap_top - m_page));
m_mtr.write<2>(*m_block,
PAGE_HEADER + PAGE_N_HEAP + m_page,
(PAGE_HEAP_NO_USER_LOW + m_rec_no)
| uint16_t{fmt != REDUNDANT} << 15);
m_mtr.write<2>(*m_block,
PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no);
m_mtr.write<2>(*m_block,
PAGE_HEADER + PAGE_LAST_INSERT + m_page,
ulint(m_cur_rec - m_page));
m_mtr.write<2>(*m_block,
PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
PAGE_RIGHT);
} else {
/* For ROW_FORMAT=COMPRESSED, redo log may be written
in PageBulk::compress(). */
@ -333,18 +349,29 @@ PageBulk::finish()
ulint(m_heap_top - m_page));
mach_write_to_2(PAGE_HEADER + PAGE_N_HEAP + m_page,
(PAGE_HEAP_NO_USER_LOW + m_rec_no)
| ulint(m_is_comp) << 15);
| uint16_t{fmt != REDUNDANT} << 15);
mach_write_to_2(PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no);
mach_write_to_2(PAGE_HEADER + PAGE_LAST_INSERT + m_page,
ulint(m_cur_rec - m_page));
mach_write_to_2(PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
PAGE_RIGHT);
mach_write_to_2(PAGE_HEADER + PAGE_N_DIRECTION + m_page, 0);
}
m_block->skip_flush_check = false;
}
/** Mark end of insertion to the page. Scan all records to set page dirs,
and set page header members. */
inline void PageBulk::finish()
{
/* Same format dispatch as PageBulk::insert(): m_page_zip != NULL
means ROW_FORMAT=COMPRESSED (asserted in finishPage<fmt>()), and
m_is_comp selects DYNAMIC over REDUNDANT.  The template parameter
lets finishPage() choose at compile time between the mtr-logged
and the unlogged page-header writes. */
if (UNIV_LIKELY_NULL(m_page_zip))
finishPage<COMPRESSED>();
else if (m_is_comp)
finishPage<DYNAMIC>();
else
finishPage<REDUNDANT>();
}
/** Commit inserts done to the page
@param[in] success Flag whether all inserts succeed. */
void
@ -521,28 +548,24 @@ PageBulk::copyOut(
@param[in] next_page_no next page no */
inline void PageBulk::setNext(ulint next_page_no)
{
if (UNIV_LIKELY_NULL(m_page_zip)) {
/* For ROW_FORMAT=COMPRESSED, redo log may be written
in PageBulk::compress(). */
mach_write_to_4(m_page + FIL_PAGE_NEXT, next_page_no);
} else {
mlog_write_ulint(m_page + FIL_PAGE_NEXT, next_page_no,
MLOG_4BYTES, &m_mtr);
}
if (UNIV_LIKELY_NULL(m_page_zip))
/* For ROW_FORMAT=COMPRESSED, redo log may be written
in PageBulk::compress(). */
mach_write_to_4(m_page + FIL_PAGE_NEXT, next_page_no);
else
m_mtr.write<4>(*m_block, m_page + FIL_PAGE_NEXT, next_page_no);
}
/** Set previous page
@param[in] prev_page_no previous page no */
inline void PageBulk::setPrev(ulint prev_page_no)
{
if (UNIV_LIKELY_NULL(m_page_zip)) {
/* For ROW_FORMAT=COMPRESSED, redo log may be written
in PageBulk::compress(). */
mach_write_to_4(m_page + FIL_PAGE_PREV, prev_page_no);
} else {
mlog_write_ulint(m_page + FIL_PAGE_PREV, prev_page_no,
MLOG_4BYTES, &m_mtr);
}
if (UNIV_LIKELY_NULL(m_page_zip))
/* For ROW_FORMAT=COMPRESSED, redo log may be written
in PageBulk::compress(). */
mach_write_to_4(m_page + FIL_PAGE_PREV, prev_page_no);
else
m_mtr.write<4>(*m_block, m_page + FIL_PAGE_PREV, prev_page_no);
}
/** Check if required space is available in the page for the rec to be inserted.
@ -748,9 +771,10 @@ BtrBulk::pageCommit(
page_bulk->setNext(next_page_bulk->getPageNo());
next_page_bulk->setPrev(page_bulk->getPageNo());
} else {
/** Suppose a page is released and latched again, we need to
ut_ad(!page_has_next(page_bulk->getPage()));
/* If a page is released and latched again, we need to
mark it modified in mini-transaction. */
page_bulk->setNext(FIL_NULL);
page_bulk->set_modified();
}
ut_ad(!rw_lock_own_flagged(&m_index->lock,

View file

@ -154,8 +154,7 @@ static
void
btr_cur_unmark_extern_fields(
/*=========================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
@ -181,8 +180,7 @@ btr_rec_free_updated_extern_fields(
dict_index_t* index, /*!< in: index of rec; the index tree MUST be
X-latched */
rec_t* rec, /*!< in: record */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in: index page of rec */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const upd_t* update, /*!< in: update vector */
bool rollback,/*!< in: performing rollback? */
@ -198,8 +196,7 @@ btr_rec_free_externally_stored_fields(
tree MUST be X-latched */
rec_t* rec, /*!< in: record */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in: index page of rec */
bool rollback,/*!< in: performing rollback? */
mtr_t* mtr); /*!< in: mini-transaction handle which contains
an X-latch to record page and to the index
@ -224,7 +221,6 @@ btr_cur_latch_leaves(
uint32_t left_page_no;
uint32_t right_page_no;
buf_block_t* get_block;
page_t* page = buf_block_get_frame(block);
bool spatial;
btr_latch_leaves_t latch_leaves = {{NULL, NULL, NULL}, {0, 0, 0}};
@ -252,7 +248,8 @@ btr_cur_latch_leaves(
true, mtr);
latch_leaves.blocks[1] = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
ut_a(page_is_comp(get_block->frame)
== page_is_comp(block->frame));
#endif /* UNIV_BTR_DEBUG */
if (spatial) {
cursor->rtr_info->tree_blocks[RTR_MAX_LEVELS]
@ -268,7 +265,7 @@ btr_cur_latch_leaves(
dict_index_get_lock(cursor->index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
/* x-latch also siblings from left to right */
left_page_no = btr_page_get_prev(page);
left_page_no = btr_page_get_prev(block->frame);
if (left_page_no != FIL_NULL) {
@ -304,11 +301,12 @@ btr_cur_latch_leaves(
/* Sanity check only after both the blocks are latched. */
if (latch_leaves.blocks[0] != NULL) {
ut_a(page_is_comp(latch_leaves.blocks[0]->frame)
== page_is_comp(page));
== page_is_comp(block->frame));
ut_a(btr_page_get_next(latch_leaves.blocks[0]->frame)
== page_get_page_no(page));
== block->page.id.page_no());
}
ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
ut_a(page_is_comp(get_block->frame)
== page_is_comp(block->frame));
#endif /* UNIV_BTR_DEBUG */
if (spatial) {
@ -316,7 +314,7 @@ btr_cur_latch_leaves(
= get_block;
}
right_page_no = btr_page_get_next(page);
right_page_no = btr_page_get_next(block->frame);
if (right_page_no != FIL_NULL) {
if (spatial) {
@ -331,9 +329,9 @@ btr_cur_latch_leaves(
latch_leaves.blocks[2] = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
== page_is_comp(page));
== page_is_comp(block->frame));
ut_a(btr_page_get_prev(get_block->frame)
== page_get_page_no(page));
== block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
if (spatial) {
cursor->rtr_info->tree_blocks[
@ -348,7 +346,7 @@ btr_cur_latch_leaves(
mode = latch_mode == BTR_SEARCH_PREV ? RW_S_LATCH : RW_X_LATCH;
/* latch also left sibling */
rw_lock_s_lock(&block->lock);
left_page_no = btr_page_get_prev(page);
left_page_no = btr_page_get_prev(block->frame);
rw_lock_s_unlock(&block->lock);
if (left_page_no != FIL_NULL) {
@ -360,9 +358,9 @@ btr_cur_latch_leaves(
cursor->left_block = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
== page_is_comp(page));
== page_is_comp(block->frame));
ut_a(btr_page_get_next(get_block->frame)
== page_get_page_no(page));
== block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
}
@ -372,7 +370,8 @@ btr_cur_latch_leaves(
true, mtr);
latch_leaves.blocks[1] = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
ut_a(page_is_comp(get_block->frame)
== page_is_comp(block->frame));
#endif /* UNIV_BTR_DEBUG */
return(latch_leaves);
case BTR_CONT_MODIFY_TREE:
@ -2424,8 +2423,7 @@ need_opposite_intention:
cursor->up_bytes = up_bytes;
if (autoinc) {
page_set_autoinc(tree_blocks[0],
index, autoinc, mtr, false);
page_set_autoinc(tree_blocks[0], autoinc, mtr, false);
}
#ifdef BTR_CUR_HASH_ADAPT
@ -4160,8 +4158,6 @@ btr_cur_update_in_place(
further pages */
{
dict_index_t* index;
buf_block_t* block;
page_zip_des_t* page_zip;
dberr_t err;
rec_t* rec;
roll_ptr_t roll_ptr = 0;
@ -4190,11 +4186,11 @@ btr_cur_update_in_place(
<< ") by " << ib::hex(trx_id) << ": "
<< rec_printer(rec, offsets).str());
block = btr_cur_get_block(cursor);
page_zip = buf_block_get_page_zip(block);
buf_block_t* block = btr_cur_get_block(cursor);
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
/* Check that enough space is available on the compressed page. */
if (page_zip) {
if (UNIV_LIKELY_NULL(page_zip)) {
ut_ad(!index->table->is_temporary());
if (!btr_cur_update_alloc_zip(
@ -4277,8 +4273,7 @@ btr_cur_update_in_place(
/* The new updated record owns its possible externally
stored fields */
btr_cur_unmark_extern_fields(page_zip,
rec, index, offsets, mtr);
btr_cur_unmark_extern_fields(block, rec, index, offsets, mtr);
}
ut_ad(err == DB_SUCCESS);
@ -4803,7 +4798,6 @@ btr_cur_pessimistic_update(
big_rec_t* dummy_big_rec;
dict_index_t* index;
buf_block_t* block;
page_t* page;
page_zip_des_t* page_zip;
rec_t* rec;
page_cur_t* page_cursor;
@ -4813,13 +4807,11 @@ btr_cur_pessimistic_update(
ibool was_first;
ulint n_reserved = 0;
ulint n_ext;
ulint max_ins_size = 0;
*offsets = NULL;
*big_rec = NULL;
block = btr_cur_get_block(cursor);
page = buf_block_get_frame(block);
page_zip = buf_block_get_page_zip(block);
index = cursor->index;
@ -4828,7 +4820,7 @@ btr_cur_pessimistic_update(
MTR_MEMO_SX_LOCK));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
ut_ad(!page_zip || !index->table->is_temporary());
/* The insert buffer tree should never be updated in place. */
@ -4861,7 +4853,7 @@ btr_cur_pessimistic_update(
if (page_zip
&& optim_err != DB_ZIP_OVERFLOW
&& !dict_index_is_clust(index)
&& page_is_leaf(page)) {
&& page_is_leaf(block->frame)) {
ut_ad(!index->table->is_temporary());
ibuf_update_free_bits_zip(block, mtr);
}
@ -4910,7 +4902,7 @@ btr_cur_pessimistic_update(
/* We have to set appropriate extern storage bits in the new
record to be inserted: we have to remember which fields were such */
ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec));
ut_ad(!page_is_comp(block->frame) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_validate(rec, index, *offsets));
if (index->is_primary()) {
n_ext += btr_push_update_extern_fields(
@ -4933,12 +4925,12 @@ btr_cur_pessimistic_update(
DEBUG_SYNC_C("blob_rollback_middle");
btr_rec_free_updated_extern_fields(
index, rec, page_zip, *offsets, update, true, mtr);
index, rec, block, *offsets, update, true, mtr);
}
if (page_zip_rec_needs_ext(
rec_get_converted_size(index, new_entry, n_ext),
page_is_comp(page),
page_is_comp(block->frame),
dict_index_get_n_fields(index),
block->zip_size())
|| (UNIV_UNLIKELY(update->is_alter_metadata())
@ -4954,14 +4946,15 @@ btr_cur_pessimistic_update(
BTR_KEEP_IBUF_BITMAP. */
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip
|| page_zip_validate(page_zip, page, index));
|| page_zip_validate(page_zip, block->frame,
index));
#endif /* UNIV_ZIP_DEBUG */
index->table->space->release_free_extents(n_reserved);
err = DB_TOO_BIG_RECORD;
goto err_exit;
}
ut_ad(page_is_leaf(page));
ut_ad(page_is_leaf(block->frame));
ut_ad(dict_index_is_clust(index));
ut_ad(flags & BTR_KEEP_POS_FLAG);
}
@ -4996,10 +4989,9 @@ btr_cur_pessimistic_update(
btr_cur_write_sys(new_entry, index, trx_id, roll_ptr);
}
if (!page_zip) {
max_ins_size = page_get_max_insert_size_after_reorganize(
page, 1);
}
const ulint max_ins_size = page_zip
? 0 : page_get_max_insert_size_after_reorganize(block->frame,
1);
if (UNIV_UNLIKELY(is_metadata)) {
ut_ad(new_entry->is_metadata());
@ -5027,7 +5019,7 @@ btr_cur_pessimistic_update(
}
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
page_cursor = btr_cur_get_page_cur(cursor);
@ -5058,8 +5050,8 @@ btr_cur_pessimistic_update(
|| rec_is_alter_metadata(rec, *index)) {
/* The new inserted record owns its possible externally
stored fields */
btr_cur_unmark_extern_fields(
page_zip, rec, index, *offsets, mtr);
btr_cur_unmark_extern_fields(btr_cur_get_block(cursor),
rec, index, *offsets, mtr);
} else {
/* In delete-marked records, DB_TRX_ID must
always refer to an existing undo log record. */
@ -5067,7 +5059,7 @@ btr_cur_pessimistic_update(
}
bool adjust = big_rec_vec && (flags & BTR_KEEP_POS_FLAG);
ut_ad(!adjust || page_is_leaf(page));
ut_ad(!adjust || page_is_leaf(block->frame));
if (btr_cur_compress_if_useful(cursor, adjust, mtr)) {
if (adjust) {
@ -5075,7 +5067,7 @@ btr_cur_pessimistic_update(
true, *offsets);
}
} else if (!dict_index_is_clust(index)
&& page_is_leaf(page)) {
&& page_is_leaf(block->frame)) {
/* Update the free bits in the insert buffer.
This is the same block which was skipped by
BTR_KEEP_IBUF_BITMAP. */
@ -5090,7 +5082,7 @@ btr_cur_pessimistic_update(
if (!srv_read_only_mode
&& !big_rec_vec
&& page_is_leaf(page)
&& page_is_leaf(block->frame)
&& !dict_index_is_online_ddl(index)) {
mtr_memo_release(mtr, dict_index_get_lock(index),
@ -5115,13 +5107,13 @@ btr_cur_pessimistic_update(
BTR_KEEP_IBUF_BITMAP. */
if (!dict_index_is_clust(index)
&& !index->table->is_temporary()
&& page_is_leaf(page)) {
&& page_is_leaf(block->frame)) {
ibuf_reset_free_bits(block);
}
}
if (big_rec_vec != NULL) {
ut_ad(page_is_leaf(page));
ut_ad(page_is_leaf(block->frame));
ut_ad(dict_index_is_clust(index));
ut_ad(flags & BTR_KEEP_POS_FLAG);
@ -5170,28 +5162,20 @@ btr_cur_pessimistic_update(
/* Update PAGE_MAX_TRX_ID in the index page header.
It was not updated by btr_cur_pessimistic_insert()
because of BTR_NO_LOCKING_FLAG. */
buf_block_t* rec_block;
rec_block = btr_cur_get_block(cursor);
page_update_max_trx_id(rec_block,
buf_block_get_page_zip(rec_block),
page_update_max_trx_id(btr_cur_get_block(cursor),
btr_cur_get_page_zip(cursor),
trx_id, mtr);
}
if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) {
/* The new inserted record owns its possible externally
stored fields */
buf_block_t* rec_block = btr_cur_get_block(cursor);
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
page = buf_block_get_frame(rec_block);
ut_a(!page_zip || page_zip_validate(page_zip, block->frame,
index));
#endif /* UNIV_ZIP_DEBUG */
page_zip = buf_block_get_page_zip(rec_block);
btr_cur_unmark_extern_fields(page_zip,
rec, index, *offsets, mtr);
btr_cur_unmark_extern_fields(btr_cur_get_block(cursor), rec,
index, *offsets, mtr);
} else {
/* In delete-marked records, DB_TRX_ID must
always refer to an existing undo log record. */
@ -5222,7 +5206,8 @@ btr_cur_pessimistic_update(
return_after_reservations:
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
ut_a(!page_zip || page_zip_validate(btr_cur_get_page_zip(cursor),
btr_cur_get_page(cursor), index));
#endif /* UNIV_ZIP_DEBUG */
index->table->space->release_free_extents(n_reserved);
@ -5393,7 +5378,6 @@ btr_cur_del_mark_set_clust_rec(
{
roll_ptr_t roll_ptr;
dberr_t err;
page_zip_des_t* page_zip;
trx_t* trx;
ut_ad(dict_index_is_clust(index));
@ -5431,7 +5415,7 @@ btr_cur_del_mark_set_clust_rec(
the adaptive hash index does not depend on the delete-mark
and the delete-mark is being updated in place. */
page_zip = buf_block_get_page_zip(block);
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
btr_rec_set_deleted_flag(rec, page_zip, TRUE);
@ -5905,7 +5889,7 @@ btr_cur_pessimistic_delete(
if (rec_offs_any_extern(offsets)) {
btr_rec_free_externally_stored_fields(index,
rec, offsets, page_zip,
rec, offsets, block,
rollback, mtr);
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
@ -7144,13 +7128,12 @@ static
void
btr_cur_set_ownership_of_extern_field(
/*==================================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: clustered index record */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
ulint i, /*!< in: field number */
ibool val, /*!< in: value to set */
bool val, /*!< in: value to set */
mtr_t* mtr) /*!< in: mtr, or NULL if not logged */
{
byte* data;
@ -7174,15 +7157,14 @@ btr_cur_set_ownership_of_extern_field(
byte_val |= BTR_EXTERN_OWNER_FLAG;
}
if (page_zip) {
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
mach_write_to_1(data + local_len + BTR_EXTERN_LEN, byte_val);
page_zip_write_blob_ptr(page_zip, rec, index, offsets, i, mtr);
} else if (mtr != NULL) {
mlog_write_ulint(data + local_len + BTR_EXTERN_LEN, byte_val,
MLOG_1BYTE, mtr);
page_zip_write_blob_ptr(&block->page.zip, rec, index, offsets,
i, mtr);
} else {
mach_write_to_1(data + local_len + BTR_EXTERN_LEN, byte_val);
mtr->write<1,mtr_t::OPT>(*block,
data + local_len + BTR_EXTERN_LEN,
byte_val);
}
}
@ -7194,8 +7176,7 @@ to free the field. */
void
btr_cur_disown_inherited_fields(
/*============================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
@ -7212,7 +7193,7 @@ btr_cur_disown_inherited_fields(
if (rec_offs_nth_extern(offsets, i)
&& !upd_get_field_by_field_no(update, i, false)) {
btr_cur_set_ownership_of_extern_field(
page_zip, rec, index, offsets, i, FALSE, mtr);
block, rec, index, offsets, i, false, mtr);
}
}
}
@ -7225,29 +7206,23 @@ static
void
btr_cur_unmark_extern_fields(
/*=========================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
mtr_t* mtr) /*!< in: mtr, or NULL if not logged */
{
ulint n;
ulint i;
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));
n = rec_offs_n_fields(offsets);
if (!rec_offs_any_extern(offsets)) {
return;
}
for (i = 0; i < n; i++) {
if (rec_offs_nth_extern(offsets, i)) {
const ulint n = rec_offs_n_fields(offsets);
for (ulint i = 0; i < n; i++) {
if (rec_offs_nth_extern(offsets, i)) {
btr_cur_set_ownership_of_extern_field(
page_zip, rec, index, offsets, i, TRUE, mtr);
block, rec, index, offsets, i, true, mtr);
}
}
}
@ -7648,7 +7623,6 @@ btr_store_big_rec_extern_fields(
for (ulint blob_npages = 0;; ++blob_npages) {
buf_block_t* block;
page_t* page;
const ulint commit_freq = 4;
ulint r_extents;
@ -7711,11 +7685,9 @@ btr_store_big_rec_extern_fields(
ut_a(block != NULL);
page_no = block->page.id.page_no();
page = buf_block_get_frame(block);
if (prev_page_no != FIL_NULL) {
buf_block_t* prev_block;
page_t* prev_page;
prev_block = buf_page_get(
page_id_t(space_id, prev_page_no),
@ -7724,23 +7696,25 @@ btr_store_big_rec_extern_fields(
buf_block_dbg_add_level(prev_block,
SYNC_EXTERN_STORAGE);
prev_page = buf_block_get_frame(prev_block);
if (page_zip) {
mlog_write_ulint(
prev_page + FIL_PAGE_NEXT,
page_no, MLOG_4BYTES, &mtr);
memcpy(buf_block_get_page_zip(
prev_block)
->data + FIL_PAGE_NEXT,
prev_page + FIL_PAGE_NEXT, 4);
mtr.write<4>(*prev_block,
prev_block->frame
+ FIL_PAGE_NEXT,
page_no);
memcpy_aligned<4>(
buf_block_get_page_zip(
prev_block)
->data + FIL_PAGE_NEXT,
prev_block->frame
+ FIL_PAGE_NEXT, 4);
} else {
mlog_write_ulint(
prev_page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_NEXT_PAGE_NO,
page_no, MLOG_4BYTES, &mtr);
mtr.write<4>(*prev_block,
BTR_BLOB_HDR_NEXT_PAGE_NO
+ FIL_PAGE_DATA
+ prev_block->frame,
page_no);
}
} else if (dict_index_is_online_ddl(index)) {
row_log_table_blob_alloc(index, page_no);
}
@ -7751,7 +7725,7 @@ btr_store_big_rec_extern_fields(
/* Write FIL_PAGE_TYPE to the redo log
separately, before logging any other
changes to the page, so that the debug
changes to the block, so that the debug
assertions in
recv_parse_or_apply_log_rec_body() can
be made simpler. Before InnoDB Plugin
@ -7759,13 +7733,13 @@ btr_store_big_rec_extern_fields(
FIL_PAGE_TYPE was logged as part of
the mlog_log_string() below. */
mlog_write_ulint(page + FIL_PAGE_TYPE,
prev_page_no == FIL_NULL
? FIL_PAGE_TYPE_ZBLOB
: FIL_PAGE_TYPE_ZBLOB2,
MLOG_2BYTES, &mtr);
mtr.write<2>(*block,
block->frame + FIL_PAGE_TYPE,
prev_page_no == FIL_NULL
? FIL_PAGE_TYPE_ZBLOB
: FIL_PAGE_TYPE_ZBLOB2);
c_stream.next_out = page
c_stream.next_out = block->frame
+ FIL_PAGE_DATA;
c_stream.avail_out = static_cast<uInt>(
payload_size_zip);
@ -7799,15 +7773,13 @@ btr_store_big_rec_extern_fields(
Number */
ut_ad(!dict_index_is_spatial(index));
mlog_write_ulint(page
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
space_id,
MLOG_4BYTES, &mtr);
mlog_write_ulint(page
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4,
rec_page_no,
MLOG_4BYTES, &mtr);
mlog_log_string(page
mtr.write<4>(*block, block->frame
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
space_id);
mtr.write<4>(*block, block->frame
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4,
rec_page_no);
mlog_log_string(block->frame
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
page_zip_get_size(page_zip)
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
@ -7828,7 +7800,7 @@ btr_store_big_rec_extern_fields(
ut_ad(blob_page_zip);
ut_ad(page_zip_get_size(blob_page_zip)
== page_zip_get_size(page_zip));
memcpy(blob_page_zip->data, page,
memcpy(blob_page_zip->data, block->frame,
page_zip_get_size(page_zip));
if (err == Z_OK && prev_page_no != FIL_NULL) {
@ -7880,9 +7852,9 @@ next_zip_page:
break;
}
} else {
mlog_write_ulint(page + FIL_PAGE_TYPE,
FIL_PAGE_TYPE_BLOB,
MLOG_2BYTES, &mtr);
mtr.write<2>(*block, FIL_PAGE_TYPE
+ block->frame,
FIL_PAGE_TYPE_BLOB);
if (extern_len > payload_size) {
store_len = payload_size;
@ -7890,47 +7862,44 @@ next_zip_page:
store_len = extern_len;
}
mlog_write_string(page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_SIZE,
mlog_write_string(FIL_PAGE_DATA
+ BTR_BLOB_HDR_SIZE
+ block->frame,
(const byte*)
big_rec_vec->fields[i].data
+ big_rec_vec->fields[i].len
- extern_len,
store_len, &mtr);
mlog_write_ulint(page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_PART_LEN,
store_len, MLOG_4BYTES, &mtr);
mlog_write_ulint(page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_NEXT_PAGE_NO,
FIL_NULL, MLOG_4BYTES, &mtr);
mtr.write<4>(*block, BTR_BLOB_HDR_PART_LEN
+ FIL_PAGE_DATA + block->frame,
store_len);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(block, BTR_BLOB_HDR_NEXT_PAGE_NO
+ FIL_PAGE_DATA, 4, 0xff, &mtr);
extern_len -= store_len;
ut_ad(!mach_read_from_4(BTR_EXTERN_LEN
+ field_ref));
mlog_write_ulint(field_ref
+ BTR_EXTERN_LEN + 4,
big_rec_vec->fields[i].len
- extern_len,
MLOG_4BYTES, &mtr);
mtr.write<4>(*rec_block,
BTR_EXTERN_LEN + 4 + field_ref,
big_rec_vec->fields[i].len
- extern_len);
if (prev_page_no == FIL_NULL) {
ut_ad(blob_npages == 0);
mlog_write_ulint(field_ref
+ BTR_EXTERN_SPACE_ID,
space_id, MLOG_4BYTES,
&mtr);
mtr.write<4,mtr_t::OPT>(
*rec_block,
field_ref + BTR_EXTERN_SPACE_ID,
space_id);
mlog_write_ulint(field_ref
+ BTR_EXTERN_PAGE_NO,
page_no, MLOG_4BYTES,
&mtr);
mtr.write<4>(*rec_block, field_ref
+ BTR_EXTERN_PAGE_NO,
page_no);
mlog_write_ulint(field_ref
+ BTR_EXTERN_OFFSET,
FIL_PAGE_DATA,
MLOG_4BYTES,
&mtr);
mtr.write<4>(*rec_block, field_ref
+ BTR_EXTERN_OFFSET,
FIL_PAGE_DATA);
}
prev_page_no = page_no;
@ -8038,8 +8007,7 @@ btr_free_externally_stored_field(
page_zip_write_blob_ptr(), or NULL */
const ulint* offsets, /*!< in: rec_get_offsets(rec, index),
or NULL */
page_zip_des_t* page_zip, /*!< in: compressed page corresponding
to rec, or NULL if rec == NULL */
buf_block_t* block, /*!< in/out: page of field_ref */
ulint i, /*!< in: field number of field_ref;
ignored if rec == NULL */
bool rollback, /*!< in: performing rollback? */
@ -8084,10 +8052,8 @@ btr_free_externally_stored_field(
const ulint ext_zip_size = index->table->space->zip_size();
const ulint rec_zip_size = rec ? ext_zip_size : 0;
if (rec == NULL) {
/* This is a call from row_purge_upd_exist_or_extern(). */
ut_ad(!page_zip);
}
/* !rec holds in a call from purge when field_ref is in an undo page */
ut_ad(rec || !block->page.zip.data);
for (;;) {
#ifdef UNIV_DEBUG
@ -8156,24 +8122,23 @@ btr_free_externally_stored_field(
btr_page_free(index, ext_block, &mtr, true);
if (page_zip != NULL) {
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
mach_write_to_4(field_ref + BTR_EXTERN_PAGE_NO,
next_page_no);
mach_write_to_4(field_ref + BTR_EXTERN_LEN + 4,
0);
page_zip_write_blob_ptr(page_zip, rec, index,
memset(field_ref + BTR_EXTERN_LEN, 0, 4);
page_zip_write_blob_ptr(&block->page.zip,
rec, index,
offsets, i, &mtr);
} else {
mlog_write_ulint(field_ref
+ BTR_EXTERN_PAGE_NO,
next_page_no,
MLOG_4BYTES, &mtr);
mlog_write_ulint(field_ref
+ BTR_EXTERN_LEN + 4, 0,
MLOG_4BYTES, &mtr);
mtr.write<4>(*block,
BTR_EXTERN_PAGE_NO + field_ref,
next_page_no);
mtr.write<4>(*block,
BTR_EXTERN_LEN + 4 + field_ref,
0U);
}
} else {
ut_a(!page_zip);
ut_ad(!block->page.zip.data);
btr_check_blob_fil_page_type(space_id, page_no, page,
FALSE);
@ -8182,17 +8147,16 @@ btr_free_externally_stored_field(
+ BTR_BLOB_HDR_NEXT_PAGE_NO);
btr_page_free(index, ext_block, &mtr, true);
mlog_write_ulint(field_ref + BTR_EXTERN_PAGE_NO,
next_page_no,
MLOG_4BYTES, &mtr);
mtr.write<4>(*block, BTR_EXTERN_PAGE_NO + field_ref,
next_page_no);
/* Zero out the BLOB length. If the server
crashes during the execution of this function,
trx_rollback_all_recovered() could
dereference the half-deleted BLOB, fetching a
wrong prefix for the BLOB. */
mlog_write_ulint(field_ref + BTR_EXTERN_LEN + 4,
0,
MLOG_4BYTES, &mtr);
mtr.write<4,mtr_t::OPT>(*block,
BTR_EXTERN_LEN + 4 + field_ref,
0U);
}
/* Commit mtr and release the BLOB block to save memory. */
@ -8210,8 +8174,7 @@ btr_rec_free_externally_stored_fields(
tree MUST be X-latched */
rec_t* rec, /*!< in/out: record */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in: index page of rec */
bool rollback,/*!< in: performing rollback? */
mtr_t* mtr) /*!< in: mini-transaction handle which contains
an X-latch to record page and to the index
@ -8233,7 +8196,7 @@ btr_rec_free_externally_stored_fields(
if (rec_offs_nth_extern(offsets, i)) {
btr_free_externally_stored_field(
index, btr_rec_get_field_ref(rec, offsets, i),
rec, offsets, page_zip, i, rollback, mtr);
rec, offsets, block, i, rollback, mtr);
}
}
}
@ -8248,8 +8211,7 @@ btr_rec_free_updated_extern_fields(
dict_index_t* index, /*!< in: index of rec; the index tree MUST be
X-latched */
rec_t* rec, /*!< in/out: record */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in: index page of rec */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const upd_t* update, /*!< in: update vector */
bool rollback,/*!< in: performing rollback? */
@ -8277,7 +8239,7 @@ btr_rec_free_updated_extern_fields(
btr_free_externally_stored_field(
index, data + len - BTR_EXTERN_FIELD_REF_SIZE,
rec, offsets, page_zip,
rec, offsets, block,
ufield->field_no, rollback, mtr);
}
}

View file

@ -71,25 +71,13 @@ buf_dblwr_page_inside(
return(FALSE);
}
/****************************************************************//**
Calls buf_page_get() on the TRX_SYS_PAGE and returns a pointer to the
doublewrite buffer within it.
@return pointer to the doublewrite buffer within the filespace header
page. */
UNIV_INLINE
byte*
buf_dblwr_get(
/*==========*/
mtr_t* mtr) /*!< in/out: MTR to hold the page latch */
/** @return the TRX_SYS page */
inline buf_block_t *buf_dblwr_trx_sys_get(mtr_t *mtr)
{
buf_block_t* block;
block = buf_page_get(page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
return(buf_block_get_frame(block) + TRX_SYS_DOUBLEWRITE);
buf_block_t *block= buf_page_get(page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
return block;
}
/********************************************************************//**
@ -106,12 +94,7 @@ buf_dblwr_sync_datafiles()
/****************************************************************//**
Creates or initialializes the doublewrite buffer at a database start. */
static
void
buf_dblwr_init(
/*===========*/
byte* doublewrite) /*!< in: pointer to the doublewrite buf
header on trx sys page */
static void buf_dblwr_init(const byte *doublewrite)
{
ulint buf_size;
@ -164,7 +147,6 @@ buf_dblwr_create()
{
buf_block_t* block2;
buf_block_t* new_block;
byte* doublewrite;
byte* fseg_header;
ulint page_no;
ulint prev_page_no;
@ -180,14 +162,15 @@ start_again:
mtr.start();
buf_dblwr_being_created = TRUE;
doublewrite = buf_dblwr_get(&mtr);
buf_block_t *trx_sys_block = buf_dblwr_trx_sys_get(&mtr);
if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC)
if (mach_read_from_4(TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
+ trx_sys_block->frame)
== TRX_SYS_DOUBLEWRITE_MAGIC_N) {
/* The doublewrite buffer has already been created:
just read in some numbers */
buf_dblwr_init(doublewrite);
buf_dblwr_init(TRX_SYS_DOUBLEWRITE + trx_sys_block->frame);
mtr.commit();
buf_dblwr_being_created = FALSE;
@ -229,7 +212,8 @@ too_small:
buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK);
fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG;
fseg_header = TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG
+ trx_sys_block->frame;
prev_page_no = 0;
for (i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
@ -265,30 +249,38 @@ too_small:
recv_parse_or_apply_log_rec_body() will see a valid
page type. The flushes of new_block are actually
unnecessary here. */
ut_d(mlog_write_ulint(FIL_PAGE_TYPE + new_block->frame,
FIL_PAGE_TYPE_SYS, MLOG_2BYTES, &mtr));
ut_d(mtr.write<2>(*new_block,
FIL_PAGE_TYPE + new_block->frame,
FIL_PAGE_TYPE_SYS));
if (i == FSP_EXTENT_SIZE / 2) {
ut_a(page_no == FSP_EXTENT_SIZE);
mlog_write_ulint(doublewrite
+ TRX_SYS_DOUBLEWRITE_BLOCK1,
page_no, MLOG_4BYTES, &mtr);
mlog_write_ulint(doublewrite
+ TRX_SYS_DOUBLEWRITE_REPEAT
+ TRX_SYS_DOUBLEWRITE_BLOCK1,
page_no, MLOG_4BYTES, &mtr);
mtr.write<4>(*trx_sys_block,
TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_BLOCK1
+ trx_sys_block->frame,
page_no);
mtr.write<4>(*trx_sys_block,
TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_REPEAT
+ TRX_SYS_DOUBLEWRITE_BLOCK1
+ trx_sys_block->frame,
page_no);
} else if (i == FSP_EXTENT_SIZE / 2
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
ut_a(page_no == 2 * FSP_EXTENT_SIZE);
mlog_write_ulint(doublewrite
+ TRX_SYS_DOUBLEWRITE_BLOCK2,
page_no, MLOG_4BYTES, &mtr);
mlog_write_ulint(doublewrite
+ TRX_SYS_DOUBLEWRITE_REPEAT
+ TRX_SYS_DOUBLEWRITE_BLOCK2,
page_no, MLOG_4BYTES, &mtr);
mtr.write<4>(*trx_sys_block,
TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_BLOCK2
+ trx_sys_block->frame,
page_no);
mtr.write<4>(*trx_sys_block,
TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_REPEAT
+ TRX_SYS_DOUBLEWRITE_BLOCK2
+ trx_sys_block->frame,
page_no);
} else if (i > FSP_EXTENT_SIZE / 2) {
ut_a(page_no == prev_page_no + 1);
}
@ -303,29 +295,32 @@ too_small:
lock the fseg header too many times. Since
this code is not done while any other threads
are active, restart the MTR occasionally. */
mtr_commit(&mtr);
mtr_start(&mtr);
doublewrite = buf_dblwr_get(&mtr);
fseg_header = doublewrite
+ TRX_SYS_DOUBLEWRITE_FSEG;
mtr.commit();
mtr.start();
trx_sys_block = buf_dblwr_trx_sys_get(&mtr);
fseg_header = TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_FSEG
+ trx_sys_block->frame;
}
prev_page_no = page_no;
}
mlog_write_ulint(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC,
TRX_SYS_DOUBLEWRITE_MAGIC_N,
MLOG_4BYTES, &mtr);
mlog_write_ulint(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC
+ TRX_SYS_DOUBLEWRITE_REPEAT,
TRX_SYS_DOUBLEWRITE_MAGIC_N,
MLOG_4BYTES, &mtr);
mtr.write<4>(*trx_sys_block,
TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
+ trx_sys_block->frame,
TRX_SYS_DOUBLEWRITE_MAGIC_N);
mtr.write<4>(*trx_sys_block,
TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
+ TRX_SYS_DOUBLEWRITE_REPEAT
+ trx_sys_block->frame,
TRX_SYS_DOUBLEWRITE_MAGIC_N);
mlog_write_ulint(doublewrite
+ TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED,
TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N,
MLOG_4BYTES, &mtr);
mtr_commit(&mtr);
mtr.write<4>(*trx_sys_block,
TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED
+ trx_sys_block->frame,
TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N);
mtr.commit();
/* Flush the modified pages to disk and make a checkpoint */
log_make_checkpoint();

View file

@ -35,24 +35,13 @@ Created 4/18/1996 Heikki Tuuri
#include "log0recv.h"
#include "os0file.h"
/**********************************************************************//**
Gets a pointer to the dictionary header and x-latches its page.
@return pointer to the dictionary header, page x-latched */
dict_hdr_t*
dict_hdr_get(
/*=========*/
mtr_t* mtr) /*!< in: mtr */
/** @return the DICT_HDR block, x-latched */
buf_block_t *dict_hdr_get(mtr_t* mtr)
{
buf_block_t* block;
dict_hdr_t* header;
block = buf_page_get(page_id_t(DICT_HDR_SPACE, DICT_HDR_PAGE_NO),
0, RW_X_LATCH, mtr);
header = DICT_HDR + buf_block_get_frame(block);
buf_block_dbg_add_level(block, SYNC_DICT_HEADER);
return(header);
buf_block_t *block= buf_page_get(page_id_t(DICT_HDR_SPACE, DICT_HDR_PAGE_NO),
0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_DICT_HEADER);
return block;
}
/**********************************************************************//**
@ -67,36 +56,41 @@ dict_hdr_get_new_id(
ulint* space_id) /*!< out: space id
(not assigned if NULL) */
{
dict_hdr_t* dict_hdr;
ib_id_t id;
mtr_t mtr;
mtr_start(&mtr);
dict_hdr = dict_hdr_get(&mtr);
mtr.start();
buf_block_t* dict_hdr = dict_hdr_get(&mtr);
if (table_id) {
id = mach_read_from_8(dict_hdr + DICT_HDR_TABLE_ID);
id = mach_read_from_8(DICT_HDR + DICT_HDR_TABLE_ID
+ dict_hdr->frame);
id++;
mlog_write_ull(dict_hdr + DICT_HDR_TABLE_ID, id, &mtr);
mtr.write<8>(*dict_hdr, DICT_HDR + DICT_HDR_TABLE_ID
+ dict_hdr->frame, id);
*table_id = id;
}
if (index_id) {
id = mach_read_from_8(dict_hdr + DICT_HDR_INDEX_ID);
id = mach_read_from_8(DICT_HDR + DICT_HDR_INDEX_ID
+ dict_hdr->frame);
id++;
mlog_write_ull(dict_hdr + DICT_HDR_INDEX_ID, id, &mtr);
mtr.write<8>(*dict_hdr, DICT_HDR + DICT_HDR_INDEX_ID
+ dict_hdr->frame, id);
*index_id = id;
}
if (space_id) {
*space_id = mach_read_from_4(dict_hdr + DICT_HDR_MAX_SPACE_ID);
*space_id = mach_read_from_4(DICT_HDR + DICT_HDR_MAX_SPACE_ID
+ dict_hdr->frame);
if (fil_assign_new_space_id(space_id)) {
mlog_write_ulint(dict_hdr + DICT_HDR_MAX_SPACE_ID,
*space_id, MLOG_4BYTES, &mtr);
mtr.write<4>(*dict_hdr,
DICT_HDR + DICT_HDR_MAX_SPACE_ID
+ dict_hdr->frame, *space_id);
}
}
mtr_commit(&mtr);
mtr.commit();
}
/**********************************************************************//**
@ -106,7 +100,6 @@ void
dict_hdr_flush_row_id(void)
/*=======================*/
{
dict_hdr_t* dict_hdr;
row_id_t id;
mtr_t mtr;
@ -114,13 +107,13 @@ dict_hdr_flush_row_id(void)
id = dict_sys.row_id;
mtr_start(&mtr);
mtr.start();
dict_hdr = dict_hdr_get(&mtr);
buf_block_t* d = dict_hdr_get(&mtr);
mlog_write_ull(dict_hdr + DICT_HDR_ROW_ID, id, &mtr);
mtr.write<8>(*d, DICT_HDR + DICT_HDR_ROW_ID + d->frame, id);
mtr_commit(&mtr);
mtr.commit();
}
/*****************************************************************//**
@ -134,7 +127,6 @@ dict_hdr_create(
mtr_t* mtr) /*!< in: mtr */
{
buf_block_t* block;
dict_hdr_t* dict_header;
ulint root_page_no;
ut_ad(mtr);
@ -147,24 +139,22 @@ dict_hdr_create(
ut_a(DICT_HDR_PAGE_NO == block->page.id.page_no());
dict_header = dict_hdr_get(mtr);
buf_block_t* d = dict_hdr_get(mtr);
/* Start counting row, table, index, and tree ids from
DICT_HDR_FIRST_ID */
mlog_write_ull(dict_header + DICT_HDR_ROW_ID,
DICT_HDR_FIRST_ID, mtr);
mtr->write<8>(*d, DICT_HDR + DICT_HDR_ROW_ID + d->frame,
DICT_HDR_FIRST_ID);
mtr->write<8>(*d, DICT_HDR + DICT_HDR_TABLE_ID + d->frame,
DICT_HDR_FIRST_ID);
mtr->write<8>(*d, DICT_HDR + DICT_HDR_INDEX_ID + d->frame,
DICT_HDR_FIRST_ID);
mlog_write_ull(dict_header + DICT_HDR_TABLE_ID,
DICT_HDR_FIRST_ID, mtr);
mlog_write_ull(dict_header + DICT_HDR_INDEX_ID,
DICT_HDR_FIRST_ID, mtr);
ut_ad(mach_read_from_4(dict_header + DICT_HDR_MAX_SPACE_ID) == 0);
ut_ad(!mach_read_from_4(DICT_HDR + DICT_HDR_MAX_SPACE_ID + d->frame));
/* Obsolete, but we must initialize it anyway. */
mlog_write_ulint(dict_header + DICT_HDR_MIX_ID_LOW,
DICT_HDR_FIRST_ID, MLOG_4BYTES, mtr);
mtr->write<4>(*d, DICT_HDR + DICT_HDR_MIX_ID_LOW + d->frame,
DICT_HDR_FIRST_ID);
/* Create the B-tree roots for the clustered indexes of the basic
system tables */
@ -178,8 +168,7 @@ dict_hdr_create(
return(FALSE);
}
mlog_write_ulint(dict_header + DICT_HDR_TABLES, root_page_no,
MLOG_4BYTES, mtr);
mtr->write<4>(*d, DICT_HDR + DICT_HDR_TABLES + d->frame, root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_UNIQUE,
fil_system.sys_space, DICT_TABLE_IDS_ID,
@ -189,8 +178,8 @@ dict_hdr_create(
return(FALSE);
}
mlog_write_ulint(dict_header + DICT_HDR_TABLE_IDS, root_page_no,
MLOG_4BYTES, mtr);
mtr->write<4>(*d, DICT_HDR + DICT_HDR_TABLE_IDS + d->frame,
root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_COLUMNS_ID,
@ -200,8 +189,8 @@ dict_hdr_create(
return(FALSE);
}
mlog_write_ulint(dict_header + DICT_HDR_COLUMNS, root_page_no,
MLOG_4BYTES, mtr);
mtr->write<4>(*d, DICT_HDR + DICT_HDR_COLUMNS + d->frame,
root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_INDEXES_ID,
@ -211,8 +200,8 @@ dict_hdr_create(
return(FALSE);
}
mlog_write_ulint(dict_header + DICT_HDR_INDEXES, root_page_no,
MLOG_4BYTES, mtr);
mtr->write<4>(*d, DICT_HDR + DICT_HDR_INDEXES + d->frame,
root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_FIELDS_ID,
@ -222,8 +211,7 @@ dict_hdr_create(
return(FALSE);
}
mlog_write_ulint(dict_header + DICT_HDR_FIELDS, root_page_no,
MLOG_4BYTES, mtr);
mtr->write<4>(*d, DICT_HDR + DICT_HDR_FIELDS + d->frame, root_page_no);
/*--------------------------*/
return(TRUE);
@ -239,7 +227,6 @@ dict_boot(void)
{
dict_table_t* table;
dict_index_t* index;
dict_hdr_t* dict_hdr;
mem_heap_t* heap;
mtr_t mtr;
@ -271,7 +258,7 @@ dict_boot(void)
mutex_enter(&dict_sys.mutex);
/* Get the dictionary header */
dict_hdr = dict_hdr_get(&mtr);
const byte* dict_hdr = &dict_hdr_get(&mtr)->frame[DICT_HDR];
/* Because we only write new row ids to disk-based data structure
(dictionary header) when it is divisible by

View file

@ -373,16 +373,18 @@ dict_build_table_def_step(
mtr.start();
undo->table_id = trx->table_id;
undo->dict_operation = TRUE;
page_t* page = trx_undo_page_get(
buf_block_t* block = trx_undo_page_get(
page_id_t(trx->rsegs.m_redo.rseg->space->id,
undo->hdr_page_no),
&mtr);
mlog_write_ulint(page + undo->hdr_offset
+ TRX_UNDO_DICT_TRANS,
TRUE, MLOG_1BYTE, &mtr);
mlog_write_ull(page + undo->hdr_offset
+ TRX_UNDO_TABLE_ID,
trx->table_id, &mtr);
mtr.write<1,mtr_t::OPT>(
*block,
block->frame + undo->hdr_offset
+ TRX_UNDO_DICT_TRANS, 1U);
mtr.write<8,mtr_t::OPT>(
*block,
block->frame + undo->hdr_offset
+ TRX_UNDO_TABLE_ID, trx->table_id);
mtr.commit();
log_write_up_to(mtr.commit_lsn(), true);
}
@ -851,14 +853,13 @@ dict_create_index_tree_step(
err = DB_OUT_OF_FILE_SPACE; );
}
ulint len;
byte* data = rec_get_nth_field_old(btr_pcur_get_rec(&pcur),
ulint len;
byte* data = rec_get_nth_field_old(btr_pcur_get_rec(&pcur),
DICT_FLD__SYS_INDEXES__PAGE_NO,
&len);
ut_ad(len == 4);
if (mach_read_from_4(data) != node->page_no) {
mlog_write_ulint(data, node->page_no, MLOG_4BYTES, &mtr);
}
mtr.write<4,mtr_t::OPT>(*btr_pcur_get_block(&pcur), data,
node->page_no);
mtr.commit();

View file

@ -4013,7 +4013,7 @@ dict_set_corrupted(
if (len != 4) {
goto fail;
}
mlog_write_ulint(field, index->type, MLOG_4BYTES, &mtr);
mtr.write<4>(*btr_cur_get_block(&cursor), field, index->type);
status = "Flagged";
} else {
fail:
@ -4113,11 +4113,8 @@ dict_index_set_merge_threshold(
DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD, &len);
ut_ad(len == 4);
if (len == 4) {
mlog_write_ulint(field, merge_threshold,
MLOG_4BYTES, &mtr);
}
mtr.write<4,mtr_t::OPT>(*btr_cur_get_block(&cursor), field,
merge_threshold);
}
mtr_commit(&mtr);

View file

@ -1481,7 +1481,8 @@ void dict_check_tablespaces_and_store_max_id()
/* Initialize the max space_id from sys header */
mtr.start();
ulint max_space_id = mach_read_from_4(DICT_HDR_MAX_SPACE_ID
+ dict_hdr_get(&mtr));
+ DICT_HDR
+ dict_hdr_get(&mtr)->frame);
mtr.commit();
fil_set_max_space_id_if_bigger(max_space_id);

View file

@ -2035,8 +2035,9 @@ fil_crypt_rotate_page(
modified = true;
/* force rotation by dummy updating page */
mlog_write_ulint(frame + FIL_PAGE_SPACE_ID,
space_id, MLOG_4BYTES, &mtr);
mtr.write<1,mtr_t::FORCED>(*block,
&frame[FIL_PAGE_SPACE_ID],
frame[FIL_PAGE_SPACE_ID]);
/* statistics */
state->crypt_stat.pages_modified++;

View file

@ -3899,8 +3899,8 @@ void fsp_flags_try_adjust(fil_space_t* space, ulint flags)
<< " to " << ib::hex(flags);
}
mtr.set_named_space(space);
mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS
+ b->frame, flags, MLOG_4BYTES, &mtr);
mtr.write<4>(*b, FSP_HEADER_OFFSET + FSP_SPACE_FLAGS
+ b->frame, flags);
}
func_exit:
mtr.commit();

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -27,434 +28,300 @@ Created 11/28/1995 Heikki Tuuri
#include "buf0buf.h"
#include "page0page.h"
/********************************************************************//**
Adds a node to an empty list. */
static
void
flst_add_to_empty(
/*==============*/
flst_base_node_t* base, /*!< in: pointer to base node of
empty list */
flst_node_t* node, /*!< in: node to add */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Add a node to an empty list. */
static void flst_add_to_empty(buf_block_t *base, uint16_t boffset,
buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
ulint space;
fil_addr_t node_addr;
ut_ad(base != add || boffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
fil_addr_t addr= { add->page.id.page_no(), aoffset };
ut_ad(mtr && base && node);
ut_ad(base != node);
ut_ad(mtr_memo_contains_page_flagged(mtr, base,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_a(!flst_get_len(base));
/* Update first and last fields of base node */
flst_write_addr(*base, base->frame + boffset + FLST_FIRST, addr, mtr);
/* MDEV-12353 TODO: use MEMMOVE record */
flst_write_addr(*base, base->frame + boffset + FLST_LAST, addr, mtr);
buf_ptr_get_fsp_addr(node, &space, &node_addr);
/* Set prev and next fields of node to add */
flst_zero_addr(*add, add->frame + aoffset + FLST_PREV, mtr);
flst_zero_addr(*add, add->frame + aoffset + FLST_NEXT, mtr);
/* Update first and last fields of base node */
flst_write_addr(base + FLST_FIRST, node_addr, mtr);
flst_write_addr(base + FLST_LAST, node_addr, mtr);
/* Set prev and next fields of node to add */
flst_zero_addr(node + FLST_PREV, mtr);
flst_zero_addr(node + FLST_NEXT, mtr);
/* Update len of base node */
mlog_write_ulint(base + FLST_LEN, 1, MLOG_4BYTES, mtr);
/* Update len of base node */
ut_ad(!mach_read_from_4(base->frame + boffset + FLST_LEN));
mtr->write<1>(*base, base->frame + boffset + (FLST_LEN + 3), 1U);
}
/********************************************************************//**
Inserts a node after another in a list. */
static
void
flst_insert_after(
/*==============*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node1, /*!< in: node to insert after */
flst_node_t* node2, /*!< in: node to add */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************************//**
Inserts a node before another in a list. */
static
void
flst_insert_before(
/*===============*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node2, /*!< in: node to insert */
flst_node_t* node3, /*!< in: node to insert before */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************************//**
Adds a node as the last node in a list. */
void
flst_add_last(
/*==========*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node, /*!< in: node to add */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Insert a node after another one.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] cur insert position block
@param[in] coffset byte offset of the insert position
@param[in,out] add block to be added
@param[in] aoffset byte offset of the block to be added
@param[in,outr] mtr mini-transaction */
static void flst_insert_after(buf_block_t *base, uint16_t boffset,
buf_block_t *cur, uint16_t coffset,
buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
ulint space;
fil_addr_t node_addr;
ulint len;
fil_addr_t last_addr;
ut_ad(base != cur || boffset != coffset);
ut_ad(base != add || boffset != aoffset);
ut_ad(cur != add || coffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(coffset < cur->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr && base && node);
ut_ad(base != node);
ut_ad(mtr_memo_contains_page_flagged(mtr, base,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
len = flst_get_len(base);
last_addr = flst_get_last(base);
fil_addr_t cur_addr= { cur->page.id.page_no(), coffset };
fil_addr_t add_addr= { add->page.id.page_no(), aoffset };
fil_addr_t next_addr= flst_get_next_addr(cur->frame + coffset);
buf_ptr_get_fsp_addr(node, &space, &node_addr);
flst_write_addr(*add, add->frame + aoffset + FLST_PREV, cur_addr, mtr);
flst_write_addr(*add, add->frame + aoffset + FLST_NEXT, next_addr, mtr);
/* If the list is not empty, call flst_insert_after */
if (len != 0) {
flst_node_t* last_node;
if (fil_addr_is_null(next_addr))
flst_write_addr(*base, base->frame + boffset + FLST_LAST, add_addr, mtr);
else
{
buf_block_t *block;
flst_node_t *next= fut_get_ptr(add->page.id.space(), add->zip_size(),
next_addr, RW_SX_LATCH, mtr, &block);
flst_write_addr(*block, next + FLST_PREV, add_addr, mtr);
}
if (last_addr.page == node_addr.page) {
last_node = page_align(node) + last_addr.boffset;
} else {
fil_space_t* s = fil_space_acquire_silent(space);
ulint zip_size = s ? s->zip_size() : 0;
if (s) s->release();
flst_write_addr(*cur, cur->frame + coffset + FLST_NEXT, add_addr, mtr);
last_node = fut_get_ptr(space, zip_size, last_addr,
RW_SX_LATCH, mtr);
}
flst_insert_after(base, last_node, node, mtr);
} else {
/* else call flst_add_to_empty */
flst_add_to_empty(base, node, mtr);
}
byte *len= &base->frame[boffset + FLST_LEN];
mtr->write<4>(*base, len, mach_read_from_4(len) + 1);
}
/********************************************************************//**
Adds a node as the first node in a list. */
void
flst_add_first(
/*===========*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node, /*!< in: node to add */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Insert a node before another one.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] cur insert position block
@param[in] coffset byte offset of the insert position
@param[in,out] add block to be added
@param[in] aoffset byte offset of the block to be added
@param[in,outr] mtr mini-transaction */
static void flst_insert_before(buf_block_t *base, uint16_t boffset,
buf_block_t *cur, uint16_t coffset,
buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
ulint space;
fil_addr_t node_addr;
ulint len;
fil_addr_t first_addr;
flst_node_t* first_node;
ut_ad(base != cur || boffset != coffset);
ut_ad(base != add || boffset != aoffset);
ut_ad(cur != add || coffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(coffset < cur->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr && base && node);
ut_ad(base != node);
ut_ad(mtr_memo_contains_page_flagged(mtr, base,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
len = flst_get_len(base);
first_addr = flst_get_first(base);
fil_addr_t cur_addr= { cur->page.id.page_no(), coffset };
fil_addr_t add_addr= { add->page.id.page_no(), aoffset };
fil_addr_t prev_addr= flst_get_prev_addr(cur->frame + coffset);
buf_ptr_get_fsp_addr(node, &space, &node_addr);
flst_write_addr(*add, add->frame + aoffset + FLST_PREV, prev_addr, mtr);
flst_write_addr(*add, add->frame + aoffset + FLST_NEXT, cur_addr, mtr);
/* If the list is not empty, call flst_insert_before */
if (len != 0) {
if (first_addr.page == node_addr.page) {
first_node = page_align(node) + first_addr.boffset;
} else {
fil_space_t* s = fil_space_acquire_silent(space);
ulint zip_size = s ? s->zip_size() : 0;
if (s) s->release();
if (fil_addr_is_null(prev_addr))
flst_write_addr(*base, base->frame + boffset + FLST_FIRST, add_addr, mtr);
else
{
buf_block_t *block;
flst_node_t *prev= fut_get_ptr(add->page.id.space(), add->zip_size(),
prev_addr, RW_SX_LATCH, mtr, &block);
flst_write_addr(*block, prev + FLST_NEXT, add_addr, mtr);
}
first_node = fut_get_ptr(space, zip_size, first_addr,
RW_SX_LATCH, mtr);
}
flst_write_addr(*cur, cur->frame + coffset + FLST_PREV, add_addr, mtr);
flst_insert_before(base, node, first_node, mtr);
} else {
/* else call flst_add_to_empty */
flst_add_to_empty(base, node, mtr);
}
byte *len= &base->frame[boffset + FLST_LEN];
mtr->write<4>(*base, len, mach_read_from_4(len) + 1);
}
/********************************************************************//**
Inserts a node after another in a list. */
static
void
flst_insert_after(
/*==============*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node1, /*!< in: node to insert after */
flst_node_t* node2, /*!< in: node to add */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Append a file list node to a list.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] add block to be added
@param[in] aoffset byte offset of the node to be added
@param[in,outr] mtr mini-transaction */
void flst_add_last(buf_block_t *base, uint16_t boffset,
buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
ulint space;
fil_addr_t node1_addr;
fil_addr_t node2_addr;
flst_node_t* node3;
fil_addr_t node3_addr;
ulint len;
ut_ad(base != add || boffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr && node1 && node2 && base);
ut_ad(base != node1);
ut_ad(base != node2);
ut_ad(node2 != node1);
ut_ad(mtr_memo_contains_page_flagged(mtr, base,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node1,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node2,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
buf_ptr_get_fsp_addr(node1, &space, &node1_addr);
buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
node3_addr = flst_get_next_addr(node1);
/* Set prev and next fields of node2 */
flst_write_addr(node2 + FLST_PREV, node1_addr, mtr);
flst_write_addr(node2 + FLST_NEXT, node3_addr, mtr);
if (!fil_addr_is_null(node3_addr)) {
/* Update prev field of node3 */
fil_space_t* s = fil_space_acquire_silent(space);
ulint zip_size = s ? s->zip_size() : 0;
if (s) s->release();
node3 = fut_get_ptr(space, zip_size,
node3_addr, RW_SX_LATCH, mtr);
flst_write_addr(node3 + FLST_PREV, node2_addr, mtr);
} else {
/* node1 was last in list: update last field in base */
flst_write_addr(base + FLST_LAST, node2_addr, mtr);
}
/* Set next field of node1 */
flst_write_addr(node1 + FLST_NEXT, node2_addr, mtr);
/* Update len of base node */
len = flst_get_len(base);
mlog_write_ulint(base + FLST_LEN, len + 1, MLOG_4BYTES, mtr);
if (!flst_get_len(base->frame + boffset))
flst_add_to_empty(base, boffset, add, aoffset, mtr);
else
{
fil_addr_t addr= flst_get_last(base->frame + boffset);
buf_block_t *cur= add;
const flst_node_t *c= addr.page == add->page.id.page_no()
? add->frame + addr.boffset
: fut_get_ptr(add->page.id.space(), add->zip_size(), addr,
RW_SX_LATCH, mtr, &cur);
flst_insert_after(base, boffset, cur,
static_cast<uint16_t>(c - cur->frame),
add, aoffset, mtr);
}
}
/********************************************************************//**
Inserts a node before another in a list. */
static
void
flst_insert_before(
/*===============*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node2, /*!< in: node to insert */
flst_node_t* node3, /*!< in: node to insert before */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Prepend a file list node to a list.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] add block to be added
@param[in] aoffset byte offset of the node to be added
@param[in,outr] mtr mini-transaction */
void flst_add_first(buf_block_t *base, uint16_t boffset,
buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
ulint space;
flst_node_t* node1;
fil_addr_t node1_addr;
fil_addr_t node2_addr;
fil_addr_t node3_addr;
ulint len;
ut_ad(base != add || boffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr && node2 && node3 && base);
ut_ad(base != node2);
ut_ad(base != node3);
ut_ad(node2 != node3);
ut_ad(mtr_memo_contains_page_flagged(mtr, base,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node2,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node3,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
buf_ptr_get_fsp_addr(node3, &space, &node3_addr);
node1_addr = flst_get_prev_addr(node3);
/* Set prev and next fields of node2 */
flst_write_addr(node2 + FLST_PREV, node1_addr, mtr);
flst_write_addr(node2 + FLST_NEXT, node3_addr, mtr);
if (!fil_addr_is_null(node1_addr)) {
fil_space_t* s = fil_space_acquire_silent(space);
ulint zip_size = s ? s->zip_size() : 0;
if (s) s->release();
/* Update next field of node1 */
node1 = fut_get_ptr(space, zip_size, node1_addr,
RW_SX_LATCH, mtr);
flst_write_addr(node1 + FLST_NEXT, node2_addr, mtr);
} else {
/* node3 was first in list: update first field in base */
flst_write_addr(base + FLST_FIRST, node2_addr, mtr);
}
/* Set prev field of node3 */
flst_write_addr(node3 + FLST_PREV, node2_addr, mtr);
/* Update len of base node */
len = flst_get_len(base);
mlog_write_ulint(base + FLST_LEN, len + 1, MLOG_4BYTES, mtr);
if (!flst_get_len(base->frame + boffset))
flst_add_to_empty(base, boffset, add, aoffset, mtr);
else
{
fil_addr_t addr= flst_get_first(base->frame + boffset);
buf_block_t *cur= add;
const flst_node_t *c= addr.page == add->page.id.page_no()
? add->frame + addr.boffset
: fut_get_ptr(add->page.id.space(), add->zip_size(), addr,
RW_SX_LATCH, mtr, &cur);
flst_insert_before(base, boffset, cur,
static_cast<uint16_t>(c - cur->frame),
add, aoffset, mtr);
}
}
/********************************************************************//**
Removes a node. */
void
flst_remove(
/*========*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node2, /*!< in: node to remove */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Remove a file list node.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] cur block to be removed
@param[in] coffset byte offset of the current record to be removed
@param[in,outr] mtr mini-transaction */
void flst_remove(buf_block_t *base, uint16_t boffset,
buf_block_t *cur, uint16_t coffset, mtr_t *mtr)
{
ulint space;
flst_node_t* node1;
fil_addr_t node1_addr;
fil_addr_t node2_addr;
flst_node_t* node3;
fil_addr_t node3_addr;
ulint len;
ut_ad(boffset < base->physical_size());
ut_ad(coffset < cur->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr && node2 && base);
ut_ad(mtr_memo_contains_page_flagged(mtr, base,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, node2,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
const fil_addr_t prev_addr= flst_get_prev_addr(cur->frame + coffset);
const fil_addr_t next_addr= flst_get_next_addr(cur->frame + coffset);
buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
if (fil_addr_is_null(prev_addr))
flst_write_addr(*base, base->frame + boffset + FLST_FIRST, next_addr, mtr);
else
{
buf_block_t *block= cur;
flst_node_t *prev= prev_addr.page == cur->page.id.page_no()
? cur->frame + prev_addr.boffset
: fut_get_ptr(cur->page.id.space(), cur->zip_size(), prev_addr,
RW_SX_LATCH, mtr, &block);
flst_write_addr(*block, prev + FLST_NEXT, next_addr, mtr);
}
fil_space_t* s = fil_space_acquire_silent(space);
ulint zip_size = s ? s->zip_size() : 0;
if (s) s->release();
if (fil_addr_is_null(next_addr))
flst_write_addr(*base, base->frame + boffset + FLST_LAST, prev_addr, mtr);
else
{
buf_block_t *block= cur;
flst_node_t *next= next_addr.page == cur->page.id.page_no()
? cur->frame + next_addr.boffset
: fut_get_ptr(cur->page.id.space(), cur->zip_size(), next_addr,
RW_SX_LATCH, mtr, &block);
flst_write_addr(*block, next + FLST_PREV, prev_addr, mtr);
}
node1_addr = flst_get_prev_addr(node2);
node3_addr = flst_get_next_addr(node2);
if (!fil_addr_is_null(node1_addr)) {
/* Update next field of node1 */
if (node1_addr.page == node2_addr.page) {
node1 = page_align(node2) + node1_addr.boffset;
} else {
node1 = fut_get_ptr(space, zip_size,
node1_addr, RW_SX_LATCH, mtr);
}
ut_ad(node1 != node2);
flst_write_addr(node1 + FLST_NEXT, node3_addr, mtr);
} else {
/* node2 was first in list: update first field in base */
flst_write_addr(base + FLST_FIRST, node3_addr, mtr);
}
if (!fil_addr_is_null(node3_addr)) {
/* Update prev field of node3 */
if (node3_addr.page == node2_addr.page) {
node3 = page_align(node2) + node3_addr.boffset;
} else {
node3 = fut_get_ptr(space, zip_size,
node3_addr, RW_SX_LATCH, mtr);
}
ut_ad(node2 != node3);
flst_write_addr(node3 + FLST_PREV, node1_addr, mtr);
} else {
/* node2 was last in list: update last field in base */
flst_write_addr(base + FLST_LAST, node1_addr, mtr);
}
/* Update len of base node */
len = flst_get_len(base);
ut_ad(len > 0);
mlog_write_ulint(base + FLST_LEN, len - 1, MLOG_4BYTES, mtr);
byte *len= &base->frame[boffset + FLST_LEN];
ut_ad(mach_read_from_4(len) > 0);
mtr->write<4>(*base, len, mach_read_from_4(len) - 1);
}
#ifdef UNIV_DEBUG
/** Validate a file-based list.
@param[in]	base	base node block
@param[in]	boffset	byte offset of the base node
@param[in,out]	mtr	mini-transaction that holds the base node latch */
void flst_validate(const buf_block_t *base, uint16_t boffset, mtr_t *mtr)
{
  ut_ad(boffset < base->physical_size());
  ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
                                       MTR_MEMO_PAGE_X_FIX |
                                       MTR_MEMO_PAGE_SX_FIX));

  /* We use two mini-transaction handles: the first is used to
  lock the base node, and prevent other threads from modifying the
  list. The second is used to traverse the list. We cannot run the
  second mtr without committing it at times, because if the list
  is long, then the x-locked pages could fill the buffer resulting
  in a deadlock. */
  mtr_t mtr2;

  const uint32_t len= flst_get_len(base->frame + boffset);
  fil_addr_t addr= flst_get_first(base->frame + boffset);

  /* Walk the list forwards; after len hops we must reach the end. */
  for (uint32_t i= len; i--; )
  {
    mtr2.start();
    const flst_node_t *node= fut_get_ptr(base->page.id.space(),
                                         base->zip_size(), addr,
                                         RW_SX_LATCH, &mtr2);
    addr= flst_get_next_addr(node);
    /* Commit mtr2 each round to prevent the buffer pool
    becoming full of latched pages. */
    mtr2.commit();
  }

  ut_ad(fil_addr_is_null(addr));

  addr= flst_get_last(base->frame + boffset);

  /* Walk the list backwards; again len hops must reach the end. */
  for (uint32_t i= len; i--; )
  {
    mtr2.start();
    const flst_node_t *node= fut_get_ptr(base->page.id.space(),
                                         base->zip_size(), addr,
                                         RW_SX_LATCH, &mtr2);
    addr= flst_get_prev_addr(node);
    mtr2.commit();
  }

  ut_ad(fil_addr_is_null(addr));
}
#endif

View file

@ -628,12 +628,8 @@ rtr_adjust_upper_level(
rtr_mbr_t* new_mbr, /*!< in: MBR on the new page */
mtr_t* mtr) /*!< in: mtr */
{
page_t* page;
page_t* new_page;
ulint page_no;
ulint new_page_no;
page_zip_des_t* page_zip;
page_zip_des_t* new_page_zip;
dict_index_t* index = sea_cur->index;
btr_cur_t cursor;
ulint* offsets;
@ -657,13 +653,9 @@ rtr_adjust_upper_level(
level = btr_page_get_level(buf_block_get_frame(block));
ut_ad(level == btr_page_get_level(buf_block_get_frame(new_block)));
page = buf_block_get_frame(block);
page_no = block->page.id.page_no();
page_zip = buf_block_get_page_zip(block);
new_page = buf_block_get_frame(new_block);
new_page_no = new_block->page.id.page_no();
new_page_zip = buf_block_get_page_zip(new_block);
/* Set new mbr for the old page on the upper level. */
/* Look up the index for the node pointer to page */
@ -672,7 +664,8 @@ rtr_adjust_upper_level(
page_cursor = btr_cur_get_page_cur(&cursor);
rtr_update_mbr_field(&cursor, offsets, NULL, page, mbr, NULL, mtr);
rtr_update_mbr_field(&cursor, offsets, NULL, block->frame, mbr, NULL,
mtr);
/* Already updated parent MBR, reset in our path */
if (sea_cur->rtr_info) {
@ -686,7 +679,7 @@ rtr_adjust_upper_level(
/* Insert the node for the new page. */
node_ptr_upper = rtr_index_build_node_ptr(
index, new_mbr,
page_rec_get_next(page_get_infimum_rec(new_page)),
page_rec_get_next(page_get_infimum_rec(new_block->frame)),
new_page_no, heap);
ulint up_match = 0;
@ -742,26 +735,25 @@ rtr_adjust_upper_level(
ut_ad(block->zip_size() == index->table->space->zip_size());
const uint32_t next_page_no = btr_page_get_next(page);
const uint32_t next_page_no = btr_page_get_next(block->frame);
if (next_page_no != FIL_NULL) {
buf_block_t* next_block = btr_block_get(
*index, next_page_no, RW_X_LATCH, false, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
ut_a(page_is_comp(next_block->frame)
== page_is_comp(block->frame));
ut_a(btr_page_get_prev(next_block->frame)
== block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
btr_page_set_prev(buf_block_get_frame(next_block),
buf_block_get_page_zip(next_block),
new_page_no, mtr);
btr_page_set_prev(next_block, new_page_no, mtr);
}
btr_page_set_next(page, page_zip, new_page_no, mtr);
btr_page_set_next(block, new_page_no, mtr);
btr_page_set_prev(new_page, new_page_zip, page_no, mtr);
btr_page_set_next(new_page, new_page_zip, next_page_no, mtr);
btr_page_set_prev(new_block, page_no, mtr);
btr_page_set_next(new_block, next_page_no, mtr);
}
/*************************************************************//**
@ -848,11 +840,8 @@ rtr_split_page_move_rec_list(
ut_ad(!is_leaf || cur_split_node->key != first_rec);
rec = page_cur_insert_rec_low(
page_cur_get_rec(&new_page_cursor),
index,
cur_split_node->key,
offsets,
mtr);
page_cur_get_rec(&new_page_cursor),
index, cur_split_node->key, offsets, mtr);
ut_a(rec);

View file

@ -17856,14 +17856,10 @@ func_exit:
space->zip_size(), RW_X_LATCH, &mtr);
if (block != NULL) {
byte* page = block->frame;
ib::info() << "Dirtying page: " << page_id_t(
page_get_space_id(page), page_get_page_no(page));
mlog_write_ulint(page + FIL_PAGE_TYPE,
fil_page_get_type(page),
MLOG_2BYTES, &mtr);
ib::info() << "Dirtying page: " << block->page.id;
mtr.write<1,mtr_t::FORCED>(*block,
block->frame + FIL_PAGE_SPACE_ID,
block->frame[FIL_PAGE_SPACE_ID]);
}
mtr.commit();
goto func_exit;

View file

@ -10198,12 +10198,12 @@ commit_cache_norebuild(
space->zip_size(),
RW_X_LATCH, &mtr)) {
mtr.set_named_space(space);
mlog_write_ulint(
mtr.write<4,mtr_t::OPT>(
*b,
FSP_HEADER_OFFSET
+ FSP_SPACE_FLAGS + b->frame,
space->flags
& ~FSP_FLAGS_MEM_MASK,
MLOG_4BYTES, &mtr);
& ~FSP_FLAGS_MEM_MASK);
}
mtr.commit();
}

View file

@ -332,17 +332,12 @@ ibuf_header_page_get(
return page;
}
/******************************************************************//**
Gets the root page and sx-latches it.
@return insert buffer tree root page */
static
page_t*
ibuf_tree_root_get(
/*===============*/
mtr_t* mtr) /*!< in: mtr */
/** Acquire the change buffer root page.
@param[in,out] mtr mini-transaction
@return change buffer root page, SX-latched */
static buf_block_t *ibuf_tree_root_get(mtr_t *mtr)
{
buf_block_t* block;
page_t* root;
ut_ad(ibuf_inside(mtr));
ut_ad(mutex_own(&ibuf_mutex));
@ -356,13 +351,11 @@ ibuf_tree_root_get(
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
root = buf_block_get_frame(block);
ut_ad(page_get_space_id(block->frame) == IBUF_SPACE_ID);
ut_ad(page_get_page_no(block->frame) == FSP_IBUF_TREE_ROOT_PAGE_NO);
ut_ad(ibuf.empty == page_is_empty(block->frame));
ut_ad(page_get_space_id(root) == IBUF_SPACE_ID);
ut_ad(page_get_page_no(root) == FSP_IBUF_TREE_ROOT_PAGE_NO);
ut_ad(ibuf.empty == page_is_empty(root));
return(root);
return block;
}
/******************************************************************//**
@ -624,29 +617,27 @@ ibuf_bitmap_page_get_bits_low(
}
/** Sets the desired bit for a given page in a bitmap page.
@param[in,out] page bitmap page
@tparam bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param[in,out] block bitmap page
@param[in] page_id page id whose bits to set
@param[in] physical_size page size
@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param[in] val value to set
@param[in,out] mtr mtr containing an x-latch to the bitmap page */
static
void
template<ulint bit>
static void
ibuf_bitmap_page_set_bits(
page_t* page,
buf_block_t* block,
const page_id_t page_id,
ulint physical_size,
ulint bit,
ulint val,
mtr_t* mtr)
{
ulint byte_offset;
ulint bit_offset;
ulint map_byte;
ut_ad(bit < IBUF_BITS_PER_PAGE);
static_assert(bit < IBUF_BITS_PER_PAGE, "wrong bit");
compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->is_named_space(page_id.space()));
bit_offset = (page_id.page_no() % physical_size)
@ -657,21 +648,22 @@ ibuf_bitmap_page_set_bits(
ut_ad(byte_offset + IBUF_BITMAP < srv_page_size);
map_byte = mach_read_from_1(page + IBUF_BITMAP + byte_offset);
byte* map_byte = &block->frame[IBUF_BITMAP + byte_offset];
byte b = *map_byte;
if (bit == IBUF_BITMAP_FREE) {
ut_ad(bit_offset + 1 < 8);
ut_ad(val <= 3);
map_byte = ut_bit_set_nth(map_byte, bit_offset, val / 2);
map_byte = ut_bit_set_nth(map_byte, bit_offset + 1, val % 2);
b &= ~(3U << bit_offset);
b |= (val & 2) << (bit_offset - 1)
| (val & 1) << (bit_offset + 1);
} else {
ut_ad(val <= 1);
map_byte = ut_bit_set_nth(map_byte, bit_offset, val);
b &= ~(1U << bit_offset);
b |= val << bit_offset;
}
mlog_write_ulint(page + IBUF_BITMAP + byte_offset, map_byte,
MLOG_1BYTE, mtr);
mtr->write<1,mtr_t::OPT>(*block, map_byte, b);
}
/** Calculates the bitmap page number for a given page number.
@ -697,7 +689,7 @@ stored.
page containing the descriptor bits for the file page; the bitmap page
is x-latched */
static
page_t*
buf_block_t*
ibuf_bitmap_get_map_page_func(
const page_id_t page_id,
ulint zip_size,
@ -718,8 +710,7 @@ ibuf_bitmap_get_map_page_func(
buf_block_dbg_add_level(block, SYNC_IBUF_BITMAP);
return(buf_block_get_frame(block));
return block;
}
/** Gets the ibuf bitmap page where the bits describing a given file page are
@ -749,31 +740,19 @@ ibuf_set_free_bits_low(
ulint val, /*!< in: value to set: < 4 */
mtr_t* mtr) /*!< in/out: mtr */
{
page_t* bitmap_page;
buf_frame_t* frame;
ut_ad(mtr->is_named_space(block->page.id.space()));
if (!block) {
if (!page_is_leaf(block->frame)) {
return;
}
frame = buf_block_get_frame(block);
if (!frame || !page_is_leaf(frame)) {
return;
}
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
block->zip_size(), mtr);
#ifdef UNIV_IBUF_DEBUG
ut_a(val <= ibuf_index_page_calc_free(block));
#endif /* UNIV_IBUF_DEBUG */
ibuf_bitmap_page_set_bits(
bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, val, mtr);
ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
ibuf_bitmap_get_map_page(block->page.id, block->zip_size(),
mtr),
block->page.id, block->physical_size(), val, mtr);
}
/************************************************************************//**
@ -793,34 +772,21 @@ ibuf_set_free_bits_func(
#endif /* UNIV_IBUF_DEBUG */
ulint val) /*!< in: value to set: < 4 */
{
mtr_t mtr;
page_t* page;
page_t* bitmap_page;
page = buf_block_get_frame(block);
if (!page_is_leaf(page)) {
if (!page_is_leaf(block->frame)) {
return;
}
mtr_start(&mtr);
mtr_t mtr;
mtr.start();
const fil_space_t* space = mtr.set_named_space_id(
block->page.id.space());
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
block->zip_size(), &mtr);
buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
block->zip_size(),
&mtr);
switch (space->purpose) {
case FIL_TYPE_LOG:
ut_ad(0);
break;
case FIL_TYPE_TABLESPACE:
break;
/* fall through */
case FIL_TYPE_TEMPORARY:
case FIL_TYPE_IMPORT:
mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
if (space->purpose != FIL_TYPE_TABLESPACE) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
#ifdef UNIV_IBUF_DEBUG
@ -830,31 +796,17 @@ ibuf_set_free_bits_func(
old_val = ibuf_bitmap_page_get_bits(
bitmap_page, block->page.id,
IBUF_BITMAP_FREE, &mtr);
# if 0
if (old_val != max_val) {
fprintf(stderr,
"Ibuf: page %lu old val %lu max val %lu\n",
page_get_page_no(page),
old_val, max_val);
}
# endif
ut_a(old_val <= max_val);
}
# if 0
fprintf(stderr, "Setting page no %lu free bits to %lu should be %lu\n",
page_get_page_no(page), val,
ibuf_index_page_calc_free(block));
# endif
ut_a(val <= ibuf_index_page_calc_free(block));
#endif /* UNIV_IBUF_DEBUG */
ibuf_bitmap_page_set_bits(
ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, val, &mtr);
val, &mtr);
mtr_commit(&mtr);
mtr.commit();
}
/************************************************************************//**
@ -929,19 +881,10 @@ ibuf_update_free_bits_zip(
buf_block_t* block, /*!< in/out: index page */
mtr_t* mtr) /*!< in/out: mtr */
{
page_t* bitmap_page;
ulint after;
ut_ad(page_is_leaf(block->frame));
ut_ad(block->zip_size());
ut_a(block);
buf_frame_t* frame = buf_block_get_frame(block);
ut_a(frame);
ut_a(page_is_leaf(frame));
ut_a(block->zip_size());
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
block->zip_size(), mtr);
after = ibuf_index_page_calc_free_zip(block);
ulint after = ibuf_index_page_calc_free_zip(block);
if (after == 0) {
/* We move the page to the front of the buffer pool LRU list:
@ -952,9 +895,10 @@ ibuf_update_free_bits_zip(
buf_page_make_young(&block->page);
}
ibuf_bitmap_page_set_bits(
bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, after, mtr);
ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
ibuf_bitmap_get_map_page(block->page.id, block->zip_size(),
mtr),
block->page.id, block->physical_size(), after, mtr);
}
/**********************************************************************//**
@ -1029,7 +973,6 @@ ibuf_page_low(
{
ibool ret;
mtr_t local_mtr;
page_t* bitmap_page;
ut_ad(!recv_no_ibuf_operations);
ut_ad(x_latch || mtr == NULL);
@ -1064,10 +1007,8 @@ ibuf_page_low(
zip_size, RW_NO_LATCH, NULL, BUF_GET_NO_LATCH,
file, line, &local_mtr, &err);
bitmap_page = buf_block_get_frame(block);
ret = ibuf_bitmap_page_get_bits_low(
bitmap_page, page_id, zip_size,
block->frame, page_id, zip_size,
MTR_MEMO_BUF_FIX, &local_mtr, IBUF_BITMAP_IBUF);
mtr_commit(&local_mtr);
@ -1080,10 +1021,10 @@ ibuf_page_low(
mtr_start(mtr);
}
bitmap_page = ibuf_bitmap_get_map_page_func(page_id, zip_size,
file, line, mtr);
ret = ibuf_bitmap_page_get_bits(bitmap_page, page_id, zip_size,
ret = ibuf_bitmap_page_get_bits(ibuf_bitmap_get_map_page_func(
page_id, zip_size, file, line,
mtr)->frame,
page_id, zip_size,
IBUF_BITMAP_IBUF, mtr);
if (mtr == &local_mtr) {
@ -1891,23 +1832,16 @@ ibuf_data_too_much_free(void)
return(ibuf.free_list_len >= 3 + (ibuf.size / 2) + 3 * ibuf.height);
}
/*********************************************************************//**
Allocates a new page from the ibuf file segment and adds it to the free
list.
@return TRUE on success, FALSE if no space left */
static
ibool
ibuf_add_free_page(void)
/*====================*/
/** Allocate a change buffer page.
@retval true on success
@retval false if no space left */
static bool ibuf_add_free_page()
{
mtr_t mtr;
page_t* header_page;
buf_block_t* block;
page_t* page;
page_t* root;
page_t* bitmap_page;
mtr_start(&mtr);
mtr.start();
/* Acquire the fsp latch before the ibuf header, obeying the latching
order */
mtr_x_lock_space(fil_system.sys_space, &mtr);
@ -1928,26 +1862,24 @@ ibuf_add_free_page(void)
&mtr);
if (block == NULL) {
mtr_commit(&mtr);
return(FALSE);
mtr.commit();
return false;
}
ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1);
ibuf_enter(&mtr);
mutex_enter(&ibuf_mutex);
root = ibuf_tree_root_get(&mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
page = buf_block_get_frame(block);
mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_IBUF_FREE_LIST,
MLOG_2BYTES, &mtr);
mtr.write<2>(*block, block->frame + FIL_PAGE_TYPE,
FIL_PAGE_IBUF_FREE_LIST);
/* Add the page to the free list and update the ibuf size data */
flst_add_last(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
flst_add_last(ibuf_tree_root_get(&mtr),
PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
ibuf.seg_size++;
ibuf.free_list_len++;
@ -1955,17 +1887,18 @@ ibuf_add_free_page(void)
/* Set the bit indicating that this page is now an ibuf tree page
(level 2 page) */
const page_id_t page_id(IBUF_SPACE_ID, block->page.id.page_no());
bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
const page_id_t page_id(IBUF_SPACE_ID, block->page.id.page_no());
buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
mutex_exit(&ibuf_mutex);
ibuf_bitmap_page_set_bits(bitmap_page, page_id, srv_page_size,
IBUF_BITMAP_IBUF, TRUE, &mtr);
ibuf_bitmap_page_set_bits<IBUF_BITMAP_IBUF>(bitmap_page, page_id,
srv_page_size, true,
&mtr);
ibuf_mtr_commit(&mtr);
return(TRUE);
return true;
}
/*********************************************************************//**
@ -1979,9 +1912,6 @@ ibuf_remove_free_page(void)
mtr_t mtr2;
page_t* header_page;
ulint page_no;
page_t* page;
page_t* root;
page_t* bitmap_page;
log_free_check();
@ -2009,12 +1939,12 @@ ibuf_remove_free_page(void)
ibuf_mtr_start(&mtr2);
root = ibuf_tree_root_get(&mtr2);
buf_block_t* root = ibuf_tree_root_get(&mtr2);
mutex_exit(&ibuf_mutex);
page_no = flst_get_last(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST
+ root).page;
+ root->frame).page;
/* NOTE that we must release the latch on the ibuf tree root
because in fseg_free_page we access level 1 pages, and the root
@ -2044,22 +1974,15 @@ ibuf_remove_free_page(void)
root = ibuf_tree_root_get(&mtr);
ut_ad(page_no == flst_get_last(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST
+ root).page);
+ root->frame).page);
{
buf_block_t* block;
block = buf_page_get(page_id, 0, RW_X_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
page = buf_block_get_frame(block);
}
buf_block_t* block = buf_page_get(page_id, 0, RW_X_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
/* Remove the page from the free list and update the ibuf size data */
flst_remove(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
mutex_exit(&ibuf_pessimistic_insert_mutex);
@ -2069,13 +1992,12 @@ ibuf_remove_free_page(void)
/* Set the bit indicating that this page is no more an ibuf tree page
(level 2 page) */
bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
mutex_exit(&ibuf_mutex);
ibuf_bitmap_page_set_bits(
bitmap_page, page_id, srv_page_size,
IBUF_BITMAP_IBUF, FALSE, &mtr);
ibuf_bitmap_page_set_bits<IBUF_BITMAP_IBUF>(
bitmap_page, page_id, srv_page_size, false, &mtr);
ut_d(buf_page_set_file_page_was_freed(page_id));
@ -3305,8 +3227,7 @@ ibuf_insert_low(
ulint buffered;
lint min_n_recs;
rec_t* ins_rec;
ibool old_bit_value;
page_t* bitmap_page;
buf_block_t* bitmap_page;
buf_block_t* block;
page_t* root;
dberr_t err;
@ -3459,8 +3380,8 @@ fail_exit:
if (op == IBUF_OP_INSERT) {
ulint bits = ibuf_bitmap_page_get_bits(
bitmap_page, page_id, physical_size, IBUF_BITMAP_FREE,
&bitmap_mtr);
bitmap_page->frame, page_id, physical_size,
IBUF_BITMAP_FREE, &bitmap_mtr);
if (buffered + entry_size + page_dir_calc_reserved_space(1)
> ibuf_index_page_calc_free_from_bits(physical_size,
@ -3505,17 +3426,8 @@ fail_exit:
/* Set the bitmap bit denoting that the insert buffer contains
buffered entries for this index page, if the bit is not set yet */
old_bit_value = ibuf_bitmap_page_get_bits(
bitmap_page, page_id, physical_size,
IBUF_BITMAP_BUFFERED, &bitmap_mtr);
if (!old_bit_value) {
ibuf_bitmap_page_set_bits(bitmap_page, page_id, physical_size,
IBUF_BITMAP_BUFFERED, TRUE,
&bitmap_mtr);
}
ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
bitmap_page, page_id, physical_size, true, &bitmap_mtr);
ibuf_mtr_commit(&bitmap_mtr);
cursor = btr_pcur_get_btr_cur(&pcur);
@ -3548,7 +3460,7 @@ fail_exit:
which would cause the sx-latching of the root after that to
break the latching order. */
root = ibuf_tree_root_get(&mtr);
root = ibuf_tree_root_get(&mtr)->frame;
err = btr_cur_optimistic_insert(
BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
@ -3760,9 +3672,6 @@ ibuf_insert_to_index_page_low(
page_cur_t* page_cur)/*!< in/out: cursor positioned on the record
after which to insert the buffered entry */
{
const page_t* page;
const page_t* bitmap_page;
ulint old_bits;
rec_t* rec;
DBUG_ENTER("ibuf_insert_to_index_page_low");
@ -3790,11 +3699,10 @@ ibuf_insert_to_index_page_low(
DBUG_RETURN(rec);
}
page = buf_block_get_frame(block);
ib::error() << "Insert buffer insert fails; page free "
<< page_get_max_insert_size(page, 1) << ", dtuple size "
<< rec_get_converted_size(index, entry, 0);
<< page_get_max_insert_size(block->frame, 1)
<< ", dtuple size "
<< rec_get_converted_size(index, entry, 0);
fputs("InnoDB: Cannot insert index record ", stderr);
dtuple_print(stderr, entry);
@ -3802,14 +3710,14 @@ ibuf_insert_to_index_page_low(
"InnoDB: is now probably corrupt. Please run CHECK TABLE on\n"
"InnoDB: that table.\n", stderr);
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
block->zip_size(), mtr);
old_bits = ibuf_bitmap_page_get_bits(
bitmap_page, block->page.id, block->zip_size(),
IBUF_BITMAP_FREE, mtr);
ib::error() << "page " << block->page.id << ", size "
<< block->physical_size() << ", bitmap bits " << old_bits;
<< block->physical_size() << ", bitmap bits "
<< ibuf_bitmap_page_get_bits(
ibuf_bitmap_get_map_page(block->page.id,
block->zip_size(),
mtr)->frame,
block->page.id, block->zip_size(),
IBUF_BITMAP_FREE, mtr);
ib::error() << BUG_REPORT_MSG;
@ -3898,7 +3806,6 @@ dump:
if (UNIV_UNLIKELY(low_match == dtuple_get_n_fields(entry))) {
upd_t* update;
page_zip_des_t* page_zip;
rec = page_cur_get_rec(&page_cur);
@ -3910,8 +3817,7 @@ dump:
ULINT_UNDEFINED, &heap);
update = row_upd_build_sec_rec_difference_binary(
rec, index, offsets, entry, heap);
page_zip = buf_block_get_page_zip(block);
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
if (update->n_fields == 0) {
/* The records only differ in the delete-mark.
@ -4023,7 +3929,7 @@ ibuf_set_del_mark(
if (low_match == dtuple_get_n_fields(entry)) {
rec_t* rec;
page_zip_des_t* page_zip;
page_zip_des_t* page_zip;
rec = page_cur_get_rec(&page_cur);
page_zip = page_cur_get_page_zip(&page_cur);
@ -4272,7 +4178,7 @@ bool ibuf_delete_rec(ulint space, ulint page_no, btr_pcur_t* pcur,
goto func_exit;
}
root = ibuf_tree_root_get(mtr);
root = ibuf_tree_root_get(mtr)->frame;
btr_cur_pessimistic_delete(&err, TRUE, btr_pcur_get_btr_cur(pcur), 0,
false, mtr);
@ -4317,10 +4223,10 @@ bool ibuf_page_exists(const buf_page_t& bpage)
bool bitmap_bits = false;
ibuf_mtr_start(&mtr);
if (const page_t* bitmap_page = ibuf_bitmap_get_map_page(
if (const buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
bpage.id, bpage.zip_size(), &mtr)) {
bitmap_bits = ibuf_bitmap_page_get_bits(
bitmap_page, bpage.id, bpage.zip_size(),
bitmap_page->frame, bpage.id, bpage.zip_size(),
IBUF_BITMAP_BUFFERED, &mtr) != 0;
}
ibuf_mtr_commit(&mtr);
@ -4351,7 +4257,6 @@ ibuf_merge_or_delete_for_page(
#ifdef UNIV_IBUF_DEBUG
ulint volume = 0;
#endif /* UNIV_IBUF_DEBUG */
page_zip_des_t* page_zip = NULL;
bool corruption_noticed = false;
mtr_t mtr;
@ -4385,18 +4290,18 @@ ibuf_merge_or_delete_for_page(
block = NULL;
update_ibuf_bitmap = false;
} else {
page_t* bitmap_page = NULL;
ulint bitmap_bits = 0;
ibuf_mtr_start(&mtr);
bitmap_page = ibuf_bitmap_get_map_page(
buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
page_id, zip_size, &mtr);
if (bitmap_page &&
fil_page_get_type(bitmap_page) != FIL_PAGE_TYPE_ALLOCATED) {
if (bitmap_page
&& fil_page_get_type(bitmap_page->frame)
!= FIL_PAGE_TYPE_ALLOCATED) {
bitmap_bits = ibuf_bitmap_page_get_bits(
bitmap_page, page_id, zip_size,
bitmap_page->frame, page_id, zip_size,
IBUF_BITMAP_BUFFERED, &mtr);
}
@ -4429,7 +4334,6 @@ ibuf_merge_or_delete_for_page(
the debug checks. */
rw_lock_x_lock_move_ownership(&(block->lock));
page_zip = buf_block_get_page_zip(block);
if (!fil_page_index_page_check(block->frame)
|| !page_is_leaf(block->frame)) {
@ -4496,8 +4400,7 @@ loop:
|| ibuf_rec_get_space(&mtr, rec) != page_id.space()) {
if (block != NULL) {
page_header_reset_last_insert(
block->frame, page_zip, &mtr);
page_header_reset_last_insert(block, &mtr);
}
goto reset_bit;
@ -4519,8 +4422,9 @@ loop:
ibuf_op_t op = ibuf_rec_get_op_type(&mtr, rec);
max_trx_id = page_get_max_trx_id(page_align(rec));
page_update_max_trx_id(block, page_zip, max_trx_id,
&mtr);
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
max_trx_id, &mtr);
ut_ad(page_validate(page_align(rec), ibuf.index));
@ -4635,28 +4539,17 @@ loop:
}
reset_bit:
if (update_ibuf_bitmap) {
page_t* bitmap_page;
bitmap_page = ibuf_bitmap_get_map_page(page_id, zip_size,
&mtr);
ibuf_bitmap_page_set_bits(
bitmap_page, page_id, physical_size,
IBUF_BITMAP_BUFFERED, FALSE, &mtr);
if (!update_ibuf_bitmap) {
} else if (buf_block_t* bitmap = ibuf_bitmap_get_map_page(
page_id, zip_size, &mtr)) {
/* FIXME: update the bitmap byte only once! */
ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
bitmap, page_id, physical_size, false, &mtr);
if (block != NULL) {
ulint old_bits = ibuf_bitmap_page_get_bits(
bitmap_page, page_id, zip_size,
IBUF_BITMAP_FREE, &mtr);
ulint new_bits = ibuf_index_page_calc_free(block);
if (old_bits != new_bits) {
ibuf_bitmap_page_set_bits(
bitmap_page, page_id, physical_size,
IBUF_BITMAP_FREE, new_bits, &mtr);
}
ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
bitmap, page_id, physical_size,
ibuf_index_page_calc_free(block), &mtr);
}
}
@ -4760,18 +4653,15 @@ bool
ibuf_is_empty(void)
/*===============*/
{
bool is_empty;
const page_t* root;
mtr_t mtr;
ibuf_mtr_start(&mtr);
mutex_enter(&ibuf_mutex);
root = ibuf_tree_root_get(&mtr);
mutex_exit(&ibuf_mutex);
is_empty = page_is_empty(root);
ut_d(mutex_enter(&ibuf_mutex));
const buf_block_t* root = ibuf_tree_root_get(&mtr);
bool is_empty = page_is_empty(root->frame);
ut_a(is_empty == ibuf.empty);
ut_d(mutex_exit(&ibuf_mutex));
ibuf_mtr_commit(&mtr);
return(is_empty);
@ -4849,9 +4739,6 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
the space, as usual. */
for (page_no = 0; page_no < size; page_no += physical_size) {
page_t* bitmap_page;
ulint i;
if (trx_is_interrupted(trx)) {
mutex_exit(&ibuf_mutex);
return(DB_INTERRUPTED);
@ -4863,10 +4750,15 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
ibuf_enter(&mtr);
bitmap_page = ibuf_bitmap_get_map_page(
buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
page_id_t(space->id, page_no), zip_size, &mtr);
if (!bitmap_page) {
mutex_exit(&ibuf_mutex);
mtr.commit();
return DB_CORRUPTION;
}
if (buf_page_is_zeroes(bitmap_page, physical_size)) {
if (buf_page_is_zeroes(bitmap_page->frame, physical_size)) {
/* This means we got all-zero page instead of
ibuf bitmap page. The subsequent page should be
all-zero pages. */
@ -4886,17 +4778,13 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
continue;
}
if (!bitmap_page) {
mutex_exit(&ibuf_mutex);
return DB_CORRUPTION;
}
for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size; i++) {
for (ulint i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size;
i++) {
const ulint offset = page_no + i;
const page_id_t cur_page_id(space->id, offset);
if (ibuf_bitmap_page_get_bits(
bitmap_page, cur_page_id, zip_size,
bitmap_page->frame, cur_page_id, zip_size,
IBUF_BITMAP_IBUF, &mtr)) {
mutex_exit(&ibuf_mutex);
@ -4914,7 +4802,7 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
}
if (ibuf_bitmap_page_get_bits(
bitmap_page, cur_page_id, zip_size,
bitmap_page->frame, cur_page_id, zip_size,
IBUF_BITMAP_BUFFERED, &mtr)) {
ib_errf(trx->mysql_thd,
@ -4928,10 +4816,9 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
/* Tolerate this error, so that
slightly corrupted tables can be
imported and dumped. Clear the bit. */
ibuf_bitmap_page_set_bits(
ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
bitmap_page, cur_page_id,
physical_size,
IBUF_BITMAP_BUFFERED, FALSE, &mtr);
physical_size, false, &mtr);
}
}
@ -4951,7 +4838,6 @@ ibuf_set_bitmap_for_bulk_load(
buf_block_t* block,
bool reset)
{
page_t* bitmap_page;
mtr_t mtr;
ulint free_val;
@ -4959,20 +4845,22 @@ ibuf_set_bitmap_for_bulk_load(
free_val = ibuf_index_page_calc_free(block);
mtr_start(&mtr);
mtr.start();
fil_space_t* space = mtr.set_named_space_id(block->page.id.space());
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
space->zip_size(), &mtr);
buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
space->zip_size(),
&mtr);
free_val = reset ? 0 : ibuf_index_page_calc_free(block);
ibuf_bitmap_page_set_bits(
/* FIXME: update the bitmap byte only once! */
ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, free_val, &mtr);
free_val, &mtr);
ibuf_bitmap_page_set_bits(
ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_BUFFERED, FALSE, &mtr);
false, &mtr);
mtr_commit(&mtr);
mtr.commit();
}

View file

@ -541,7 +541,7 @@ inline void btr_set_min_rec_mark(rec_t *rec, const buf_block_t &block,
page. We are not modifying the compressed page frame at all. */
*rec|= REC_INFO_MIN_REC_FLAG;
else
mlog_write_ulint(rec, *rec | REC_INFO_MIN_REC_FLAG, MLOG_1BYTE, mtr);
mtr->write<1>(block, rec, *rec | REC_INFO_MIN_REC_FLAG);
}
/** Seek to the parent page of a B-tree page.

View file

@ -29,28 +29,6 @@ Created 6/2/1994 Heikki Tuuri
#include "mtr0log.h"
#include "page0zip.h"
/**************************************************************//**
Sets the index id field of a page. */
UNIV_INLINE
void
btr_page_set_index_id(
/*==================*/
page_t* page, /*!< in: page to be created */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
index_id_t id, /*!< in: index id */
mtr_t* mtr) /*!< in: mtr */
{
if (page_zip) {
mach_write_to_8(page + (PAGE_HEADER + PAGE_INDEX_ID), id);
page_zip_write_header(page_zip,
page + (PAGE_HEADER + PAGE_INDEX_ID),
8, mtr);
} else {
mlog_write_ull(page + (PAGE_HEADER + PAGE_INDEX_ID), id, mtr);
}
}
/**************************************************************//**
Gets the index id field of a page.
@return index id */
@ -63,77 +41,56 @@ btr_page_get_index_id(
return(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID));
}
/********************************************************//**
Sets the node level field in an index page. */
UNIV_INLINE
void
btr_page_set_level(
/*===============*/
page_t* page, /*!< in: index page */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
ulint level, /*!< in: level, leaf level == 0 */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Set PAGE_LEVEL.
@param[in,out] block buffer block
@param[in] level page level
@param[in,out] mtr mini-transaction */
inline
void btr_page_set_level(buf_block_t *block, ulint level, mtr_t *mtr)
{
ut_ad(page != NULL);
ut_ad(mtr != NULL);
ut_ad(level <= BTR_MAX_NODE_LEVEL);
ut_ad(level <= BTR_MAX_NODE_LEVEL);
if (page_zip) {
mach_write_to_2(page + (PAGE_HEADER + PAGE_LEVEL), level);
page_zip_write_header(page_zip,
page + (PAGE_HEADER + PAGE_LEVEL),
2, mtr);
} else {
mlog_write_ulint(page + (PAGE_HEADER + PAGE_LEVEL), level,
MLOG_2BYTES, mtr);
}
byte *page_level= PAGE_HEADER + PAGE_LEVEL + block->frame;
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_2(page_level, level);
page_zip_write_header(&block->page.zip, page_level, 2, mtr);
}
else
mtr->write<2,mtr_t::OPT>(*block, page_level, level);
}
/********************************************************//**
Sets the next index page field. */
UNIV_INLINE
void
btr_page_set_next(
/*==============*/
page_t* page, /*!< in: index page */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
ulint next, /*!< in: next page number */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Set FIL_PAGE_NEXT.
@param[in,out] block buffer block
@param[in] next number of successor page
@param[in,out] mtr mini-transaction */
inline void btr_page_set_next(buf_block_t *block, ulint next, mtr_t *mtr)
{
ut_ad(page != NULL);
ut_ad(mtr != NULL);
if (page_zip) {
mach_write_to_4(page + FIL_PAGE_NEXT, next);
page_zip_write_header(page_zip, page + FIL_PAGE_NEXT, 4, mtr);
} else {
mlog_write_ulint(page + FIL_PAGE_NEXT, next, MLOG_4BYTES, mtr);
}
byte *fil_page_next= block->frame + FIL_PAGE_NEXT;
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_4(fil_page_next, next);
page_zip_write_header(&block->page.zip, fil_page_next, 4, mtr);
}
else
mtr->write<4>(*block, fil_page_next, next);
}
/********************************************************//**
Sets the previous index page field. */
UNIV_INLINE
void
btr_page_set_prev(
/*==============*/
page_t* page, /*!< in: index page */
page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
part will be updated, or NULL */
ulint prev, /*!< in: previous page number */
mtr_t* mtr) /*!< in: mini-transaction handle */
/** Set FIL_PAGE_PREV.
@param[in,out] block buffer block
@param[in] prev number of predecessor page
@param[in,out] mtr mini-transaction */
inline void btr_page_set_prev(buf_block_t *block, ulint prev, mtr_t *mtr)
{
ut_ad(page != NULL);
ut_ad(mtr != NULL);
if (page_zip) {
mach_write_to_4(page + FIL_PAGE_PREV, prev);
page_zip_write_header(page_zip, page + FIL_PAGE_PREV, 4, mtr);
} else {
mlog_write_ulint(page + FIL_PAGE_PREV, prev, MLOG_4BYTES, mtr);
}
byte *fil_page_prev= block->frame + FIL_PAGE_PREV;
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_4(fil_page_prev, prev);
page_zip_write_header(&block->page.zip, fil_page_prev, 4, mtr);
}
else
mtr->write<4>(*block, fil_page_prev, prev);
}
/**************************************************************//**

View file

@ -101,11 +101,25 @@ public:
/** Insert a record in the page.
@param[in] rec record
@param[in] offsets record offsets */
void insert(const rec_t* rec, ulint* offsets);
inline void insert(const rec_t* rec, ulint* offsets);
private:
/** Page format */
enum format { REDUNDANT, DYNAMIC, COMPRESSED };
/** Mark end of insertion to the page. Scan all records to set page
dirs, and set page header members.
@tparam format the page format */
template<format> inline void finishPage();
/** Insert a record in the page.
@tparam format the page format
@param[in] rec record
@param[in] offsets record offsets */
template<format> inline void insertPage(const rec_t* rec,
ulint* offsets);
public:
/** Mark end of insertion to the page. Scan all records to set page
dirs, and set page header members. */
void finish();
inline void finish();
/** Commit mtr for a page
@param[in] success Flag whether all inserts succeed. */
@ -199,6 +213,8 @@ public:
return(m_err);
}
void set_modified() { m_mtr.set_modified(); }
/* Memory heap for internal allocation */
mem_heap_t* m_heap;

View file

@ -646,8 +646,7 @@ to free the field. */
void
btr_cur_disown_inherited_fields(
/*============================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
part will be updated, or NULL */
buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
@ -722,12 +721,12 @@ btr_free_externally_stored_field(
page_zip_write_blob_ptr(), or NULL */
const ulint* offsets, /*!< in: rec_get_offsets(rec, index),
or NULL */
page_zip_des_t* page_zip, /*!< in: compressed page corresponding
to rec, or NULL if rec == NULL */
buf_block_t* block, /*!< in/out: page of field_ref */
ulint i, /*!< in: field number of field_ref;
ignored if rec == NULL */
bool rollback, /*!< in: performing rollback? */
mtr_t* local_mtr); /*!< in: mtr containing the latch */
mtr_t* local_mtr) /*!< in: mtr containing the latch */
MY_ATTRIBUTE((nonnull(1,2,5,8)));
/** Copies the prefix of an externally stored field of a record.
The clustered index record must be protected by a lock or a page latch.

View file

@ -716,16 +716,6 @@ inline void aligned_free(void *ptr)
#endif
}
/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
pointer pointing to a buffer frame containing a file page. */
UNIV_INLINE
void
buf_ptr_get_fsp_addr(
/*=================*/
const void* ptr, /*!< in: pointer to a buffer frame */
ulint* space, /*!< out: space id */
fil_addr_t* addr); /*!< out: page offset and byte offset */
/**********************************************************************//**
Gets the hash value of a block. This can be used in searches in the
lock hash table.
@ -1094,9 +1084,9 @@ buf_block_get_frame(
Gets the compressed page descriptor corresponding to an uncompressed page
if applicable. */
#define buf_block_get_page_zip(block) \
((block)->page.zip.data ? &(block)->page.zip : NULL)
(UNIV_LIKELY_NULL((block)->page.zip.data) ? &(block)->page.zip : NULL)
#define is_buf_block_get_page_zip(block) \
((block)->page.zip.data != 0)
UNIV_LIKELY_NULL((block)->page.zip.data)
#ifdef BTR_CUR_HASH_ADAPT
/** Get a buffer block from an adaptive hash index pointer.

View file

@ -758,25 +758,6 @@ buf_frame_align(
return(frame);
}
/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
pointer pointing to a buffer frame containing a file page. */
UNIV_INLINE
void
buf_ptr_get_fsp_addr(
/*=================*/
const void* ptr, /*!< in: pointer to a buffer frame */
ulint* space, /*!< out: space id */
fil_addr_t* addr) /*!< out: page offset and byte offset */
{
const page_t* page = (const page_t*) ut_align_down(ptr,
srv_page_size);
*space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
addr->page = mach_read_from_4(page + FIL_PAGE_OFFSET);
addr->boffset = static_cast<uint16_t>(ut_align_offset(ptr, srv_page_size));
}
/**********************************************************************//**
Gets the hash value of the page the pointer is pointing to. This can be used
in searches in the lock hash table.

View file

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, MariaDB Corporation.
Copyright (c) 2018, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -33,15 +33,8 @@ Created 4/18/1996 Heikki Tuuri
#include "buf0buf.h"
#include "dict0dict.h"
typedef byte dict_hdr_t;
/**********************************************************************//**
Gets a pointer to the dictionary header and x-latches its page.
@return pointer to the dictionary header, page x-latched */
dict_hdr_t*
dict_hdr_get(
/*=========*/
mtr_t* mtr); /*!< in: mtr */
/** @return the DICT_HDR block, x-latched */
buf_block_t *dict_hdr_get(mtr_t* mtr);
/**********************************************************************//**
Returns a new table, index, or space id. */
void

View file

@ -101,7 +101,6 @@ see the table in fsp0types.h @{ */
#define FSP_HEADER_OFFSET FIL_PAGE_DATA
/* The data structures in files are defined just as byte strings in C */
typedef byte fsp_header_t;
typedef byte xdes_t;
/* SPACE HEADER
@ -207,7 +206,7 @@ typedef byte fseg_inode_t;
(16 + 3 * FLST_BASE_NODE_SIZE \
+ FSEG_FRAG_ARR_N_SLOTS * FSEG_FRAG_SLOT_SIZE)
#define FSEG_MAGIC_N_VALUE 97937874
static constexpr uint32_t FSEG_MAGIC_N_VALUE= 97937874;
#define FSEG_FILLFACTOR 8 /* If this value is x, then if
the number of unused but reserved
@ -534,7 +533,7 @@ by repeatedly calling this function in different mini-transactions.
Doing the freeing in a single mini-transaction might result in
too big a mini-transaction.
@return TRUE if freeing completed */
ibool
bool
fseg_free_step_func(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list
@ -554,8 +553,8 @@ fseg_free_step_func(
/**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed.
@return TRUE if freeing completed, except the header page */
ibool
@return true if freeing completed, except the header page */
bool
fseg_free_step_not_header_func(
fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */

View file

@ -68,79 +68,91 @@ typedef byte flst_node_t;
@param[in,out] block file page
@param[in] ofs byte offset of the list base node
@param[in,out] mtr mini-transaction */
inline void flst_init(buf_block_t* block, uint16_t ofs, mtr_t* mtr)
inline void flst_init(const buf_block_t* block, uint16_t ofs, mtr_t* mtr)
{
ut_ad(0 == mach_read_from_2(FLST_LEN + ofs + block->frame));
ut_ad(0 == mach_read_from_2(FLST_FIRST + FIL_ADDR_BYTE + ofs
+ block->frame));
ut_ad(0 == mach_read_from_2(FLST_LAST + FIL_ADDR_BYTE + ofs
+ block->frame));
compile_time_assert(FIL_NULL == 0xffU * 0x1010101U);
mlog_memset(block, FLST_FIRST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
mlog_memset(block, FLST_LAST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
ut_ad(!mach_read_from_2(FLST_LEN + ofs + block->frame));
ut_ad(!mach_read_from_2(FLST_FIRST + FIL_ADDR_BYTE + ofs + block->frame));
ut_ad(!mach_read_from_2(FLST_LAST + FIL_ADDR_BYTE + ofs + block->frame));
compile_time_assert(FIL_NULL == 0xffU * 0x1010101U);
mlog_memset(block, FLST_FIRST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
mlog_memset(block, FLST_LAST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
}
/** Write a null file address.
@param[in,out] faddr file address to be zeroed otu
@param[in,out] mtr mini-transaction */
inline void flst_zero_addr(fil_faddr_t* faddr, mtr_t* mtr)
@param[in] b file page
@param[in,out] addr file address to be zeroed out
@param[in,out] mtr mini-transaction */
inline void flst_zero_addr(const buf_block_t& b, fil_faddr_t *addr, mtr_t *mtr)
{
if (mach_read_from_4(faddr + FIL_ADDR_PAGE) != FIL_NULL) {
mlog_memset(faddr + FIL_ADDR_PAGE, 4, 0xff, mtr);
}
if (mach_read_from_2(faddr + FIL_ADDR_BYTE)) {
mlog_write_ulint(faddr + FIL_ADDR_BYTE, 0, MLOG_2BYTES, mtr);
}
if (mach_read_from_4(addr + FIL_ADDR_PAGE) != FIL_NULL)
mlog_memset(&b, ulint(addr - b.frame) + FIL_ADDR_PAGE, 4, 0xff, mtr);
mtr->write<2,mtr_t::OPT>(b, addr + FIL_ADDR_BYTE, 0U);
}
/********************************************************************//**
Initializes a list base node. */
UNIV_INLINE
void
flst_init(
/*======*/
flst_base_node_t* base, /*!< in: pointer to base node */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************************//**
Adds a node as the last node in a list. */
void
flst_add_last(
/*==========*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node, /*!< in: node to add */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************************//**
Adds a node as the first node in a list. */
void
flst_add_first(
/*===========*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node, /*!< in: node to add */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************************//**
Removes a node. */
void
flst_remove(
/*========*/
flst_base_node_t* base, /*!< in: pointer to base node of list */
flst_node_t* node2, /*!< in: node to remove */
mtr_t* mtr); /*!< in: mini-transaction handle */
/** Get the length of a list.
@param[in] base base node
@return length */
UNIV_INLINE
uint32_t
flst_get_len(
const flst_base_node_t* base);
/********************************************************************//**
Writes a file address. */
UNIV_INLINE
void
flst_write_addr(
/*============*/
fil_faddr_t* faddr, /*!< in: pointer to file faddress */
fil_addr_t addr, /*!< in: file address */
mtr_t* mtr); /*!< in: mini-transaction handle */
/** Write a file address.
@param[in] block file page
@param[in,out] faddr file address location
@param[in] addr file address to be written out
@param[in,out] mtr mini-transaction */
inline void flst_write_addr(const buf_block_t& block, fil_faddr_t *faddr,
fil_addr_t addr, mtr_t* mtr)
{
ut_ad(mtr->memo_contains_page_flagged(faddr,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_a(addr.page == FIL_NULL || addr.boffset >= FIL_PAGE_DATA);
ut_a(ut_align_offset(faddr, srv_page_size) >= FIL_PAGE_DATA);
mtr->write<4,mtr_t::OPT>(block, faddr + FIL_ADDR_PAGE, addr.page);
mtr->write<2,mtr_t::OPT>(block, faddr + FIL_ADDR_BYTE, addr.boffset);
}
/** Initialize a list base node.
@param[in] block file page
@param[in,out] base base node
@param[in,out] mtr mini-transaction */
inline void flst_init(const buf_block_t& block, byte *base, mtr_t *mtr)
{
ut_ad(mtr->memo_contains_page_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
mtr->write<4,mtr_t::OPT>(block, base + FLST_LEN, 0U);
flst_zero_addr(block, base + FLST_FIRST, mtr);
flst_zero_addr(block, base + FLST_LAST, mtr);
}
/** Append a file list node to a list.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] add block to be added
@param[in] aoffset byte offset of the node to be added
@param[in,outr] mtr mini-transaction */
void flst_add_last(buf_block_t *base, uint16_t boffset,
buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
MY_ATTRIBUTE((nonnull));
/** Prepend a file list node to a list.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] add block to be added
@param[in] aoffset byte offset of the node to be added
@param[in,outr] mtr mini-transaction */
void flst_add_first(buf_block_t *base, uint16_t boffset,
buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
MY_ATTRIBUTE((nonnull));
/** Remove a file list node.
@param[in,out] base base node block
@param[in] boffset byte offset of the base node
@param[in,out] cur block to be removed
@param[in] coffset byte offset of the current record to be removed
@param[in,outr] mtr mini-transaction */
void flst_remove(buf_block_t *base, uint16_t boffset,
buf_block_t *cur, uint16_t coffset, mtr_t *mtr)
MY_ATTRIBUTE((nonnull));
/** @return the length of a list */
inline uint32_t flst_get_len(const flst_base_node_t *base)
{
return mach_read_from_4(base + FLST_LEN);
}
/** @return a file address */
inline fil_addr_t flst_read_addr(const fil_faddr_t *faddr)
@ -176,16 +188,10 @@ inline fil_addr_t flst_get_prev_addr(const flst_node_t *node)
return flst_read_addr(node + FLST_PREV);
}
/********************************************************************//**
Validates a file-based list.
@return TRUE if ok */
ibool
flst_validate(
/*==========*/
const flst_base_node_t* base, /*!< in: pointer to base node of list */
mtr_t* mtr1); /*!< in: mtr */
#include "fut0lst.ic"
#ifdef UNIV_DEBUG
/** Validate a file-based list. */
void flst_validate(const buf_block_t *base, uint16_t boffset, mtr_t *mtr);
#endif
#endif /* !UNIV_INNOCHECKSUM */

View file

@ -1,80 +0,0 @@
/*****************************************************************************
Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/******************************************************************//**
@file include/fut0lst.ic
File-based list utilities
Created 11/28/1995 Heikki Tuuri
***********************************************************************/
#include "buf0buf.h"
/********************************************************************//**
Writes a file address. */
UNIV_INLINE
void
flst_write_addr(
/*============*/
fil_faddr_t* faddr, /*!< in: pointer to file faddress */
fil_addr_t addr, /*!< in: file address */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
ut_ad(faddr && mtr);
ut_ad(mtr_memo_contains_page_flagged(mtr, faddr,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_a(addr.page == FIL_NULL || addr.boffset >= FIL_PAGE_DATA);
ut_a(ut_align_offset(faddr, srv_page_size) >= FIL_PAGE_DATA);
mlog_write_ulint(faddr + FIL_ADDR_PAGE, addr.page, MLOG_4BYTES, mtr);
mlog_write_ulint(faddr + FIL_ADDR_BYTE, addr.boffset,
MLOG_2BYTES, mtr);
}
/********************************************************************//**
Initializes a list base node. */
UNIV_INLINE
void
flst_init(
/*======*/
flst_base_node_t* base, /*!< in: pointer to base node */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
ut_ad(mtr_memo_contains_page_flagged(mtr, base,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
if (mach_read_from_4(base + FLST_LEN)) {
mlog_write_ulint(base + FLST_LEN, 0, MLOG_4BYTES, mtr);
}
flst_zero_addr(base + FLST_FIRST, mtr);
flst_zero_addr(base + FLST_LAST, mtr);
}
/** Get the length of a list.
@param[in] base base node
@return length */
UNIV_INLINE
uint32_t
flst_get_len(
const flst_base_node_t* base)
{
return(mach_read_from_4(base + FLST_LEN));
}

View file

@ -33,26 +33,6 @@ Created 12/7/1995 Heikki Tuuri
// Forward declaration
struct dict_index_t;
/********************************************************//**
Writes 1, 2 or 4 bytes to a file page. Writes the corresponding log
record to the mini-transaction log if mtr is not NULL. */
void
mlog_write_ulint(
/*=============*/
byte* ptr, /*!< in: pointer where to write */
ulint val, /*!< in: value to write */
mlog_id_t type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************//**
Writes 8 bytes to a file page. Writes the corresponding log
record to the mini-transaction log, only if mtr is not NULL */
void
mlog_write_ull(
/*===========*/
byte* ptr, /*!< in: pointer where to write */
ib_uint64_t val, /*!< in: value to write */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************//**
Writes a string to a file page buffered in the buffer pool. Writes the
corresponding log record to the mini-transaction log. */
@ -80,7 +60,7 @@ mlog_log_string(
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void
mlog_memset(buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr);
mlog_memset(const buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr);
/** Initialize a string of bytes.
@param[in,out] byte byte address
@ -124,14 +104,6 @@ mlog_catenate_ulint_compressed(
mtr_t* mtr, /*!< in: mtr */
ulint val); /*!< in: value to write */
/********************************************************//**
Catenates a compressed 64-bit integer to mlog. */
UNIV_INLINE
void
mlog_catenate_ull_compressed(
/*=========================*/
mtr_t* mtr, /*!< in: mtr */
ib_uint64_t val); /*!< in: value to write */
/********************************************************//**
Opens a buffer to mlog. It must be closed with mlog_close.
@return buffer, NULL if log mode MTR_LOG_NONE */
UNIV_INLINE
@ -151,6 +123,56 @@ mlog_close(
byte* ptr); /*!< in: buffer space from ptr up was
not used */
/** Write 1, 2, 4, or 8 bytes to a file page.
@param[in] block file page
@param[in,out] ptr pointer in file page
@param[in] val value to write
@tparam l number of bytes to write
@tparam w write request type
@tparam V type of val */
template<unsigned l,mtr_t::write_type w,typename V>
inline void mtr_t::write(const buf_block_t &block, byte *ptr, V val)
{
ut_ad(ut_align_down(ptr, srv_page_size) == block.frame);
ut_ad(m_log_mode == MTR_LOG_NONE || m_log_mode == MTR_LOG_NO_REDO ||
!block.page.zip.data ||
/* written by fil_crypt_rotate_page() or innodb_make_page_dirty()? */
(w == FORCED && l == 1 && ptr == &block.frame[FIL_PAGE_SPACE_ID]) ||
mach_read_from_2(block.frame + FIL_PAGE_TYPE) <= FIL_PAGE_TYPE_ZBLOB2);
static_assert(l == 1 || l == 2 || l == 4 || l == 8, "wrong length");
switch (l) {
case 1:
if (w == OPT && mach_read_from_1(ptr) == val) return;
ut_ad(w != NORMAL || mach_read_from_1(ptr) != val);
mach_write_to_1(ptr, val);
break;
case 2:
if (w == OPT && mach_read_from_2(ptr) == val) return;
ut_ad(w != NORMAL || mach_read_from_2(ptr) != val);
mach_write_to_2(ptr, val);
break;
case 4:
if (w == OPT && mach_read_from_4(ptr) == val) return;
ut_ad(w != NORMAL || mach_read_from_4(ptr) != val);
mach_write_to_4(ptr, val);
break;
case 8:
if (w == OPT && mach_read_from_8(ptr) == val) return;
ut_ad(w != NORMAL || mach_read_from_8(ptr) != val);
mach_write_to_8(ptr, val);
break;
}
byte *log_ptr= mlog_open(this, 11 + 2 + (l == 8 ? 9 : 5));
if (!log_ptr)
return;
if (l == 8)
log_write(block, ptr, static_cast<mlog_id_t>(l), log_ptr, uint64_t{val});
else
log_write(block, ptr, static_cast<mlog_id_t>(l), log_ptr,
static_cast<uint32_t>(val));
}
/** Writes a log record about an operation.
@param[in] type redo log record type
@param[in] space_id tablespace identifier
@ -195,7 +217,7 @@ mlog_parse_initial_log_record(
ulint* space, /*!< out: space id */
ulint* page_no);/*!< out: page number */
/********************************************************//**
Parses a log record written by mlog_write_ulint, mlog_write_ull, mlog_memset.
Parses a log record written by mtr_t::write(), mlog_memset().
@return parsed record end, NULL if not a complete record */
const byte*
mlog_parse_nbytes(

View file

@ -141,30 +141,6 @@ mlog_catenate_ulint_compressed(
mlog_close(mtr, log_ptr);
}
/********************************************************//**
Catenates a compressed 64-bit integer to mlog. */
UNIV_INLINE
void
mlog_catenate_ull_compressed(
/*=========================*/
mtr_t* mtr, /*!< in: mtr */
ib_uint64_t val) /*!< in: value to write */
{
byte* log_ptr;
log_ptr = mlog_open(mtr, 15);
/* If no logging is requested, we may return now */
if (log_ptr == NULL) {
return;
}
log_ptr += mach_u64_write_compressed(log_ptr, val);
mlog_close(mtr, log_ptr);
}
/** Writes a log record about an operation.
@param[in] type redo log record type
@param[in] space_id tablespace identifier

View file

@ -425,7 +425,50 @@ struct mtr_t {
static inline bool is_block_dirtied(const buf_block_t* block)
MY_ATTRIBUTE((warn_unused_result));
/** Write request types */
enum write_type
{
/** the page is guaranteed to always change */
NORMAL= 0,
/** optional: the page contents might not change */
OPT,
/** force a write, even if the page contents is not changing */
FORCED
};
/** Write 1, 2, 4, or 8 bytes to a file page.
@param[in] block file page
@param[in,out] ptr pointer in file page
@param[in] val value to write
@tparam l number of bytes to write
@tparam w write request type
@tparam V type of val */
template<unsigned l,write_type w= NORMAL,typename V>
inline void write(const buf_block_t &block, byte *ptr, V val)
MY_ATTRIBUTE((nonnull));
private:
/**
Write a log record for writing 1, 2, or 4 bytes.
@param[in] block file page
@param[in,out] ptr pointer in file page
@param[in] l number of bytes to write
@param[in,out] log_ptr log record buffer
@param[in] val value to write */
void log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
byte *log_ptr, uint32_t val)
MY_ATTRIBUTE((nonnull));
/**
Write a log record for writing 8 bytes.
@param[in] block file page
@param[in,out] ptr pointer in file page
@param[in] l number of bytes to write (8)
@param[in,out] log_ptr log record buffer
@param[in] val value to write */
void log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
byte *log_ptr, uint64_t val)
MY_ATTRIBUTE((nonnull));
/** Prepare to write the mini-transaction log to the redo log buffer.
@return number of bytes to write in finish_write() */
inline ulint prepare_write();

View file

@ -52,7 +52,7 @@ enum mtr_log_t {
/** @name Log item types
The log items are declared 'byte' so that the compiler can warn if val
and type parameters are switched in a call to mlog_write_ulint. NOTE!
and type parameters are switched in a call to mlog_write. NOTE!
For 1 - 8 bytes, the flag value must give the length also! @{ */
enum mlog_id_t {
/** if the mtr contains only one log record for one page,

View file

@ -31,6 +31,7 @@ Created 2/2/1994 Heikki Tuuri
#include "fil0fil.h"
#include "buf0buf.h"
#include "rem0rec.h"
#include "mach0data.h"
#ifndef UNIV_INNOCHECKSUM
#include "dict0dict.h"
#include "data0data.h"
@ -42,8 +43,6 @@ Created 2/2/1994 Heikki Tuuri
Index page header starts at the first offset left free by the FIL-module */
typedef byte page_header_t;
#else
# include "mach0data.h"
#endif /* !UNIV_INNOCHECKSUM */
#define PAGE_HEADER FSEG_PAGE_DATA /* index page header starts at this
@ -393,13 +392,17 @@ inline
bool
page_rec_is_infimum(const rec_t* rec);
/*************************************************************//**
Returns the max trx id field value. */
UNIV_INLINE
trx_id_t
page_get_max_trx_id(
/*================*/
const page_t* page); /*!< in: page */
/** Read PAGE_MAX_TRX_ID.
@param[in] page index page
@return the value of PAGE_MAX_TRX_ID or PAGE_ROOT_AUTO_INC */
inline trx_id_t page_get_max_trx_id(const page_t *page)
{
static_assert((PAGE_HEADER + PAGE_MAX_TRX_ID) % 8 == 0, "alignment");
const byte *p= static_cast<const byte*>
(MY_ASSUME_ALIGNED(page + PAGE_HEADER + PAGE_MAX_TRX_ID, 8));
return mach_read_from_8(p);
}
/*************************************************************//**
Sets the max trx id field value. */
void
@ -424,7 +427,6 @@ page_update_max_trx_id(
/** Persist the AUTO_INCREMENT value on a clustered index root page.
@param[in,out] block clustered index root page
@param[in] index clustered index
@param[in] autoinc next available AUTO_INCREMENT value
@param[in,out] mtr mini-transaction
@param[in] reset whether to reset the AUTO_INCREMENT
@ -433,7 +435,6 @@ page_update_max_trx_id(
void
page_set_autoinc(
buf_block_t* block,
const dict_index_t* index MY_ATTRIBUTE((unused)),
ib_uint64_t autoinc,
mtr_t* mtr,
bool reset)
@ -517,17 +518,12 @@ page_header_set_ptr(
ulint field, /*!< in/out: PAGE_FREE, ... */
const byte* ptr); /*!< in: pointer or NULL*/
/*************************************************************//**
Resets the last insert info field in the page header. Writes to mlog
about this operation. */
UNIV_INLINE
void
page_header_reset_last_insert(
/*==========================*/
page_t* page, /*!< in: page */
page_zip_des_t* page_zip,/*!< in/out: compressed page whose
uncompressed part will be updated, or NULL */
mtr_t* mtr); /*!< in: mtr */
/**
Reset PAGE_LAST_INSERT.
@param[in,out] block file page
@param[in,out] mtr mini-transaction */
inline void page_header_reset_last_insert(buf_block_t *block, mtr_t *mtr)
MY_ATTRIBUTE((nonnull));
#define page_get_infimum_rec(page) ((page) + page_get_infimum_offset(page))
#define page_get_supremum_rec(page) ((page) + page_get_supremum_offset(page))
@ -663,14 +659,17 @@ ibool
page_rec_check(
/*===========*/
const rec_t* rec); /*!< in: record */
/***************************************************************//**
Gets the record pointed to by a directory slot.
/** Get the record pointed to by a directory slot.
@param[in] slot directory slot
@return pointer to record */
UNIV_INLINE
const rec_t*
page_dir_slot_get_rec(
/*==================*/
const page_dir_slot_t* slot); /*!< in: directory slot */
inline rec_t *page_dir_slot_get_rec(page_dir_slot_t *slot)
{
return page_align(slot) + mach_read_from_2(slot);
}
inline const rec_t *page_dir_slot_get_rec(const page_dir_slot_t *slot)
{
return page_dir_slot_get_rec(const_cast<rec_t*>(slot));
}
/***************************************************************//**
This is used to set the record offset in a directory slot. */
UNIV_INLINE

View file

@ -28,24 +28,10 @@ Created 2/2/1994 Heikki Tuuri
#define page0page_ic
#ifndef UNIV_INNOCHECKSUM
#include "mach0data.h"
#include "rem0cmp.h"
#include "mtr0log.h"
#include "page0zip.h"
/*************************************************************//**
Returns the max trx id field value. */
UNIV_INLINE
trx_id_t
page_get_max_trx_id(
/*================*/
const page_t* page) /*!< in: page */
{
ut_ad(page);
return(mach_read_from_8(page + PAGE_HEADER + PAGE_MAX_TRX_ID));
}
/*************************************************************//**
Sets the max trx id field value if trx_id is bigger than the previous
value. */
@ -115,21 +101,16 @@ page_set_ssn_id(
node_seq_t ssn_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
page_t* page = buf_block_get_frame(block);
ut_ad(!mtr || mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_SX_FIX
| MTR_MEMO_PAGE_X_FIX));
if (page_zip) {
mach_write_to_8(page + FIL_RTREE_SPLIT_SEQ_NUM, ssn_id);
page_zip_write_header(page_zip,
page + FIL_RTREE_SPLIT_SEQ_NUM,
8, mtr);
} else if (mtr) {
mlog_write_ull(page + FIL_RTREE_SPLIT_SEQ_NUM, ssn_id, mtr);
byte* ssn = block->frame + FIL_RTREE_SPLIT_SEQ_NUM;
if (UNIV_LIKELY_NULL(page_zip)) {
mach_write_to_8(ssn, ssn_id);
page_zip_write_header(page_zip, ssn, 8, mtr);
} else {
mach_write_to_8(page + FIL_RTREE_SPLIT_SEQ_NUM, ssn_id);
mtr->write<8,mtr_t::OPT>(*block, ssn, ssn_id);
}
}
@ -229,30 +210,21 @@ page_header_set_ptr(
page_header_set_field(page, page_zip, field, offs);
}
/*************************************************************//**
Resets the last insert info field in the page header. Writes to mlog
about this operation. */
UNIV_INLINE
void
page_header_reset_last_insert(
/*==========================*/
page_t* page, /*!< in/out: page */
page_zip_des_t* page_zip,/*!< in/out: compressed page whose
uncompressed part will be updated, or NULL */
mtr_t* mtr) /*!< in: mtr */
/**
Reset PAGE_LAST_INSERT.
@param[in,out] block file page
@param[in,out] mtr mini-transaction */
inline void page_header_reset_last_insert(buf_block_t *block, mtr_t *mtr)
{
ut_ad(page != NULL);
ut_ad(mtr != NULL);
byte *b= &block->frame[PAGE_HEADER + PAGE_LAST_INSERT];
if (page_zip) {
mach_write_to_2(page + (PAGE_HEADER + PAGE_LAST_INSERT), 0);
page_zip_write_header(page_zip,
page + (PAGE_HEADER + PAGE_LAST_INSERT),
2, mtr);
} else {
mlog_write_ulint(page + (PAGE_HEADER + PAGE_LAST_INSERT), 0,
MLOG_2BYTES, mtr);
}
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_2(b, 0);
page_zip_write_header(&block->page.zip, b, 2, mtr);
}
else
mtr->write<2,mtr_t::OPT>(*block, b, 0U);
}
/***************************************************************//**
@ -541,18 +513,6 @@ page_rec_check(
return(TRUE);
}
/***************************************************************//**
Gets the record pointed to by a directory slot.
@return pointer to record */
UNIV_INLINE
const rec_t*
page_dir_slot_get_rec(
/*==================*/
const page_dir_slot_t* slot) /*!< in: directory slot */
{
return(page_align(slot) + mach_read_from_2(slot));
}
/***************************************************************//**
This is used to set the record offset in a directory slot. */
UNIV_INLINE

View file

@ -184,15 +184,15 @@ public:
to purge */
trx_rseg_t* rseg; /*!< Rollback segment for the next undo
record to purge */
ulint page_no; /*!< Page number for the next undo
uint32_t page_no; /*!< Page number for the next undo
record to purge, page number of the
log header, if dummy record */
ulint offset; /*!< Page offset for the next undo
uint32_t hdr_page_no; /*!< Header page of the undo log where
the next record to purge belongs */
uint16_t offset; /*!< Page offset for the next undo
record to purge, 0 if the dummy
record */
ulint hdr_page_no; /*!< Header page of the undo log where
the next record to purge belongs */
ulint hdr_offset; /*!< Header byte offset on the page */
uint16_t hdr_offset; /*!< Header byte offset on the page */
TrxUndoRsegsIterator

View file

@ -36,7 +36,7 @@ Created 3/26/1996 Heikki Tuuri
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
trx_rsegf_t*
buf_block_t*
trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr);
/** Gets a newly created rollback segment header.
@ -45,29 +45,12 @@ trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr);
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
trx_rsegf_t*
buf_block_t*
trx_rsegf_get_new(
ulint space,
ulint page_no,
mtr_t* mtr);
/***************************************************************//**
Sets the file page number of the nth undo log slot. */
UNIV_INLINE
void
trx_rsegf_set_nth_undo(
/*===================*/
trx_rsegf_t* rsegf, /*!< in: rollback segment header */
ulint n, /*!< in: index of slot */
ulint page_no,/*!< in: page number of the undo log segment */
mtr_t* mtr); /*!< in: mtr */
/****************************************************************//**
Looks for a free slot for an undo log segment.
@return slot index or ULINT_UNDEFINED if not found */
UNIV_INLINE
ulint
trx_rsegf_undo_find_free(const trx_rsegf_t* rsegf);
/** Create a rollback segment header.
@param[in,out] space system, undo, or temporary tablespace
@param[in] rseg_id rollback segment identifier
@ -155,10 +138,10 @@ struct trx_rseg_t {
/** Page number of the last not yet purged log header in the history
list; FIL_NULL if all list purged */
ulint last_page_no;
uint32_t last_page_no;
/** Byte offset of the last not yet purged log header */
ulint last_offset;
uint16_t last_offset;
/** trx_t::no * 2 + old_insert of the last not yet purged log */
trx_id_t last_commit;
@ -255,15 +238,13 @@ If no binlog information is present, the first byte is NUL. */
/*-------------------------------------------------------------*/
/** Read the page number of an undo log slot.
@param[in] rsegf rollback segment header
@param[in] n slot number */
inline
uint32_t
trx_rsegf_get_nth_undo(const trx_rsegf_t* rsegf, ulint n)
@param[in] rseg_header rollback segment header
@param[in] n slot number */
inline uint32_t trx_rsegf_get_nth_undo(const buf_block_t *rseg_header, ulint n)
{
ut_ad(n < TRX_RSEG_N_SLOTS);
return mach_read_from_4(rsegf + TRX_RSEG_UNDO_SLOTS
+ n * TRX_RSEG_SLOT_SIZE);
ut_ad(n < TRX_RSEG_N_SLOTS);
return mach_read_from_4(TRX_RSEG + TRX_RSEG_UNDO_SLOTS +
n * TRX_RSEG_SLOT_SIZE + rseg_header->frame);
}
#ifdef WITH_WSREP
@ -273,7 +254,7 @@ trx_rsegf_get_nth_undo(const trx_rsegf_t* rsegf, ulint n)
@param[in,out] mtr mini-transaction */
void
trx_rseg_update_wsrep_checkpoint(
trx_rsegf_t* rseg_header,
buf_block_t* rseg_header,
const XID* xid,
mtr_t* mtr);
@ -295,7 +276,7 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid);
/** Upgrade a rollback segment header page to MariaDB 10.3 format.
@param[in,out] rseg_header rollback segment header page
@param[in,out] mtr mini-transaction */
void trx_rseg_format_upgrade(trx_rsegf_t* rseg_header, mtr_t* mtr);
void trx_rseg_format_upgrade(buf_block_t *rseg_header, mtr_t *mtr);
/** Update the offset information about the end of the binlog entry
which corresponds to the transaction just being committed.
@ -304,8 +285,8 @@ up to which replication has proceeded.
@param[in,out] rseg_header rollback segment header
@param[in] trx committing transaction
@param[in,out] mtr mini-transaction */
void
trx_rseg_update_binlog_offset(byte* rseg_header, const trx_t* trx, mtr_t* mtr);
void trx_rseg_update_binlog_offset(buf_block_t *rseg_header, const trx_t *trx,
mtr_t *mtr);
#include "trx0rseg.ic"

View file

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2018, MariaDB Corporation.
Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -33,7 +33,7 @@ Created 3/26/1996 Heikki Tuuri
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
trx_rsegf_t*
buf_block_t*
trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr)
{
ut_ad(space == fil_system.sys_space || space == fil_system.temp_space
@ -44,8 +44,7 @@ trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr)
0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_RSEG_HEADER);
return TRX_RSEG + block->frame;
return block;
}
/** Gets a newly created rollback segment header.
@ -54,14 +53,13 @@ trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr)
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
trx_rsegf_t*
buf_block_t*
trx_rsegf_get_new(
ulint space,
ulint page_no,
mtr_t* mtr)
{
buf_block_t* block;
trx_rsegf_t* header;
ut_ad(space <= srv_undo_tablespaces_active || space == SRV_TMP_SPACE_ID
|| !srv_was_started);
@ -70,54 +68,5 @@ trx_rsegf_get_new(
block = buf_page_get(page_id_t(space, page_no), 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_RSEG_HEADER_NEW);
header = TRX_RSEG + buf_block_get_frame(block);
return(header);
}
/***************************************************************//**
Sets the file page number of the nth undo log slot. */
UNIV_INLINE
void
trx_rsegf_set_nth_undo(
/*===================*/
trx_rsegf_t* rsegf, /*!< in: rollback segment header */
ulint n, /*!< in: index of slot */
ulint page_no,/*!< in: page number of the undo log segment */
mtr_t* mtr) /*!< in: mtr */
{
ut_a(n < TRX_RSEG_N_SLOTS);
mlog_write_ulint(rsegf + TRX_RSEG_UNDO_SLOTS + n * TRX_RSEG_SLOT_SIZE,
page_no, MLOG_4BYTES, mtr);
}
/****************************************************************//**
Looks for a free slot for an undo log segment.
@return slot index or ULINT_UNDEFINED if not found */
UNIV_INLINE
ulint
trx_rsegf_undo_find_free(const trx_rsegf_t* rsegf)
{
ulint i;
ulint page_no;
ulint max_slots = TRX_RSEG_N_SLOTS;
#ifdef UNIV_DEBUG
if (trx_rseg_n_slots_debug) {
max_slots = ut_min(static_cast<ulint>(trx_rseg_n_slots_debug),
static_cast<ulint>(TRX_RSEG_N_SLOTS));
}
#endif
for (i = 0; i < max_slots; i++) {
page_no = trx_rsegf_get_nth_undo(rsegf, i);
if (page_no == FIL_NULL) {
return(i);
}
}
return(ULINT_UNDEFINED);
return block;
}

View file

@ -121,8 +121,6 @@ struct trx_savept_t{
/** File objects */
/* @{ */
/** Rollback segment header */
typedef byte trx_rsegf_t;
/** Undo segment header */
typedef byte trx_usegf_t;
/** Undo log header */

View file

@ -46,10 +46,10 @@ UNIV_INLINE
roll_ptr_t
trx_undo_build_roll_ptr(
/*====================*/
ibool is_insert, /*!< in: TRUE if insert undo log */
bool is_insert, /*!< in: TRUE if insert undo log */
ulint rseg_id, /*!< in: rollback segment id */
ulint page_no, /*!< in: page number */
ulint offset); /*!< in: offset of the undo entry within page */
uint32_t page_no, /*!< in: page number */
uint16_t offset); /*!< in: offset of the undo entry within page */
/***********************************************************************//**
Decodes a roll pointer. */
UNIV_INLINE
@ -57,16 +57,16 @@ void
trx_undo_decode_roll_ptr(
/*=====================*/
roll_ptr_t roll_ptr, /*!< in: roll pointer */
ibool* is_insert, /*!< out: TRUE if insert undo log */
bool* is_insert, /*!< out: TRUE if insert undo log */
ulint* rseg_id, /*!< out: rollback segment id */
ulint* page_no, /*!< out: page number */
ulint* offset); /*!< out: offset of the undo
uint32_t* page_no, /*!< out: page number */
uint16_t* offset); /*!< out: offset of the undo
entry within page */
/***********************************************************************//**
Returns TRUE if the roll pointer is of the insert type.
@return TRUE if insert undo log */
Determine if DB_ROLL_PTR is of the insert type.
@return true if insert */
UNIV_INLINE
ibool
bool
trx_undo_roll_ptr_is_insert(
/*========================*/
roll_ptr_t roll_ptr); /*!< in: roll pointer */
@ -101,7 +101,7 @@ inline roll_ptr_t trx_read_roll_ptr(const byte* ptr)
@param[in,out] mtr mini-transaction
@return pointer to page x-latched */
UNIV_INLINE
page_t*
buf_block_t*
trx_undo_page_get(const page_id_t page_id, mtr_t* mtr);
/** Gets an undo log page and s-latches it.
@ -109,56 +109,52 @@ trx_undo_page_get(const page_id_t page_id, mtr_t* mtr);
@param[in,out] mtr mini-transaction
@return pointer to page s-latched */
UNIV_INLINE
page_t*
buf_block_t*
trx_undo_page_get_s_latched(const page_id_t page_id, mtr_t* mtr);
/******************************************************************//**
Returns the next undo log record on the page in the specified log, or
NULL if none exists.
@return pointer to record, NULL if none */
UNIV_INLINE
trx_undo_rec_t*
trx_undo_page_get_next_rec(
/*=======================*/
trx_undo_rec_t* rec, /*!< in: undo log record */
ulint page_no,/*!< in: undo log header page number */
ulint offset);/*!< in: undo log header offset on page */
/***********************************************************************//**
Gets the previous record in an undo log.
@return undo log record, the page s-latched, NULL if none */
trx_undo_rec_t*
trx_undo_get_prev_rec(
/*==================*/
trx_undo_rec_t* rec, /*!< in: undo record */
ulint page_no,/*!< in: undo log header page number */
ulint offset, /*!< in: undo log header offset on page */
bool shared, /*!< in: true=S-latch, false=X-latch */
mtr_t* mtr); /*!< in: mtr */
/***********************************************************************//**
Gets the next record in an undo log.
@return undo log record, the page s-latched, NULL if none */
trx_undo_rec_t*
trx_undo_get_next_rec(
/*==================*/
trx_undo_rec_t* rec, /*!< in: undo record */
ulint page_no,/*!< in: undo log header page number */
ulint offset, /*!< in: undo log header offset on page */
mtr_t* mtr); /*!< in: mtr */
/** Gets the first record in an undo log.
@param[in] space undo log header space
@param[in] page_no undo log header page number
@param[in] offset undo log header offset on page
@param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
@param[in,out] mtr mini-transaction
/** Get the next record in an undo log.
@param[in] undo_page undo log page
@param[in] rec undo record offset in the page
@param[in] page_no undo log header page number
@param[in] offset undo log header offset on page
@return undo log record, the page latched, NULL if none */
inline trx_undo_rec_t*
trx_undo_page_get_next_rec(const buf_block_t *undo_page, uint16_t rec,
uint32_t page_no, uint16_t offset);
/** Get the previous record in an undo log.
@param[in,out] block undo log page
@param[in] rec undo record offset in the page
@param[in] page_no undo log header page number
@param[in] offset undo log header offset on page
@param[in] shared latching mode: true=RW_S_LATCH, false=RW_X_LATCH
@param[in,out] mtr mini-transaction
@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
trx_undo_get_first_rec(
fil_space_t* space,
ulint page_no,
ulint offset,
ulint mode,
mtr_t* mtr);
trx_undo_get_prev_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no,
uint16_t offset, bool shared, mtr_t *mtr);
/** Get the next record in an undo log.
@param[in,out] block undo log page
@param[in] rec undo record offset in the page
@param[in] page_no undo log header page number
@param[in] offset undo log header offset on page
@param[in,out] mtr mini-transaction
@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
trx_undo_get_next_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no,
uint16_t offset, mtr_t *mtr);
/** Get the first record in an undo log.
@param[in] space undo log header space
@param[in] page_no undo log header page number
@param[in] offset undo log header offset on page
@param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
@param[out] block undo log page
@param[in,out] mtr mini-transaction
@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
uint16_t offset, ulint mode, buf_block_t*& block,
mtr_t *mtr);
/** Allocate an undo log page.
@param[in,out] undo undo log
@ -193,8 +189,8 @@ freed, but emptied, if all the records there are below the limit.
void
trx_undo_truncate_start(
trx_rseg_t* rseg,
ulint hdr_page_no,
ulint hdr_offset,
uint32_t hdr_page_no,
uint16_t hdr_offset,
undo_no_t limit);
/** Mark that an undo log header belongs to a data dictionary transaction.
@param[in] trx dictionary transaction
@ -227,7 +223,7 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
/******************************************************************//**
Sets the state of the undo log segment at a transaction finish.
@return undo log segment header page, x-latched */
page_t*
buf_block_t*
trx_undo_set_state_at_finish(
/*=========================*/
trx_undo_t* undo, /*!< in: undo log memory copy */
@ -237,14 +233,10 @@ trx_undo_set_state_at_finish(
@param[in,out] trx transaction
@param[in,out] undo undo log
@param[in] rollback false=XA PREPARE, true=XA ROLLBACK
@param[in,out] mtr mini-transaction
@return undo log segment header page, x-latched */
page_t*
trx_undo_set_state_at_prepare(
trx_t* trx,
trx_undo_t* undo,
bool rollback,
mtr_t* mtr);
@param[in,out] mtr mini-transaction */
void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback,
mtr_t *mtr)
MY_ATTRIBUTE((nonnull));
/** Free an old insert or temporary undo log after commit or rollback.
The information is not needed after a commit or rollback, therefore
@ -281,14 +273,14 @@ trx_undo_parse_page_header_reuse(
/** Parse the redo log entry of an undo log page header create.
@param[in] ptr redo log record
@param[in] end_ptr end of log buffer
@param[in,out] page page frame or NULL
@param[in,out] block page frame or NULL
@param[in,out] mtr mini-transaction or NULL
@return end of log record or NULL */
byte*
trx_undo_parse_page_header(
const byte* ptr,
const byte* end_ptr,
page_t* page,
buf_block_t* block,
mtr_t* mtr);
/** Read an undo log when starting up the database.
@param[in,out] rseg rollback segment
@ -296,9 +288,9 @@ trx_undo_parse_page_header(
@param[in] page_no undo log segment page number
@param[in,out] max_trx_id the largest observed transaction ID
@return size of the undo log in pages */
ulint
trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no,
trx_id_t& max_trx_id);
uint32_t
trx_undo_mem_create_at_db_start(trx_rseg_t *rseg, ulint id, uint32_t page_no,
trx_id_t &max_trx_id);
#endif /* !UNIV_INNOCHECKSUM */
@ -340,20 +332,20 @@ struct trx_undo_t {
id */
trx_rseg_t* rseg; /*!< rseg where the undo log belongs */
/*-----------------------------*/
ulint hdr_page_no; /*!< page number of the header page in
uint32_t hdr_page_no; /*!< page number of the header page in
the undo log */
ulint hdr_offset; /*!< header offset of the undo log on
the page */
ulint last_page_no; /*!< page number of the last page in the
uint32_t last_page_no; /*!< page number of the last page in the
undo log; this may differ from
top_page_no during a rollback */
ulint size; /*!< current size in pages */
uint16_t hdr_offset; /*!< header offset of the undo log on
the page */
uint32_t size; /*!< current size in pages */
/*-----------------------------*/
ulint top_page_no; /*!< page number where the latest undo
uint32_t top_page_no; /*!< page number where the latest undo
log record was catenated; during
rollback the page from which the latest
undo record was chosen */
ulint top_offset; /*!< offset of the latest undo record,
uint16_t top_offset; /*!< offset of the latest undo record,
i.e., the topmost element in the undo
log if we think of it as a stack */
undo_no_t top_undo_no; /*!< undo number of the latest record

View file

@ -34,22 +34,17 @@ UNIV_INLINE
roll_ptr_t
trx_undo_build_roll_ptr(
/*====================*/
ibool is_insert, /*!< in: TRUE if insert undo log */
bool is_insert, /*!< in: TRUE if insert undo log */
ulint rseg_id, /*!< in: rollback segment id */
ulint page_no, /*!< in: page number */
ulint offset) /*!< in: offset of the undo entry within page */
uint32_t page_no, /*!< in: page number */
uint16_t offset) /*!< in: offset of the undo entry within page */
{
roll_ptr_t roll_ptr;
compile_time_assert(DATA_ROLL_PTR_LEN == 7);
ut_ad(is_insert == 0 || is_insert == 1);
ut_ad(rseg_id < TRX_SYS_N_RSEGS);
ut_ad(offset < 65536);
compile_time_assert(DATA_ROLL_PTR_LEN == 7);
ut_ad(rseg_id < TRX_SYS_N_RSEGS);
roll_ptr = (roll_ptr_t) is_insert << ROLL_PTR_INSERT_FLAG_POS
| (roll_ptr_t) rseg_id << ROLL_PTR_RSEG_ID_POS
| (roll_ptr_t) page_no << ROLL_PTR_PAGE_POS
| offset;
return(roll_ptr);
return roll_ptr_t{is_insert} << ROLL_PTR_INSERT_FLAG_POS |
roll_ptr_t{rseg_id} << ROLL_PTR_RSEG_ID_POS |
roll_ptr_t{page_no} << ROLL_PTR_PAGE_POS | offset;
}
/***********************************************************************//**
@ -59,35 +54,32 @@ void
trx_undo_decode_roll_ptr(
/*=====================*/
roll_ptr_t roll_ptr, /*!< in: roll pointer */
ibool* is_insert, /*!< out: TRUE if insert undo log */
bool* is_insert, /*!< out: TRUE if insert undo log */
ulint* rseg_id, /*!< out: rollback segment id */
ulint* page_no, /*!< out: page number */
ulint* offset) /*!< out: offset of the undo
uint32_t* page_no, /*!< out: page number */
uint16_t* offset) /*!< out: offset of the undo
entry within page */
{
compile_time_assert(DATA_ROLL_PTR_LEN == 7);
ut_ad(roll_ptr < (1ULL << 56));
*offset = (ulint) roll_ptr & 0xFFFF;
roll_ptr >>= 16;
*page_no = (ulint) roll_ptr & 0xFFFFFFFF;
roll_ptr >>= 32;
*rseg_id = (ulint) roll_ptr & 0x7F;
roll_ptr >>= 7;
*is_insert = (ibool) roll_ptr; /* TRUE==1 */
compile_time_assert(DATA_ROLL_PTR_LEN == 7);
ut_ad(roll_ptr < (1ULL << 56));
*offset= static_cast<uint16_t>(roll_ptr);
*page_no= static_cast<uint32_t>(roll_ptr >> 16);
*rseg_id= static_cast<ulint>(roll_ptr >> 48 & 0x7F);
*is_insert= static_cast<bool>(roll_ptr >> 55);
}
/***********************************************************************//**
Returns TRUE if the roll pointer is of the insert type.
@return TRUE if insert undo log */
Determine if DB_ROLL_PTR is of the insert type.
@return true if insert */
UNIV_INLINE
ibool
bool
trx_undo_roll_ptr_is_insert(
/*========================*/
roll_ptr_t roll_ptr) /*!< in: roll pointer */
{
compile_time_assert(DATA_ROLL_PTR_LEN == 7);
ut_ad(roll_ptr < (1ULL << (ROLL_PTR_INSERT_FLAG_POS + 1)));
return((ibool) (roll_ptr >> ROLL_PTR_INSERT_FLAG_POS));
return static_cast<bool>(roll_ptr >> ROLL_PTR_INSERT_FLAG_POS);
}
/***********************************************************************//**
@ -108,14 +100,13 @@ trx_undo_trx_id_is_insert(
@param[in,out] mtr mini-transaction
@return pointer to page x-latched */
UNIV_INLINE
page_t*
buf_block_t*
trx_undo_page_get(const page_id_t page_id, mtr_t* mtr)
{
buf_block_t* block = buf_page_get(page_id, 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
return(buf_block_get_frame(block));
return block;
}
/** Gets an undo log page and s-latches it.
@ -123,14 +114,14 @@ trx_undo_page_get(const page_id_t page_id, mtr_t* mtr)
@param[in,out] mtr mini-transaction
@return pointer to page s-latched */
UNIV_INLINE
page_t*
buf_block_t*
trx_undo_page_get_s_latched(const page_id_t page_id, mtr_t* mtr)
{
buf_block_t* block = buf_page_get(page_id, 0, RW_S_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
return(buf_block_get_frame(block));
return block;
}
/** Determine the end offset of undo log records of an undo log page.
@ -139,46 +130,29 @@ trx_undo_page_get_s_latched(const page_id_t page_id, mtr_t* mtr)
@param[in] offset undo log header offset
@return end offset */
inline
uint16_t
trx_undo_page_get_end(const page_t* undo_page, ulint page_no, ulint offset)
uint16_t trx_undo_page_get_end(const buf_block_t *undo_page, uint32_t page_no,
uint16_t offset)
{
if (page_no == page_get_page_no(undo_page)) {
if (uint16_t end = mach_read_from_2(TRX_UNDO_NEXT_LOG
+ offset + undo_page)) {
return end;
}
}
if (page_no == undo_page->page.id.page_no())
if (uint16_t end = mach_read_from_2(TRX_UNDO_NEXT_LOG + offset +
undo_page->frame))
return end;
return mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ undo_page);
return mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE +
undo_page->frame);
}
/******************************************************************//**
Returns the next undo log record on the page in the specified log, or
NULL if none exists.
@return pointer to record, NULL if none */
UNIV_INLINE
trx_undo_rec_t*
trx_undo_page_get_next_rec(
/*=======================*/
trx_undo_rec_t* rec, /*!< in: undo log record */
ulint page_no,/*!< in: undo log header page number */
ulint offset) /*!< in: undo log header offset on page */
/** Get the next record in an undo log.
@param[in] undo_page undo log page
@param[in] rec undo record offset in the page
@param[in] page_no undo log header page number
@param[in] offset undo log header offset on page
@return undo log record, the page latched, NULL if none */
inline trx_undo_rec_t*
trx_undo_page_get_next_rec(const buf_block_t *undo_page, uint16_t rec,
uint32_t page_no, uint16_t offset)
{
page_t* undo_page;
ulint end;
ulint next;
undo_page = (page_t*) ut_align_down(rec, srv_page_size);
end = trx_undo_page_get_end(undo_page, page_no, offset);
next = mach_read_from_2(rec);
if (next == end) {
return(NULL);
}
return(undo_page + next);
uint16_t end= trx_undo_page_get_end(undo_page, page_no, offset);
uint16_t next= mach_read_from_2(undo_page->frame + rec);
return next == end ? nullptr : undo_page->frame + next;
}

View file

@ -62,7 +62,7 @@ General philosophy of InnoDB redo-logs:
through mtr, which in mtr_commit() writes log records
to the InnoDB redo log.
2) Normally these changes are performed using a mlog_write_ulint()
2) Normally these changes are performed using a mlog_write()
or similar function.
3) In some page level operations only a code number of a

View file

@ -1619,7 +1619,7 @@ parse_log:
break;
case MLOG_UNDO_HDR_CREATE:
ut_ad(!page || page_type == FIL_PAGE_UNDO_LOG);
ptr = trx_undo_parse_page_header(ptr, end_ptr, page, mtr);
ptr = trx_undo_parse_page_header(ptr, end_ptr, block, mtr);
break;
case MLOG_REC_MIN_MARK: case MLOG_COMP_REC_MIN_MARK:
ut_ad(!page || fil_page_type_is_index(page_type));

View file

@ -90,7 +90,7 @@ mlog_parse_initial_log_record(
}
/********************************************************//**
Parses a log record written by mlog_write_ulint, mlog_write_ull, mlog_memset.
Parses a log record written by mtr_t::write(), mlog_memset().
@return parsed record end, NULL if not a complete record or a corrupt record */
const byte*
mlog_parse_nbytes(
@ -213,80 +213,58 @@ mlog_parse_nbytes(
return const_cast<byte*>(ptr);
}
/********************************************************//**
Writes 1, 2 or 4 bytes to a file page. Writes the corresponding log
record to the mini-transaction log if mtr is not NULL. */
void
mlog_write_ulint(
/*=============*/
byte* ptr, /*!< in: pointer where to write */
ulint val, /*!< in: value to write */
mlog_id_t type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
mtr_t* mtr) /*!< in: mini-transaction handle */
/**
Write a log record for writing 1, 2, 4, or 8 bytes.
@param[in] block file page
@param[in,out] ptr pointer in file page
@param[in] l number of bytes to write
@param[in,out] log_ptr log record buffer
@param[in,out] mtr mini-transaction */
static byte *
mlog_log_write_low(const buf_block_t &block, byte *ptr, mlog_id_t l,
byte *log_ptr, mtr_t &mtr)
{
switch (type) {
case MLOG_1BYTE:
mach_write_to_1(ptr, val);
break;
case MLOG_2BYTES:
mach_write_to_2(ptr, val);
break;
case MLOG_4BYTES:
mach_write_to_4(ptr, val);
break;
default:
ut_error;
}
if (mtr != 0) {
byte* log_ptr = mlog_open(mtr, 11 + 2 + 5);
/* If no logging is requested, we may return now */
if (log_ptr != 0) {
log_ptr = mlog_write_initial_log_record_fast(
ptr, type, log_ptr, mtr);
mach_write_to_2(log_ptr, page_offset(ptr));
log_ptr += 2;
log_ptr += mach_write_compressed(log_ptr, val);
mlog_close(mtr, log_ptr);
}
}
ut_ad(block.page.state == BUF_BLOCK_FILE_PAGE);
ut_ad(ptr >= block.frame + FIL_PAGE_OFFSET);
ut_ad(ptr + unsigned(l) <= &block.frame[srv_page_size - FIL_PAGE_DATA_END]);
log_ptr= mlog_write_initial_log_record_low(l,
block.page.id.space(),
block.page.id.page_no(),
log_ptr, &mtr);
mach_write_to_2(log_ptr, page_offset(ptr));
return log_ptr + 2;
}
/********************************************************//**
Writes 8 bytes to a file page. Writes the corresponding log
record to the mini-transaction log, only if mtr is not NULL */
void
mlog_write_ull(
/*===========*/
byte* ptr, /*!< in: pointer where to write */
ib_uint64_t val, /*!< in: value to write */
mtr_t* mtr) /*!< in: mini-transaction handle */
/**
Write a log record for writing 1, 2, or 4 bytes.
@param[in] block file page
@param[in,out] ptr pointer in file page
@param[in] l number of bytes to write
@param[in,out] log_ptr log record buffer
@param[in] val value to write */
void mtr_t::log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
byte *log_ptr, uint32_t val)
{
mach_write_to_8(ptr, val);
ut_ad(l == MLOG_1BYTE || l == MLOG_2BYTES || l == MLOG_4BYTES);
log_ptr= mlog_log_write_low(block, ptr, l, log_ptr, *this);
log_ptr+= mach_write_compressed(log_ptr, val);
mlog_close(this, log_ptr);
}
if (mtr != 0) {
byte* log_ptr = mlog_open(mtr, 11 + 2 + 9);
/* If no logging is requested, we may return now */
if (log_ptr != 0) {
log_ptr = mlog_write_initial_log_record_fast(
ptr, MLOG_8BYTES, log_ptr, mtr);
mach_write_to_2(log_ptr, page_offset(ptr));
log_ptr += 2;
log_ptr += mach_u64_write_compressed(log_ptr, val);
mlog_close(mtr, log_ptr);
}
}
/**
Write a log record for writing 8 bytes.
@param[in] block file page
@param[in,out] ptr pointer in file page
@param[in] l number of bytes to write
@param[in,out] log_ptr log record buffer
@param[in] val value to write */
void mtr_t::log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
byte *log_ptr, uint64_t val)
{
ut_ad(l == MLOG_8BYTES);
log_ptr= mlog_log_write_low(block, ptr, l, log_ptr, *this);
log_ptr+= mach_u64_write_compressed(log_ptr, val);
mlog_close(this, log_ptr);
}
/********************************************************//**
@ -402,7 +380,7 @@ mlog_parse_string(
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void
mlog_memset(buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr)
mlog_memset(const buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr)
{
ut_ad(len);
ut_ad(ofs <= ulint(srv_page_size));

View file

@ -196,24 +196,19 @@ page_set_max_trx_id(
trx_id_t trx_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in/out: mini-transaction, or NULL */
{
page_t* page = buf_block_get_frame(block);
ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
static_assert((PAGE_HEADER + PAGE_MAX_TRX_ID) % 8 == 0, "alignment");
byte *max_trx_id= static_cast<byte*>(MY_ASSUME_ALIGNED(PAGE_MAX_TRX_ID
+ PAGE_HEADER
+ block->frame, 8));
/* It is not necessary to write this change to the redo log, as
during a database recovery we assume that the max trx id of every
page is the maximum trx id assigned before the crash. */
if (page_zip) {
mach_write_to_8(page + (PAGE_HEADER + PAGE_MAX_TRX_ID), trx_id);
page_zip_write_header(page_zip,
page + (PAGE_HEADER + PAGE_MAX_TRX_ID),
8, mtr);
} else if (mtr) {
mlog_write_ull(page + (PAGE_HEADER + PAGE_MAX_TRX_ID),
trx_id, mtr);
} else {
mach_write_to_8(page + (PAGE_HEADER + PAGE_MAX_TRX_ID), trx_id);
}
if (UNIV_LIKELY_NULL(page_zip))
{
mach_write_to_8(max_trx_id, trx_id);
page_zip_write_header(page_zip, max_trx_id, 8, mtr);
}
else
mtr->write<8>(*block, max_trx_id, trx_id);
}
/** Persist the AUTO_INCREMENT value on a clustered index root page.
@ -227,27 +222,23 @@ page_set_max_trx_id(
void
page_set_autoinc(
buf_block_t* block,
const dict_index_t* index MY_ATTRIBUTE((unused)),
ib_uint64_t autoinc,
mtr_t* mtr,
bool reset)
{
ut_ad(mtr_memo_contains_flagged(
mtr, block, MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX));
ut_ad(index->is_primary());
ut_ad(index->page == block->page.id.page_no());
ut_ad(index->table->space_id == block->page.id.space());
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
byte* field = PAGE_HEADER + PAGE_ROOT_AUTO_INC
+ buf_block_get_frame(block);
if (!reset && mach_read_from_8(field) >= autoinc) {
/* nothing to update */
} else if (page_zip_des_t* page_zip = buf_block_get_page_zip(block)) {
mach_write_to_8(field, autoinc);
page_zip_write_header(page_zip, field, 8, mtr);
} else {
mlog_write_ull(field, autoinc, mtr);
}
byte *field= PAGE_HEADER + PAGE_ROOT_AUTO_INC + block->frame;
if (!reset && mach_read_from_8(field) >= autoinc)
/* nothing to update */;
else if (page_zip_des_t* page_zip = buf_block_get_page_zip(block))
{
mach_write_to_8(field, autoinc);
page_zip_write_header(page_zip, field, 8, mtr);
}
else
mtr->write<8,mtr_t::OPT>(*block, field, autoinc);
}
/** The page infimum and supremum of an empty page in ROW_FORMAT=REDUNDANT */
@ -483,12 +474,12 @@ page_create_empty(
page_header_get_field(page, PAGE_LEVEL),
max_trx_id, mtr);
} else {
page_create(block, mtr, page_is_comp(page),
dict_index_is_spatial(index));
page_create(block, mtr, index->table->not_redundant(),
index->is_spatial());
if (max_trx_id) {
mlog_write_ull(PAGE_HEADER + PAGE_MAX_TRX_ID + page,
max_trx_id, mtr);
mtr->write<8>(*block, PAGE_HEADER + PAGE_MAX_TRX_ID
+ block->frame, max_trx_id);
}
}
}
@ -581,12 +572,13 @@ page_copy_rec_list_end(
{
page_t* new_page = buf_block_get_frame(new_block);
page_zip_des_t* new_page_zip = buf_block_get_page_zip(new_block);
page_t* page = page_align(rec);
page_t* page = block->frame;
rec_t* ret = page_rec_get_next(
page_get_infimum_rec(new_page));
ulint num_moved = 0;
rtr_rec_move_t* rec_move = NULL;
mem_heap_t* heap = NULL;
ut_ad(page_align(rec) == page);
#ifdef UNIV_ZIP_DEBUG
if (new_page_zip) {
@ -810,8 +802,9 @@ page_copy_rec_list_start(
for MVCC. */
if (is_leaf && dict_index_is_sec_or_ibuf(index)
&& !index->table->is_temporary()) {
page_update_max_trx_id(new_block, NULL,
page_get_max_trx_id(page_align(rec)),
page_update_max_trx_id(new_block,
new_page_zip,
page_get_max_trx_id(block->frame),
mtr);
}
@ -979,7 +972,6 @@ page_delete_rec_list_end(
rec_t* prev_rec;
ulint n_owned;
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
page_t* page = page_align(rec);
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
@ -987,8 +979,9 @@ page_delete_rec_list_end(
ut_ad(size == ULINT_UNDEFINED || size < srv_page_size);
ut_ad(!page_zip || page_rec_is_comp(rec));
ut_ad(page_align(rec) == block->frame);
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_rec_is_supremum(rec)) {
@ -1006,19 +999,21 @@ page_delete_rec_list_end(
only be executed when applying redo log that was
generated by an older version of MySQL. */
} else if (page_rec_is_infimum(rec)
|| n_recs == page_get_n_recs(page)) {
|| n_recs == page_get_n_recs(block->frame)) {
delete_all:
/* We are deleting all records. */
page_create_empty(block, index, mtr);
return;
} else if (page_is_comp(page)) {
if (page_rec_get_next_low(page + PAGE_NEW_INFIMUM, 1) == rec) {
} else if (page_is_comp(block->frame)) {
if (page_rec_get_next_low(block->frame + PAGE_NEW_INFIMUM, 1)
== rec) {
/* We are deleting everything from the first
user record onwards. */
goto delete_all;
}
} else {
if (page_rec_get_next_low(page + PAGE_OLD_INFIMUM, 0) == rec) {
if (page_rec_get_next_low(block->frame + PAGE_OLD_INFIMUM, 0)
== rec) {
/* We are deleting everything from the first
user record onwards. */
goto delete_all;
@ -1028,23 +1023,23 @@ delete_all:
/* Reset the last insert info in the page header and increment
the modify clock for the frame */
page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, NULL);
page_header_set_ptr(block->frame, page_zip, PAGE_LAST_INSERT, NULL);
/* The page gets invalid for optimistic searches: increment the
frame modify clock */
buf_block_modify_clock_inc(block);
page_delete_rec_list_write_log(rec, index, page_is_comp(page)
page_delete_rec_list_write_log(rec, index, page_is_comp(block->frame)
? MLOG_COMP_LIST_END_DELETE
: MLOG_LIST_END_DELETE, mtr);
const bool is_leaf = page_is_leaf(page);
const bool is_leaf = page_is_leaf(block->frame);
if (page_zip) {
mtr_log_t log_mode;
ut_a(page_is_comp(page));
ut_ad(page_is_comp(block->frame));
/* Individual deletes are not logged */
log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
@ -1057,7 +1052,7 @@ delete_all:
ULINT_UNDEFINED, &heap);
rec = rec_get_next_ptr(rec, TRUE);
#ifdef UNIV_ZIP_DEBUG
ut_a(page_zip_validate(page_zip, page, index));
ut_a(page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&cur, index, offsets, mtr);
} while (page_offset(rec) != PAGE_NEW_SUPREMUM);
@ -1074,7 +1069,7 @@ delete_all:
prev_rec = page_rec_get_prev(rec);
last_rec = page_rec_get_prev(page_get_supremum_rec(page));
last_rec = page_rec_get_prev(page_get_supremum_rec(block->frame));
bool scrub = srv_immediate_scrub_data_uncompressed;
if ((size == ULINT_UNDEFINED) || (n_recs == ULINT_UNDEFINED) ||
@ -1090,7 +1085,7 @@ delete_all:
is_leaf,
ULINT_UNDEFINED, &heap);
s = rec_offs_size(offsets);
ut_ad(ulint(rec2 - page) + s
ut_ad(ulint(rec2 - block->frame) + s
- rec_offs_extra_size(offsets)
< srv_page_size);
ut_ad(size + s < srv_page_size);
@ -1116,7 +1111,7 @@ delete_all:
of the records owned by the supremum record, as it is allowed to be
less than PAGE_DIR_SLOT_MIN_N_OWNED */
if (page_is_comp(page)) {
if (page_is_comp(block->frame)) {
rec_t* rec2 = rec;
ulint count = 0;
@ -1131,7 +1126,7 @@ delete_all:
n_owned = rec_get_n_owned_new(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
slot = page_dir_get_nth_slot(block->frame, slot_index);
} else {
rec_t* rec2 = rec;
ulint count = 0;
@ -1147,28 +1142,30 @@ delete_all:
n_owned = rec_get_n_owned_old(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
slot = page_dir_get_nth_slot(block->frame, slot_index);
}
page_dir_slot_set_rec(slot, page_get_supremum_rec(page));
page_dir_slot_set_rec(slot, page_get_supremum_rec(block->frame));
page_dir_slot_set_n_owned(slot, NULL, n_owned);
page_dir_set_n_slots(page, NULL, slot_index + 1);
page_dir_set_n_slots(block->frame, NULL, slot_index + 1);
/* Remove the record chain segment from the record chain */
page_rec_set_next(prev_rec, page_get_supremum_rec(page));
page_rec_set_next(prev_rec, page_get_supremum_rec(block->frame));
/* Catenate the deleted chain segment to the page free list */
page_rec_set_next(last_rec, page_header_get_ptr(page, PAGE_FREE));
page_header_set_ptr(page, NULL, PAGE_FREE, rec);
page_rec_set_next(last_rec, page_header_get_ptr(block->frame,
PAGE_FREE));
page_header_set_ptr(block->frame, NULL, PAGE_FREE, rec);
page_header_set_field(page, NULL, PAGE_GARBAGE, size
+ page_header_get_field(page, PAGE_GARBAGE));
page_header_set_field(block->frame, NULL, PAGE_GARBAGE, size
+ page_header_get_field(block->frame,
PAGE_GARBAGE));
ut_ad(page_get_n_recs(page) > n_recs);
page_header_set_field(page, NULL, PAGE_N_RECS,
(ulint)(page_get_n_recs(page) - n_recs));
ut_ad(page_get_n_recs(block->frame) > n_recs);
page_header_set_field(block->frame, NULL, PAGE_N_RECS,
ulint{page_get_n_recs(block->frame) - n_recs});
}
/*************************************************************//**

View file

@ -924,12 +924,11 @@ skip_secondaries:
if (dfield_is_ext(&ufield->new_val)) {
trx_rseg_t* rseg;
buf_block_t* block;
ulint internal_offset;
byte* data_field;
ibool is_insert;
bool is_insert;
ulint rseg_id;
ulint page_no;
ulint offset;
uint32_t page_no;
uint16_t offset;
/* We use the fact that new_val points to
undo_rec and get thus the offset of
@ -937,7 +936,7 @@ skip_secondaries:
can calculate from node->roll_ptr the file
address of the new_val data */
internal_offset = ulint(
const uint16_t internal_offset = uint16_t(
static_cast<const byte*>
(dfield_get_data(&ufield->new_val))
- undo_rec);
@ -989,7 +988,7 @@ skip_secondaries:
index,
data_field + dfield_get_len(&ufield->new_val)
- BTR_EXTERN_FIELD_REF_SIZE,
NULL, NULL, NULL, 0, false, &mtr);
NULL, NULL, block, 0, false, &mtr);
mtr.commit();
}
}

View file

@ -208,20 +208,22 @@ func_exit:
/* When rolling back the very first instant ADD COLUMN
operation, reset the root page to the basic state. */
ut_ad(!index->table->is_temporary());
if (page_t* root = btr_root_get(index, &mtr)) {
byte* page_type = root + FIL_PAGE_TYPE;
if (buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH,
&mtr)) {
byte* page_type = root->frame + FIL_PAGE_TYPE;
ut_ad(mach_read_from_2(page_type)
== FIL_PAGE_TYPE_INSTANT
|| mach_read_from_2(page_type)
== FIL_PAGE_INDEX);
mlog_write_ulint(page_type, FIL_PAGE_INDEX,
MLOG_2BYTES, &mtr);
byte* instant = PAGE_INSTANT + PAGE_HEADER + root;
mlog_write_ulint(instant,
page_ptr_get_direction(instant + 1),
MLOG_2BYTES, &mtr);
rec_t* infimum = page_get_infimum_rec(root);
rec_t* supremum = page_get_supremum_rec(root);
mtr.write<2,mtr_t::OPT>(*root, page_type,
FIL_PAGE_INDEX);
byte* instant = PAGE_INSTANT + PAGE_HEADER
+ root->frame;
mtr.write<2,mtr_t::OPT>(
*root, instant,
page_ptr_get_direction(instant + 1));
rec_t* infimum = page_get_infimum_rec(root->frame);
rec_t* supremum = page_get_supremum_rec(root->frame);
static const byte str[8 + 8] = "supremuminfimum";
if (memcmp(infimum, str + 8, 8)
|| memcmp(supremum, str, 8)) {

View file

@ -335,37 +335,30 @@ static bool row_undo_rec_get(undo_node_t* node)
mtr_t mtr;
mtr.start();
page_t* undo_page = trx_undo_page_get_s_latched(
buf_block_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(undo->rseg->space->id, undo->top_page_no), &mtr);
ulint offset = undo->top_offset;
trx_undo_rec_t* prev_rec = trx_undo_get_prev_rec(
undo_page + offset, undo->hdr_page_no, undo->hdr_offset,
true, &mtr);
if (prev_rec == NULL) {
undo->top_undo_no = IB_ID_MAX;
ut_ad(undo->empty());
} else {
page_t* prev_rec_page = page_align(prev_rec);
if (prev_rec_page != undo_page) {
uint16_t offset = undo->top_offset;
buf_block_t* prev_page = undo_page;
if (trx_undo_rec_t* prev_rec = trx_undo_get_prev_rec(
prev_page, offset, undo->hdr_page_no, undo->hdr_offset,
true, &mtr)) {
if (prev_page != undo_page) {
trx->pages_undone++;
}
undo->top_page_no = page_get_page_no(prev_rec_page);
undo->top_offset = ulint(prev_rec - prev_rec_page);
undo->top_page_no = prev_page->page.id.page_no();
undo->top_offset = page_offset(prev_rec);
undo->top_undo_no = trx_undo_rec_get_undo_no(prev_rec);
ut_ad(!undo->empty());
} else {
undo->top_undo_no = IB_ID_MAX;
ut_ad(undo->empty());
}
{
const trx_undo_rec_t* undo_rec = undo_page + offset;
node->undo_rec = trx_undo_rec_copy(undo_rec, node->heap);
}
node->undo_rec = trx_undo_rec_copy(undo_page->frame + offset,
node->heap);
mtr.commit();
switch (trx_undo_rec_get_type(node->undo_rec)) {

View file

@ -2755,7 +2755,7 @@ err_exit:
insert fails, then this disown will be undone
when the operation is rolled back. */
btr_cur_disown_inherited_fields(
btr_cur_get_page_zip(btr_cur),
btr_cur_get_block(btr_cur),
rec, index, offsets, node->update,
mtr);
}

View file

@ -1813,9 +1813,9 @@ files_checked:
ut_ad(size == fil_system.sys_space
->size_in_header);
size += sum_of_new_sizes;
mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SIZE
+ block->frame, size,
MLOG_4BYTES, &mtr);
mtr.write<4>(*block,
FSP_HEADER_OFFSET + FSP_SIZE
+ block->frame, size);
fil_system.sys_space->size_in_header = size;
mtr.commit();
/* Immediately write the log record about

View file

@ -211,15 +211,16 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
|| undo == trx->rsegs.m_redo.old_insert);
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
ut_ad(undo->rseg == rseg);
trx_rsegf_t* rseg_header = trx_rsegf_get(
buf_block_t* rseg_header = trx_rsegf_get(
rseg->space, rseg->page_no, mtr);
page_t* undo_page = trx_undo_set_state_at_finish(
buf_block_t* undo_page = trx_undo_set_state_at_finish(
undo, mtr);
trx_ulogf_t* undo_header = undo_page + undo->hdr_offset;
trx_ulogf_t* undo_header = undo_page->frame + undo->hdr_offset;
ut_ad(mach_read_from_2(undo_header + TRX_UNDO_NEEDS_PURGE) <= 1);
if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG_FORMAT + rseg_header))) {
if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
+ rseg_header->frame))) {
/* This database must have been upgraded from
before MariaDB 10.3.5. */
trx_rseg_format_upgrade(rseg_header, mtr);
@ -228,23 +229,27 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
if (undo->state != TRX_UNDO_CACHED) {
/* The undo log segment will not be reused */
ut_a(undo->id < TRX_RSEG_N_SLOTS);
trx_rsegf_set_nth_undo(rseg_header, undo->id, FIL_NULL, mtr);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ undo->id * TRX_RSEG_SLOT_SIZE
+ rseg_header->frame, 4, 0xff, mtr);
MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
uint32_t hist_size = mach_read_from_4(TRX_RSEG_HISTORY_SIZE
+ rseg_header);
+ TRX_RSEG
+ rseg_header->frame);
ut_ad(undo->size == flst_get_len(TRX_UNDO_SEG_HDR
+ TRX_UNDO_PAGE_LIST
+ undo_page));
+ undo_page->frame));
mlog_write_ulint(
rseg_header + TRX_RSEG_HISTORY_SIZE,
hist_size + undo->size, MLOG_4BYTES, mtr);
mlog_write_ull(rseg_header + TRX_RSEG_MAX_TRX_ID,
trx_sys.get_max_trx_id(), mtr);
mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_HISTORY_SIZE
+ rseg_header->frame,
hist_size + undo->size);
mtr->write<8>(*rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID
+ rseg_header->frame,
trx_sys.get_max_trx_id());
}
/* After the purge thread has been given permission to exit,
@ -287,16 +292,17 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
}
/* Add the log as the first in the history list */
flst_add_first(rseg_header + TRX_RSEG_HISTORY,
undo_header + TRX_UNDO_HISTORY_NODE, mtr);
flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY,
undo_page, undo->hdr_offset + TRX_UNDO_HISTORY_NODE,
mtr);
mlog_write_ull(undo_header + TRX_UNDO_TRX_NO, trx->no, mtr);
mtr->write<8>(*undo_page, undo_header + TRX_UNDO_TRX_NO, trx->no);
/* This is needed for upgrading old undo log pages from
before MariaDB 10.3.1. */
if (UNIV_UNLIKELY(!mach_read_from_2(undo_header
+ TRX_UNDO_NEEDS_PURGE))) {
mlog_write_ulint(undo_header + TRX_UNDO_NEEDS_PURGE, 1,
MLOG_2BYTES, mtr);
mtr->write<2>(*undo_page, undo_header + TRX_UNDO_NEEDS_PURGE,
1U);
}
if (rseg->last_page_no == FIL_NULL) {
@ -320,19 +326,16 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
}
/** Remove undo log header from the history list.
@param[in,out] rseg_hdr rollback segment header
@param[in] log_hdr undo log segment header
@param[in,out] mtr mini transaction. */
static
void
trx_purge_remove_log_hdr(
trx_rsegf_t* rseg_hdr,
trx_ulogf_t* log_hdr,
mtr_t* mtr)
@param[in,out] rseg rollback segment header page
@param[in] log undo log segment header page
@param[in] offset byte offset in the undo log segment header page
@param[in,out] mtr mini-transaction */
static void trx_purge_remove_log_hdr(buf_block_t *rseg, buf_block_t* log,
uint16_t offset, mtr_t *mtr)
{
flst_remove(rseg_hdr + TRX_RSEG_HISTORY,
log_hdr + TRX_UNDO_HISTORY_NODE, mtr);
trx_sys.rseg_history_len--;
flst_remove(rseg, TRX_RSEG + TRX_RSEG_HISTORY,
log, offset + TRX_UNDO_HISTORY_NODE, mtr);
trx_sys.rseg_history_len--;
}
/** Free an undo log segment, and remove the header from the history list.
@ -343,14 +346,12 @@ void
trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
{
mtr_t mtr;
trx_rsegf_t* rseg_hdr;
page_t* undo_page;
mtr.start();
mutex_enter(&rseg->mutex);
rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
undo_page = trx_undo_page_get(
buf_block_t* rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
buf_block_t* block = trx_undo_page_get(
page_id_t(rseg->space->id, hdr_addr.page), &mtr);
/* Mark the last undo log totally purged, so that if the
@ -358,12 +359,12 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
again. The list of pages in the undo log tail gets
inconsistent during the freeing of the segment, and therefore
purge should not try to access them again. */
mlog_write_ulint(undo_page + hdr_addr.boffset + TRX_UNDO_NEEDS_PURGE,
0, MLOG_2BYTES, &mtr);
mtr.write<2>(*block, block->frame + hdr_addr.boffset
+ TRX_UNDO_NEEDS_PURGE, 0U);
while (!fseg_free_step_not_header(
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ undo_page, false, &mtr)) {
+ block->frame, false, &mtr)) {
mutex_exit(&rseg->mutex);
mtr.commit();
@ -373,7 +374,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
undo_page = trx_undo_page_get(
block = trx_undo_page_get(
page_id_t(rseg->space->id, hdr_addr.page), &mtr);
}
@ -381,15 +382,15 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
stored in the list base node tells us how big it was before we
started the freeing. */
const ulint seg_size = flst_get_len(
TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + undo_page);
const uint32_t seg_size = flst_get_len(
TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + block->frame);
/* We may free the undo log segment header page; it must be freed
within the same mtr as the undo log header is removed from the
history list: otherwise, in case of a database crash, the segment
could become inaccessible garbage in the file space. */
trx_purge_remove_log_hdr(rseg_hdr, undo_page + hdr_addr.boffset, &mtr);
trx_purge_remove_log_hdr(rseg_hdr, block, hdr_addr.boffset, &mtr);
do {
@ -399,14 +400,12 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
fsp0fsp.cc. */
} while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ undo_page, false, &mtr));
+ block->frame, false, &mtr));
const ulint hist_size = mach_read_from_4(rseg_hdr
+ TRX_RSEG_HISTORY_SIZE);
ut_ad(hist_size >= seg_size);
byte* hist = TRX_RSEG + TRX_RSEG_HISTORY_SIZE + rseg_hdr->frame;
ut_ad(mach_read_from_4(hist) >= seg_size);
mlog_write_ulint(rseg_hdr + TRX_RSEG_HISTORY_SIZE,
hist_size - seg_size, MLOG_4BYTES, &mtr);
mtr.write<4>(*rseg_hdr, hist, mach_read_from_4(hist) - seg_size);
ut_ad(rseg->curr_size >= seg_size);
@ -428,10 +427,6 @@ trx_purge_truncate_rseg_history(
{
fil_addr_t hdr_addr;
fil_addr_t prev_hdr_addr;
trx_rsegf_t* rseg_hdr;
page_t* undo_page;
trx_ulogf_t* log_hdr;
trx_usegf_t* seg_hdr;
mtr_t mtr;
trx_id_t undo_trx_no;
@ -439,10 +434,10 @@ trx_purge_truncate_rseg_history(
ut_ad(rseg.is_persistent());
mutex_enter(&rseg.mutex);
rseg_hdr = trx_rsegf_get(rseg.space, rseg.page_no, &mtr);
buf_block_t* rseg_hdr = trx_rsegf_get(rseg.space, rseg.page_no, &mtr);
hdr_addr = trx_purge_get_log_from_hist(flst_get_last(TRX_RSEG_HISTORY
+ rseg_hdr));
hdr_addr = trx_purge_get_log_from_hist(
flst_get_last(TRX_RSEG + TRX_RSEG_HISTORY + rseg_hdr->frame));
loop:
if (hdr_addr.page == FIL_NULL) {
func_exit:
@ -451,12 +446,11 @@ func_exit:
return;
}
undo_page = trx_undo_page_get(page_id_t(rseg.space->id, hdr_addr.page),
&mtr);
log_hdr = undo_page + hdr_addr.boffset;
undo_trx_no = mach_read_from_8(log_hdr + TRX_UNDO_TRX_NO);
buf_block_t* block = trx_undo_page_get(page_id_t(rseg.space->id,
hdr_addr.page),
&mtr);
undo_trx_no = mach_read_from_8(block->frame + hdr_addr.boffset
+ TRX_UNDO_TRX_NO);
if (undo_trx_no >= limit.trx_no()) {
if (undo_trx_no == limit.trx_no()) {
@ -469,12 +463,13 @@ func_exit:
}
prev_hdr_addr = trx_purge_get_log_from_hist(
flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE));
flst_get_prev_addr(block->frame + hdr_addr.boffset
+ TRX_UNDO_HISTORY_NODE));
seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
if ((mach_read_from_2(seg_hdr + TRX_UNDO_STATE) == TRX_UNDO_TO_PURGE)
&& (mach_read_from_2(log_hdr + TRX_UNDO_NEXT_LOG) == 0)) {
if (mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + block->frame)
== TRX_UNDO_TO_PURGE
&& !mach_read_from_2(block->frame + hdr_addr.boffset
+ TRX_UNDO_NEXT_LOG)) {
/* We can free the whole log segment */
@ -486,7 +481,8 @@ func_exit:
trx_purge_free_segment(&rseg, hdr_addr);
} else {
/* Remove the log hdr from the rseg history. */
trx_purge_remove_log_hdr(rseg_hdr, log_hdr, &mtr);
trx_purge_remove_log_hdr(rseg_hdr, block, hdr_addr.boffset,
&mtr);
mutex_exit(&rseg.mutex);
mtr.commit();
@ -825,8 +821,6 @@ static void trx_purge_rseg_get_next_history_log(
ulint* n_pages_handled)/*!< in/out: number of UNDO pages
handled */
{
page_t* undo_page;
trx_ulogf_t* log_hdr;
fil_addr_t prev_log_addr;
trx_id_t trx_no;
mtr_t mtr;
@ -841,11 +835,12 @@ static void trx_purge_rseg_get_next_history_log(
mtr.start();
undo_page = trx_undo_page_get_s_latched(
const buf_block_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(purge_sys.rseg->space->id,
purge_sys.rseg->last_page_no), &mtr);
log_hdr = undo_page + purge_sys.rseg->last_offset;
const trx_ulogf_t* log_hdr = undo_page->frame
+ purge_sys.rseg->last_offset;
/* Increase the purge page count by one for every handled log */
@ -873,7 +868,7 @@ static void trx_purge_rseg_get_next_history_log(
log_hdr = trx_undo_page_get_s_latched(
page_id_t(purge_sys.rseg->space->id, prev_log_addr.page),
&mtr)
&mtr)->frame
+ prev_log_addr.boffset;
trx_no = mach_read_from_8(log_hdr + TRX_UNDO_TRX_NO);
@ -908,8 +903,8 @@ static
void
trx_purge_read_undo_rec()
{
ulint offset;
ulint page_no;
uint16_t offset;
uint32_t page_no;
ib_uint64_t undo_no;
purge_sys.hdr_offset = purge_sys.rseg->last_offset;
@ -918,13 +913,15 @@ trx_purge_read_undo_rec()
if (purge_sys.rseg->needs_purge) {
mtr_t mtr;
mtr.start();
buf_block_t* undo_page;
if (trx_undo_rec_t* undo_rec = trx_undo_get_first_rec(
purge_sys.rseg->space, purge_sys.hdr_page_no,
purge_sys.hdr_offset, RW_S_LATCH, &mtr)) {
*purge_sys.rseg->space, purge_sys.hdr_page_no,
purge_sys.hdr_offset, RW_S_LATCH,
undo_page, &mtr)) {
offset = page_offset(undo_rec);
undo_no = trx_undo_rec_get_undo_no(undo_rec);
page_no = page_get_page_no(page_align(undo_rec));
page_no = undo_page->page.id.page_no();
} else {
offset = 0;
undo_no = 0;
@ -974,22 +971,14 @@ trx_purge_get_next_rec(
handled */
mem_heap_t* heap) /*!< in: memory heap where copied */
{
trx_undo_rec_t* rec;
trx_undo_rec_t* rec_copy;
trx_undo_rec_t* rec2;
page_t* undo_page;
page_t* page;
ulint offset;
ulint page_no;
ulint space;
mtr_t mtr;
ut_ad(purge_sys.next_stored);
ut_ad(purge_sys.tail.trx_no() < purge_sys.view.low_limit_no());
space = purge_sys.rseg->space->id;
page_no = purge_sys.page_no;
offset = purge_sys.offset;
const ulint space = purge_sys.rseg->space->id;
const uint32_t page_no = purge_sys.page_no;
const uint16_t offset = purge_sys.offset;
if (offset == 0) {
/* It is the dummy undo log record, which means that there is
@ -1006,16 +995,16 @@ trx_purge_get_next_rec(
mtr_start(&mtr);
undo_page = trx_undo_page_get_s_latched(page_id_t(space, page_no),
&mtr);
buf_block_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(space, page_no), &mtr);
buf_block_t* rec2_page = undo_page;
rec = undo_page + offset;
rec2 = trx_undo_page_get_next_rec(rec, purge_sys.hdr_page_no,
purge_sys.hdr_offset);
const trx_undo_rec_t* rec2 = trx_undo_page_get_next_rec(
undo_page, offset, purge_sys.hdr_page_no, purge_sys.hdr_offset);
if (rec2 == NULL) {
rec2 = trx_undo_get_next_rec(rec, purge_sys.hdr_page_no,
rec2 = trx_undo_get_next_rec(rec2_page, offset,
purge_sys.hdr_page_no,
purge_sys.hdr_offset, &mtr);
}
@ -1032,22 +1021,19 @@ trx_purge_get_next_rec(
undo_page = trx_undo_page_get_s_latched(
page_id_t(space, page_no), &mtr);
rec = undo_page + offset;
} else {
page = page_align(rec2);
purge_sys.offset = ulint(rec2 - page);
purge_sys.page_no = page_get_page_no(page);
purge_sys.offset = page_offset(rec2);
purge_sys.page_no = rec2_page->page.id.page_no();
purge_sys.tail.undo_no = trx_undo_rec_get_undo_no(rec2);
if (undo_page != page) {
if (undo_page != rec2_page) {
/* We advance to a new page of the undo log: */
(*n_pages_handled)++;
}
}
rec_copy = trx_undo_rec_copy(rec, heap);
trx_undo_rec_t* rec_copy = trx_undo_rec_copy(undo_page->frame + offset,
heap);
mtr_commit(&mtr);

View file

@ -162,7 +162,7 @@ that was written to ptr. Update the first free value by the number of bytes
written for this undo record.
@return offset of the inserted entry on the page if succeeded, 0 if fail */
static
ulint
uint16_t
trx_undo_page_set_next_prev_and_add(
/*================================*/
buf_block_t* undo_block, /*!< in/out: undo log page */
@ -170,30 +170,22 @@ trx_undo_page_set_next_prev_and_add(
written on this undo page. */
mtr_t* mtr) /*!< in: mtr */
{
ulint first_free; /*!< offset within undo_page */
ulint end_of_rec; /*!< offset within undo_page */
byte* ptr_to_first_free;
/* pointer within undo_page
that points to the next free
offset value within undo_page.*/
ut_ad(ptr > undo_block->frame);
ut_ad(ptr < undo_block->frame + srv_page_size);
ut_ad(page_align(ptr) == undo_block->frame);
if (UNIV_UNLIKELY(trx_undo_left(undo_block, ptr) < 2)) {
return(0);
}
ptr_to_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
byte* ptr_to_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ undo_block->frame;
first_free = mach_read_from_2(ptr_to_first_free);
uint16_t first_free = mach_read_from_2(ptr_to_first_free);
/* Write offset of the previous undo log record */
mach_write_to_2(ptr, first_free);
ptr += 2;
end_of_rec = ulint(ptr - undo_block->frame);
uint16_t end_of_rec = uint16_t(ptr - undo_block->frame);
/* Write offset of the next undo log record */
mach_write_to_2(undo_block->frame + first_free, end_of_rec);
@ -457,7 +449,7 @@ trx_undo_report_insert_virtual(
Reports in the undo log of an insert of a clustered index record.
@return offset of the inserted entry on the page if succeed, 0 if fail */
static
ulint
uint16_t
trx_undo_page_report_insert(
/*========================*/
buf_block_t* undo_block, /*!< in: undo log page */
@ -467,10 +459,6 @@ trx_undo_page_report_insert(
inserted to the clustered index */
mtr_t* mtr) /*!< in: mtr */
{
ulint first_free;
byte* ptr;
ulint i;
ut_ad(dict_index_is_clust(index));
/* MariaDB 10.3.1+ in trx_undo_page_init() always initializes
TRX_UNDO_PAGE_TYPE as 0, but previous versions wrote
@ -479,9 +467,10 @@ trx_undo_page_report_insert(
ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ undo_block->frame) <= 2);
first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ undo_block->frame);
ptr = undo_block->frame + first_free;
uint16_t first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_FREE
+ undo_block->frame);
byte* ptr = undo_block->frame + first_free;
ut_ad(first_free <= srv_page_size);
@ -509,7 +498,7 @@ trx_undo_page_report_insert(
goto done;
}
for (i = 0; i < dict_index_get_n_unique(index); i++) {
for (unsigned i = 0; i < dict_index_get_n_unique(index); i++) {
const dfield_t* field = dtuple_get_nth_field(clust_entry, i);
ulint flen = dfield_get_len(field);
@ -572,12 +561,14 @@ trx_undo_rec_get_pars(
*updated_extern = !!(type_cmpl & TRX_UNDO_UPD_EXTERN);
type_cmpl &= ~TRX_UNDO_UPD_EXTERN;
*type = type_cmpl & (TRX_UNDO_CMPL_INFO_MULT - 1);
ut_ad(*type >= TRX_UNDO_RENAME_TABLE);
ut_ad(*type <= TRX_UNDO_DEL_MARK_REC);
*cmpl_info = type_cmpl / TRX_UNDO_CMPL_INFO_MULT;
*undo_no = mach_read_next_much_compressed(&ptr);
*table_id = mach_read_next_much_compressed(&ptr);
ut_ad(*table_id);
return(const_cast<byte*>(ptr));
}
@ -856,7 +847,7 @@ record.
@return byte offset of the inserted undo log entry on the page if
succeed, 0 if fail */
static
ulint
uint16_t
trx_undo_page_report_modify(
/*========================*/
buf_block_t* undo_block, /*!< in: undo log page */
@ -875,7 +866,6 @@ trx_undo_page_report_modify(
virtual column info */
mtr_t* mtr) /*!< in: mtr */
{
ulint first_free;
byte* ptr;
ut_ad(index->is_primary());
@ -887,8 +877,9 @@ trx_undo_page_report_modify(
ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ undo_block->frame) <= 2);
first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ undo_block->frame);
uint16_t first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_FREE
+ undo_block->frame);
ptr = undo_block->frame + first_free;
ut_ad(first_free <= srv_page_size);
@ -1953,13 +1944,13 @@ trx_undo_erase_page_end(page_t* undo_page)
@return byte offset of the undo log record
@retval 0 in case of failure */
static
ulint
uint16_t
trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table,
buf_block_t* block, mtr_t* mtr)
{
byte* ptr_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ block->frame;
ulint first_free = mach_read_from_2(ptr_first_free);
uint16_t first_free = mach_read_from_2(ptr_first_free);
ut_ad(first_free >= TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
ut_ad(first_free <= srv_page_size);
byte* start = block->frame + first_free;
@ -1985,7 +1976,7 @@ trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table,
ptr += len;
mach_write_to_2(ptr, first_free);
ptr += 2;
ulint offset = page_offset(ptr);
uint16_t offset = page_offset(ptr);
mach_write_to_2(start, offset);
mach_write_to_2(ptr_first_free, offset);
@ -2014,7 +2005,7 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
ut_ad(loop_count++ < 2);
ut_ad(undo->last_page_no == block->page.id.page_no());
if (ulint offset = trx_undo_page_report_rename(
if (uint16_t offset = trx_undo_page_report_rename(
trx, table, block, &mtr)) {
undo->withdraw_clock = buf_withdraw_clock;
undo->top_page_no = undo->last_page_no;
@ -2118,7 +2109,7 @@ trx_undo_report_row_operation(
ut_ad(undo != NULL);
do {
ulint offset = !rec
uint16_t offset = !rec
? trx_undo_page_report_insert(
undo_block, trx, index, clust_entry, &mtr)
: trx_undo_page_report_modify(
@ -2240,11 +2231,10 @@ trx_undo_get_undo_rec_low(
{
trx_undo_rec_t* undo_rec;
ulint rseg_id;
ulint page_no;
ulint offset;
const page_t* undo_page;
uint32_t page_no;
uint16_t offset;
trx_rseg_t* rseg;
ibool is_insert;
bool is_insert;
mtr_t mtr;
trx_undo_decode_roll_ptr(roll_ptr, &is_insert, &rseg_id, &page_no,
@ -2254,14 +2244,14 @@ trx_undo_get_undo_rec_low(
rseg = trx_sys.rseg_array[rseg_id];
ut_ad(rseg->is_persistent());
mtr_start(&mtr);
mtr.start();
undo_page = trx_undo_page_get_s_latched(
buf_block_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(rseg->space->id, page_no), &mtr);
undo_rec = trx_undo_rec_copy(undo_page + offset, heap);
undo_rec = trx_undo_rec_copy(undo_page->frame + offset, heap);
mtr_commit(&mtr);
mtr.commit();
return(undo_rec);
}

View file

@ -49,7 +49,7 @@ static unsigned char wsrep_uuid[16];
@param[in,out] mtr mini transaction */
static void
trx_rseg_write_wsrep_checkpoint(
trx_rsegf_t* rseg_header,
buf_block_t* rseg_header,
const XID* xid,
mtr_t* mtr)
{
@ -57,25 +57,27 @@ trx_rseg_write_wsrep_checkpoint(
DBUG_ASSERT(xid->bqual_length >= 0);
DBUG_ASSERT(xid->gtrid_length + xid->bqual_length < XIDDATASIZE);
mlog_write_ulint(TRX_RSEG_WSREP_XID_FORMAT + rseg_header,
uint32_t(xid->formatID),
MLOG_4BYTES, mtr);
mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_WSREP_XID_FORMAT
+ rseg_header->frame,
uint32_t(xid->formatID));
mlog_write_ulint(TRX_RSEG_WSREP_XID_GTRID_LEN + rseg_header,
uint32_t(xid->gtrid_length),
MLOG_4BYTES, mtr);
mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_WSREP_XID_GTRID_LEN
+ rseg_header->frame,
uint32_t(xid->gtrid_length));
mlog_write_ulint(TRX_RSEG_WSREP_XID_BQUAL_LEN + rseg_header,
uint32_t(xid->bqual_length),
MLOG_4BYTES, mtr);
mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_WSREP_XID_BQUAL_LEN
+ rseg_header->frame,
uint32_t(xid->bqual_length));
const ulint xid_length = static_cast<ulint>(xid->gtrid_length
+ xid->bqual_length);
mlog_write_string(TRX_RSEG_WSREP_XID_DATA + rseg_header,
mlog_write_string(TRX_RSEG + TRX_RSEG_WSREP_XID_DATA
+ rseg_header->frame,
reinterpret_cast<const byte*>(xid->data),
xid_length, mtr);
if (UNIV_LIKELY(xid_length < XIDDATASIZE)) {
mlog_memset(TRX_RSEG_WSREP_XID_DATA + rseg_header + xid_length,
mlog_memset(TRX_RSEG + TRX_RSEG_WSREP_XID_DATA
+ rseg_header->frame + xid_length,
XIDDATASIZE - xid_length, 0, mtr);
}
}
@ -86,7 +88,7 @@ trx_rseg_write_wsrep_checkpoint(
@param[in,out] mtr mini-transaction */
void
trx_rseg_update_wsrep_checkpoint(
trx_rsegf_t* rseg_header,
buf_block_t* rseg_header,
const XID* xid,
mtr_t* mtr)
{
@ -109,16 +111,13 @@ trx_rseg_update_wsrep_checkpoint(
}
/** Clear the WSREP XID information from rollback segment header.
@param[in,out] rseg_header Rollback segment header
@param[in,out] mtr mini-transaction */
static void
trx_rseg_clear_wsrep_checkpoint(
trx_rsegf_t* rseg_header,
mtr_t* mtr)
@param[in,out] block rollback segment header
@param[in,out] mtr mini-transaction */
static void trx_rseg_clear_wsrep_checkpoint(buf_block_t *block, mtr_t *mtr)
{
mlog_memset(rseg_header + TRX_RSEG_WSREP_XID_INFO,
TRX_RSEG_WSREP_XID_DATA + XIDDATASIZE
- TRX_RSEG_WSREP_XID_INFO, 0, mtr);
mlog_memset(block, TRX_RSEG + TRX_RSEG_WSREP_XID_INFO,
TRX_RSEG_WSREP_XID_DATA + XIDDATASIZE - TRX_RSEG_WSREP_XID_INFO,
0, mtr);
}
static void
@ -133,9 +132,10 @@ trx_rseg_update_wsrep_checkpoint(const XID* xid, mtr_t* mtr)
sizeof wsrep_uuid);
const trx_rseg_t* rseg = trx_sys.rseg_array[0];
trx_rsegf_t* rseg_header = trx_rsegf_get(rseg->space, rseg->page_no,
buf_block_t* rseg_header = trx_rsegf_get(rseg->space, rseg->page_no,
mtr);
if (UNIV_UNLIKELY(mach_read_from_4(rseg_header + TRX_RSEG_FORMAT))) {
if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
+ rseg_header->frame))) {
trx_rseg_format_upgrade(rseg_header, mtr);
}
@ -178,25 +178,26 @@ void trx_rseg_update_wsrep_checkpoint(const XID* xid)
@param[out] xid Transaction XID
@return whether the WSREP XID was present */
static
bool trx_rseg_read_wsrep_checkpoint(const trx_rsegf_t* rseg_header, XID& xid)
bool trx_rseg_read_wsrep_checkpoint(const buf_block_t *rseg_header, XID &xid)
{
int formatID = static_cast<int>(
mach_read_from_4(
TRX_RSEG_WSREP_XID_FORMAT + rseg_header));
mach_read_from_4(TRX_RSEG + TRX_RSEG_WSREP_XID_FORMAT
+ rseg_header->frame));
if (formatID == 0) {
return false;
}
xid.formatID = formatID;
xid.gtrid_length = static_cast<int>(
mach_read_from_4(
TRX_RSEG_WSREP_XID_GTRID_LEN + rseg_header));
mach_read_from_4(TRX_RSEG + TRX_RSEG_WSREP_XID_GTRID_LEN
+ rseg_header->frame));
xid.bqual_length = static_cast<int>(
mach_read_from_4(
TRX_RSEG_WSREP_XID_BQUAL_LEN + rseg_header));
mach_read_from_4(TRX_RSEG + TRX_RSEG_WSREP_XID_BQUAL_LEN
+ rseg_header->frame));
memcpy(xid.data, TRX_RSEG_WSREP_XID_DATA + rseg_header, XIDDATASIZE);
memcpy(xid.data, TRX_RSEG + TRX_RSEG_WSREP_XID_DATA
+ rseg_header->frame, XIDDATASIZE);
return true;
}
@ -252,10 +253,11 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid)
continue;
}
const trx_rsegf_t* rseg_header = trx_rsegf_get_new(
const buf_block_t* rseg_header = trx_rsegf_get_new(
trx_sysf_rseg_get_space(sys, rseg_id), page_no, &mtr);
if (mach_read_from_4(rseg_header + TRX_RSEG_FORMAT)) {
if (mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
+ rseg_header->frame)) {
continue;
}
@ -279,17 +281,15 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid)
/** Upgrade a rollback segment header page to MariaDB 10.3 format.
@param[in,out] rseg_header rollback segment header page
@param[in,out] mtr mini-transaction */
void trx_rseg_format_upgrade(trx_rsegf_t* rseg_header, mtr_t* mtr)
void trx_rseg_format_upgrade(buf_block_t *rseg_header, mtr_t *mtr)
{
ut_ad(page_offset(rseg_header) == TRX_RSEG);
byte* rseg_format = TRX_RSEG_FORMAT + rseg_header;
mlog_write_ulint(rseg_format, 0, MLOG_4BYTES, mtr);
/* Clear also possible garbage at the end of the page. Old
InnoDB versions did not initialize unused parts of pages. */
mlog_memset(TRX_RSEG_MAX_TRX_ID + 8 + rseg_header,
srv_page_size
- (FIL_PAGE_DATA_END
+ TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8), 0, mtr);
mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_FORMAT, 4, 0, mtr);
/* Clear also possible garbage at the end of the page. Old
InnoDB versions did not initialize unused parts of pages. */
mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8,
srv_page_size
- (FIL_PAGE_DATA_END + TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8),
0, mtr);
}
/** Create a rollback segment header.
@ -337,16 +337,16 @@ trx_rseg_header_create(
/* Add the rollback segment info to the free slot in
the trx system header */
mlog_write_ulint(TRX_SYS + TRX_SYS_RSEGS
+ TRX_SYS_RSEG_SPACE
+ rseg_id * TRX_SYS_RSEG_SLOT_SIZE
+ sys_header->frame,
space->id, MLOG_4BYTES, mtr);
mlog_write_ulint(TRX_SYS + TRX_SYS_RSEGS
+ TRX_SYS_RSEG_PAGE_NO
+ rseg_id * TRX_SYS_RSEG_SLOT_SIZE
+ sys_header->frame,
block->page.id.page_no(), MLOG_4BYTES, mtr);
mtr->write<4,mtr_t::OPT>(
*sys_header,
TRX_SYS + TRX_SYS_RSEGS + TRX_SYS_RSEG_SPACE
+ rseg_id * TRX_SYS_RSEG_SLOT_SIZE
+ sys_header->frame, space->id);
mtr->write<4,mtr_t::OPT>(
*sys_header,
TRX_SYS + TRX_SYS_RSEGS + TRX_SYS_RSEG_PAGE_NO
+ rseg_id * TRX_SYS_RSEG_SLOT_SIZE
+ sys_header->frame, block->page.id.page_no());
}
return block;
@ -410,29 +410,28 @@ trx_rseg_mem_create(ulint id, fil_space_t* space, ulint page_no)
}
/** Read the undo log lists.
@param[in,out] rseg rollback segment
@param[in,out] max_trx_id maximum observed transaction identifier
@param[in] rseg_header rollback segment header
@param[in,out] rseg rollback segment
@param[in,out] max_trx_id maximum observed transaction identifier
@param[in] rseg_header rollback segment header
@return the combined size of undo log segments in pages */
static
ulint
trx_undo_lists_init(trx_rseg_t* rseg, trx_id_t& max_trx_id,
const trx_rsegf_t* rseg_header)
static ulint trx_undo_lists_init(trx_rseg_t *rseg, trx_id_t &max_trx_id,
const buf_block_t *rseg_header)
{
ut_ad(srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN);
ut_ad(srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN);
ulint size = 0;
ulint size= 0;
for (ulint i = 0; i < TRX_RSEG_N_SLOTS; i++) {
ulint page_no = trx_rsegf_get_nth_undo(rseg_header, i);
if (page_no != FIL_NULL) {
size += trx_undo_mem_create_at_db_start(
rseg, i, page_no, max_trx_id);
MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
}
}
for (ulint i= 0; i < TRX_RSEG_N_SLOTS; i++)
{
uint32_t page_no= trx_rsegf_get_nth_undo(rseg_header, i);
if (page_no != FIL_NULL)
{
size+= trx_undo_mem_create_at_db_start(rseg, i, page_no, max_trx_id);
MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
}
}
return(size);
return size;
}
/** Restore the state of a persistent rollback segment.
@ -443,20 +442,20 @@ static
void
trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
{
trx_rsegf_t* rseg_header = trx_rsegf_get_new(
buf_block_t* rseg_hdr = trx_rsegf_get_new(
rseg->space->id, rseg->page_no, mtr);
if (mach_read_from_4(rseg_header + TRX_RSEG_FORMAT) == 0) {
trx_id_t id = mach_read_from_8(rseg_header
+ TRX_RSEG_MAX_TRX_ID);
if (!mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT + rseg_hdr->frame)) {
trx_id_t id = mach_read_from_8(TRX_RSEG + TRX_RSEG_MAX_TRX_ID
+ rseg_hdr->frame);
if (id > max_trx_id) {
max_trx_id = id;
}
if (rseg_header[TRX_RSEG_BINLOG_NAME]) {
const char* binlog_name = reinterpret_cast<const char*>
(rseg_header) + TRX_RSEG_BINLOG_NAME;
const char* binlog_name = TRX_RSEG + TRX_RSEG_BINLOG_NAME
+ reinterpret_cast<const char*>(rseg_hdr->frame);
if (*binlog_name) {
compile_time_assert(TRX_RSEG_BINLOG_NAME_LEN == sizeof
trx_sys.recovered_binlog_filename);
@ -468,7 +467,8 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
if (cmp >= 0) {
uint64_t binlog_offset = mach_read_from_8(
rseg_header + TRX_RSEG_BINLOG_OFFSET);
TRX_RSEG + TRX_RSEG_BINLOG_OFFSET
+ rseg_hdr->frame);
if (cmp) {
memcpy(trx_sys.
recovered_binlog_filename,
@ -485,7 +485,7 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
#ifdef WITH_WSREP
trx_rseg_read_wsrep_checkpoint(
rseg_header, trx_sys.recovered_wsrep_xid);
rseg_hdr, trx_sys.recovered_wsrep_xid);
#endif
}
}
@ -499,32 +499,37 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
/* Initialize the undo log lists according to the rseg header */
rseg->curr_size = mach_read_from_4(rseg_header + TRX_RSEG_HISTORY_SIZE)
+ 1 + trx_undo_lists_init(rseg, max_trx_id, rseg_header);
rseg->curr_size = mach_read_from_4(TRX_RSEG + TRX_RSEG_HISTORY_SIZE
+ rseg_hdr->frame)
+ 1 + trx_undo_lists_init(rseg, max_trx_id, rseg_hdr);
if (auto len = flst_get_len(rseg_header + TRX_RSEG_HISTORY)) {
if (auto len = flst_get_len(TRX_RSEG + TRX_RSEG_HISTORY
+ rseg_hdr->frame)) {
trx_sys.rseg_history_len += len;
fil_addr_t node_addr = trx_purge_get_log_from_hist(
flst_get_last(rseg_header + TRX_RSEG_HISTORY));
flst_get_last(TRX_RSEG + TRX_RSEG_HISTORY
+ rseg_hdr->frame));
rseg->last_page_no = node_addr.page;
rseg->last_offset = node_addr.boffset;
const trx_ulogf_t* undo_log_hdr = trx_undo_page_get(
page_id_t(rseg->space->id, node_addr.page), mtr)
+ node_addr.boffset;
const buf_block_t* block = trx_undo_page_get(
page_id_t(rseg->space->id, node_addr.page), mtr);
trx_id_t id = mach_read_from_8(undo_log_hdr + TRX_UNDO_TRX_ID);
trx_id_t id = mach_read_from_8(block->frame + node_addr.boffset
+ TRX_UNDO_TRX_ID);
if (id > max_trx_id) {
max_trx_id = id;
}
id = mach_read_from_8(undo_log_hdr + TRX_UNDO_TRX_NO);
id = mach_read_from_8(block->frame + node_addr.boffset
+ TRX_UNDO_TRX_NO);
if (id > max_trx_id) {
max_trx_id = id;
}
unsigned purge = mach_read_from_2(
undo_log_hdr + TRX_UNDO_NEEDS_PURGE);
unsigned purge = mach_read_from_2(block->frame
+ node_addr.boffset
+ TRX_UNDO_NEEDS_PURGE);
ut_ad(purge <= 1);
rseg->set_last_trx_no(id, purge != 0);
rseg->needs_purge = purge != 0;
@ -638,8 +643,8 @@ trx_rseg_array_init()
}
/* Finally, clear WSREP XID in TRX_SYS page. */
const buf_block_t* sys = trx_sysf_get(&mtr);
mlog_memset(TRX_SYS + TRX_SYS_WSREP_XID_INFO + sys->frame,
mlog_memset(trx_sysf_get(&mtr),
TRX_SYS + TRX_SYS_WSREP_XID_INFO,
TRX_SYS_WSREP_XID_LEN, 0, &mtr);
mtr.commit();
}
@ -765,8 +770,8 @@ up to which replication has proceeded.
@param[in,out] rseg_header rollback segment header
@param[in] trx committing transaction
@param[in,out] mtr mini-transaction */
void
trx_rseg_update_binlog_offset(byte* rseg_header, const trx_t* trx, mtr_t* mtr)
void trx_rseg_update_binlog_offset(buf_block_t *rseg_header, const trx_t *trx,
mtr_t *mtr)
{
DBUG_LOG("trx", "trx_mysql_binlog_offset: " << trx->mysql_log_offset);
@ -778,9 +783,11 @@ trx_rseg_update_binlog_offset(byte* rseg_header, const trx_t* trx, mtr_t* mtr)
return;
}
mlog_write_ull(rseg_header + TRX_RSEG_BINLOG_OFFSET,
trx->mysql_log_offset, mtr);
byte* p = rseg_header + TRX_RSEG_BINLOG_NAME;
mtr->write<8,mtr_t::OPT>(*rseg_header,
TRX_RSEG + TRX_RSEG_BINLOG_OFFSET
+ rseg_header->frame,
trx->mysql_log_offset);
byte* p = TRX_RSEG + TRX_RSEG_BINLOG_NAME + rseg_header->frame;
const byte* binlog_name = reinterpret_cast<const byte*>
(trx->mysql_log_file_name);

View file

@ -147,8 +147,6 @@ trx_sysf_create(
{
ulint slot_no;
buf_block_t* block;
page_t* page;
byte* ptr;
ut_ad(mtr);
@ -167,30 +165,28 @@ trx_sysf_create(
ut_a(block->page.id.page_no() == TRX_SYS_PAGE_NO);
page = buf_block_get_frame(block);
mtr->write<2>(*block, FIL_PAGE_TYPE + block->frame,
FIL_PAGE_TYPE_TRX_SYS);
mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_TYPE_TRX_SYS,
MLOG_2BYTES, mtr);
/* Reset the doublewrite buffer magic number to zero so that we
know that the doublewrite buffer has not yet been created (this
suppresses a Valgrind warning) */
mlog_write_ulint(page + TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_MAGIC, 0, MLOG_4BYTES, mtr);
ut_ad(!mach_read_from_4(block->frame
+ TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_MAGIC));
/* Reset the rollback segment slots. Old versions of InnoDB
(before MySQL 5.5) define TRX_SYS_N_RSEGS as 256 and expect
that the whole array is initialized. */
ptr = TRX_SYS + TRX_SYS_RSEGS + page;
compile_time_assert(256 >= TRX_SYS_N_RSEGS);
memset(ptr, 0xff, 256 * TRX_SYS_RSEG_SLOT_SIZE);
ptr += 256 * TRX_SYS_RSEG_SLOT_SIZE;
ut_a(ptr <= page + (srv_page_size - FIL_PAGE_DATA_END));
compile_time_assert(TRX_SYS + TRX_SYS_RSEGS
+ 256 * TRX_SYS_RSEG_SLOT_SIZE
<= UNIV_PAGE_SIZE_MIN - FIL_PAGE_DATA_END);
mlog_memset(block, TRX_SYS + TRX_SYS_RSEGS,
256 * TRX_SYS_RSEG_SLOT_SIZE, 0xff, mtr);
/* Initialize all of the page. This part used to be uninitialized. */
mlog_memset(block, ptr - page,
srv_page_size - FIL_PAGE_DATA_END + size_t(page - ptr),
mlog_memset(block, TRX_SYS + TRX_SYS_RSEGS
+ 256 * TRX_SYS_RSEG_SLOT_SIZE,
srv_page_size
- (FIL_PAGE_DATA_END + TRX_SYS + TRX_SYS_RSEGS
+ 256 * TRX_SYS_RSEG_SLOT_SIZE),
0, mtr);
/* Create the first rollback segment in the SYSTEM tablespace */

View file

@ -565,8 +565,6 @@ trx_resurrect_table_locks(
const trx_undo_t* undo) /*!< in: undo log */
{
mtr_t mtr;
page_t* undo_page;
trx_undo_rec_t* undo_rec;
table_id_set tables;
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE) ||
@ -581,11 +579,11 @@ trx_resurrect_table_locks(
/* trx_rseg_mem_create() may have acquired an X-latch on this
page, so we cannot acquire an S-latch. */
undo_page = trx_undo_page_get(
buf_block_t* block = trx_undo_page_get(
page_id_t(trx->rsegs.m_redo.rseg->space->id,
undo->top_page_no), &mtr);
undo_rec = undo_page + undo->top_offset;
buf_block_t* undo_block = block;
trx_undo_rec_t* undo_rec = block->frame + undo->top_offset;
do {
ulint type;
@ -594,11 +592,9 @@ trx_resurrect_table_locks(
ulint cmpl_info;
bool updated_extern;
page_t* undo_rec_page = page_align(undo_rec);
if (undo_rec_page != undo_page) {
mtr.release_page(undo_page, MTR_MEMO_PAGE_X_FIX);
undo_page = undo_rec_page;
if (undo_block != block) {
mtr.memo_release(undo_block, MTR_MEMO_PAGE_X_FIX);
undo_block = block;
}
trx_undo_rec_get_pars(
@ -607,7 +603,7 @@ trx_resurrect_table_locks(
tables.insert(table_id);
undo_rec = trx_undo_get_prev_rec(
undo_rec, undo->hdr_page_no,
block, page_offset(undo_rec), undo->hdr_page_no,
undo->hdr_offset, false, &mtr);
} while (undo_rec);

File diff suppressed because it is too large Load diff