diff --git a/btr/btr0btr.c b/btr/btr0btr.c
index e2093a8df08..5d694d4fbe8 100644
--- a/btr/btr0btr.c
+++ b/btr/btr0btr.c
@@ -2509,6 +2509,11 @@ btr_discard_page(
 
 		ut_ad(page_rec_is_user_rec(node_ptr));
 
+		/* This will make page_zip_validate() fail on merge_page
+		until btr_level_list_remove() completes. This is harmless,
+		because everything will take place within a single
+		mini-transaction and because writing to the redo log
+		is an atomic operation (performed by mtr_commit()). */
 		btr_set_min_rec_mark(node_ptr, mtr);
 	}
 
@@ -2516,6 +2521,14 @@ btr_discard_page(
 
 	/* Remove the page from the level list */
 	btr_level_list_remove(tree, page, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	{
+		page_zip_des_t*	merge_page_zip = buf_block_get_page_zip(
+			buf_block_align(merge_page));
+		ut_a(!merge_page_zip
+		     || page_zip_validate(merge_page_zip, merge_page));
+	}
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	if (left_page_no != FIL_NULL) {
 		lock_update_discard(page_get_supremum_rec(merge_page), page);
diff --git a/btr/btr0cur.c b/btr/btr0cur.c
index 0a81100720e..cdb2ecb2f52 100644
--- a/btr/btr0cur.c
+++ b/btr/btr0cur.c
@@ -1766,6 +1766,9 @@ btr_cur_optimistic_update(
 	new_rec_size = rec_get_converted_size(index, new_entry);
 
 	page_zip = buf_block_get_page_zip(buf_block_align(page));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	if (UNIV_LIKELY_NULL(page_zip)
 	    && !page_zip_alloc(page_zip, page, index,
@@ -1968,6 +1971,9 @@ btr_cur_pessimistic_update(
 					MTR_MEMO_X_LOCK));
 	ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
 				MTR_MEMO_PAGE_X_FIX));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	optim_err = btr_cur_optimistic_update(flags, cursor, update,
 					      cmpl_info, thr, mtr);
@@ -2085,6 +2091,9 @@ btr_cur_pessimistic_update(
 
 	btr_search_update_hash_on_delete(cursor);
 
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	page_cur_delete_rec(page_cursor, index, offsets, page_zip, mtr);
 
 	page_cur_move_to_prev(page_cursor);
@@ -2604,17 +2613,24 @@ btr_cur_optimistic_delete(
 
 	if (no_compress_needed) {
 
+		page_zip_des_t*	page_zip;
+
 		lock_update_delete(rec);
 
 		btr_search_update_hash_on_delete(cursor);
 
 		max_ins_size = page_get_max_insert_size_after_reorganize(
 			page, 1);
+		page_zip = buf_block_get_page_zip(buf_block_align(
+			btr_cur_get_rec(cursor)));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+		ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 		page_cur_delete_rec(btr_cur_get_page_cur(cursor),
-				    cursor->index, offsets,
-				    buf_block_get_page_zip(buf_block_align(
-					    btr_cur_get_rec(cursor))),
-				    mtr);
+				    cursor->index, offsets, page_zip, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+		ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 		ibuf_update_free_bits_low(cursor->index, page, max_ins_size,
 					  mtr);
@@ -2695,6 +2711,9 @@ btr_cur_pessimistic_delete(
 	heap = mem_heap_create(1024);
 	rec = btr_cur_get_rec(cursor);
 	page_zip = buf_block_get_page_zip(buf_block_align(page));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	offsets = rec_get_offsets(rec, cursor->index, NULL,
 				  ULINT_UNDEFINED, &heap);
@@ -2707,6 +2726,9 @@ btr_cur_pessimistic_delete(
 		       || !rec_get_1byte_offs_flag(rec))) {
 		btr_rec_free_externally_stored_fields(cursor->index,
 			rec, offsets, page_zip, in_rollback, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+		ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	}
 
 	if (UNIV_UNLIKELY(page_get_n_recs(page) < 2)
@@ -2739,6 +2761,11 @@ btr_cur_pessimistic_delete(
 		non-leaf level, we must mark the new leftmost node
 		pointer as the predefined minimum record */
 
+		/* This will make page_zip_validate() fail until
+		page_cur_delete_rec() completes. This is harmless,
+		because everything will take place within a single
+		mini-transaction and because writing to the redo log
+		is an atomic operation (performed by mtr_commit()). */
 		btr_set_min_rec_mark(next_rec, mtr);
 	} else {
 		/* Otherwise, if we delete the leftmost node pointer
@@ -2762,6 +2789,9 @@ btr_cur_pessimistic_delete(
 
 	page_cur_delete_rec(btr_cur_get_page_cur(cursor),
 			    cursor->index, offsets, page_zip, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	ut_ad(btr_check_node_ptr(tree, page, mtr));
 
diff --git a/page/page0cur.c b/page/page0cur.c
index 82f63da65df..9d0d191f4ec 100644
--- a/page/page0cur.c
+++ b/page/page0cur.c
@@ -1514,9 +1514,6 @@ page_cur_delete_rec(
 	current_rec = cursor->rec;
 	ut_ad(rec_offs_validate(current_rec, index, offsets));
 	ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
-#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
-	ut_a(!page_zip || page_zip_validate(page_zip, page));
-#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	/* The record must not be the supremum or infimum record. */
 	ut_ad(page_rec_is_user_rec(current_rec));
diff --git a/page/page0page.c b/page/page0page.c
index 15a0271154e..d70fc14bf36 100644
--- a/page/page0page.c
+++ b/page/page0page.c
@@ -831,7 +831,7 @@ page_delete_rec_list_end(
 	rec_t*		last_rec;
 	rec_t*		prev_rec;
 	ulint		n_owned;
-	page_t*		page;
+	page_t*		page	= ut_align_down(rec, UNIV_PAGE_SIZE);
 	mem_heap_t*	heap		= NULL;
 	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
 	ulint*		offsets		= offsets_;
@@ -839,6 +839,9 @@ page_delete_rec_list_end(
 
 	ut_ad(size == ULINT_UNDEFINED || size < UNIV_PAGE_SIZE);
 	ut_ad(!page_zip || page_rec_is_comp(rec));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	if (page_rec_is_infimum(rec)) {
 		rec = page_rec_get_next(rec);
@@ -852,7 +855,6 @@ page_delete_rec_list_end(
 	/* Reset the last insert info in the page header and increment
 	the modify clock for the frame */
 
-	page = ut_align_down(rec, UNIV_PAGE_SIZE);
 	page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, NULL);
 
 	/* The page gets invalid for optimistic searches: increment the
@@ -1004,6 +1006,10 @@ page_delete_rec_list_start(
 
 	ut_ad((ibool) !!page_rec_is_comp(rec)
 	      == dict_table_is_comp(index->table));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip,
+			ut_align_down(rec, UNIV_PAGE_SIZE)));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 
 	if (page_rec_is_infimum(rec)) {
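
Every check the patch adds follows the same pattern: a page_zip_validate() call that is compiled in only when UNIV_DEBUG or UNIV_ZIP_DEBUG is defined, guarded by !page_zip so that pages of uncompressed tables skip the validation, and placed before and after the page modification rather than across it. The standalone sketch below is not part of the patch; my_page_zip_t, my_page_zip_validate() and delete_rec() are hypothetical stand-ins for page_zip_des_t, page_zip_validate() and the btr/page routines, and assert() stands in for ut_a().

/* Minimal sketch of the debug-only validation guard added by the patch.
   All names are stand-ins; the real code uses page_zip_des_t,
   page_zip_validate() and ut_a(). */

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define UNIV_ZIP_DEBUG			/* remove to compile the checks away */

typedef struct {
	int	n_recs;			/* stand-in for the compressed page state */
} my_page_zip_t;

/* Stand-in validation: nonzero when the compressed copy agrees with
   the uncompressed page. */
static int
my_page_zip_validate(const my_page_zip_t* page_zip, const int* page)
{
	return(page_zip->n_recs == *page);
}

static void
delete_rec(my_page_zip_t* page_zip, int* page)
{
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
	/* Validate before modifying, as the patch does before
	page_cur_delete_rec(); pages without a compressed copy
	(page_zip == NULL) skip the check. */
	assert(!page_zip || my_page_zip_validate(page_zip, page));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */

	(*page)--;			/* modify the uncompressed page */

	if (page_zip) {
		page_zip->n_recs--;	/* keep the compressed copy in sync */
	}

#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
	/* Validate again once the modification is complete, as the patch
	does after page_cur_delete_rec(). */
	assert(!page_zip || my_page_zip_validate(page_zip, page));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
}

int
main(void)
{
	int		page = 3;
	my_page_zip_t	page_zip = { 3 };

	delete_rec(&page_zip, &page);	/* compressed page: both checks run */
	delete_rec(NULL, &page);	/* no compressed copy: checks skipped */

	printf("records left: %d\n", page);
	return(0);
}

The checks bracket the modification rather than sit inside it because, as the new comments in btr_discard_page() and btr_cur_pessimistic_delete() note, there are windows within a mini-transaction where the compressed and uncompressed copies legitimately disagree; the assertions are placed only at points where the two are expected to match.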