Pull request #614: various small code changes

Marko Mäkelä 2018-02-19 11:37:45 +02:00
commit 5521994ce2
15 changed files with 250 additions and 284 deletions

View file

@@ -307,7 +307,7 @@ btr_height_get(
root_block = btr_root_block_get(index, RW_S_LATCH, mtr);
if (root_block) {
-height = btr_page_get_level(buf_block_get_frame(root_block), mtr);
+height = btr_page_get_level(buf_block_get_frame(root_block));
/* Release the S latch on the root page. */
mtr->memo_release(root_block, MTR_MEMO_PAGE_S_FIX);
@@ -872,7 +872,7 @@ btr_page_free(
mtr_t* mtr) /*!< in: mtr */
{
const page_t* page = buf_block_get_frame(block);
-ulint level = btr_page_get_level(page, mtr);
+ulint level = btr_page_get_level(page);
ut_ad(fil_page_index_page_check(block->frame));
ut_ad(level != ULINT_UNDEFINED);
@@ -976,7 +976,7 @@ btr_page_get_father_node_ptr_func(
ut_ad(dict_index_get_page(index) != page_no);
-level = btr_page_get_level(btr_cur_get_page(cursor), mtr);
+level = btr_page_get_level(btr_cur_get_page(cursor));
user_rec = btr_cur_get_rec(cursor);
ut_a(page_rec_is_user_rec(user_rec));
@@ -2018,7 +2018,7 @@ btr_root_raise_and_insert(
moving the root records to the new page, emptying the root, putting
a node pointer to the new page, and then splitting the new page. */
-level = btr_page_get_level(root, mtr);
+level = btr_page_get_level(root);
new_block = btr_page_alloc(index, 0, FSP_NO_DIR, level, mtr, mtr);
@@ -2684,9 +2684,8 @@ btr_attach_half_pages(
}
/* Get the level of the split pages */
-level = btr_page_get_level(buf_block_get_frame(block), mtr);
-ut_ad(level
-== btr_page_get_level(buf_block_get_frame(new_block), mtr));
+level = btr_page_get_level(buf_block_get_frame(block));
+ut_ad(level == btr_page_get_level(buf_block_get_frame(new_block)));
/* Build the node pointer (= node key and page address) for the upper
half */
@@ -2870,7 +2869,7 @@ btr_insert_into_right_sibling(
ibool compressed;
dberr_t err;
-ulint level = btr_page_get_level(next_page, mtr);
+ulint level = btr_page_get_level(next_page);
/* adjust cursor position */
*btr_cur_get_page_cur(cursor) = next_page_cursor;
@@ -3056,7 +3055,7 @@ func_start:
/* 2. Allocate a new page to the index */
new_block = btr_page_alloc(cursor->index, hint_page_no, direction,
-btr_page_get_level(page, mtr), mtr, mtr);
+btr_page_get_level(page), mtr, mtr);
if (new_block == NULL && os_has_said_disk_full) {
return(NULL);
@@ -3065,7 +3064,7 @@ func_start:
new_page = buf_block_get_frame(new_block);
new_page_zip = buf_block_get_page_zip(new_block);
btr_page_create(new_block, new_page_zip, cursor->index,
-btr_page_get_level(page, mtr), mtr);
+btr_page_get_level(page), mtr);
/* Only record the leaf level page splits. */
if (page_is_leaf(page)) {
cursor->index->stat_defrag_n_page_split ++;
@@ -3545,7 +3544,7 @@ btr_lift_page_up(
ut_ad(!page_has_siblings(page));
ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table));
-page_level = btr_page_get_level(page, mtr);
+page_level = btr_page_get_level(page);
root_page_no = dict_index_get_page(index);
{
@@ -3607,7 +3606,7 @@ btr_lift_page_up(
block = father_block;
page = buf_block_get_frame(block);
-page_level = btr_page_get_level(page, mtr);
+page_level = btr_page_get_level(page);
ut_ad(!page_has_siblings(page));
ut_ad(mtr_is_block_fix(
@@ -3687,7 +3686,7 @@ btr_lift_page_up(
page_t* page = buf_block_get_frame(blocks[i]);
page_zip_des_t* page_zip= buf_block_get_page_zip(blocks[i]);
-ut_ad(btr_page_get_level(page, mtr) == page_level + 1);
+ut_ad(btr_page_get_level(page) == page_level + 1);
btr_page_set_level(page, page_zip, page_level, mtr);
#ifdef UNIV_ZIP_DEBUG
@@ -4273,7 +4272,7 @@ btr_discard_only_page_on_level(
const page_t* page = buf_block_get_frame(block);
ut_a(page_get_n_recs(page) == 1);
-ut_a(page_level == btr_page_get_level(page, mtr));
+ut_a(page_level == btr_page_get_level(page));
ut_a(!page_has_siblings(page));
ut_ad(mtr_is_block_fix(
@@ -4553,7 +4552,7 @@ btr_print_recursive(
ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_SX_FIX, index->table));
ib::info() << "NODE ON LEVEL " << btr_page_get_level(page, mtr)
ib::info() << "NODE ON LEVEL " << btr_page_get_level(page)
<< " page " << block->page.id;
page_print(block, index, width, width);
@@ -4669,7 +4668,7 @@ btr_check_node_ptr(
tuple = dict_index_build_node_ptr(
index, page_rec_get_next(page_get_infimum_rec(page)), 0, heap,
-btr_page_get_level(page, mtr));
+btr_page_get_level(page));
/* For spatial index, the MBR in the parent rec could be different
with that of first rec of child, their relationship should be
@@ -5000,7 +4999,7 @@ btr_validate_level(
return(false);
}
-while (level != btr_page_get_level(page, &mtr)) {
+while (level != btr_page_get_level(page)) {
const rec_t* node_ptr;
if (fseg_page_is_free(space, block->page.id.page_no())) {
@@ -5108,7 +5107,7 @@ loop:
ret = false;
}
ut_a(btr_page_get_level(page, &mtr) == level);
ut_a(btr_page_get_level(page) == level);
right_page_no = btr_page_get_next(page, &mtr);
left_page_no = btr_page_get_prev(page, &mtr);
@@ -5253,7 +5252,7 @@ loop:
node_ptr_tuple = dict_index_build_node_ptr(
index,
page_rec_get_next(page_get_infimum_rec(page)),
-0, heap, btr_page_get_level(page, &mtr));
+0, heap, btr_page_get_level(page));
if (cmp_dtuple_rec(node_ptr_tuple, node_ptr,
offsets)) {
@@ -5431,7 +5430,7 @@ btr_validate_spatial_index(
mtr_x_lock(dict_index_get_lock(index), &mtr);
page_t* root = btr_root_get(index, &mtr);
-ulint n = btr_page_get_level(root, &mtr);
+ulint n = btr_page_get_level(root);
#ifdef UNIV_RTR_DEBUG
fprintf(stderr, "R-tree level is %lu\n", n);
@@ -5498,7 +5497,7 @@ btr_validate_index(
return err;
}
-ulint n = btr_page_get_level(root, &mtr);
+ulint n = btr_page_get_level(root);
for (ulint i = 0; i <= n; ++i) {

View file

@@ -1513,7 +1513,7 @@ retry_page_get:
if (height == ULINT_UNDEFINED) {
/* We are in the root node */
-height = btr_page_get_level(page, mtr);
+height = btr_page_get_level(page);
root_height = height;
cursor->tree_height = root_height + 1;
@@ -1707,8 +1707,7 @@ retry_page_get:
/* If this is the desired level, leave the loop */
-ut_ad(height == btr_page_get_level(page_cur_get_page(page_cursor),
-mtr));
+ut_ad(height == btr_page_get_level(page_cur_get_page(page_cursor)));
/* Add Predicate lock if it is serializable isolation
and only if it is in the search case */
@@ -2408,12 +2407,12 @@ btr_cur_open_at_index_side_func(
if (height == ULINT_UNDEFINED) {
/* We are in the root node */
-height = btr_page_get_level(page, mtr);
+height = btr_page_get_level(page);
root_height = height;
ut_a(height >= level);
} else {
/* TODO: flag the index corrupted if this fails */
-ut_ad(height == btr_page_get_level(page, mtr));
+ut_ad(height == btr_page_get_level(page));
}
if (height == level) {
@@ -2768,7 +2767,7 @@ btr_cur_open_at_rnd_pos_func(
if (height == ULINT_UNDEFINED) {
/* We are in the root node */
-height = btr_page_get_level(page, mtr);
+height = btr_page_get_level(page);
}
if (height == 0) {
@@ -2973,7 +2972,7 @@ btr_cur_ins_lock_and_undo(
dtuple_t* entry, /*!< in/out: entry to insert */
que_thr_t* thr, /*!< in: query thread or NULL */
mtr_t* mtr, /*!< in/out: mini-transaction */
ibool* inherit)/*!< out: TRUE if the inserted new record maybe
bool* inherit)/*!< out: true if the inserted new record maybe
should inherit LOCK_GAP type locks from the
successor record */
{
@@ -3115,9 +3114,9 @@ btr_cur_optimistic_insert(
buf_block_t* block;
page_t* page;
rec_t* dummy;
-ibool leaf;
-ibool reorg;
-ibool inherit = TRUE;
+bool leaf;
+bool reorg;
+bool inherit = true;
ulint rec_size;
dberr_t err;
@@ -3408,7 +3407,7 @@ btr_cur_pessimistic_insert(
dict_index_t* index = cursor->index;
big_rec_t* big_rec_vec = NULL;
dberr_t err;
-ibool inherit = FALSE;
+bool inherit = false;
bool success;
ulint n_reserved = 0;
@@ -3519,7 +3518,7 @@ btr_cur_pessimistic_insert(
== FIL_NULL) {
/* split and inserted need to call
lock_update_insert() always. */
-inherit = TRUE;
+inherit = true;
}
}
}
@@ -5677,7 +5676,7 @@ discard_page:
on the page */
btr_node_ptr_delete(index, block, mtr);
-const ulint level = btr_page_get_level(page, mtr);
+const ulint level = btr_page_get_level(page);
dtuple_t* node_ptr = dict_index_build_node_ptr(
index, next_rec, block->page.id.page_no(),
@@ -5767,7 +5766,7 @@ btr_cur_add_path_info(
slot->nth_rec = page_rec_get_n_recs_before(rec);
slot->n_recs = page_get_n_recs(page);
slot->page_no = page_get_page_no(page);
slot->page_level = btr_page_get_level_low(page);
slot->page_level = btr_page_get_level(page);
}
/*******************************************************************//**
@@ -5884,7 +5883,7 @@ btr_estimate_n_rows_in_range_on_level(
reuses them. */
if (!fil_page_index_page_check(page)
|| btr_page_get_index_id(page) != index->id
-|| btr_page_get_level_low(page) != level) {
+|| btr_page_get_level(page) != level) {
/* The page got reused for something else */
mtr_commit(&mtr);

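A note on the ibool changes in this file: ibool is InnoDB's legacy boolean, historically a typedef for ulint (a full machine word) used with the TRUE/FALSE macros, so switching the out-parameters and locals to plain bool both shrinks them and lets the compiler type-check them. A minimal sketch of the difference, assuming the historical typedef (the real definitions live in univ.i):

    // Approximate sketch of the legacy convention, not the real univ.i:
    typedef unsigned long int ulint;
    typedef ulint ibool;
    #define TRUE 1
    #define FALSE 0

    int main()
    {
        ibool legacy = TRUE;  // a full machine word per flag
        bool  modern = true;  // one byte, type-checked by the compiler
        return (legacy && modern) ? 0 : 1;
    }
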
View file

@@ -438,7 +438,7 @@ btr_defragment_merge_pages(
page_t* from_page = buf_block_get_frame(from_block);
page_t* to_page = buf_block_get_frame(to_block);
ulint space = dict_index_get_space(index);
-ulint level = btr_page_get_level(from_page, mtr);
+ulint level = btr_page_get_level(from_page);
ulint n_recs = page_get_n_recs(from_page);
ulint new_data_size = page_get_data_size(to_page);
ulint max_ins_size =
@@ -623,7 +623,7 @@ btr_defragment_n_pages(
}
first_page = buf_block_get_frame(block);
-level = btr_page_get_level(first_page, mtr);
+level = btr_page_get_level(first_page);
const page_size_t page_size(dict_table_page_size(index->table));
if (level != 0) {

View file

@@ -5668,7 +5668,7 @@ buf_page_monitor(
case FIL_PAGE_TYPE_INSTANT:
case FIL_PAGE_INDEX:
case FIL_PAGE_RTREE:
-level = btr_page_get_level_low(frame);
+level = btr_page_get_level(frame);
/* Check if it is an index page for insert buffer */
if (fil_page_get_type(frame) == FIL_PAGE_INDEX

View file

@@ -1080,7 +1080,7 @@ dict_stats_analyze_index_level(
== page_rec_get_next_const(page_get_infimum_rec(page)));
/* check that we are indeed on the desired level */
-ut_a(btr_page_get_level(page, mtr) == level);
+ut_a(btr_page_get_level(page) == level);
/* there should not be any pages on the left */
ut_a(!page_has_prev(page));
@@ -1701,7 +1701,7 @@ dict_stats_analyze_index_for_n_prefix(
ut_ad(first_rec == page_rec_get_next_const(page_get_infimum_rec(page)));
/* check that we are indeed on the desired level */
-ut_a(btr_page_get_level(page, mtr) == n_diff_data->level);
+ut_a(btr_page_get_level(page) == n_diff_data->level);
/* there should not be any pages on the left */
ut_a(!page_has_prev(page));

View file

@@ -40,7 +40,6 @@ flst_add_to_empty(
{
ulint space;
fil_addr_t node_addr;
-ulint len;
ut_ad(mtr && base && node);
ut_ad(base != node);
@@ -50,8 +49,7 @@
ut_ad(mtr_memo_contains_page_flagged(mtr, node,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
-len = flst_get_len(base);
-ut_a(len == 0);
+ut_a(!flst_get_len(base));
buf_ptr_get_fsp_addr(node, &space, &node_addr);
@@ -64,7 +62,7 @@
flst_write_addr(node + FLST_NEXT, fil_addr_null, mtr);
/* Update len of base node */
-mlog_write_ulint(base + FLST_LEN, len + 1, MLOG_4BYTES, mtr);
+mlog_write_ulint(base + FLST_LEN, 1, MLOG_4BYTES, mtr);
}
/********************************************************************//**

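In flst_add_to_empty() above, ut_a(!flst_get_len(base)) asserts that the list is empty on entry, so the removed local len was always zero and the length written back is simply the constant 1 rather than len + 1. A trivial sketch of that reasoning, with illustrative names only:

    #include <cassert>

    static unsigned base_len = 0; // stands in for the FLST_LEN field

    int main()
    {
        assert(base_len == 0); // mirrors ut_a(!flst_get_len(base));
        base_len = 1;          // was: base_len = base_len + 1, i.e. 0 + 1
        return (base_len == 1) ? 0 : 1;
    }
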
View file

@@ -310,7 +310,7 @@ rtr_update_mbr_field(
page_zip = buf_block_get_page_zip(block);
child = btr_node_ptr_get_child_page_no(rec, offsets);
-level = btr_page_get_level(buf_block_get_frame(block), mtr);
+level = btr_page_get_level(buf_block_get_frame(block));
if (new_rec) {
child_rec = new_rec;
@@ -668,9 +668,8 @@ rtr_adjust_upper_level(
cursor.thr = sea_cur->thr;
/* Get the level of the split pages */
-level = btr_page_get_level(buf_block_get_frame(block), mtr);
-ut_ad(level
-== btr_page_get_level(buf_block_get_frame(new_block), mtr));
+level = btr_page_get_level(buf_block_get_frame(block));
+ut_ad(level == btr_page_get_level(buf_block_get_frame(new_block)));
page = buf_block_get_frame(block);
page_no = block->page.id.page_no();
@@ -1048,7 +1047,7 @@ func_start:
block = btr_cur_get_block(cursor);
page = buf_block_get_frame(block);
page_zip = buf_block_get_page_zip(block);
-page_level = btr_page_get_level(page, mtr);
+page_level = btr_page_get_level(page);
current_ssn = page_get_ssn_id(page);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));

View file

@@ -718,7 +718,7 @@ rtr_page_get_father_node_ptr(
ut_ad(dict_index_get_page(index) != page_no);
-level = btr_page_get_level(btr_cur_get_page(cursor), mtr);
+level = btr_page_get_level(btr_cur_get_page(cursor));
user_rec = btr_cur_get_rec(cursor);
ut_a(page_rec_is_user_rec(user_rec));
@@ -1680,7 +1680,7 @@ rtr_cur_search_with_match(
page = buf_block_get_frame(block);
-const ulint level = btr_page_get_level(page, mtr);
+const ulint level = btr_page_get_level(page);
const bool is_leaf = !level;
if (mode == PAGE_CUR_RTREE_LOCATE) {

View file

@@ -483,7 +483,7 @@ ibuf_size_update(
ibuf->free_list_len = flst_get_len(root + PAGE_HEADER
+ PAGE_BTR_IBUF_FREE_LIST);
ibuf->height = 1 + btr_page_get_level_low(root);
ibuf->height = 1 + btr_page_get_level(root);
/* the '1 +' is the ibuf header page */
ibuf->size = ibuf->seg_size - (1 + ibuf->free_list_len);

View file

@@ -37,6 +37,12 @@ Created 6/2/1994 Heikki Tuuri
#include "btr0types.h"
#include "gis0type.h"
+#define BTR_MAX_NODE_LEVEL 50 /*!< Maximum B-tree page level
+(not really a hard limit).
+Used in debug assertions
+in btr_page_set_level and
+btr_page_get_level */
/** Maximum record size which can be stored on a page, without using the
special big record storage structure */
#define BTR_PAGE_MAX_REC_SIZE (UNIV_PAGE_SIZE / 2 - 200)
@@ -285,14 +291,22 @@ btr_page_get_index_id(
MY_ATTRIBUTE((warn_unused_result));
/********************************************************//**
Gets the node level field in an index page.
+@param[in] page index page
@return level, leaf level == 0 */
UNIV_INLINE
ulint
-btr_page_get_level_low(
-/*===================*/
-const page_t* page) /*!< in: index page */
-MY_ATTRIBUTE((warn_unused_result));
-#define btr_page_get_level(page, mtr) btr_page_get_level_low(page)
+btr_page_get_level(const page_t* page)
+{
+ulint level;
+ut_ad(page);
+level = mach_read_from_2(page + PAGE_HEADER + PAGE_LEVEL);
+ut_ad(level <= BTR_MAX_NODE_LEVEL);
+return(level);
+} MY_ATTRIBUTE((warn_unused_result))
/********************************************************//**
Gets the next index page number.
@return next page number */

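The new inline btr_page_get_level() only reads a 2-byte field from the index page header, which is why the mtr argument could be dropped at every call site: a plain read needs no mini-transaction bookkeeping. A self-contained sketch of the underlying big-endian fetch; the offsets and helper below are assumed stand-ins for the real PAGE_HEADER, PAGE_LEVEL and mach_read_from_2() definitions:

    #include <cstdint>

    static const unsigned PAGE_HEADER_OFS = 38; // assumed page header offset
    static const unsigned PAGE_LEVEL_OFS  = 26; // assumed level field offset

    // mach_read_from_2() reads a 16-bit big-endian integer, roughly:
    static unsigned read_u16_be(const uint8_t* b)
    {
        return (static_cast<unsigned>(b[0]) << 8) | b[1];
    }

    static unsigned page_level_sketch(const uint8_t* page)
    {
        return read_u16_be(page + PAGE_HEADER_OFS + PAGE_LEVEL_OFS);
    }
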
View file

@@ -29,12 +29,6 @@ Created 6/2/1994 Heikki Tuuri
#include "mtr0log.h"
#include "page0zip.h"
-#define BTR_MAX_NODE_LEVEL 50 /*!< Maximum B-tree page level
-(not really a hard limit).
-Used in debug assertions
-in btr_page_set_level and
-btr_page_get_level_low */
/** Gets a buffer page and declares its latching order level.
@param[in] page_id page id
@param[in] mode latch mode
@@ -143,26 +137,6 @@ btr_page_get_index_id(
return(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID));
}
-/********************************************************//**
-Gets the node level field in an index page.
-@return level, leaf level == 0 */
-UNIV_INLINE
-ulint
-btr_page_get_level_low(
-/*===================*/
-const page_t* page) /*!< in: index page */
-{
-ulint level;
-ut_ad(page);
-level = mach_read_from_2(page + PAGE_HEADER + PAGE_LEVEL);
-ut_ad(level <= BTR_MAX_NODE_LEVEL);
-return(level);
-}
/********************************************************//**
Sets the node level field in an index page. */
UNIV_INLINE

View file

@@ -1738,7 +1738,7 @@ struct buf_block_t{
used in debugging */
ibool in_withdraw_list;
#endif /* UNIV_DEBUG */
-unsigned lock_hash_val:32;/*!< hashed value of the page address
+uint32_t lock_hash_val; /*!< hashed value of the page address
in the record lock hash table;
protected by buf_block_t::lock
(or buf_block_t::mutex, buf_pool->mutex
@@ -2351,8 +2351,12 @@ Use these instead of accessing buf_pool->mutex directly. */
/** Get appropriate page_hash_lock. */
-# define buf_page_hash_lock_get(buf_pool, page_id) \
-hash_get_lock((buf_pool)->page_hash, (page_id).fold())
+UNIV_INLINE
+rw_lock_t*
+buf_page_hash_lock_get(const buf_pool_t* buf_pool, const page_id_t& page_id)
+{
+return hash_get_lock(buf_pool->page_hash, page_id.fold());
+}
/** If not appropriate page_hash_lock, relock until appropriate. */
# define buf_page_hash_lock_s_confirm(hash_lock, buf_pool, page_id)\

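Turning the buf_page_hash_lock_get() macro into a UNIV_INLINE function gives it real parameter types, so wrong arguments fail to compile and each argument is evaluated exactly once. A generic sketch of the macro hazard this kind of change removes (hypothetical names, not from the source):

    #include <iostream>

    #define SQUARE_MACRO(x) ((x) * (x))

    static inline int square_inline(int x) { return x * x; }

    static int counter;
    static int next_value() { return ++counter; } // argument with a side effect

    int main()
    {
        counter = 0;
        int a = SQUARE_MACRO(next_value());  // expands to two calls: 1 * 2
        counter = 0;
        int b = square_inline(next_value()); // one call, as expected: 1 * 1
        std::cout << a << ' ' << b << '\n';  // prints "2 1"
    }
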
View file

@@ -296,7 +296,7 @@ lock_rec_insert_check_and_lock(
dict_index_t* index, /*!< in: index */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr, /*!< in/out: mini-transaction */
ibool* inherit)/*!< out: set to TRUE if the new
bool* inherit)/*!< out: set to true if the new
inserted record maybe should inherit
LOCK_GAP type locks from the successor
record */
@@ -555,8 +555,8 @@ lock_rec_find_set_bit(
/*********************************************************************//**
Checks if a lock request lock1 has to wait for request lock2.
-@return TRUE if lock1 has to wait for lock2 to be removed */
-ibool
+@return whether lock1 has to wait for lock2 to be removed */
+bool
lock_has_to_wait(
/*=============*/
const lock_t* lock1, /*!< in: waiting lock */

View file

@@ -335,13 +335,6 @@ enum que_thr_lock_t {
QUE_THR_LOCK_TABLE
};
-/** From where the cursor position is counted */
-enum que_cur_t {
-QUE_CUR_NOT_DEFINED,
-QUE_CUR_START,
-QUE_CUR_END
-};
/* Query graph query thread node: the fields are protected by the
trx_t::mutex with the exceptions named below */
@@ -415,18 +408,7 @@ struct que_fork_t{
generated by the parser, or NULL
if the graph was created 'by hand' */
pars_info_t* info; /*!< info struct, or NULL */
-/* The following cur_... fields are relevant only in a select graph */
-ulint cur_end; /*!< QUE_CUR_NOT_DEFINED, QUE_CUR_START,
-QUE_CUR_END */
-ulint cur_pos; /*!< if there are n rows in the result
-set, values 0 and n + 1 mean before
-first row, or after last row, depending
-on cur_end; values 1...n mean a row
-index */
-ibool cur_on_row; /*!< TRUE if cursor is on a row, i.e.,
-it is not before the first row or
-after the last row */
sel_node_t* last_sel_node; /*!< last executed select node, or NULL
if none */
UT_LIST_NODE_T(que_fork_t)

View file

@@ -738,7 +738,7 @@ lock_rec_get_insert_intention(
Checks if a lock request for a new lock has to wait for request lock2.
@return TRUE if new lock has to wait for lock2 to be removed */
UNIV_INLINE
-ibool
+bool
lock_rec_has_to_wait(
/*=================*/
bool for_locking,
@@ -761,160 +761,162 @@ lock_rec_has_to_wait(
ut_ad(trx && lock2);
ut_ad(lock_get_type_low(lock2) == LOCK_REC);
-if (trx != lock2->trx
-&& !lock_mode_compatible(static_cast<lock_mode>(
-LOCK_MODE_MASK & type_mode),
-lock_get_mode(lock2))) {
-/* We have somewhat complex rules when gap type record locks
-cause waits */
-if ((lock_is_on_supremum || (type_mode & LOCK_GAP))
-&& !(type_mode & LOCK_INSERT_INTENTION)) {
-/* Gap type locks without LOCK_INSERT_INTENTION flag
-do not need to wait for anything. This is because
-different users can have conflicting lock types
-on gaps. */
-return(FALSE);
-}
-if (!(type_mode & LOCK_INSERT_INTENTION)
-&& lock_rec_get_gap(lock2)) {
-/* Record lock (LOCK_ORDINARY or LOCK_REC_NOT_GAP
-does not need to wait for a gap type lock */
-return(FALSE);
-}
-if ((type_mode & LOCK_GAP)
-&& lock_rec_get_rec_not_gap(lock2)) {
-/* Lock on gap does not need to wait for
-a LOCK_REC_NOT_GAP type lock */
-return(FALSE);
-}
-if (lock_rec_get_insert_intention(lock2)) {
-/* No lock request needs to wait for an insert
-intention lock to be removed. This is ok since our
-rules allow conflicting locks on gaps. This eliminates
-a spurious deadlock caused by a next-key lock waiting
-for an insert intention lock; when the insert
-intention lock was granted, the insert deadlocked on
-the waiting next-key lock.
-Also, insert intention locks do not disturb each
-other. */
-return(FALSE);
-}
-if ((type_mode & LOCK_GAP || lock_rec_get_gap(lock2)) &&
-!thd_need_ordering_with(trx->mysql_thd,
-lock2->trx->mysql_thd)) {
-/* If the upper server layer has already decided on the
-commit order between the transaction requesting the
-lock and the transaction owning the lock, we do not
-need to wait for gap locks. Such ordeering by the upper
-server layer happens in parallel replication, where the
-commit order is fixed to match the original order on the
-master.
-Such gap locks are mainly needed to get serialisability
-between transactions so that they will be binlogged in
-the correct order so that statement-based replication
-will give the correct results. Since the right order
-was already determined on the master, we do not need
-to enforce it again here.
-Skipping the locks is not essential for correctness,
-since in case of deadlock we will just kill the later
-transaction and retry it. But it can save some
-unnecessary rollbacks and retries. */
-return (FALSE);
-}
-#ifdef WITH_WSREP
-/* if BF thread is locking and has conflict with another BF
-thread, we need to look at trx ordering and lock types */
-if (wsrep_thd_is_BF(trx->mysql_thd, FALSE) &&
-wsrep_thd_is_BF(lock2->trx->mysql_thd, TRUE)) {
-if (wsrep_debug) {
-ib::info() <<
-"BF-BF lock conflict, locking: " << for_locking;
-lock_rec_print(stderr, lock2);
-ib::info() << " SQL1: "
-<< wsrep_thd_query(trx->mysql_thd);
-ib::info() << " SQL2: "
-<< wsrep_thd_query(lock2->trx->mysql_thd);
-}
-if (wsrep_trx_order_before(trx->mysql_thd,
-lock2->trx->mysql_thd) &&
-(type_mode & LOCK_MODE_MASK) == LOCK_X &&
-(lock2->type_mode & LOCK_MODE_MASK) == LOCK_X) {
-if (for_locking || wsrep_debug) {
-/* exclusive lock conflicts are not
-accepted */
-ib::info() <<
-"BF-BF X lock conflict,"
-"mode: " << type_mode <<
-" supremum: " << lock_is_on_supremum;
-ib::info() <<
-"conflicts states: my "
-<< wsrep_thd_conflict_state(trx->mysql_thd, FALSE)
-<< " locked "
-<< wsrep_thd_conflict_state(lock2->trx->mysql_thd, FALSE);
-lock_rec_print(stderr, lock2);
-ib::info() << " SQL1: "
-<< wsrep_thd_query(trx->mysql_thd);
-ib::info() << " SQL2: "
-<< wsrep_thd_query(lock2->trx->mysql_thd);
-if (for_locking) {
-return FALSE;
-}
-}
-} else {
-/* if lock2->index->n_uniq <=
-lock2->index->n_user_defined_cols
-operation is on uniq index
-*/
-if (wsrep_debug) {
-ib::info() <<
-"BF conflict, modes: "
-<< type_mode << ":" << lock2->type_mode
-<< " idx: " << lock2->index->name()
-<< " table: " << lock2->index->table->name.m_name
-<< " n_uniq: " << lock2->index->n_uniq
-<< " n_user: " << lock2->index->n_user_defined_cols;
-ib::info() << " SQL1: "
-<< wsrep_thd_query(trx->mysql_thd);
-ib::info() << " SQL2: "
-<< wsrep_thd_query(lock2->trx->mysql_thd);
-}
-return FALSE;
-}
-}
-#endif /* WITH_WSREP */
-return(TRUE);
+if (trx == lock2->trx
+|| lock_mode_compatible(
+static_cast<lock_mode>(LOCK_MODE_MASK & type_mode),
+lock_get_mode(lock2))) {
+return false;
+}
-return(FALSE);
+/* We have somewhat complex rules when gap type record locks
+cause waits */
+if ((lock_is_on_supremum || (type_mode & LOCK_GAP))
+&& !(type_mode & LOCK_INSERT_INTENTION)) {
+/* Gap type locks without LOCK_INSERT_INTENTION flag
+do not need to wait for anything. This is because
+different users can have conflicting lock types
+on gaps. */
+return false;
+}
+if (!(type_mode & LOCK_INSERT_INTENTION) && lock_rec_get_gap(lock2)) {
+/* Record lock (LOCK_ORDINARY or LOCK_REC_NOT_GAP
+does not need to wait for a gap type lock */
+return false;
+}
+if ((type_mode & LOCK_GAP) && lock_rec_get_rec_not_gap(lock2)) {
+/* Lock on gap does not need to wait for
+a LOCK_REC_NOT_GAP type lock */
+return false;
+}
+if (lock_rec_get_insert_intention(lock2)) {
+/* No lock request needs to wait for an insert
+intention lock to be removed. This is ok since our
+rules allow conflicting locks on gaps. This eliminates
+a spurious deadlock caused by a next-key lock waiting
+for an insert intention lock; when the insert
+intention lock was granted, the insert deadlocked on
+the waiting next-key lock.
+Also, insert intention locks do not disturb each
+other. */
+return false;
+}
+if ((type_mode & LOCK_GAP || lock_rec_get_gap(lock2))
+&& !thd_need_ordering_with(trx->mysql_thd, lock2->trx->mysql_thd)) {
+/* If the upper server layer has already decided on the
+commit order between the transaction requesting the
+lock and the transaction owning the lock, we do not
+need to wait for gap locks. Such ordeering by the upper
+server layer happens in parallel replication, where the
+commit order is fixed to match the original order on the
+master.
+Such gap locks are mainly needed to get serialisability
+between transactions so that they will be binlogged in
+the correct order so that statement-based replication
+will give the correct results. Since the right order
+was already determined on the master, we do not need
+to enforce it again here.
+Skipping the locks is not essential for correctness,
+since in case of deadlock we will just kill the later
+transaction and retry it. But it can save some
+unnecessary rollbacks and retries. */
+return false;
+}
+#ifdef WITH_WSREP
+/* if BF thread is locking and has conflict with another BF
+thread, we need to look at trx ordering and lock types */
+if (wsrep_thd_is_BF(trx->mysql_thd, FALSE)
+&& wsrep_thd_is_BF(lock2->trx->mysql_thd, TRUE)) {
+if (wsrep_debug) {
+ib::info() << "BF-BF lock conflict, locking: "
+<< for_locking;
+lock_rec_print(stderr, lock2);
+ib::info()
+<< " SQL1: " << wsrep_thd_query(trx->mysql_thd)
+<< " SQL2: "
+<< wsrep_thd_query(lock2->trx->mysql_thd);
+}
+if (wsrep_trx_order_before(trx->mysql_thd,
+lock2->trx->mysql_thd)
+&& (type_mode & LOCK_MODE_MASK) == LOCK_X
+&& (lock2->type_mode & LOCK_MODE_MASK) == LOCK_X) {
+if (for_locking || wsrep_debug) {
+/* exclusive lock conflicts are not
+accepted */
+ib::info()
+<< "BF-BF X lock conflict,mode: "
+<< type_mode
+<< " supremum: " << lock_is_on_supremum
+<< "conflicts states: my "
+<< wsrep_thd_conflict_state(
+trx->mysql_thd, FALSE)
+<< " locked "
+<< wsrep_thd_conflict_state(
+lock2->trx->mysql_thd,
+FALSE);
+lock_rec_print(stderr, lock2);
+ib::info() << " SQL1: "
+<< wsrep_thd_query(trx->mysql_thd)
+<< " SQL2: "
+<< wsrep_thd_query(
+lock2->trx->mysql_thd);
+if (for_locking) {
+return false;
+}
+}
+} else {
+/* if lock2->index->n_uniq <=
+lock2->index->n_user_defined_cols
+operation is on uniq index
+*/
+if (wsrep_debug) {
+ib::info()
+<< "BF conflict, modes: " << type_mode
+<< ":" << lock2->type_mode
+<< " idx: " << lock2->index->name()
+<< " table: "
+<< lock2->index->table->name.m_name
+<< " n_uniq: " << lock2->index->n_uniq
+<< " n_user: "
+<< lock2->index->n_user_defined_cols
+<< " SQL1: "
+<< wsrep_thd_query(trx->mysql_thd)
+<< " SQL2: "
+<< wsrep_thd_query(
+lock2->trx->mysql_thd);
+}
+return false;
+}
+}
+#endif /* WITH_WSREP */
+return true;
}
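
The rewrite above flattens lock_rec_has_to_wait(): instead of wrapping all the rules in one large if, the negated condition becomes an early return and the rule checks run at the top level. By De Morgan's laws the two forms are equivalent; a schematic of the transformation with illustrative predicates only:

    // Before: the rules live inside one big nested block.
    static bool wait_nested(bool same_trx, bool compatible, bool rule_allows)
    {
        if (!same_trx && !compatible) {
            if (rule_allows) {
                return false;
            }
            return true;
        }
        return false;
    }

    // After: equivalent guard-clause form, as in the patch.
    static bool wait_flat(bool same_trx, bool compatible, bool rule_allows)
    {
        if (same_trx || compatible) {
            return false;
        }
        if (rule_allows) {
            return false;
        }
        return true;
    }
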
/*********************************************************************//**
Checks if a lock request lock1 has to wait for request lock2.
@return TRUE if lock1 has to wait for lock2 to be removed */
-ibool
+bool
lock_has_to_wait(
/*=============*/
const lock_t* lock1, /*!< in: waiting lock */
@@ -925,32 +927,27 @@ lock_has_to_wait(
{
ut_ad(lock1 && lock2);
if (lock1->trx != lock2->trx
&& !lock_mode_compatible(lock_get_mode(lock1),
lock_get_mode(lock2))) {
if (lock_get_type_low(lock1) == LOCK_REC) {
ut_ad(lock_get_type_low(lock2) == LOCK_REC);
/* If this lock request is for a supremum record
then the second bit on the lock bitmap is set */
if (lock1->type_mode
& (LOCK_PREDICATE | LOCK_PRDT_PAGE)) {
return(lock_prdt_has_to_wait(
lock1->trx, lock1->type_mode,
lock_get_prdt_from_lock(lock1),
lock2));
} else {
return(lock_rec_has_to_wait(false,
lock1->trx, lock1->type_mode, lock2,
lock_rec_get_nth_bit(lock1, true)));
}
}
return(TRUE);
if (lock1->trx == lock2->trx
|| lock_mode_compatible(lock_get_mode(lock1),
lock_get_mode(lock2))) {
return false;
}
return(FALSE);
if (lock_get_type_low(lock1) != LOCK_REC) {
return true;
}
ut_ad(lock_get_type_low(lock2) == LOCK_REC);
if (lock1->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)) {
return lock_prdt_has_to_wait(lock1->trx, lock1->type_mode,
lock_get_prdt_from_lock(lock1),
lock2);
}
return lock_rec_has_to_wait(
false, lock1->trx, lock1->type_mode, lock2,
lock_rec_get_nth_bit(lock1, PAGE_HEAP_NO_SUPREMUM));
}
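
In lock_has_to_wait() the literal true passed to lock_rec_get_nth_bit() becomes PAGE_HEAP_NO_SUPREMUM. Both evaluate to 1, but the named constant documents that the bit being tested is the supremum record's (heap number 0 is the infimum, 1 the supremum). A minimal sketch of the bitmap lookup; the helper and constant here are illustrative stand-ins, not the InnoDB definitions:

    #include <cstdint>

    static const unsigned PAGE_HEAP_NO_SUPREMUM_SKETCH = 1;

    // Read bit heap_no of a record lock bitmap (least significant bit first).
    static bool nth_bit(const uint8_t* bitmap, unsigned heap_no)
    {
        return (bitmap[heap_no / 8] >> (heap_no % 8)) & 1;
    }

    int main()
    {
        uint8_t bitmap[8] = {0};
        bitmap[0] = 1 << 1; // set the supremum bit
        return nth_bit(bitmap, PAGE_HEAP_NO_SUPREMUM_SKETCH) ? 0 : 1;
    }
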
/*============== RECORD LOCK BASIC FUNCTIONS ============================*/
@@ -5984,7 +5981,7 @@ lock_rec_insert_check_and_lock(
dict_index_t* index, /*!< in: index */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr, /*!< in/out: mini-transaction */
ibool* inherit)/*!< out: set to TRUE if the new
bool* inherit)/*!< out: set to true if the new
inserted record maybe should inherit
LOCK_GAP type locks from the successor
record */
@@ -6005,7 +6002,7 @@ lock_rec_insert_check_and_lock(
dberr_t err;
lock_t* lock;
-ibool inherit_in = *inherit;
+bool inherit_in = *inherit;
trx_t* trx = thr_get_trx(thr);
const rec_t* next_rec = page_rec_get_next_const(rec);
ulint heap_no = page_rec_get_heap_no(next_rec);
@@ -6035,7 +6032,7 @@ lock_rec_insert_check_and_lock(
trx->id, mtr);
}
-*inherit = FALSE;
+*inherit = false;
return(DB_SUCCESS);
}
@@ -6046,7 +6043,7 @@ lock_rec_insert_check_and_lock(
return(DB_SUCCESS);
}
-*inherit = TRUE;
+*inherit = true;
/* If another transaction has an explicit lock request which locks
the gap, waiting or granted, on the successor, the insert has to wait.