mirror of https://github.com/MariaDB/server.git
branches/zip: When adding a page to the buffer pool, add it to
buf_pool->page_hash and buf_pool->LRU before releasing buf_pool->mutex.

buf_page_init_for_read(), buf_page_create(): Allocate the compressed page
after the block has been added to the buffer pool.  Document the reason
for this.
This commit is contained in:
parent 34c50ac343
commit 134aff29d0

1 changed file with 30 additions and 14 deletions
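The patch moves the buf_buddy_alloc() call for the compressed-page frame so
that it runs only after buf_page_init() has inserted the block into
buf_pool->page_hash and buf_pool->LRU, and it documents why block->mutex must
be released around the call.  As a reader's aid, here is a minimal standalone
C sketch of the latching-order rule involved (not InnoDB code; pool_mutex,
block_mutex and allocate_compressed_page() are invented stand-ins): a routine
that may release and reacquire the outer latch must never be called while the
inner latch is held.

/* Minimal sketch, not InnoDB code.  The latching order is
pool_mutex before block_mutex, so a thread holding block_mutex
must never (re)acquire pool_mutex. */

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t block_mutex = PTHREAD_MUTEX_INITIALIZER;

struct block {
	void	*zip_data;
};

/* Stand-in for buf_buddy_alloc(): entered and exited with pool_mutex
held, but may release and reacquire it internally around slow work. */
static void *
allocate_compressed_page(size_t zip_size)
{
	void	*data;

	pthread_mutex_unlock(&pool_mutex);
	data = malloc(zip_size);	/* slow work, no latches held */
	pthread_mutex_lock(&pool_mutex);
	return data;
}

/* Caller holds pool_mutex and block_mutex on entry and on exit.
Mirrors the shape of the patched code: release the inner latch
before the call that may reacquire the outer one. */
static void
attach_compressed_page(struct block *b, size_t zip_size)
{
	void	*data;

	/* Calling allocate_compressed_page() while still holding
	block_mutex would reacquire pool_mutex on top of block_mutex,
	inverting the pool_mutex -> block_mutex order. */
	pthread_mutex_unlock(&block_mutex);
	data = allocate_compressed_page(zip_size);
	pthread_mutex_lock(&block_mutex);
	b->zip_data = data;
}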
@@ -2231,15 +2231,6 @@ err_exit:
 
 	ut_ad(block);
 
-	if (zip_size) {
-		void*	data;
-		page_zip_set_size(&block->page.zip, zip_size);
-		mutex_exit(&block->mutex);
-		data = buf_buddy_alloc(zip_size, TRUE);
-		mutex_enter(&block->mutex);
-		block->page.zip.data = data;
-	}
-
 	buf_page_init(space, offset, block);
 
 	/* The block must be put to the LRU list, to the old blocks */
@@ -2250,6 +2241,21 @@ err_exit:
 
 	buf_pool->n_pend_reads++;
 
+	if (zip_size) {
+		void*	data;
+		page_zip_set_size(&block->page.zip, zip_size);
+		mutex_exit(&block->mutex);
+		/* buf_pool->mutex may be released and reacquired by
+		buf_buddy_alloc().  Thus, we must release block->mutex
+		in order not to break the latching order in
+		the reacquisition of buf_pool->mutex.  We also must
+		defer this operation until after the block descriptor
+		has been added to buf_pool->LRU and buf_pool->page_hash. */
+		data = buf_buddy_alloc(zip_size, TRUE);
+		mutex_enter(&block->mutex);
+		block->page.zip.data = data;
+	}
+
 	/* We set a pass-type x-lock on the frame because then the same
 	thread which called for the read operation (and is running now at
 	this point of code) can wait for the read to complete by waiting
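The second hunk is the heart of the fix for buf_page_init_for_read(): the zip
allocation now happens only after the block is discoverable through
buf_pool->page_hash.  To illustrate why publishing first matters, here is a
compilable toy (simplified shapes with invented names, not the real InnoDB
structures or signatures): if the descriptor were not yet in the hash table
when the pool mutex is dropped for the slow allocation, a concurrent caller
would miss it and issue a second read of the same page.

/* Toy sketch; only the publish-before-unlock ordering is taken
from the patch. */

#include <pthread.h>
#include <stdlib.h>

#define HASH_CELLS	64

struct page {
	unsigned	 id;
	int		 io_pending;	/* read still in flight */
	void		*zip_data;
	struct page	*next;
};

static pthread_mutex_t	pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct page	*page_hash[HASH_CELLS];

/* Caller holds pool_mutex. */
static struct page *
hash_find(unsigned id)
{
	struct page	*p = page_hash[id % HASH_CELLS];

	while (p != NULL && p->id != id) {
		p = p->next;
	}
	return p;
}

static struct page *
find_or_start_read(unsigned id, size_t zip_size)
{
	struct page	*p;
	void		*data;

	pthread_mutex_lock(&pool_mutex);
	p = hash_find(id);
	if (p == NULL) {
		p = calloc(1, sizeof *p);
		p->id = id;
		p->io_pending = 1;	/* cleared by the (omitted) I/O
					completion path */
		/* Publish the descriptor first, so that concurrent
		callers find it and wait instead of reading the same
		page a second time ... */
		p->next = page_hash[id % HASH_CELLS];
		page_hash[id % HASH_CELLS] = p;
		/* ... and only then drop the pool mutex for the slow
		allocation, as buf_buddy_alloc() may do in the patch. */
		pthread_mutex_unlock(&pool_mutex);
		data = malloc(zip_size);
		pthread_mutex_lock(&pool_mutex);
		p->zip_data = data;
	}
	pthread_mutex_unlock(&pool_mutex);
	return p;
}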
@@ -2326,11 +2332,6 @@ buf_page_create(
 
 	block = free_block;
 
-	if (zip_size) {
-		page_zip_set_size(&block->page.zip, zip_size);
-		block->page.zip.data = buf_buddy_alloc(zip_size, TRUE);
-	}
-
 	mutex_enter(&block->mutex);
 
 	buf_page_init(space, offset, block);
@@ -2341,6 +2342,21 @@ buf_page_create(
 	buf_block_buf_fix_inc(block, __FILE__, __LINE__);
 	buf_pool->n_pages_created++;
 
+	if (zip_size) {
+		void*	data;
+		page_zip_set_size(&block->page.zip, zip_size);
+		mutex_exit(&block->mutex);
+		/* buf_pool->mutex may be released and reacquired by
+		buf_buddy_alloc().  Thus, we must release block->mutex
+		in order not to break the latching order in
+		the reacquisition of buf_pool->mutex.  We also must
+		defer this operation until after the block descriptor
+		has been added to buf_pool->LRU and buf_pool->page_hash. */
+		data = buf_buddy_alloc(zip_size, TRUE);
+		mutex_enter(&block->mutex);
+		block->page.zip.data = data;
+	}
+
 	mutex_exit(&(buf_pool->mutex));
 
 	mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
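The last two hunks apply the same reordering to buf_page_create(); note that
the old code there called buf_buddy_alloc() without even holding
block->mutex, before buf_page_init() had published the block.  InnoDB debug
builds check the latching order at run time (sync0sync.c); a standalone
sketch of that idea, with invented wrapper names and a single two-level
order, might look like this:

/* Standalone sketch, not InnoDB's checker: lock_pool()/lock_block()
assert the pool -> block latching order for the current thread. */

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t block_mutex = PTHREAD_MUTEX_INITIALIZER;

static _Thread_local bool	holds_pool;
static _Thread_local bool	holds_block;

static void
lock_pool(void)
{
	/* Acquiring the pool mutex while a block mutex is held is
	exactly the inversion that buf_buddy_alloc()'s reacquisition
	of buf_pool->mutex would cause if the caller kept holding
	block->mutex; hence the mutex_exit(&block->mutex) in the
	patch. */
	assert(!holds_block);
	pthread_mutex_lock(&pool_mutex);
	holds_pool = true;
}

static void
unlock_pool(void)
{
	holds_pool = false;
	pthread_mutex_unlock(&pool_mutex);
}

static void
lock_block(void)
{
	/* In this sketch, block mutexes nest inside pool_mutex. */
	assert(holds_pool);
	pthread_mutex_lock(&block_mutex);
	holds_block = true;
}

static void
unlock_block(void)
{
	holds_block = false;
	pthread_mutex_unlock(&block_mutex);
}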