mariadb/buf/buf0flu.c

/******************************************************
The database buffer buf_pool flush algorithm
(c) 1995-2001 Innobase Oy
Created 11/11/1995 Heikki Tuuri
*******************************************************/
#include "buf0flu.h"
#ifdef UNIV_NONINL
#include "buf0flu.ic"
#include "trx0sys.h"
#endif
#include "ut0byte.h"
#include "ut0lst.h"
#include "page0page.h"
#include "page0zip.h"
#include "fil0fil.h"
#include "buf0buf.h"
#include "buf0lru.h"
#include "buf0rea.h"
#include "ibuf0ibuf.h"
#include "log0log.h"
#include "os0file.h"
#include "trx0sys.h"
#include "srv0srv.h"
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************
Validates the flush list. */
static
ibool
buf_flush_validate_low(void);
/*========================*/
/* out: TRUE if ok */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/**********************************************************************
Inserts a block into the flush_rbt and returns a pointer to its
predecessor, or NULL if there is no predecessor. The ordering is maintained
on the basis of the <oldest_modification, space, offset> key. */
static
buf_page_t*
buf_flush_insert_in_flush_rbt(
/*==========================*/
/* out: pointer to the predecessor or
NULL if no predecessor. */
buf_page_t* bpage) /* in: bpage to be inserted. */
{
buf_page_t* prev = NULL;
const ib_rbt_node_t* c_node;
const ib_rbt_node_t* p_node;
ut_ad(buf_pool_mutex_own());
/* Insert this buffer into the rbt. */
c_node = rbt_insert(buf_pool->flush_rbt, &bpage, &bpage);
ut_a(c_node != NULL);
/* Get the predecessor. */
p_node = rbt_prev(buf_pool->flush_rbt, c_node);
if (p_node != NULL) {
prev = *rbt_value(buf_page_t*, p_node);
ut_a(prev != NULL);
}
return(prev);
}
/*************************************************************
Deletes a bpage from the flush_rbt. */
static
void
buf_flush_delete_from_flush_rbt(
/*============================*/
buf_page_t* bpage) /* in: bpage to be removed. */
{
ibool ret = FALSE;
ut_ad(buf_pool_mutex_own());
ret = rbt_delete(buf_pool->flush_rbt, &bpage);
ut_ad(ret);
}
/*********************************************************************
Compare two modified blocks in the buffer pool. The key for comparison
is:
key = <oldest_modification, space, offset>
This comparison is used to maintain ordering of blocks in the
buf_pool->flush_rbt.
Note that for the purpose of flush_rbt, we only need to order blocks
on the oldest_modification. The other two fields are used to uniquely
identify the blocks. */
static
int
buf_flush_block_cmp(
/*================*/
/* out:
< 0 if b2 < b1,
0 if b2 == b1,
> 0 if b2 > b1 */
const void* p1, /* in: block1 */
const void* p2) /* in: block2 */
{
int ret;
const buf_page_t* b1;
const buf_page_t* b2;
ut_ad(p1 != NULL);
ut_ad(p2 != NULL);
b1 = *(const buf_page_t**) p1;
b2 = *(const buf_page_t**) p2;
ut_ad(b1 != NULL);
ut_ad(b2 != NULL);
ut_ad(b1->in_flush_list);
ut_ad(b2->in_flush_list);
if (b2->oldest_modification
> b1->oldest_modification) {
return(1);
}
if (b2->oldest_modification
< b1->oldest_modification) {
return(-1);
}
/* If oldest_modification is same then decide on the space. */
ret = (int)(b2->space - b1->space);
/* Or else decide ordering on the offset field. */
return(ret ? ret : (int)(b2->offset - b1->offset));
}
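/* Illustrative example (added; not from the original comments): for
two dirty blocks <oldest_modification 10, space 0, offset 7> and
<oldest_modification 20, space 0, offset 3>, the lsn values 10 and 20
alone decide the relative order; space and offset are compared only
when the lsn values are equal, which makes the
<oldest_modification, space, offset> key unique per block. */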
/************************************************************************
Initializes the red-black tree to speed up insertions into the flush_list
during the recovery process. Should be called at the start of the recovery
process, before any page has been read or written. */
UNIV_INTERN
void
buf_flush_init_flush_rbt(void)
/*==========================*/
{
buf_pool_mutex_enter();
/* Create red black tree for speedy insertions in flush list. */
buf_pool->flush_rbt = rbt_create(sizeof(buf_page_t*),
buf_flush_block_cmp);
buf_pool_mutex_exit();
}
/************************************************************************
Frees up the red-black tree. */
UNIV_INTERN
void
buf_flush_free_flush_rbt(void)
/*==========================*/
{
buf_pool_mutex_enter();
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
rbt_free(buf_pool->flush_rbt);
buf_pool->flush_rbt = NULL;
buf_pool_mutex_exit();
}
/************************************************************************
Inserts a modified block into the flush list. */
UNIV_INTERN
void
buf_flush_insert_into_flush_list(
/*=============================*/
buf_page_t* bpage) /* in: block which is modified */
{
ut_ad(buf_pool_mutex_own());
ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
|| (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
<= bpage->oldest_modification));
/* If we are in recovery, then we need to update the flush
red-black tree as well. */
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
buf_flush_insert_sorted_into_flush_list(bpage);
return;
}
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
mutex_enter(&buf_pool_zip_mutex);
buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY);
mutex_exit(&buf_pool_zip_mutex);
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
/* fall through */
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_FILE_PAGE:
ut_ad(bpage->in_LRU_list);
ut_ad(bpage->in_page_hash);
ut_ad(!bpage->in_zip_hash);
ut_ad(!bpage->in_flush_list);
ut_d(bpage->in_flush_list = TRUE);
UT_LIST_ADD_FIRST(list, buf_pool->flush_list, bpage);
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
return;
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
}
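/* Invariant note (added summary): outside of recovery, blocks enter
the flush_list in order of their first modification, so the list stays
in descending oldest_modification order and UT_LIST_ADD_FIRST above
suffices; during recovery the redo log is applied out of lsn order,
which is why buf_flush_insert_sorted_into_flush_list and the flush_rbt
exist. */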
/************************************************************************
Inserts a modified block into the flush list in the right sorted position.
This function is used by recovery, because during recovery the modifications
do not necessarily arrive in the order of their lsn's. */
UNIV_INTERN
void
buf_flush_insert_sorted_into_flush_list(
/*====================================*/
buf_page_t* bpage) /* in: block which is modified */
{
buf_page_t* prev_b;
buf_page_t* b;
ut_ad(buf_pool_mutex_own());
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
mutex_enter(&buf_pool_zip_mutex);
buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY);
mutex_exit(&buf_pool_zip_mutex);
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
/* fall through */
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_FILE_PAGE:
ut_ad(bpage->in_LRU_list);
ut_ad(bpage->in_page_hash);
ut_ad(!bpage->in_zip_hash);
ut_ad(!bpage->in_flush_list);
ut_d(bpage->in_flush_list = TRUE);
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
return;
}
prev_b = NULL;
/* For the most part when this function is called the flush_rbt
should not be NULL. In a very rare boundary case it is possible
that the flush_rbt has already been freed by the recovery thread
before the last page was hooked up in the flush_list by the
io-handler thread. In that case we'll just do a simple
linear search in the else block. */
if (buf_pool->flush_rbt) {
prev_b = buf_flush_insert_in_flush_rbt(bpage);
} else {
b = UT_LIST_GET_FIRST(buf_pool->flush_list);
while (b && b->oldest_modification
> bpage->oldest_modification) {
ut_ad(b->in_flush_list);
prev_b = b;
b = UT_LIST_GET_NEXT(list, b);
}
}
if (prev_b == NULL) {
UT_LIST_ADD_FIRST(list, buf_pool->flush_list, bpage);
} else {
UT_LIST_INSERT_AFTER(list, buf_pool->flush_list,
prev_b, bpage);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
}
/************************************************************************
Returns TRUE if the file page block is immediately suitable for replacement,
i.e., the transition FILE_PAGE => NOT_USED is allowed. */
UNIV_INTERN
ibool
buf_flush_ready_for_replace(
/*========================*/
/* out: TRUE if can replace immediately */
buf_page_t* bpage) /* in: buffer control block, must be
buf_page_in_file(bpage) and in the LRU list */
{
ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_LRU_list);
if (UNIV_LIKELY(buf_page_in_file(bpage))) {
return(bpage->oldest_modification == 0
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE
&& bpage->buf_fix_count == 0);
}
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: buffer block state %lu"
" in the LRU list!\n",
(ulong) buf_page_get_state(bpage));
ut_print_buf(stderr, bpage, sizeof(buf_page_t));
putc('\n', stderr);
return(FALSE);
}
/************************************************************************
Returns TRUE if the block is modified and ready for flushing. */
UNIV_INLINE
ibool
buf_flush_ready_for_flush(
/*======================*/
/* out: TRUE if can flush immediately */
buf_page_t* bpage, /* in: buffer control block, must be
buf_page_in_file(bpage) */
enum buf_flush flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{
ut_a(buf_page_in_file(bpage));
ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
if (bpage->oldest_modification != 0
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
ut_ad(bpage->in_flush_list);
if (flush_type != BUF_FLUSH_LRU) {
return(TRUE);
} else if (bpage->buf_fix_count == 0) {
/* If we are flushing the LRU list, to avoid deadlocks
we require the block not to be bufferfixed, and hence
not latched. */
return(TRUE);
}
}
return(FALSE);
}
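/* Summary (added; restates the checks above): a block is flushable
when it is dirty (oldest_modification != 0) and no i/o is in progress
on it (io fix is BUF_IO_NONE); an LRU flush additionally requires
buf_fix_count == 0 so that the page s-latch can be acquired without
waiting. */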
/************************************************************************
Remove a block from the flush list of modified blocks. */
UNIV_INTERN
void
buf_flush_remove(
/*=============*/
buf_page_t* bpage) /* in: pointer to the block in question */
{
ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_flush_list);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
/* clean compressed pages should not be on the flush list */
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
return;
case BUF_BLOCK_ZIP_DIRTY:
buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
buf_LRU_insert_zip_clean(bpage);
break;
case BUF_BLOCK_FILE_PAGE:
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
break;
}
/* If the flush_rbt is active then delete from it as well. */
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
}
/* Must be done after we have removed it from the flush_rbt
because we assert on in_flush_list in the comparison function. */
ut_d(bpage->in_flush_list = FALSE);
bpage->oldest_modification = 0;
ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->flush_list));
}
/***********************************************************************
Relocates a buffer control block on the flush_list.
Note that it is assumed that the contents of bpage have already been
copied to dpage. */
UNIV_INTERN
void
buf_flush_relocate_on_flush_list(
/*=============================*/
buf_page_t* bpage, /* in/out: control block being moved */
buf_page_t* dpage) /* in/out: destination block */
{
buf_page_t* prev;
buf_page_t* prev_b = NULL;
ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_flush_list);
ut_ad(dpage->in_flush_list);
/* If recovery is active we must swap the control blocks in
the flush_rbt as well. */
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
prev_b = buf_flush_insert_in_flush_rbt(dpage);
}
/* Must be done after we have removed it from the flush_rbt
because we assert on in_flush_list in the comparison function. */
ut_d(bpage->in_flush_list = FALSE);
prev = UT_LIST_GET_PREV(list, bpage);
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
if (prev) {
ut_ad(prev->in_flush_list);
UT_LIST_INSERT_AFTER(
list,
buf_pool->flush_list,
prev, dpage);
} else {
UT_LIST_ADD_FIRST(
list,
buf_pool->flush_list,
dpage);
}
/* Just an extra check. The previous node in the flush_list
should be the same control block as in the flush_rbt. */
ut_a(!buf_pool->flush_rbt || prev_b == prev);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
}
/************************************************************************
Updates the flush system data structures when a write is completed. */
UNIV_INTERN
void
buf_flush_write_complete(
/*=====================*/
buf_page_t* bpage) /* in: pointer to the block in question */
{
enum buf_flush flush_type;
ut_ad(bpage);
buf_flush_remove(bpage);
flush_type = buf_page_get_flush_type(bpage);
buf_pool->n_flush[flush_type]--;
if (flush_type == BUF_FLUSH_LRU) {
/* Put the block to the end of the LRU list to wait to be
moved to the free list */
buf_LRU_make_block_old(bpage);
buf_pool->LRU_flush_ended++;
}
/* fprintf(stderr, "n pending flush %lu\n",
buf_pool->n_flush[flush_type]); */
if ((buf_pool->n_flush[flush_type] == 0)
&& (buf_pool->init_flush[flush_type] == FALSE)) {
/* The running flush batch has ended */
os_event_set(buf_pool->no_flush[flush_type]);
}
}
/************************************************************************
Flushes possible buffered writes from the doublewrite memory buffer to disk,
and also wakes up the aio thread if simulated aio is used. It is very
important to call this function after a batch of writes has been posted,
and also when we may have to wait for a page latch! Otherwise a deadlock
of threads can occur. */
static
void
buf_flush_buffered_writes(void)
/*===========================*/
{
byte* write_buf;
ulint len;
ulint len2;
ulint i;
if (!srv_use_doublewrite_buf || trx_doublewrite == NULL) {
os_aio_simulated_wake_handler_threads();
return;
}
mutex_enter(&(trx_doublewrite->mutex));
/* Write first to the doublewrite buffer blocks. We use synchronous
aio and thus know that the file write has been completed when
control returns. */
if (trx_doublewrite->first_free == 0) {
mutex_exit(&(trx_doublewrite->mutex));
return;
}
for (i = 0; i < trx_doublewrite->first_free; i++) {
const buf_block_t* block;
block = (buf_block_t*) trx_doublewrite->buf_block_arr[i];
if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE
|| block->page.zip.data) {
/* No simple validate for compressed pages exists. */
continue;
}
if (UNIV_UNLIKELY
(memcmp(block->frame + (FIL_PAGE_LSN + 4),
block->frame + (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4),
4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written"
" seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in the buffer pool\n"
"InnoDB: before posting to the"
" doublewrite buffer.\n");
}
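/* Note (added summary): the low 4 bytes of the lsn written at
FIL_PAGE_LSN in the page header are duplicated in the last 4 bytes
of the 8-byte trailer at the end of the page, so the memcmp above is
a cheap torn-page/corruption check. */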
if (!block->check_index_page_at_flush) {
} else if (page_is_comp(block->frame)) {
if (UNIV_UNLIKELY
(!page_simple_validate_new(block->frame))) {
corrupted_page:
buf_page_print(block->frame, 0);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Apparent corruption of an"
" index page n:o %lu in space %lu\n"
"InnoDB: to be written to data file."
" We intentionally crash server\n"
"InnoDB: to prevent corrupt data"
" from ending up in data\n"
"InnoDB: files.\n",
(ulong) buf_block_get_page_no(block),
(ulong) buf_block_get_space(block));
ut_error;
}
} else if (UNIV_UNLIKELY
(!page_simple_validate_old(block->frame))) {
goto corrupted_page;
}
}
/* increment the doublewrite flushed pages counter */
srv_dblwr_pages_written += trx_doublewrite->first_free;
srv_dblwr_writes++;
len = ut_min(TRX_SYS_DOUBLEWRITE_BLOCK_SIZE,
trx_doublewrite->first_free) * UNIV_PAGE_SIZE;
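/* Layout note (added, inferred from the constants used here): the
doublewrite area consists of two blocks, block1 and block2, of
TRX_SYS_DOUBLEWRITE_BLOCK_SIZE pages each, so at most
2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE pages can be buffered at a time;
the write below covers block1, and a second write further down covers
any remainder in block2. */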
write_buf = trx_doublewrite->write_buf;
i = 0;
fil_io(OS_FILE_WRITE, TRUE, TRX_SYS_SPACE, 0,
trx_doublewrite->block1, 0, len,
(void*) write_buf, NULL);
for (len2 = 0; len2 + UNIV_PAGE_SIZE <= len;
len2 += UNIV_PAGE_SIZE, i++) {
const buf_block_t* block = (buf_block_t*)
trx_doublewrite->buf_block_arr[i];
if (UNIV_LIKELY(!block->page.zip.data)
&& UNIV_LIKELY(buf_block_get_state(block)
== BUF_BLOCK_FILE_PAGE)
&& UNIV_UNLIKELY
(memcmp(write_buf + len2 + (FIL_PAGE_LSN + 4),
write_buf + len2
+ (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written"
" seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in the doublewrite block1.\n");
}
}
if (trx_doublewrite->first_free <= TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
goto flush;
}
len = (trx_doublewrite->first_free - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE)
* UNIV_PAGE_SIZE;
write_buf = trx_doublewrite->write_buf
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE;
ut_ad(i == TRX_SYS_DOUBLEWRITE_BLOCK_SIZE);
fil_io(OS_FILE_WRITE, TRUE, TRX_SYS_SPACE, 0,
trx_doublewrite->block2, 0, len,
(void*) write_buf, NULL);
for (len2 = 0; len2 + UNIV_PAGE_SIZE <= len;
len2 += UNIV_PAGE_SIZE, i++) {
const buf_block_t* block = (buf_block_t*)
trx_doublewrite->buf_block_arr[i];
if (UNIV_LIKELY(!block->page.zip.data)
&& UNIV_LIKELY(buf_block_get_state(block)
== BUF_BLOCK_FILE_PAGE)
&& UNIV_UNLIKELY
(memcmp(write_buf + len2 + (FIL_PAGE_LSN + 4),
write_buf + len2
+ (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be"
" written seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in"
" the doublewrite block2.\n");
}
}
flush:
/* Now flush the doublewrite buffer data to disk */
fil_flush(TRX_SYS_SPACE);
/* We know that the writes have been flushed to disk now
and in recovery we will find them in the doublewrite buffer
blocks. Next do the writes to the intended positions. */
for (i = 0; i < trx_doublewrite->first_free; i++) {
const buf_block_t* block = (buf_block_t*)
trx_doublewrite->buf_block_arr[i];
ut_a(buf_page_in_file(&block->page));
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
FALSE, buf_page_get_space(&block->page),
buf_page_get_zip_size(&block->page),
buf_page_get_page_no(&block->page), 0,
buf_page_get_zip_size(&block->page),
(void*)block->page.zip.data,
(void*)block);
/* Increment the counter of I/O operations used
for selecting LRU policy. */
buf_LRU_stat_inc_io();
continue;
}
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
if (UNIV_UNLIKELY(memcmp(block->frame + (FIL_PAGE_LSN + 4),
block->frame
+ (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4),
4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written"
" seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in the buffer pool\n"
"InnoDB: after posting and flushing"
" the doublewrite buffer.\n"
"InnoDB: Page buf fix count %lu,"
" io fix %lu, state %lu\n",
(ulong)block->page.buf_fix_count,
(ulong)buf_block_get_io_fix(block),
(ulong)buf_block_get_state(block));
}
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
FALSE, buf_block_get_space(block), 0,
buf_block_get_page_no(block), 0, UNIV_PAGE_SIZE,
(void*)block->frame, (void*)block);
/* Increment the counter of I/O operations used
for selecting LRU policy. */
buf_LRU_stat_inc_io();
}
/* Wake possible simulated aio thread to actually post the
writes to the operating system */
os_aio_simulated_wake_handler_threads();
/* Wait until all async writes to tablespaces have been posted to
the OS */
os_aio_wait_until_no_pending_writes();
/* Now we flush the data to disk (for example, with fsync) */
fil_flush_file_spaces(FIL_TABLESPACE);
/* We can now reuse the doublewrite memory buffer: */
trx_doublewrite->first_free = 0;
mutex_exit(&(trx_doublewrite->mutex));
}
/************************************************************************
Posts a buffer page for writing. If the doublewrite memory buffer is
full, calls buf_flush_buffered_writes and waits for free space to
appear. */
static
void
buf_flush_post_to_doublewrite_buf(
/*==============================*/
buf_page_t* bpage) /* in: buffer block to write */
{
ulint zip_size;
try_again:
mutex_enter(&(trx_doublewrite->mutex));
ut_a(buf_page_in_file(bpage));
if (trx_doublewrite->first_free
>= 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
mutex_exit(&(trx_doublewrite->mutex));
buf_flush_buffered_writes();
goto try_again;
}
zip_size = buf_page_get_zip_size(bpage);
if (UNIV_UNLIKELY(zip_size)) {
/* Copy the compressed page and clear the rest. */
memcpy(trx_doublewrite->write_buf
+ UNIV_PAGE_SIZE * trx_doublewrite->first_free,
bpage->zip.data, zip_size);
memset(trx_doublewrite->write_buf
+ UNIV_PAGE_SIZE * trx_doublewrite->first_free
+ zip_size, 0, UNIV_PAGE_SIZE - zip_size);
} else {
ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
memcpy(trx_doublewrite->write_buf
+ UNIV_PAGE_SIZE * trx_doublewrite->first_free,
((buf_block_t*) bpage)->frame, UNIV_PAGE_SIZE);
}
trx_doublewrite->buf_block_arr[trx_doublewrite->first_free] = bpage;
trx_doublewrite->first_free++;
if (trx_doublewrite->first_free
>= 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
mutex_exit(&(trx_doublewrite->mutex));
buf_flush_buffered_writes();
return;
}
mutex_exit(&(trx_doublewrite->mutex));
}
/************************************************************************
Initializes a page for writing to the tablespace. */
UNIV_INTERN
void
buf_flush_init_for_writing(
/*=======================*/
byte* page, /* in/out: page */
void* page_zip_, /* in/out: compressed page, or NULL */
ib_uint64_t newest_lsn) /* in: newest modification lsn
to the page */
{
ut_ad(page);
if (page_zip_) {
page_zip_des_t* page_zip = page_zip_;
ulint zip_size = page_zip_get_size(page_zip);
ut_ad(zip_size);
ut_ad(ut_is_2pow(zip_size));
ut_ad(zip_size <= UNIV_PAGE_SIZE);
switch (UNIV_EXPECT(fil_page_get_type(page), FIL_PAGE_INDEX)) {
case FIL_PAGE_TYPE_ALLOCATED:
case FIL_PAGE_INODE:
case FIL_PAGE_IBUF_BITMAP:
case FIL_PAGE_TYPE_FSP_HDR:
case FIL_PAGE_TYPE_XDES:
/* These are essentially uncompressed pages. */
memcpy(page_zip->data, page, zip_size);
/* fall through */
case FIL_PAGE_TYPE_ZBLOB:
case FIL_PAGE_TYPE_ZBLOB2:
case FIL_PAGE_INDEX:
mach_write_ull(page_zip->data
+ FIL_PAGE_LSN, newest_lsn);
memset(page_zip->data + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);
mach_write_to_4(page_zip->data
+ FIL_PAGE_SPACE_OR_CHKSUM,
srv_use_checksums
? page_zip_calc_checksum(
page_zip->data, zip_size)
: BUF_NO_CHECKSUM_MAGIC);
return;
}
ut_print_timestamp(stderr);
fputs(" InnoDB: ERROR: The compressed page to be written"
" seems corrupt:", stderr);
ut_print_buf(stderr, page, zip_size);
fputs("\nInnoDB: Possibly older version of the page:", stderr);
ut_print_buf(stderr, page_zip->data, zip_size);
putc('\n', stderr);
ut_error;
}
/* Write the newest modification lsn to the page header and trailer */
mach_write_ull(page + FIL_PAGE_LSN, newest_lsn);
mach_write_ull(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
newest_lsn);
/* Store the new formula checksum */
mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM,
srv_use_checksums
? buf_calc_page_new_checksum(page)
: BUF_NO_CHECKSUM_MAGIC);
/* We overwrite the first 4 bytes of the end lsn field to store
the old formula checksum. Since it depends also on the field
FIL_PAGE_SPACE_OR_CHKSUM, it has to be calculated after storing the
new formula checksum. */
mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
srv_use_checksums
? buf_calc_page_old_checksum(page)
: BUF_NO_CHECKSUM_MAGIC);
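/* Resulting page layout (added, illustrative, for an uncompressed
page; offsets are the standard InnoDB field offsets):
bytes 0..3   FIL_PAGE_SPACE_OR_CHKSUM: new formula checksum
bytes 16..23 FIL_PAGE_LSN: newest_lsn
last 8 bytes FIL_PAGE_END_LSN_OLD_CHKSUM: old formula checksum
(4 bytes) followed by the low 4 bytes of newest_lsn */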
}
/************************************************************************
Does an asynchronous write of a buffer page. NOTE: in simulated aio and
also when the doublewrite buffer is used, we must call
buf_flush_buffered_writes after we have posted a batch of writes! */
static
void
buf_flush_write_block_low(
/*======================*/
buf_page_t* bpage) /* in: buffer block to write */
{
ulint zip_size = buf_page_get_zip_size(bpage);
page_t* frame = NULL;
#ifdef UNIV_LOG_DEBUG
static ibool univ_log_debug_warned;
#endif /* UNIV_LOG_DEBUG */
ut_ad(buf_page_in_file(bpage));
#ifdef UNIV_IBUF_COUNT_DEBUG
ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0);
#endif
ut_ad(bpage->newest_modification != 0);
#ifdef UNIV_LOG_DEBUG
if (!univ_log_debug_warned) {
univ_log_debug_warned = TRUE;
fputs("Warning: cannot force log to disk if"
" UNIV_LOG_DEBUG is defined!\n"
"Crash recovery will not work!\n",
stderr);
}
#else
/* Force the log to the disk before writing the modified block */
log_write_up_to(bpage->newest_modification, LOG_WAIT_ALL_GROUPS, TRUE);
#endif
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_PAGE: /* The page should be dirty. */
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
break;
case BUF_BLOCK_ZIP_DIRTY:
frame = bpage->zip.data;
if (UNIV_LIKELY(srv_use_checksums)) {
ut_a(mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM)
== page_zip_calc_checksum(frame, zip_size));
}
mach_write_ull(frame + FIL_PAGE_LSN,
bpage->newest_modification);
memset(frame + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);
break;
case BUF_BLOCK_FILE_PAGE:
frame = bpage->zip.data;
if (!frame) {
frame = ((buf_block_t*) bpage)->frame;
}
buf_flush_init_for_writing(((buf_block_t*) bpage)->frame,
bpage->zip.data
? &bpage->zip : NULL,
bpage->newest_modification);
break;
}
if (!srv_use_doublewrite_buf || !trx_doublewrite) {
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
FALSE, buf_page_get_space(bpage), zip_size,
buf_page_get_page_no(bpage), 0,
zip_size ? zip_size : UNIV_PAGE_SIZE,
frame, bpage);
} else {
buf_flush_post_to_doublewrite_buf(bpage);
}
}
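/* Path summary (added): when the doublewrite buffer is disabled or
not yet created, the page is posted directly as an asynchronous write
through fil_io(); otherwise it is first copied into the doublewrite
memory buffer and written out in batches by
buf_flush_buffered_writes(). */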
/************************************************************************
Writes a page asynchronously from the buffer buf_pool to a file, if it can be
found in the buf_pool and it is in a flushable state. NOTE: in simulated aio
we must call os_aio_simulated_wake_handler_threads after we have posted a batch
of writes! */
static
ulint
buf_flush_try_page(
/*===============*/
/* out: 1 if a page was
flushed, 0 otherwise */
ulint space, /* in: space id */
ulint offset, /* in: page offset */
enum buf_flush flush_type) /* in: BUF_FLUSH_LRU, BUF_FLUSH_LIST,
or BUF_FLUSH_SINGLE_PAGE */
{
buf_page_t* bpage;
mutex_t* block_mutex;
ibool locked;
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST
|| flush_type == BUF_FLUSH_SINGLE_PAGE);
buf_pool_mutex_enter();
bpage = buf_page_hash_get(space, offset);
if (!bpage) {
buf_pool_mutex_exit();
return(0);
}
ut_a(buf_page_in_file(bpage));
block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
if (!buf_flush_ready_for_flush(bpage, flush_type)) {
mutex_exit(block_mutex);
buf_pool_mutex_exit();
return(0);
}
switch (flush_type) {
case BUF_FLUSH_LIST:
buf_page_set_io_fix(bpage, BUF_IO_WRITE);
buf_page_set_flush_type(bpage, flush_type);
if (buf_pool->n_flush[flush_type] == 0) {
os_event_reset(buf_pool->no_flush[flush_type]);
}
buf_pool->n_flush[flush_type]++;
/* If the simulated aio thread is not running, we must
not wait for any latch, as we may end up in a deadlock:
if buf_fix_count == 0, then we know we need not wait */
locked = bpage->buf_fix_count == 0;
if (locked
&& buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
rw_lock_s_lock_gen(&((buf_block_t*) bpage)->lock,
BUF_IO_WRITE);
}
mutex_exit(block_mutex);
buf_pool_mutex_exit();
if (!locked) {
buf_flush_buffered_writes();
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
rw_lock_s_lock_gen(&((buf_block_t*) bpage)
->lock, BUF_IO_WRITE);
}
}
break;
case BUF_FLUSH_LRU:
/* VERY IMPORTANT:
Because any thread may call the LRU flush, even when owning
locks on pages, to avoid deadlocks, we must make sure that the
s-lock is acquired on the page without waiting: this is
accomplished because in the if-condition above we require
the page not to be bufferfixed (in function
..._ready_for_flush). */
buf_page_set_io_fix(bpage, BUF_IO_WRITE);
buf_page_set_flush_type(bpage, flush_type);
if (buf_pool->n_flush[flush_type] == 0) {
os_event_reset(buf_pool->no_flush[flush_type]);
}
buf_pool->n_flush[flush_type]++;
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
rw_lock_s_lock_gen(&((buf_block_t*) bpage)->lock,
BUF_IO_WRITE);
}
/* Note that the s-latch is acquired before releasing the
buf_pool mutex: this ensures that the latch is acquired
immediately. */
mutex_exit(block_mutex);
buf_pool_mutex_exit();
break;
case BUF_FLUSH_SINGLE_PAGE:
buf_page_set_io_fix(bpage, BUF_IO_WRITE);
buf_page_set_flush_type(bpage, flush_type);
if (buf_pool->n_flush[flush_type] == 0) {
os_event_reset(buf_pool->no_flush[flush_type]);
}
buf_pool->n_flush[flush_type]++;
mutex_exit(block_mutex);
buf_pool_mutex_exit();
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
rw_lock_s_lock_gen(&((buf_block_t*) bpage)->lock,
BUF_IO_WRITE);
}
break;
default:
ut_error;
}
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
fprintf(stderr,
"Flushing %u space %u page %u\n",
flush_type, bpage->space, bpage->offset);
}
#endif /* UNIV_DEBUG */
buf_flush_write_block_low(bpage);
return(1);
}
/***************************************************************
Flushes to disk all flushable pages within the flush area. */
static
ulint
buf_flush_try_neighbors(
/*====================*/
/* out: number of pages flushed */
ulint space, /* in: space id */
ulint offset, /* in: page offset */
enum buf_flush flush_type) /* in: BUF_FLUSH_LRU or
BUF_FLUSH_LIST */
{
buf_page_t* bpage;
ulint low, high;
ulint count = 0;
ulint i;
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
/* If there is little space, it is better not to flush any
block except from the end of the LRU list */
low = offset;
high = offset + 1;
} else {
/* When flushing, dirty blocks are searched for in neighborhoods of
this size, and are flushed along with the original page. */
ulint buf_flush_area = ut_min(BUF_READ_AHEAD_AREA,
buf_pool->curr_size / 16);
low = (offset / buf_flush_area) * buf_flush_area;
high = (offset / buf_flush_area + 1) * buf_flush_area;
}
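/* Worked example (added, assuming buf_flush_area == 64): for
offset == 200, low == (200 / 64) * 64 == 192 and high == 256, so the
loop below probes pages 192..255 of the space for flushable
neighbors. */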
/* fprintf(stderr, "Flush area: low %lu high %lu\n", low, high); */
if (high > fil_space_get_size(space)) {
high = fil_space_get_size(space);
}
buf_pool_mutex_enter();
for (i = low; i < high; i++) {
bpage = buf_page_hash_get(space, i);
ut_a(!bpage || buf_page_in_file(bpage));
if (!bpage) {
continue;
} else if (flush_type == BUF_FLUSH_LRU && i != offset
&& !buf_page_is_old(bpage)) {
/* We avoid flushing 'non-old' blocks in an LRU flush,
because the flushed blocks are soon freed */
continue;
} else {
mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
if (buf_flush_ready_for_flush(bpage, flush_type)
&& (i == offset || !bpage->buf_fix_count)) {
/* We only try to flush those
neighbors != offset where the buf fix count is
zero, as we then know that we probably can
latch the page without a semaphore wait.
Semaphore waits are expensive because we must
flush the doublewrite buffer before we start
waiting. */
buf_pool_mutex_exit();
mutex_exit(block_mutex);
/* Note: as we release the buf_pool mutex
above, in buf_flush_try_page we cannot be sure
the page is still in a flushable state:
therefore we check it again inside that
function. */
count += buf_flush_try_page(space, i,
flush_type);
buf_pool_mutex_enter();
} else {
mutex_exit(block_mutex);
}
}
}
buf_pool_mutex_exit();
return(count);
}
/***********************************************************************
This utility flushes dirty blocks from the end of the LRU list or flush_list.
NOTE 1: in the case of an LRU flush the calling thread may own latches on
pages: to avoid deadlocks, this function must be written so that it cannot
end up waiting for these latches! NOTE 2: in the case of a flush list flush,
the calling thread is not allowed to own any latches on pages! */
UNIV_INTERN
ulint
buf_flush_batch(
/*============*/
/* out: number of blocks for which the
write request was queued;
ULINT_UNDEFINED if there was a flush
of the same type already running */
enum buf_flush flush_type, /* in: BUF_FLUSH_LRU or
BUF_FLUSH_LIST; if BUF_FLUSH_LIST,
then the caller must not own any
latches on pages */
ulint min_n, /* in: wished minimum number of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
ib_uint64_t lsn_limit) /* in: in the case of BUF_FLUSH_LIST all
blocks whose oldest_modification is
smaller than this should be flushed
(if their number does not exceed
min_n), otherwise ignored */
{
buf_page_t* bpage;
ulint page_count = 0;
ulint old_page_count;
ulint space;
ulint offset;
ut_ad((flush_type == BUF_FLUSH_LRU)
|| (flush_type == BUF_FLUSH_LIST));
#ifdef UNIV_SYNC_DEBUG
ut_ad((flush_type != BUF_FLUSH_LIST)
|| sync_thread_levels_empty_gen(TRUE));
#endif /* UNIV_SYNC_DEBUG */
buf_pool_mutex_enter();
if ((buf_pool->n_flush[flush_type] > 0)
|| (buf_pool->init_flush[flush_type] == TRUE)) {
/* There is already a flush batch of the same type running */
buf_pool_mutex_exit();
return(ULINT_UNDEFINED);
}
buf_pool->init_flush[flush_type] = TRUE;
for (;;) {
flush_next:
/* If we have flushed enough, leave the loop */
if (page_count >= min_n) {
break;
}
/* Start from the end of the list looking for a suitable
block to be flushed. */
if (flush_type == BUF_FLUSH_LRU) {
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
} else {
ut_ad(flush_type == BUF_FLUSH_LIST);
bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
if (!bpage
|| bpage->oldest_modification >= lsn_limit) {
/* We have flushed enough */
break;
}
ut_ad(bpage->in_flush_list);
}
/* Note that after finding a single flushable page, we try to
flush also all its neighbors, and after that start from the
END of the LRU list or flush list again: the list may change
during the flushing and we cannot safely preserve within this
function a pointer to a block in the list! */
do {
mutex_t* block_mutex = buf_page_get_mutex(bpage);
ut_a(buf_page_in_file(bpage));
mutex_enter(block_mutex);
if (buf_flush_ready_for_flush(bpage, flush_type)) {
space = buf_page_get_space(bpage);
offset = buf_page_get_page_no(bpage);
buf_pool_mutex_exit();
mutex_exit(block_mutex);
old_page_count = page_count;
/* Try to flush also all the neighbors */
page_count += buf_flush_try_neighbors(
space, offset, flush_type);
/* fprintf(stderr,
"Flush type %lu, page no %lu, neighb %lu\n",
flush_type, offset,
page_count - old_page_count); */
buf_pool_mutex_enter();
goto flush_next;
} else if (flush_type == BUF_FLUSH_LRU) {
mutex_exit(block_mutex);
bpage = UT_LIST_GET_PREV(LRU, bpage);
} else {
ut_ad(flush_type == BUF_FLUSH_LIST);
mutex_exit(block_mutex);
bpage = UT_LIST_GET_PREV(list, bpage);
ut_ad(!bpage || bpage->in_flush_list);
}
} while (bpage != NULL);
/* If we could not find anything to flush, leave the loop */
break;
}
buf_pool->init_flush[flush_type] = FALSE;
if ((buf_pool->n_flush[flush_type] == 0)
&& (buf_pool->init_flush[flush_type] == FALSE)) {
/* The running flush batch has ended */
os_event_set(buf_pool->no_flush[flush_type]);
}
buf_pool_mutex_exit();
buf_flush_buffered_writes();
#ifdef UNIV_DEBUG
if (buf_debug_prints && page_count > 0) {
ut_a(flush_type == BUF_FLUSH_LRU
|| flush_type == BUF_FLUSH_LIST);
fprintf(stderr, flush_type == BUF_FLUSH_LRU
? "Flushed %lu pages in LRU flush\n"
: "Flushed %lu pages in flush list flush\n",
(ulong) page_count);
}
#endif /* UNIV_DEBUG */
srv_buf_pool_flushed += page_count;
return(page_count);
}
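/* Usage sketch (added, illustrative): an LRU flush to free up
replaceable pages can be issued as
buf_flush_batch(BUF_FLUSH_LRU, n, 0), where lsn_limit is ignored,
while a checkpoint-driven flush would use
buf_flush_batch(BUF_FLUSH_LIST, ULINT_MAX, lsn) to queue writes for
all blocks modified before lsn; see buf_flush_free_margin() below for
the LRU case. */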
/**********************************************************************
Waits until a flush batch of the given type ends */
UNIV_INTERN
void
buf_flush_wait_batch_end(
/*=====================*/
enum buf_flush type) /* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{
ut_ad((type == BUF_FLUSH_LRU) || (type == BUF_FLUSH_LIST));
os_event_wait(buf_pool->no_flush[type]);
}
/**********************************************************************
Gives a recommendation of how many blocks should be flushed to establish
a big enough margin of replaceable blocks near the end of the LRU list
and in the free list. */
static
ulint
buf_flush_LRU_recommendation(void)
/*==============================*/
/* out: number of blocks which should be flushed
from the end of the LRU list */
{
buf_page_t* bpage;
ulint n_replaceable;
ulint distance = 0;
buf_pool_mutex_enter();
n_replaceable = UT_LIST_GET_LEN(buf_pool->free);
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
while ((bpage != NULL)
&& (n_replaceable < BUF_FLUSH_FREE_BLOCK_MARGIN
+ BUF_FLUSH_EXTRA_MARGIN)
&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {
mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
if (buf_flush_ready_for_replace(bpage)) {
n_replaceable++;
}
mutex_exit(block_mutex);
distance++;
bpage = UT_LIST_GET_PREV(LRU, bpage);
}
buf_pool_mutex_exit();
if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) {
return(0);
}
return(BUF_FLUSH_FREE_BLOCK_MARGIN + BUF_FLUSH_EXTRA_MARGIN
- n_replaceable);
}
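/* Worked example (added, with made-up margin values): if
BUF_FLUSH_FREE_BLOCK_MARGIN == 100, BUF_FLUSH_EXTRA_MARGIN == 20 and
the scan above counted n_replaceable == 50, then 50 < 100 and the
function returns 100 + 20 - 50 == 70 blocks to flush; with
n_replaceable >= 100 it returns 0. */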
/*************************************************************************
Flushes pages from the end of the LRU list if there is too small a margin
of replaceable pages there or in the free list. VERY IMPORTANT: this function
is called also by threads which have locks on pages. To avoid deadlocks, we
flush only pages such that the s-lock required for flushing can be acquired
immediately, without waiting. */
UNIV_INTERN
void
buf_flush_free_margin(void)
/*=======================*/
{
ulint n_to_flush;
ulint n_flushed;
n_to_flush = buf_flush_LRU_recommendation();
if (n_to_flush > 0) {
n_flushed = buf_flush_batch(BUF_FLUSH_LRU, n_to_flush, 0);
if (n_flushed == ULINT_UNDEFINED) {
/* There was an LRU type flush batch already running;
let us wait for it to end */
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
}
}
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************
Validates the flush list. */
static
ibool
buf_flush_validate_low(void)
/*========================*/
/* out: TRUE if ok */
{
buf_page_t* bpage;
const ib_rbt_node_t* rnode = NULL;
UT_LIST_VALIDATE(list, buf_page_t, buf_pool->flush_list);
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
/* If we are in recovery mode, i.e., flush_rbt != NULL,
then each block in the flush_list must also be present
in the flush_rbt. */
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
rnode = rbt_first(buf_pool->flush_rbt);
}
while (bpage != NULL) {
const ib_uint64_t om = bpage->oldest_modification;
ut_ad(bpage->in_flush_list);
ut_a(buf_page_in_file(bpage));
ut_a(om > 0);
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
ut_a(rnode);
buf_page_t* rpage = *rbt_value(buf_page_t*,
rnode);
ut_a(rpage);
ut_a(rpage == bpage);
rnode = rbt_next(buf_pool->flush_rbt, rnode);
}
bpage = UT_LIST_GET_NEXT(list, bpage);
ut_a(!bpage || om >= bpage->oldest_modification);
}
/* By this time we must have exhausted the traversal of
flush_rbt (if active) as well. */
ut_a(rnode == NULL);
return(TRUE);
}
/**********************************************************************
Validates the flush list. */
UNIV_INTERN
ibool
buf_flush_validate(void)
/*====================*/
/* out: TRUE if ok */
{
ibool ret;
buf_pool_mutex_enter();
ret = buf_flush_validate_low();
buf_pool_mutex_exit();
return(ret);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */