mirror of
https://github.com/MariaDB/server.git
synced 2025-01-29 18:20:07 +01:00
MDEV-18726: innodb buffer pool size not consistent with large pages
Rather than add a small extra amount on top of the size of chunks, keep it at the specified size. The rest of the chunk initialization code adapts to this small size reduction. This has been done in the general case, not just for large pages, to keep it simple. The chunk size is controlled by innodb-buffer-pool-chunk-size. In the code, increasing this by the length of a descriptor table makes it difficult with large pages. With innodb-buffer-pool-chunk-size set to 2M, the code before this commit would have added a small extra amount to this value when it tried to allocate it. While not normally a problem, with large pages it requires additional space: a whole extra large page. With a number of pools, or with 1G or 16G large pages, this is quite significant. By removing this additional amount, DBAs can set innodb-buffer-pool-chunk-size to the large page size, or a multiple of it, and actually get that amount allocated. Previously they had to fudge a smaller value. The innodb.test results show how this was fudged over a number of tests. With this change the values are just between 488 and 500, depending on architecture and build options. Tested with --large-pages --innodb-buffer-pool-size=256M --innodb-buffer-pool-chunk-size=2M on x86_64 with a 2M default large page size. Breaking before buf_pool init, one large page was allocated by MyISAM; by the end of the function 128 huge pages were allocated as expected. A further 16 pages were allocated for a 32M log buffer, and during startup 1 page was allocated briefly for the redo log.
This commit is contained in:
parent
6b6fa3cdb1
commit
de51acd037
3 changed files with 8 additions and 15 deletions
|
@ -1688,9 +1688,9 @@ select count(*) from t1 where x = 18446744073709551601;
|
|||
count(*)
|
||||
1
|
||||
drop table t1;
|
||||
SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
|
||||
variable_value
|
||||
ok
|
||||
SELECT IF(variable_value BETWEEN 488 AND 512, 'OK', variable_value) FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
|
||||
IF(variable_value BETWEEN 488 AND 512, 'OK', variable_value)
|
||||
OK
|
||||
SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_page_size';
|
||||
variable_value
|
||||
16384
|
||||
|
|
|
@ -1321,8 +1321,7 @@ drop table t1;
|
|||
|
||||
# Test for testable InnoDB status variables. This test
|
||||
# uses previous ones(pages_created, rows_deleted, ...).
|
||||
--replace_result 511 ok 512 ok 2047 ok 513 ok 514 ok 515 ok
|
||||
SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
|
||||
SELECT IF(variable_value BETWEEN 488 AND 512, 'OK', variable_value) FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
|
||||
SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_page_size';
|
||||
SELECT variable_value - @innodb_rows_deleted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_deleted';
|
||||
SELECT variable_value - @innodb_rows_inserted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_inserted';
|
||||
|
|
|
@ -1587,11 +1587,6 @@ buf_chunk_init(
|
|||
/* Round down to a multiple of page size,
|
||||
although it already should be. */
|
||||
mem_size = ut_2pow_round(mem_size, ulint(srv_page_size));
|
||||
/* Reserve space for the block descriptors. */
|
||||
mem_size += ut_2pow_round((mem_size >> srv_page_size_shift)
|
||||
* (sizeof *block)
|
||||
+ (srv_page_size - 1),
|
||||
ulint(srv_page_size));
|
||||
|
||||
DBUG_EXECUTE_IF("ib_buf_chunk_init_fails", return(NULL););
|
||||
|
||||
|
@ -1913,8 +1908,7 @@ buf_pool_init_instance(
|
|||
ut_min(BUF_READ_AHEAD_PAGES,
|
||||
ut_2_power_up(buf_pool->curr_size /
|
||||
BUF_READ_AHEAD_PORTION));
|
||||
buf_pool->curr_pool_size = buf_pool->curr_size
|
||||
<< srv_page_size_shift;
|
||||
buf_pool->curr_pool_size = buf_pool_size;
|
||||
|
||||
buf_pool->old_size = buf_pool->curr_size;
|
||||
buf_pool->n_chunks_new = buf_pool->n_chunks;
|
||||
|
@ -2718,12 +2712,12 @@ buf_pool_resize()
|
|||
ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0);
|
||||
ut_ad(buf_pool->flush_rbt == NULL);
|
||||
|
||||
buf_pool->curr_size = new_instance_size;
|
||||
|
||||
buf_pool->n_chunks_new =
|
||||
(new_instance_size << srv_page_size_shift)
|
||||
/ srv_buf_pool_chunk_unit;
|
||||
|
||||
buf_pool->curr_size = buf_pool->n_chunks_new * buf_pool->chunks->size;
|
||||
|
||||
buf_pool_mutex_exit(buf_pool);
|
||||
}
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
|
@ -3053,7 +3047,7 @@ calc_buf_pool_size:
|
|||
ut_2_power_up(buf_pool->curr_size /
|
||||
BUF_READ_AHEAD_PORTION));
|
||||
buf_pool->curr_pool_size
|
||||
= buf_pool->curr_size << srv_page_size_shift;
|
||||
= buf_pool->n_chunks * srv_buf_pool_chunk_unit;
|
||||
curr_size += buf_pool->curr_pool_size;
|
||||
buf_pool->old_size = buf_pool->curr_size;
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue