Merge
commit 701636ef6e
9 changed files with 83 additions and 50 deletions

@@ -46781,6 +46781,14 @@ not yet 100% confident in this code.
 @appendixsubsec Changes in release 3.23.45
 @itemize @bullet
 @item
+Fix a bug which could cause InnoDB to complain if it cannot find free blocks
+from the buffer cache during recovery.
+@item
+Fixed a bug in InnoDB insert buffer B-tree handling that could cause crashes.
+@item
+Fixed bug in @code{OPTIMIZE TABLE} that reset index cardinality if it
+was up to date.
+@item
 Fixed problem with @code{t1 LEFT_JOIN t2 ... WHERE t2.date_column IS NULL} when
 date_column was declared as @code{NOT NULL}.
 @item

@@ -21,7 +21,7 @@ Created 6/2/1994 Heikki Tuuri
 #include "lock0lock.h"
 #include "ibuf0ibuf.h"
 
-/**
+/*
 Node pointers
 -------------
 Leaf pages of a B-tree contain the index records stored in the
@@ -550,14 +550,15 @@ btr_page_get_father_for_rec(
 
     ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree),
                             MTR_MEMO_X_LOCK));
-    ut_ad(user_rec != page_get_supremum_rec(page));
-    ut_ad(user_rec != page_get_infimum_rec(page));
+    ut_a(user_rec != page_get_supremum_rec(page));
+    ut_a(user_rec != page_get_infimum_rec(page));
 
     ut_ad(dict_tree_get_page(tree) != buf_frame_get_page_no(page));
 
     heap = mem_heap_create(100);
 
-    tuple = dict_tree_build_node_ptr(tree, user_rec, 0, heap);
+    tuple = dict_tree_build_node_ptr(tree, user_rec, 0, heap,
+                                     btr_page_get_level(page, mtr));
 
     /* In the following, we choose just any index from the tree as the
     first parameter for btr_cur_search_to_nth_level. */
@@ -569,7 +570,7 @@ btr_page_get_father_for_rec(
 
     node_ptr = btr_cur_get_rec(&cursor);
 
-    ut_ad(btr_node_ptr_get_child_page_no(node_ptr) ==
+    ut_a(btr_node_ptr_get_child_page_no(node_ptr) ==
                                     buf_frame_get_page_no(page));
     mem_heap_free(heap);
 
@@ -949,8 +950,8 @@ btr_root_raise_and_insert(
     /* Build the node pointer (= node key and page address) for the
     child */
 
-    node_ptr = dict_tree_build_node_ptr(tree, rec, new_page_no, heap);
-
+    node_ptr = dict_tree_build_node_ptr(tree, rec, new_page_no, heap,
+                                        level);
     /* Reorganize the root to get free space */
     btr_page_reorganize(root, mtr);
 
@@ -1365,7 +1366,7 @@ btr_attach_half_pages(
     half */
 
     node_ptr_upper = dict_tree_build_node_ptr(tree, split_rec,
-                                              upper_page_no, heap);
+                                              upper_page_no, heap, level);
 
     /* Insert it next to the pointer to the lower half. Note that this
     may generate recursion leading to a split on the higher level. */
@@ -2230,7 +2231,7 @@ btr_check_node_ptr(
     node_ptr_tuple = dict_tree_build_node_ptr(
                         tree,
                         page_rec_get_next(page_get_infimum_rec(page)),
-                        0, heap);
+                        0, heap, btr_page_get_level(page, mtr));
 
     ut_a(cmp_dtuple_rec(node_ptr_tuple, node_ptr) == 0);
 
@@ -2485,10 +2486,11 @@ loop:
         heap = mem_heap_create(256);
 
         node_ptr_tuple = dict_tree_build_node_ptr(
-                            tree,
+                            tree,
                             page_rec_get_next(
                                 page_get_infimum_rec(page)),
-                            0, heap);
+                            0, heap,
+                            btr_page_get_level(page, &mtr));
 
         if (cmp_dtuple_rec(node_ptr_tuple, node_ptr) != 0) {
 

@@ -2345,9 +2345,9 @@ btr_cur_pessimistic_delete(
         heap = mem_heap_create(256);
 
         node_ptr = dict_tree_build_node_ptr(
-                        tree, page_rec_get_next(rec),
-                        buf_frame_get_page_no(page),
-                        heap);
+                        tree, page_rec_get_next(rec),
+                        buf_frame_get_page_no(page),
+                        heap, btr_page_get_level(page, mtr));
 
         btr_insert_on_non_leaf_level(tree,
                         btr_page_get_level(page, mtr) + 1,

@@ -138,15 +138,11 @@ buf_flush_ready_for_flush(
 
         return(TRUE);
 
-    } else if ((block->old || (UT_LIST_GET_LEN(buf_pool->LRU)
-                               < BUF_LRU_OLD_MIN_LEN))
-               && (block->buf_fix_count == 0)) {
+    } else if (block->buf_fix_count == 0) {
 
         /* If we are flushing the LRU list, to avoid deadlocks
         we require the block not to be bufferfixed, and hence
-        not latched. Since LRU flushed blocks are soon moved
-        to the free list, it is good to flush only old blocks
-        from the end of the LRU list. */
+        not latched. */
 
         return(TRUE);
     }
@@ -560,6 +556,15 @@ buf_flush_try_neighbors(
 
         block = buf_page_hash_get(space, i);
 
+        if (block && flush_type == BUF_FLUSH_LRU && i != offset
+            && !block->old) {
+
+            /* We avoid flushing 'non-old' blocks in an LRU flush,
+            because the flushed blocks are soon freed */
+
+            continue;
+        }
+
         if (block && buf_flush_ready_for_flush(block, flush_type)) {
 
             mutex_exit(&(buf_pool->mutex));

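Taken together, the two buffer-flush hunks move the "flush only old blocks during an LRU flush" rule out of the readiness test and into the neighbor scan. Below is a minimal, self-contained C sketch of that decision order; the toy_block_t type and both helper functions are illustrative stand-ins, not the real buf_block_t or buf0flu API.

#include <stdbool.h>
#include <stdio.h>

enum toy_flush_type { TOY_FLUSH_LRU, TOY_FLUSH_LIST };

/* Illustrative stand-in for a buffer pool block; not the real buf_block_t. */
typedef struct {
    bool old;              /* near the tail of the LRU list */
    int  buf_fix_count;    /* > 0 means the block is pinned (latched) */
    bool dirty;            /* has an unflushed modification */
} toy_block_t;

/* Neighbor scan: in an LRU flush, skip 'non-old' neighbors up front,
   because flushed blocks are soon freed (the check added above in
   buf_flush_try_neighbors). */
static bool toy_skip_neighbor(const toy_block_t *b, enum toy_flush_type t,
                              bool is_requested_block)
{
    return t == TOY_FLUSH_LRU && !is_requested_block && !b->old;
}

/* Readiness check: after the patch it only requires the block to be
   dirty and not buffer-fixed (the simplified buf_flush_ready_for_flush). */
static bool toy_ready_for_flush(const toy_block_t *b)
{
    return b->dirty && b->buf_fix_count == 0;
}

int main(void)
{
    toy_block_t young_dirty = { false, 0, true };

    /* An LRU flush skips this neighbor even though it is flushable. */
    printf("skip: %d, ready: %d\n",
           toy_skip_neighbor(&young_dirty, TOY_FLUSH_LRU, false),
           toy_ready_for_flush(&young_dirty));
    return 0;
}
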
@@ -2415,7 +2415,9 @@ dict_tree_build_node_ptr(
     dict_tree_t*    tree,   /* in: index tree */
     rec_t*          rec,    /* in: record for which to build node pointer */
    ulint           page_no,/* in: page number to put in node pointer */
-    mem_heap_t*     heap)   /* in: memory heap where pointer created */
+    mem_heap_t*     heap,   /* in: memory heap where pointer created */
+    ulint           level)  /* in: level of rec in tree: 0 means leaf
+                            level */
 {
     dtuple_t*       tuple;
     dict_index_t*   ind;
@@ -2427,9 +2429,16 @@ dict_tree_build_node_ptr(
 
     if (tree->type & DICT_UNIVERSAL) {
         /* In a universal index tree, we take the whole record as
-        the node pointer */
+        the node pointer if the record is on the leaf level;
+        on non-leaf levels we remove the last field, which
+        contains the page number of the child page */
 
         n_unique = rec_get_n_fields(rec);
+
+        if (level > 0) {
+            ut_a(n_unique > 1);
+            n_unique--;
+        }
     } else {
         n_unique = dict_index_get_n_unique_in_tree(ind);
     }

@@ -622,7 +622,9 @@ dict_tree_build_node_ptr(
     dict_tree_t*    tree,   /* in: index tree */
     rec_t*          rec,    /* in: record for which to build node pointer */
     ulint           page_no,/* in: page number to put in node pointer */
-    mem_heap_t*     heap);  /* in: memory heap where pointer created */
+    mem_heap_t*     heap,   /* in: memory heap where pointer created */
+    ulint           level); /* in: level of rec in tree: 0 means leaf
+                            level */
 /**************************************************************************
 Copies an initial segment of a physical record, long enough to specify an
 index entry uniquely. */

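The extra level argument above lets dict_tree_build_node_ptr() know whether the record sits on the leaf level, so that in a universal index tree the trailing child-page-number field can be dropped for non-leaf records (the n_unique-- branch). A minimal, self-contained C sketch of that field-count rule, using toy stand-ins rather than the real rec_t and dtuple_t types:

#include <stdio.h>

/* Toy stand-in for an index record: an array of fields whose last
   entry holds the child page number on non-leaf levels.  Not the real
   InnoDB rec_t. */
typedef struct {
    int fields[4];
    int n_fields;
} toy_rec_t;

/* Mirrors the new rule: on the leaf level (level == 0) the whole record
   is used as the node pointer; above it, the last field (the child page
   number) is dropped, just like the n_unique-- branch above. */
static int toy_node_ptr_n_fields(const toy_rec_t *rec, int level)
{
    int n_unique = rec->n_fields;

    if (level > 0) {
        n_unique--;
    }
    return n_unique;
}

int main(void)
{
    toy_rec_t rec = { {7, 42, 3, 1234}, 4 };

    printf("leaf node pointer keeps %d fields\n",
           toy_node_ptr_n_fields(&rec, 0));     /* 4 */
    printf("non-leaf node pointer keeps %d fields\n",
           toy_node_ptr_n_fields(&rec, 1));     /* 3 */
    return 0;
}
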
@@ -38,3 +38,15 @@ check table t1;
 repair table t1;
 check table t1;
 drop table t1;
+
+#
+# Test bug: Two optimize in a row reset index cardinality
+#
+
+create table t1 (a int not null auto_increment, b int not null, primary key (a), index(b));
+insert into t1 (b) values (1),(2),(2),(2),(2);
+optimize table t1;
+show index from t1;
+optimize table t1;
+show index from t1;
+drop table t1;

@@ -541,7 +541,7 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
 int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
 {
   int error=0;
-  uint extra_testflag=0;
+  uint local_testflag=param.testflag;
   bool optimize_done= !optimize, statistics_done=0;
   const char *old_proc_info=thd->proc_info;
   char fixed_name[FN_REFLEN];
@@ -570,19 +570,18 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
       (!param.opt_rep_quick ||
        !(share->state.changed & STATE_NOT_OPTIMIZED_KEYS))))
   {
-    ulonglong key_map= ((param.testflag & T_CREATE_MISSING_KEYS) ?
+    ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ?
                         ((ulonglong) 1L << share->base.keys)-1 :
                         share->state.key_map);
+    uint testflag=param.testflag;
     if (mi_test_if_sort_rep(file,file->state->records,key_map,0) &&
-        (param.testflag & T_REP_BY_SORT))
+        (local_testflag & T_REP_BY_SORT))
     {
-      uint testflag=param.testflag;
-      extra_testflag= T_STATISTICS;
+      local_testflag|= T_STATISTICS;
       param.testflag|= T_STATISTICS;            // We get this for free
       thd->proc_info="Repair by sorting";
       statistics_done=1;
       error = mi_repair_by_sort(&param, file, fixed_name, param.opt_rep_quick);
-      param.testflag=testflag;
     }
     else
     {
@@ -590,22 +589,28 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
       param.testflag &= ~T_REP_BY_SORT;
       error= mi_repair(&param, file, fixed_name, param.opt_rep_quick);
     }
+    param.testflag=testflag;
+    optimize_done=1;
   }
   if (!error)
   {
-    if ((param.testflag & T_SORT_INDEX) &&
+    if ((local_testflag & T_SORT_INDEX) &&
         (share->state.changed & STATE_NOT_SORTED_PAGES))
     {
       optimize_done=1;
       thd->proc_info="Sorting index";
       error=mi_sort_index(&param,file,fixed_name);
     }
-    if (!statistics_done && (param.testflag & T_STATISTICS) &&
-        (share->state.changed & STATE_NOT_ANALYZED))
+    if (!statistics_done && (local_testflag & T_STATISTICS))
     {
-      optimize_done=1;
-      thd->proc_info="Analyzing";
-      error = chk_key(&param, file);
+      if (share->state.changed & STATE_NOT_ANALYZED)
+      {
+        optimize_done=1;
+        thd->proc_info="Analyzing";
+        error = chk_key(&param, file);
+      }
+      else
+        local_testflag&= ~T_STATISTICS;         // Don't update statistics
     }
   }
   thd->proc_info="Saving state";
@@ -620,10 +625,11 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
     file->save_state=file->s->state.state;
     if (file->s->base.auto_key)
       update_auto_increment_key(&param, file, 1);
-    error = update_state_info(&param, file,
-                              UPDATE_TIME | UPDATE_OPEN_COUNT |
-                              ((param.testflag | extra_testflag) &
-                               T_STATISTICS ? UPDATE_STAT : 0));
+    if (optimize_done)
+      error = update_state_info(&param, file,
+                                UPDATE_TIME | UPDATE_OPEN_COUNT |
+                                (local_testflag &
+                                 T_STATISTICS ? UPDATE_STAT : 0));
     info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
          HA_STATUS_CONST);
     if (rows != file->state->records && ! (param.testflag & T_VERY_SILENT))

|
|||
#ifdef __NT__
|
||||
static pthread_handler_decl(handle_connections_namedpipes,arg);
|
||||
#endif
|
||||
#ifdef __WIN__
|
||||
static int get_service_parameters();
|
||||
#endif
|
||||
extern pthread_handler_decl(handle_slave,arg);
|
||||
#ifdef SET_RLIMIT_NOFILE
|
||||
static uint set_maximum_open_files(uint max_file_limit);
|
||||
|
@@ -1647,14 +1644,6 @@ int main(int argc, char **argv)
     mysql_tmpdir=(char*) P_tmpdir;              /* purecov: inspected */
 
   set_options();
-#ifdef __WIN__
-  /* service parameters can be overwritten by options */
-  if (get_service_parameters())
-  {
-    my_message( 0, "Can't read MySQL service parameters", MYF(0) );
-    exit( 1 );
-  }
-#endif
   get_options(argc,argv);
   if (opt_log || opt_update_log || opt_slow_log || opt_bin_log)
     strcat(server_version,"-log");