dict_table_t: Rename the integer field max_row_size to the Boolean field
big_rows. (Bug #20877)

BIG_ROW_SIZE: Move the definition from row0sel.c to dict_table_add_to_cache().
commit 090d9d9dc6 (parent 3b49ab383b)
4 changed files with 19 additions and 21 deletions
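In essence, the patch stops tracking the exact maximum row length and only records whether it can reach BIG_ROW_SIZE. Below is a minimal standalone sketch of the new computation in dict_table_add_to_cache(), assuming plain size_t lengths in place of the dictionary column objects and dtype_get_max_size(); table_has_big_rows() is an illustrative helper, not a function in the tree.

#include <stddef.h>
#include <stdio.h>

/* The lower limit for what we consider a "big" row (as in the patch). */
#define BIG_ROW_SIZE	1024

/* Sum the maximum column lengths, but stop as soon as the running total
(or a single column) reaches BIG_ROW_SIZE, and keep only the yes/no
answer that the callers actually need. */
static int
table_has_big_rows(const size_t* col_max_len, size_t n_cols)
{
	size_t	row_len = 0;
	size_t	i;

	for (i = 0; i < n_cols; i++) {
		size_t	col_len = col_max_len[i];

		/* A single unbounded field, or several gigantic fields,
		already make the row "big"; breaking here also keeps the
		sum from overflowing. */
		if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) {
			row_len = BIG_ROW_SIZE;
			break;
		}

		row_len += col_len;
	}

	return(row_len >= BIG_ROW_SIZE);
}

int
main(void)
{
	size_t	small[] = {4, 8, 255};		/* e.g. INT, BIGINT, VARCHAR(255) */
	size_t	big[] = {4, (size_t) -1};	/* an unbounded column, e.g. BLOB */

	printf("small table: big_rows=%d\n",
	       table_has_big_rows(small, sizeof(small) / sizeof(*small)));
	printf("big table:   big_rows=%d\n",
	       table_has_big_rows(big, sizeof(big) / sizeof(*big)));
	return(0);
}

The only readers of the old max_row_size field compared it against BIG_ROW_SIZE (see the two row0sel.c hunks below), so a one-bit flag carries the same information.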
@@ -876,23 +876,26 @@ dict_table_add_to_cache(
 #error "DATA_N_SYS_COLS != 3"
 #endif
 
+	/* The lower limit for what we consider a "big" row */
+#define BIG_ROW_SIZE 1024
+
 	row_len = 0;
 	for (i = 0; i < table->n_def; i++) {
 		ulint col_len = dtype_get_max_size
 			(dict_col_get_type(dict_table_get_nth_col(table, i)));
 
-		row_len += col_len;
-
 		/* If we have a single unbounded field, or several gigantic
-		fields, mark the maximum row size as ULINT_MAX. */
-		if (ut_max(col_len, row_len) >= (ULINT_MAX / 2)) {
-			row_len = ULINT_MAX;
+		fields, mark the maximum row size as BIG_ROW_SIZE. */
+		if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) {
+			row_len = BIG_ROW_SIZE;
 
 			break;
 		}
+
+		row_len += col_len;
 	}
 
-	table->max_row_size = row_len;
+	table->big_rows = row_len >= BIG_ROW_SIZE;
 
 	/* Look for a table with the same name: error if such exists */
 	{
@@ -84,7 +84,7 @@ dict_mem_table_create(
 
 	table->stat_modified_counter = 0;
 
-	table->max_row_size = 0;
+	table->big_rows = 0;
 
 	mutex_create(&table->autoinc_mutex, SYNC_DICT_AUTOINC_MUTEX);
 
@@ -331,12 +331,6 @@ struct dict_table_struct{
 				had an IX lock on */
 	UT_LIST_BASE_NODE_T(lock_t)
 			locks;	/* list of locks on the table */
-	ulint		max_row_size;
-				/* maximum size of a single row in the
-				table, not guaranteed to be especially
-				accurate. it's ULINT_MAX if there are
-				unbounded variable-width fields. initialized
-				in dict_table_add_to_cache. */
 #ifdef UNIV_DEBUG
 	/*----------------------*/
 	ibool		does_not_fit_in_memory;
@@ -350,6 +344,13 @@ struct dict_table_struct{
 				the table definition from disk */
 #endif /* UNIV_DEBUG */
 	/*----------------------*/
+	unsigned	big_rows:1;
+				/* flag: TRUE if the maximum length of
+				a single row exceeds BIG_ROW_SIZE;
+				initialized in dict_table_add_to_cache() */
+	unsigned	stat_initialized:1; /* TRUE if statistics have
+				been calculated the first time
+				after database startup or table creation */
 	ib_longlong	stat_n_rows;
 				/* approximate number of rows in the table;
 				we periodically calculate new estimates */
@@ -358,9 +359,6 @@ struct dict_table_struct{
 				database pages */
 	ulint		stat_sum_of_other_index_sizes;
 				/* other indexes in database pages */
-	ibool		stat_initialized:1; /* TRUE if statistics have
-				been calculated the first time
-				after database startup or table creation */
 	ulint		stat_modified_counter;
 				/* when a row is inserted, updated, or deleted,
 				we add 1 to this number; we calculate new
@@ -45,9 +45,6 @@ to que_run_threads: this is to allow canceling runaway queries */
 
 #define SEL_COST_LIMIT 100
 
-/* The lower limit for what we consider a "big" row */
-#define BIG_ROW_SIZE 1024
-
 /* Flags for search shortcut */
 #define SEL_FOUND 0
 #define SEL_EXHAUSTED 1
@@ -1247,7 +1244,7 @@ table_loop:
 
 	if (consistent_read && plan->unique_search && !plan->pcur_is_open
 	    && !plan->must_get_clust
-	    && (plan->table->max_row_size < BIG_ROW_SIZE)) {
+	    && !plan->table->big_rows) {
 		if (!search_latch_locked) {
 			rw_lock_s_lock(&btr_search_latch);
 
@@ -1652,7 +1649,7 @@ skip_lock:
 
 	if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT)
 	    || plan->unique_search || plan->no_prefetch
-	    || (plan->table->max_row_size >= BIG_ROW_SIZE)) {
+	    || plan->table->big_rows) {
 
 		/* No prefetch in operation: go to the next table */
 
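For completeness, a hedged, self-contained sketch of how the flag is consumed on the query side. struct table_stub, struct plan_stub and row_sel_would_prefetch() are illustrative stand-ins for dict_table_t, plan_t and the test inside row_sel(); the SEL_PREFETCH_LIMIT value is assumed for the example and is not shown in this diff.

#include <stdio.h>

/* Assumed value, for illustration only; the real definition lives in
row0sel.c and does not appear in this diff. */
#define SEL_PREFETCH_LIMIT	1

/* Minimal stand-ins for the parts of dict_table_t and plan_t that the
row0sel.c hunks above touch. */
struct table_stub {
	unsigned	big_rows:1;
};

struct plan_stub {
	struct table_stub*	table;
	unsigned long		n_rows_fetched;
	int			unique_search;
	int			no_prefetch;
};

/* Mirrors the prefetch test in the last hunk: rows are not prefetched
for unique searches, explicit no-prefetch plans, the first few fetched
rows, or tables whose rows may be very long. */
static int
row_sel_would_prefetch(const struct plan_stub* plan)
{
	return(!((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT)
		 || plan->unique_search || plan->no_prefetch
		 || plan->table->big_rows));
}

int
main(void)
{
	struct table_stub	table = {0};
	struct plan_stub	plan = {&table, 100, 0, 0};

	printf("prefetch=%d\n", row_sel_would_prefetch(&plan));	/* 1 */

	table.big_rows = 1;
	printf("prefetch=%d\n", row_sel_would_prefetch(&plan));	/* 0 */

	return(0);
}

Packing big_rows as a one-bit field next to stat_initialized answers the same question as the old ulint comparison while letting the two flags share storage in dict_table_t.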