Remove dict_table_t::big_rows

The field dict_table_t::big_rows was only used to determine whether
the adaptive hash index should be used by the internal InnoDB SQL
parser. That parser is only used for modifying the InnoDB data
dictionary, for updating persistent tables, and for fulltext indexes.
Marko Mäkelä 2017-10-02 11:43:30 +03:00
parent d6f857ddbc
commit cc3057fde7
4 changed files with 2 additions and 42 deletions
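
For context, a minimal standalone sketch (editorial, not InnoDB source) of the heuristic that the removed dict_table_set_big_rows() implemented; the threshold value used here is an assumption, the real BIG_ROW_SIZE constant is defined in the InnoDB sources:

// Editorial sketch only; BIG_ROW_SIZE_SKETCH is an assumed stand-in
// for InnoDB's BIG_ROW_SIZE threshold.
#include <cstddef>

static const std::size_t BIG_ROW_SIZE_SKETCH = 1024;	// assumed value

// Returns true as soon as the summed maximum column lengths reach the
// threshold, or a single column alone can reach it; this mirrors the
// removed dict_table_set_big_rows() shown in the first hunk below.
bool
row_is_big(const std::size_t* max_col_len, std::size_t n_cols)
{
	std::size_t	row_len = 0;

	for (std::size_t i = 0; i < n_cols; i++) {
		row_len += max_col_len[i];

		if (row_len >= BIG_ROW_SIZE_SKETCH
		    || max_col_len[i] >= BIG_ROW_SIZE_SKETCH) {
			return true;
		}
	}

	return false;
}

With big_rows gone, the adaptive hash index shortcut and the prefetch decision in the last two hunks below no longer consult this flag.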

@@ -1275,31 +1275,6 @@ dict_table_add_system_columns(
 #endif
 }
 
-/** Mark if table has big rows.
-@param[in,out]	table	table handler */
-void
-dict_table_set_big_rows(
-	dict_table_t*	table)
-{
-	ulint	row_len = 0;
-
-	for (ulint i = 0; i < table->n_def; i++) {
-		ulint	col_len = dict_col_get_max_size(
-			dict_table_get_nth_col(table, i));
-
-		row_len += col_len;
-
-		/* If we have a single unbounded field, or several gigantic
-		fields, mark the maximum row size as BIG_ROW_SIZE. */
-		if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) {
-			row_len = BIG_ROW_SIZE;
-			break;
-		}
-	}
-
-	table->big_rows = (row_len >= BIG_ROW_SIZE) ? TRUE : FALSE;
-}
-
 /**********************************************************************//**
 Adds a table object to the dictionary cache. */
 void
@@ -1322,8 +1297,6 @@ dict_table_add_to_cache(
 	fold = ut_fold_string(table->name.m_name);
 	id_fold = ut_fold_ull(table->id);
 
-	dict_table_set_big_rows(table);
-
 	/* Look for a table with the same name: error if such exists */
 	{
 		dict_table_t*	table2;

@@ -386,13 +386,6 @@ dict_table_add_system_columns(
 	dict_table_t*	table,	/*!< in/out: table */
 	mem_heap_t*	heap)	/*!< in: temporary heap */
 	MY_ATTRIBUTE((nonnull));
-/** Mark if table has big rows.
-@param[in,out]	table	table handler */
-void
-dict_table_set_big_rows(
-	dict_table_t*	table)
-	MY_ATTRIBUTE((nonnull));
-
 /**********************************************************************//**
 Adds a table object to the dictionary cache. */
 void

@@ -1493,10 +1493,6 @@ struct dict_table_t {
 	/*!< set of foreign key constraints which refer to this table */
 	dict_foreign_set	referenced_set;
 
-	/** TRUE if the maximum length of a single row exceeds BIG_ROW_SIZE.
-	Initialized in dict_table_add_to_cache(). */
-	unsigned	big_rows:1;
-
 	/** Statistics for query optimization. @{ */
 
 	/** Creation state of 'stats_latch'. */

@@ -1657,8 +1657,7 @@ table_loop:
 #ifdef BTR_CUR_HASH_ADAPT
 	if (consistent_read && plan->unique_search && !plan->pcur_is_open
-	    && !plan->must_get_clust
-	    && !plan->table->big_rows) {
+	    && !plan->must_get_clust) {
 
 		if (!search_latch_locked) {
 			btr_search_s_lock(index);
@@ -2085,8 +2084,7 @@ skip_lock:
 	ut_ad(plan->pcur.latch_mode == BTR_SEARCH_LEAF);
 
 	if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT)
-	    || plan->unique_search || plan->no_prefetch
-	    || plan->table->big_rows) {
+	    || plan->unique_search || plan->no_prefetch) {
 
 		/* No prefetch in operation: go to the next table */