Performed refactoring and restructuring of the code for mwl#248:

  - Moved the definitions of the classes that store data from persistent
    statistical tables into sql_statistics.h, so that the other internal
    data structures keep only references to the corresponding objects.
  - Defined class Column_statistics_collected derived from the class
    Column_statistics. This is a helper class to collect statistics
    on columns.
  - Moved the references to the read statistics to TABLE_SHARE, leaving
    the reference to the collected statistics in TABLE.
  - Added a new clone method for the class Field that allows cloning
    fields attached to table shares. It is used to create the
    fields for min/max values in the memory of the table share.
Also:
  - Added procedures to allocate memory for statistical data in
    the table share memory and in the table memory (see the sketch
    below).
Also: 
  - Added a test case demonstrating how two ANALYZE statements can run
    in parallel to collect statistics on different indexes of the same
    table.
  - Added a test to demonstrate how two connections working
    simultaneously can allocate memory for statistical data in the
    table share memory.
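
A condensed sketch of how the new pieces are tied together when a table
is opened, based on the sql_base.cc and sql_base.h hunks further down.
The wrapper function itself is hypothetical; the members and functions it
uses appear in the hunks below (open_table() passes TRUE for the is_safe
argument of alloc_statistics_for_table_share() while LOCK_open is still
held, open_and_process_table() passes FALSE).

/*
  Illustrative sketch only: memory for the read statistics lives in the
  table share (share->read_stats) and is allocated at most once per share;
  the persistent statistics are then read into it exactly once.
*/
static void ensure_statistics_for_table(THD *thd, TABLE *table)
{
  TABLE_SHARE *share= table->s;

  if (!(thd->variables.use_stat_tables > 0) ||
      share->table_category == TABLE_CATEGORY_SYSTEM)
    return;

  /* Allocate the share-level statistics memory only once per share */
  if (!share->stats_can_be_read &&
      !alloc_statistics_for_table_share(thd, share, FALSE))
    share->stats_can_be_read= TRUE;

  /* Read the persistent statistics into the share only once */
  if (share->stats_can_be_read && !share->stats_is_read)
  {
    (void) read_statistics_for_table(thd, table);
    share->stats_is_read= TRUE;
  }
}

As the sql_admin.cc hunk shows, ANALYZE then allocates per-table collection
structures (alloc_statistics_for_table), fills them
(collect_statistics_for_table) and writes them back to the statistical
tables (update_statistics_for_table); the collected data lives in
TABLE::collected_stats and in the per-field Column_statistics_collected
objects rather than in the share.
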
Igor Babaev 2012-07-26 17:50:08 -07:00
parent cb0a5c84b6
commit 8c499274da
15 changed files with 1223 additions and 433 deletions


@ -334,6 +334,122 @@ and o_orderkey=l_orderkey and p_partkey=l_partkey;
o_orderkey p_partkey
5895 200
set optimizer_switch=@save_optimizer_switch;
flush table lineitem;
set use_stat_tables='never';
select sum(l_extendedprice*l_discount) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24;
revenue
77949.91860000002
set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
select sum(l_extendedprice*l_discount) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24 ;
set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
select sum(l_extendedprice*l_discount) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24;
revenue
77949.91860000002
revenue
77949.91860000002
set use_stat_tables='preferably';
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_receiptdate 1 2.6477
dbt3_s001 lineitem i_l_shipdate 1 2.6500
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
delete from mysql.index_stat
where table_name='lineitem' and
index_name in ('i_l_shipdate', 'i_l_receiptdate');
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
analyze table lineitem persistent for columns() indexes (i_l_shipdate);
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_shipdate 1 2.6500
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
delete from mysql.index_stat
where table_name='lineitem' and index_name= 'i_l_shipdate';
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
analyze table lineitem persistent for columns() indexes (i_l_shipdate);
set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
analyze table lineitem persistent for columns() indexes (i_l_receiptdate);
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_receiptdate 1 2.6477
dbt3_s001 lineitem i_l_shipdate 1 2.6500
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
DROP DATABASE dbt3_s001;
use test;
set use_stat_tables=@save_use_stat_tables;


@ -361,6 +361,178 @@ and o_orderkey=l_orderkey and p_partkey=l_partkey;
o_orderkey p_partkey
5895 200
set optimizer_switch=@save_optimizer_switch;
flush table lineitem;
set use_stat_tables='never';
select sum(l_extendedprice*l_discount) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24;
revenue
77949.91860000002
set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
select sum(l_extendedprice*l_discount) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24 ;
set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
select sum(l_extendedprice*l_discount) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24;
revenue
77949.91860000002
revenue
77949.91860000002
set use_stat_tables='preferably';
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 2 1.0364
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_commitdate 3 1.0000
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey 2 1.0000
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_orderkey_quantity 3 1.0000
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_partkey 2 1.0089
dbt3_s001 lineitem i_l_partkey 3 1.0000
dbt3_s001 lineitem i_l_receiptdate 3 1.0000
dbt3_s001 lineitem i_l_receiptdate 2 1.0152
dbt3_s001 lineitem i_l_receiptdate 1 2.6477
dbt3_s001 lineitem i_l_shipdate 1 2.6500
dbt3_s001 lineitem i_l_shipdate 3 1.0000
dbt3_s001 lineitem i_l_shipdate 2 1.0149
dbt3_s001 lineitem i_l_suppkey 2 1.2073
dbt3_s001 lineitem i_l_suppkey 3 1.0000
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 4 1.0000
dbt3_s001 lineitem i_l_suppkey_partkey 3 1.0030
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
delete from mysql.index_stat
where table_name='lineitem' and
index_name in ('i_l_shipdate', 'i_l_receiptdate');
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 2 1.0364
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_commitdate 3 1.0000
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey 2 1.0000
dbt3_s001 lineitem i_l_orderkey_quantity 3 1.0000
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_partkey 2 1.0089
dbt3_s001 lineitem i_l_partkey 3 1.0000
dbt3_s001 lineitem i_l_suppkey 2 1.2073
dbt3_s001 lineitem i_l_suppkey 3 1.0000
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 3 1.0030
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey_partkey 4 1.0000
analyze table lineitem persistent for columns() indexes (i_l_shipdate);
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 2 1.0364
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_commitdate 3 1.0000
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey 2 1.0000
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_orderkey_quantity 3 1.0000
dbt3_s001 lineitem i_l_partkey 3 1.0000
dbt3_s001 lineitem i_l_partkey 2 1.0089
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_shipdate 3 1.0000
dbt3_s001 lineitem i_l_shipdate 2 1.0149
dbt3_s001 lineitem i_l_shipdate 1 2.6500
dbt3_s001 lineitem i_l_suppkey 3 1.0000
dbt3_s001 lineitem i_l_suppkey 2 1.2073
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 3 1.0030
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey_partkey 4 1.0000
delete from mysql.index_stat
where table_name='lineitem' and index_name= 'i_l_shipdate';
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 2 1.0364
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_commitdate 3 1.0000
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey 2 1.0000
dbt3_s001 lineitem i_l_orderkey_quantity 3 1.0000
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_partkey 2 1.0089
dbt3_s001 lineitem i_l_partkey 3 1.0000
dbt3_s001 lineitem i_l_suppkey 2 1.2073
dbt3_s001 lineitem i_l_suppkey 3 1.0000
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 3 1.0030
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey_partkey 4 1.0000
set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
analyze table lineitem persistent for columns() indexes (i_l_shipdate);
set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
analyze table lineitem persistent for columns() indexes (i_l_receiptdate);
select * from mysql.index_stat where table_name='lineitem' order by index_name;
db_name table_name index_name prefix_arity avg_frequency
dbt3_s001 lineitem PRIMARY 1 4.0033
dbt3_s001 lineitem PRIMARY 2 1.0000
dbt3_s001 lineitem i_l_commitdate 2 1.0364
dbt3_s001 lineitem i_l_commitdate 1 2.7160
dbt3_s001 lineitem i_l_commitdate 3 1.0000
dbt3_s001 lineitem i_l_orderkey 1 4.0033
dbt3_s001 lineitem i_l_orderkey 2 1.0000
dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
dbt3_s001 lineitem i_l_orderkey_quantity 3 1.0000
dbt3_s001 lineitem i_l_partkey 3 1.0000
dbt3_s001 lineitem i_l_partkey 2 1.0089
dbt3_s001 lineitem i_l_partkey 1 30.0250
dbt3_s001 lineitem i_l_receiptdate 1 2.6477
dbt3_s001 lineitem i_l_shipdate 1 2.6500
dbt3_s001 lineitem i_l_suppkey 3 1.0000
dbt3_s001 lineitem i_l_suppkey 2 1.2073
dbt3_s001 lineitem i_l_suppkey 1 600.5000
dbt3_s001 lineitem i_l_suppkey_partkey 3 1.0030
dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
dbt3_s001 lineitem i_l_suppkey_partkey 4 1.0000
DROP DATABASE dbt3_s001;
use test;
set use_stat_tables=@save_use_stat_tables;


@ -144,6 +144,115 @@ eval $QQ1;
set optimizer_switch=@save_optimizer_switch;
#
# Test for parallel memory allocation for statistical data
#
# assumes that the start of the code of memory allocation for stats data has these lines:
#
# DEBUG_SYNC(thd, "statistics_mem_alloc_start1");
# DEBUG_SYNC(thd, "statistics_mem_alloc-start2");
#
let $Q6=
select sum(l_extendedprice*l_discount) as revenue
from lineitem
where l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
and l_discount between 0.06 - 0.01 and 0.06 + 0.01
and l_quantity < 24;
flush table lineitem;
set use_stat_tables='never';
eval $Q6;
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send_eval $Q6
connection con2;
set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send_eval $Q6
connection con1;
--reap
connection con2;
--reap
connection default;
set use_stat_tables='preferably';
disconnect con1;
disconnect con2;
#
# Test for parallel statistics collection
#
# assumes that the start of the stats collection code has these lines:
#
# DEBUG_SYNC(thd, "statistics_collection_start1");
# DEBUG_SYNC(thd, "statistics_collection_start2");
#
select * from mysql.index_stat where table_name='lineitem' order by index_name;
delete from mysql.index_stat
where table_name='lineitem' and
index_name in ('i_l_shipdate', 'i_l_receiptdate');
select * from mysql.index_stat where table_name='lineitem' order by index_name;
--disable_result_log
--disable_warnings
analyze table lineitem persistent for columns() indexes (i_l_shipdate);
--enable_warnings
--enable_result_log
select * from mysql.index_stat where table_name='lineitem' order by index_name;
delete from mysql.index_stat
where table_name='lineitem' and index_name= 'i_l_shipdate';
select * from mysql.index_stat where table_name='lineitem' order by index_name;
connect (con1, localhost, root,,);
connect (con2, localhost, root,,);
connection con1;
set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for columns() indexes (i_l_shipdate)
connection con2;
set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
use dbt3_s001;
set use_stat_tables='preferably';
--send analyze table lineitem persistent for columns() indexes (i_l_receiptdate)
connection con1;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection con2;
--disable_result_log
--disable_warnings
--reap
--enable_warnings
--enable_result_log
connection default;
disconnect con1;
disconnect con2;
select * from mysql.index_stat where table_name='lineitem' order by index_name;
DROP DATABASE dbt3_s001;


@ -1180,11 +1180,11 @@ int Field_num::check_int(CHARSET_INFO *cs, const char *str, int length,
if (str == int_end || error == MY_ERRNO_EDOM)
{
ErrConvString err(str, length, cs);
push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
push_warning_printf(get_thd(), MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"integer", err.ptr(), field_name,
(ulong) table->in_use->warning_info->current_row_for_warning());
(ulong) get_thd()->warning_info->current_row_for_warning());
return 1;
}
/* Test if we have garbage at the end of the given string. */
@ -1253,7 +1253,7 @@ bool Field_num::get_int(CHARSET_INFO *cs, const char *from, uint len,
goto out_of_range;
}
}
if (table->in_use->count_cuted_fields &&
if (get_thd()->count_cuted_fields &&
check_int(cs, from, len, end, error))
return 1;
return 0;
@ -1319,12 +1319,14 @@ String *Field::val_int_as_str(String *val_buffer, bool unsigned_val)
Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
uchar null_bit_arg,
utype unireg_check_arg, const char *field_name_arg)
:ptr(ptr_arg), null_ptr(null_ptr_arg), table(0), orig_table(0),
:ptr(ptr_arg), null_ptr(null_ptr_arg), table(0), orig_table(0), thd(0),
table_name(0), field_name(field_name_arg), option_list(0),
option_struct(0), key_start(0), part_of_key(0),
part_of_key_not_clustered(0), part_of_sortkey(0),
unireg_check(unireg_check_arg), field_length(length_arg),
null_bit(null_bit_arg), is_created_from_null_item(FALSE), vcol_info(0),
null_bit(null_bit_arg), is_created_from_null_item(FALSE),
read_stats(NULL), collected_stats(0),
vcol_info(0),
stored_in_db(TRUE)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
@ -1431,10 +1433,11 @@ int Field::store(const char *to, uint length, CHARSET_INFO *cs,
enum_check_fields check_level)
{
int res;
enum_check_fields old_check_level= table->in_use->count_cuted_fields;
table->in_use->count_cuted_fields= check_level;
THD *thd= get_thd();
enum_check_fields old_check_level= thd->count_cuted_fields;
thd->count_cuted_fields= check_level;
res= store(to, length, cs);
table->in_use->count_cuted_fields= old_check_level;
thd->count_cuted_fields= old_check_level;
return res;
}
@ -1871,6 +1874,18 @@ Field *Field::clone(MEM_ROOT *root, TABLE *new_table, my_ptrdiff_t diff,
}
Field *Field::clone(THD *thd_arg, MEM_ROOT *root, my_ptrdiff_t diff)
{
Field *tmp;
if ((tmp= (Field*) memdup_root(root,(char*) this,size_of())))
{
tmp->thd= thd_arg;
tmp->move_field_offset(diff);
}
return tmp;
}
/****************************************************************************
Field_null, a field that always return NULL
****************************************************************************/
@ -1985,7 +2000,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
uchar *left_wall,*right_wall;
uchar tmp_char;
/*
To remember if table->in_use->cuted_fields has already been incremented,
To remember if get_thd()->cuted_fields has already been incremented,
to do that only once
*/
bool is_cuted_fields_incr=0;
@ -2076,7 +2091,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
it makes the code easer to read.
*/
if (table->in_use->count_cuted_fields)
if (get_thd()->count_cuted_fields)
{
// Skip end spaces
for (;from != end && my_isspace(&my_charset_bin, *from); from++) ;
@ -2228,7 +2243,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
/*
Write digits of the frac_% parts ;
Depending on table->in_use->count_cutted_fields, we may also want
Depending on get_thd()->count_cutted_fields, we may also want
to know if some non-zero tail of these parts will
be truncated (for example, 0.002->0.00 will generate a warning,
while 0.000->0.00 will not)
@ -2246,7 +2261,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
{
if (pos == right_wall)
{
if (table->in_use->count_cuted_fields && !is_cuted_fields_incr)
if (get_thd()->count_cuted_fields && !is_cuted_fields_incr)
break; // Go on below to see if we lose non zero digits
return 0;
}
@ -2667,20 +2682,21 @@ int Field_new_decimal::store(const char *from, uint length,
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int err;
my_decimal decimal_value;
THD *thd= get_thd();
DBUG_ENTER("Field_new_decimal::store(char*)");
if ((err= str2my_decimal(E_DEC_FATAL_ERROR &
~(E_DEC_OVERFLOW | E_DEC_BAD_NUM),
from, length, charset_arg,
&decimal_value)) &&
table->in_use->abort_on_warning)
thd->abort_on_warning)
{
ErrConvString errmsg(from, length, &my_charset_bin);
push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"decimal", errmsg.ptr(), field_name,
(ulong) table->in_use->warning_info->current_row_for_warning());
(ulong) thd->warning_info->current_row_for_warning());
DBUG_RETURN(err);
}
@ -2696,11 +2712,11 @@ int Field_new_decimal::store(const char *from, uint length,
case E_DEC_BAD_NUM:
{
ErrConvString errmsg(from, length, &my_charset_bin);
push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"decimal", errmsg.ptr(), field_name,
(ulong) table->in_use->warning_info->
(ulong) thd->warning_info->
current_row_for_warning());
my_decimal_set_zero(&decimal_value);
break;
@ -2728,6 +2744,7 @@ int Field_new_decimal::store(double nr)
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
my_decimal decimal_value;
int err;
THD *thd= get_thd();
DBUG_ENTER("Field_new_decimal::store(double)");
err= double2my_decimal(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW, nr,
@ -2737,11 +2754,11 @@ int Field_new_decimal::store(double nr)
if (check_overflow(err))
set_value_on_overflow(&decimal_value, decimal_value.sign());
/* Only issue a warning if store_value doesn't issue an warning */
table->in_use->got_warning= 0;
thd->got_warning= 0;
}
if (store_value(&decimal_value))
err= 1;
else if (err && !table->in_use->got_warning)
else if (err && !thd->got_warning)
err= warn_if_overflow(err);
DBUG_RETURN(err);
}
@ -2759,11 +2776,11 @@ int Field_new_decimal::store(longlong nr, bool unsigned_val)
if (check_overflow(err))
set_value_on_overflow(&decimal_value, decimal_value.sign());
/* Only issue a warning if store_value doesn't issue an warning */
table->in_use->got_warning= 0;
get_thd()->got_warning= 0;
}
if (store_value(&decimal_value))
err= 1;
else if (err && !table->in_use->got_warning)
else if (err && !thd->got_warning)
err= warn_if_overflow(err);
return err;
}
@ -3659,7 +3676,7 @@ longlong Field_long::val_int(void)
ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
/* See the comment in Field_long::store(long long) */
DBUG_ASSERT(table->in_use == current_thd);
DBUG_ASSERT(!table || table->in_use == current_thd);
j=sint4korr(ptr);
return unsigned_flag ? (longlong) (uint32) j : (longlong) j;
}
@ -3741,7 +3758,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (table->in_use->count_cuted_fields &&
else if (get_thd()->count_cuted_fields &&
check_int(cs, from, len, end, error))
error= 1;
else
@ -3893,7 +3910,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs)
char *end;
double nr= my_strntod(cs,(char*) from,len,&end,&error);
if (error || (!len || ((uint) (end-from) != len &&
table->in_use->count_cuted_fields)))
get_thd()->count_cuted_fields)))
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
(error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1);
@ -4081,7 +4098,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs)
char *end;
double nr= my_strntod(cs,(char*) from, len, &end, &error);
if (error || (!len || ((uint) (end-from) != len &&
table->in_use->count_cuted_fields)))
get_thd()->count_cuted_fields)))
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
(error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1);
@ -4529,10 +4546,11 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time,
int Field_timestamp::store_time_dec(MYSQL_TIME *ltime, uint dec)
{
THD *thd= table->in_use;
int unused;
MYSQL_TIME l_time= *ltime;
ErrConvTime str(ltime);
THD *thd= get_thd();
bool valid= !check_date(&l_time, pack_time(&l_time) != 0,
(thd->variables.sql_mode & MODE_NO_ZERO_DATE) |
MODE_NO_ZERO_IN_DATE, &unused);
@ -4547,7 +4565,7 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
int error;
int have_smth_to_conv;
ErrConvString str(from, len, cs);
THD *thd= table->in_use;
THD *thd= get_thd();
/* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */
have_smth_to_conv= (str_to_datetime(cs, from, len, &l_time,
@ -4564,7 +4582,7 @@ int Field_timestamp::store(double nr)
MYSQL_TIME l_time;
int error;
ErrConvDouble str(nr);
THD *thd= table->in_use;
THD *thd= get_thd();
longlong tmp= double_to_datetime(nr, &l_time, (thd->variables.sql_mode &
MODE_NO_ZERO_DATE) |
@ -4578,7 +4596,7 @@ int Field_timestamp::store(longlong nr, bool unsigned_val)
MYSQL_TIME l_time;
int error;
ErrConvInteger str(nr);
THD *thd= table->in_use;
THD *thd= get_thd();
/* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */
longlong tmp= number_to_datetime(nr, 0, &l_time, (thd->variables.sql_mode &
@ -4670,7 +4688,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
bool Field_timestamp::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
THD *thd= table->in_use;
THD *thd= get_thd();
thd->time_zone_used= 1;
ulong sec_part;
my_time_t temp= get_timestamp(&sec_part);
@ -4723,7 +4741,7 @@ void Field_timestamp::sql_type(String &res) const
int Field_timestamp::set_time()
{
THD *thd= table->in_use;
THD *thd= get_thd();
set_notnull();
store_TIME(thd->query_start(), 0);
return 0;
@ -4872,7 +4890,7 @@ int Field_timestamp_hires::store_decimal(const my_decimal *d)
int error;
MYSQL_TIME ltime;
longlong tmp;
THD *thd= table->in_use;
THD *thd= get_thd();
ErrConvDecimal str(d);
if (my_decimal2seconds(d, &nr, &sec_part))
@ -4890,7 +4908,7 @@ int Field_timestamp_hires::store_decimal(const my_decimal *d)
int Field_timestamp_hires::set_time()
{
THD *thd= table->in_use;
THD *thd= get_thd();
set_notnull();
store_TIME(thd->query_start(), thd->query_start_sec_part());
return 0;
@ -5009,7 +5027,7 @@ int Field_temporal::store(const char *from,uint len,CHARSET_INFO *cs)
MYSQL_TIME ltime;
int error;
enum enum_mysql_timestamp_type func_res;
THD *thd= table->in_use;
THD *thd= get_thd();
ErrConvString str(from, len, cs);
func_res= str_to_datetime(cs, from, len, &ltime,
@ -5026,7 +5044,7 @@ int Field_temporal::store(double nr)
{
int error= 0;
MYSQL_TIME ltime;
THD *thd= table->in_use;
THD *thd= get_thd();
ErrConvDouble str(nr);
longlong tmp= double_to_datetime(nr, &ltime,
@ -5044,7 +5062,7 @@ int Field_temporal::store(longlong nr, bool unsigned_val)
int error;
MYSQL_TIME ltime;
longlong tmp;
THD *thd= table->in_use;
THD *thd= get_thd();
ErrConvInteger str(nr);
tmp= number_to_datetime(nr, 0, &ltime, (TIME_FUZZY_DATE |
@ -5109,7 +5127,7 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs)
int was_cut;
int have_smth_to_conv=
str_to_time(cs, from, len, &ltime,
table->in_use->variables.sql_mode &
get_thd()->variables.sql_mode &
(MODE_NO_ZERO_DATE | MODE_NO_ZERO_IN_DATE |
MODE_INVALID_DATES),
&was_cut) > MYSQL_TIMESTAMP_ERROR;
@ -5215,7 +5233,7 @@ String *Field_time::val_str(String *val_buffer,
bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
THD *thd= table->in_use;
THD *thd= get_thd();
if (!(fuzzydate & (TIME_FUZZY_DATE|TIME_TIME_ONLY)))
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
@ -5405,7 +5423,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
if (table->in_use->count_cuted_fields &&
if (get_thd()->count_cuted_fields &&
(error= check_int(cs, from, len, end, error)))
{
if (error == 1) /* empty or incorrect string */
@ -5865,7 +5883,7 @@ int Field_datetime_hires::store_decimal(const my_decimal *d)
int error;
MYSQL_TIME ltime;
longlong tmp;
THD *thd= table->in_use;
THD *thd= get_thd();
ErrConvDecimal str(d);
if (my_decimal2seconds(d, &nr, &sec_part))
@ -6002,7 +6020,9 @@ check_string_copy_error(Field_str *field,
{
const char *pos;
char tmp[32];
THD *thd= field->table->in_use;
THD *thd;
thd= field->get_thd();
if (!(pos= well_formed_error_pos) &&
!(pos= cannot_convert_error_pos))
@ -6044,11 +6064,12 @@ int
Field_longstr::report_if_important_data(const char *pstr, const char *end,
bool count_spaces)
{
if ((pstr < end) && table->in_use->count_cuted_fields)
THD *thd= get_thd();
if ((pstr < end) && thd->count_cuted_fields)
{
if (test_if_important_data(field_charset, pstr, end))
{
if (table->in_use->abort_on_warning)
if (thd->abort_on_warning)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
@ -6075,7 +6096,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
const char *from_end_pos;
/* See the comment for Field_long::store(long long) */
DBUG_ASSERT(table->in_use == current_thd);
DBUG_ASSERT(!table || table->in_use == current_thd);
copy_length= well_formed_copy_nchars(field_charset,
(char*) ptr, field_length,
@ -6121,7 +6142,7 @@ int Field_str::store(double nr)
if (error)
{
if (table->in_use->abort_on_warning)
if (get_thd()->abort_on_warning)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
@ -6181,7 +6202,7 @@ double Field_string::val_real(void)
double result;
result= my_strntod(cs,(char*) ptr,field_length,&end,&error);
if (!table->in_use->no_errors &&
if (!get_thd()->no_errors &&
(error || (field_length != (uint32)(end - (char*) ptr) &&
!check_if_only_end_space(cs, end,
(char*) ptr + field_length))))
@ -6205,7 +6226,7 @@ longlong Field_string::val_int(void)
longlong result;
result= my_strntoll(cs, (char*) ptr,field_length,10,&end,&error);
if (!table->in_use->no_errors &&
if (!get_thd()->no_errors &&
(error || (field_length != (uint32)(end - (char*) ptr) &&
!check_if_only_end_space(cs, end,
(char*) ptr + field_length))))
@ -6225,9 +6246,9 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
{
ASSERT_COLUMN_MARKED_FOR_READ;
/* See the comment for Field_long::store(long long) */
DBUG_ASSERT(table->in_use == current_thd);
DBUG_ASSERT(!table || table->in_use == current_thd);
uint length;
if (table->in_use->variables.sql_mode &
if (get_thd()->variables.sql_mode &
MODE_PAD_CHAR_TO_FULL_LENGTH)
length= my_charpos(field_charset, ptr, ptr + field_length,
field_length / field_charset->mbmaxlen);
@ -6244,7 +6265,7 @@ my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
ASSERT_COLUMN_MARKED_FOR_READ;
int err= str2my_decimal(E_DEC_FATAL_ERROR, (char*) ptr, field_length,
charset(), decimal_value);
if (!table->in_use->no_errors && err)
if (!get_thd()->no_errors && err)
{
ErrConvString errmsg((char*) ptr, field_length, charset());
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
@ -6628,7 +6649,7 @@ double Field_varstring::val_real(void)
uint length= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
result= my_strntod(cs, (char*)ptr+length_bytes, length, &end, &error);
if (!table->in_use->no_errors &&
if (!get_thd()->no_errors &&
(error || (length != (uint)(end - (char*)ptr+length_bytes) &&
!check_if_only_end_space(cs, end, (char*)ptr+length_bytes+length))))
{
@ -6651,7 +6672,7 @@ longlong Field_varstring::val_int(void)
longlong result= my_strntoll(cs, (char*) ptr+length_bytes, length, 10,
&end, &error);
if (!table->in_use->no_errors &&
if (!get_thd()->no_errors &&
(error || (length != (uint)(end - (char*)ptr+length_bytes) &&
!check_if_only_end_space(cs, end, (char*)ptr+length_bytes+length))))
{
@ -6680,7 +6701,7 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
int error= str2my_decimal(E_DEC_FATAL_ERROR, (char*) ptr+length_bytes, length,
cs, decimal_value);
if (!table->in_use->no_errors && error)
if (!get_thd()->no_errors && error)
{
push_numerical_conversion_warning(current_thd, (char*)ptr+length_bytes,
length, cs, "DECIMAL",
@ -7661,7 +7682,7 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
tmp=0;
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
}
if (!table->in_use->count_cuted_fields)
if (!get_thd()->count_cuted_fields)
err= 0;
}
else
@ -7685,7 +7706,7 @@ int Field_enum::store(longlong nr, bool unsigned_val)
if ((ulonglong) nr > typelib->count || nr == 0)
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
if (nr != 0 || table->in_use->count_cuted_fields)
if (nr != 0 || get_thd()->count_cuted_fields)
{
nr= 0;
error= 1;
@ -8215,7 +8236,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
{
set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len);
memset(ptr, 0xff, bytes_in_rec);
if (table->in_use->really_abort_on_warning())
if (get_thd()->really_abort_on_warning())
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
@ -8650,7 +8671,7 @@ int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
memset(ptr, 0xff, bytes_in_rec);
if (bits)
*ptr&= ((1 << bits) - 1); /* set first uchar */
if (table->in_use->really_abort_on_warning())
if (get_thd()->really_abort_on_warning())
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
@ -9752,7 +9773,7 @@ void Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level,
uint code, const ErrConv *str,
timestamp_type ts_type, int cuted_increment)
{
THD *thd= table->in_use;
THD *thd= get_thd();
if (thd->really_abort_on_warning() && level >= MYSQL_ERROR::WARN_LEVEL_WARN)
make_truncated_value_warning(thd, level, str, ts_type, field_name);
else


@ -36,6 +36,8 @@ class Protocol;
class Create_field;
class Relay_log_info;
class Field;
class Column_statistics;
class Column_statistics_collected;
enum enum_check_fields
{
@ -173,6 +175,7 @@ public:
*/
TABLE *table; // Pointer for table
TABLE *orig_table; // Pointer to original table
THD *thd; // Used when table == NULL
const char * const *table_name;
const char *field_name;
/** reference to the list of options or NULL */
@ -220,89 +223,16 @@ public:
bool is_stat_field; /* TRUE in Field objects created for column min/max values */
/* Statistical data on a column */
class Column_statistics
{
private:
static const uint Scale_factor_nulls_ratio= 100000;
static const uint Scale_factor_avg_length= 100000;
static const uint Scale_factor_avg_frequency= 100000;
public:
/*
Bitmap indicating what statistical characteristics
are available for the column
*/
uint32 column_stat_nulls;
/* Minimum value for the column */
Field *min_value;
/* Maximum value for the column */
Field *max_value;
private:
/*
The ratio Z/N multiplied by the scale factor Scale_factor_nulls_ratio,
where N is the total number of rows,
Z is the number of nulls in the column
*/
ulong nulls_ratio;
/*
Average number of bytes occupied by the representation of a
value of the column in memory buffers such as join buffer
multiplied by the scale factor Scale_factor_avg_length
CHAR values are stripped of trailing spaces
Flexible values are stripped of their length prefixes.
*/
ulong avg_length;
/*
The ratio N/D multiplied by the scale factor Scale_factor_avg_frequency,
where N is the number of rows with null value
in the column, D the number of distinct values among them
*/
ulong avg_frequency;
public:
double get_nulls_ratio()
{
return (double) nulls_ratio / Scale_factor_nulls_ratio;
}
double get_avg_length()
{
return (double) avg_length / Scale_factor_avg_length;
}
double get_avg_frequency()
{
return (double) avg_frequency / Scale_factor_avg_frequency;
}
void set_nulls_ratio (double val)
{
nulls_ratio= (ulong) (val * Scale_factor_nulls_ratio);
}
void set_avg_length (double val)
{
avg_length= (ulong) (val * Scale_factor_avg_length);
}
void set_avg_frequency (double val)
{
avg_frequency= (ulong) (val * Scale_factor_avg_frequency);
}
};
/*
This structure is used for statistical data on the column
that has been read from the statistical table column_stat
*/
Column_statistics read_stat;
Column_statistics *read_stats;
/*
This structure is used for statistical data on the column that
is collected by the function collect_statistics_for_table
*/
Column_statistics write_stat;
/* These members are used only when collecting statistics on the column */
ha_rows nulls;
ulonglong column_total_length;
Count_distinct_field *count_distinct;
Column_statistics_collected *collected_stats;
/*
This is additional data provided for any computed(virtual) field.
@ -522,6 +452,8 @@ public:
*/
inline bool real_maybe_null(void) { return null_ptr != 0; }
inline THD *get_thd() { return table ? table->in_use : thd; }
enum {
LAST_NULL_BYTE_UNDEF= 0
};
@ -560,6 +492,7 @@ public:
Field *clone(MEM_ROOT *mem_root, TABLE *new_table);
Field *clone(MEM_ROOT *mem_root, TABLE *new_table, my_ptrdiff_t diff,
bool stat_flag= FALSE);
Field *clone(THD *thd_arg, MEM_ROOT *mem_root, my_ptrdiff_t diff);
inline void move_field(uchar *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg)
{
ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg;


@ -484,6 +484,7 @@ ulong refresh_version; /* Increments on each reload */
query_id_t global_query_id;
my_atomic_rwlock_t global_query_id_lock;
my_atomic_rwlock_t thread_running_lock;
my_atomic_rwlock_t statistics_lock;
ulong aborted_threads, aborted_connects;
ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size;
ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use;
@ -1852,6 +1853,7 @@ void clean_up(bool print_message)
sys_var_end();
my_atomic_rwlock_destroy(&global_query_id_lock);
my_atomic_rwlock_destroy(&thread_running_lock);
my_atomic_rwlock_destroy(&statistics_lock);
mysql_mutex_lock(&LOCK_thread_count);
DBUG_PRINT("quit", ("got thread count lock"));
ready_to_exit=1;
@ -7275,6 +7277,7 @@ static int mysql_init_variables(void)
global_query_id= thread_id= 1L;
my_atomic_rwlock_init(&global_query_id_lock);
my_atomic_rwlock_init(&thread_running_lock);
my_atomic_rwlock_init(&statistics_lock);
strmov(server_version, MYSQL_SERVER_VERSION);
threads.empty();
thread_cache.empty();


@ -712,6 +712,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
thd->variables.use_stat_tables > 0)
{
if (!(compl_result_code=
alloc_statistics_for_table(thd, table->table)) &&
!(compl_result_code=
collect_statistics_for_table(thd, table->table)))
compl_result_code= update_statistics_for_table(thd, table->table);
}


@ -3138,6 +3138,16 @@ retry_share:
while (table_cache_count > table_cache_size && unused_tables)
free_cache_entry(unused_tables);
if (thd->variables.use_stat_tables > 0)
{
if (share->table_category != TABLE_CATEGORY_SYSTEM)
{
if (!share->stats_can_be_read &&
!alloc_statistics_for_table_share(thd, share, TRUE))
share->stats_can_be_read= TRUE;
}
}
mysql_mutex_unlock(&LOCK_open);
/* make a new table */
@ -4632,11 +4642,21 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
goto end;
}
if (thd->variables.use_stat_tables > 0)
if (thd->variables.use_stat_tables > 0 && tables->table)
{
if (tables->table && tables->table->s &&
tables->table->s->table_category != TABLE_CATEGORY_SYSTEM)
(void) read_statistics_for_table(thd, tables->table);
TABLE_SHARE *table_share= tables->table->s;
if (table_share && table_share->table_category != TABLE_CATEGORY_SYSTEM)
{
if (!table_share->stats_can_be_read &&
!alloc_statistics_for_table_share(thd, table_share, FALSE))
table_share->stats_can_be_read= TRUE;
if (table_share->stats_can_be_read && !table_share->stats_is_read)
{
(void) read_statistics_for_table(thd, tables->table);
table_share->stats_is_read= TRUE;
}
}
}
process_view_routines:


@ -314,6 +314,9 @@ int open_and_lock_tables_derived(THD *thd, TABLE_LIST *tables, bool derived);
int read_statistics_for_table(THD *thd, TABLE *table);
int collect_statistics_for_table(THD *thd, TABLE *table);
int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *share,
bool is_safe);
int alloc_statistics_for_table(THD *thd, TABLE *table);
int update_statistics_for_table(THD *thd, TABLE *table);
int delete_statistics_for_table(THD *thd, LEX_STRING *db, LEX_STRING *tab);
int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col);


@ -51,6 +51,7 @@
#include "opt_subselect.h"
#include "log_slow.h"
#include "sql_derived.h"
#include "sql_statistics.h"
#include "debug_sync.h" // DEBUG_SYNC
#include <m_ctype.h>
@ -14252,7 +14253,6 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->intersect_keys.init();
table->keys_in_use_for_query.init();
table->no_rows_with_nulls= param->force_not_null_cols;
table->read_stat.cardinality_is_null= TRUE;
table->s= share;
init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
@ -14690,8 +14690,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->ext_key_parts= keyinfo->key_parts;
keyinfo->key_length=0;
keyinfo->rec_per_key=NULL;
keyinfo->read_stat.init_avg_frequency(NULL);
keyinfo->write_stat.init_avg_frequency(NULL);
keyinfo->read_stats= NULL;
keyinfo->collected_stats= NULL;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->name= (char*) "group_key";
@ -14808,7 +14808,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->rec_per_key=0;
keyinfo->read_stat.init_avg_frequency(NULL);
keyinfo->read_stats= NULL;
keyinfo->collected_stats= NULL;
/*
Create an extra field to hold NULL bits so that unique indexes on

File diff suppressed because it is too large.


@ -60,4 +60,159 @@ enum enum_index_stat_col
INDEX_STAT_AVG_FREQUENCY
};
class Columns_statistics;
class Index_statistics;
/* Statistical data on a table */
class Table_statistics
{
public:
my_bool cardinality_is_null; /* TRUE if the cardinality is unknown */
ha_rows cardinality; /* Number of rows in the table */
Column_statistics *column_stats; /* Array of statistical data for columns */
Index_statistics *index_stats; /* Array of statistical data for indexes */
ulong *idx_avg_frequency; /* Array of records per key for index prefixes */
};
/* Statistical data on a column */
class Column_statistics
{
private:
static const uint Scale_factor_nulls_ratio= 100000;
static const uint Scale_factor_avg_length= 100000;
static const uint Scale_factor_avg_frequency= 100000;
public:
/*
Bitmap indicating what statistical characteristics
are available for the column
*/
uint32 column_stat_nulls;
/* Minimum value for the column */
Field *min_value;
/* Maximum value for the column */
Field *max_value;
private:
/*
The ratio Z/N multiplied by the scale factor Scale_factor_nulls_ratio,
where
N is the total number of rows,
Z is the number of nulls in the column
*/
ulong nulls_ratio;
/*
Average number of bytes occupied by the representation of a
value of the column in memory buffers such as join buffer
multiplied by the scale factor Scale_factor_avg_length.
CHAR values are stripped of trailing spaces.
Flexible values are stripped of their length prefixes.
*/
ulong avg_length;
/*
The ratio N/D multiplied by the scale factor Scale_factor_avg_frequency,
where
N is the number of rows with not null value in the column,
D the number of distinct values among them
*/
ulong avg_frequency;
public:
void set_all_nulls()
{
column_stat_nulls=
((1 << (COLUMN_STAT_AVG_FREQUENCY-COLUMN_STAT_COLUMN_NAME))-1) <<
(COLUMN_STAT_COLUMN_NAME+1);
}
void set_not_null(uint stat_field_no)
{
column_stat_nulls&= ~(1 << stat_field_no);
}
bool is_null(uint stat_field_no)
{
return test(column_stat_nulls & (1 << stat_field_no));
}
double get_nulls_ratio()
{
return (double) nulls_ratio / Scale_factor_nulls_ratio;
}
double get_avg_length()
{
return (double) avg_length / Scale_factor_avg_length;
}
double get_avg_frequency()
{
return (double) avg_frequency / Scale_factor_avg_frequency;
}
void set_nulls_ratio (double val)
{
nulls_ratio= (ulong) (val * Scale_factor_nulls_ratio);
}
void set_avg_length (double val)
{
avg_length= (ulong) (val * Scale_factor_avg_length);
}
void set_avg_frequency (double val)
{
avg_frequency= (ulong) (val * Scale_factor_avg_frequency);
}
};
/* Statistical data on an index prefixes */
class Index_statistics
{
private:
static const uint Scale_factor_avg_frequency= 100000;
/*
The k-th element of this array contains the ratio N/D
multiplied by the scale factor Scale_factor_avg_frequency,
where N is the number of index entries without nulls
in the first k components, and D is the number of distinct
k-component prefixes among them
*/
ulong *avg_frequency;
public:
void init_avg_frequency(ulong *ptr) { avg_frequency= ptr; }
bool avg_frequency_is_inited() { return avg_frequency != NULL; }
double get_avg_frequency(uint i)
{
return (double) avg_frequency[i] / Scale_factor_avg_frequency;
}
void set_avg_frequency(uint i, double val)
{
avg_frequency[i]= (ulong) (val * Scale_factor_avg_frequency);
}
};
#endif /* SQL_STATISTICS_H */
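
The Column_statistics and Index_statistics getters and setters above store
each statistic as a ulong scaled by 100000. A tiny standalone illustration
of that encoding (plain C++, not server code; the variable names are made
up for the example):

#include <cassert>
#include <cmath>

int main()
{
  const unsigned scale= 100000;        // same role as Scale_factor_avg_frequency
  double val= 2.6477;                  // e.g. an avg_frequency computed by ANALYZE
  unsigned long stored= (unsigned long) (val * scale);  // what set_avg_frequency() does
  double restored= (double) stored / scale;             // what get_avg_frequency() does
  assert(std::fabs(restored - val) < 1.0 / scale);      // round-trips within 1/scale
  return 0;
}
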


@ -29,6 +29,7 @@
struct TABLE;
class Field;
class Index_statistics;
class THD;
@ -121,43 +122,16 @@ typedef struct st_key {
*/
ulong *rec_per_key;
/* Statistical data on an index prefixes */
class Index_statistics
{
private:
static const uint Scale_factor_avg_frequency= 100000;
/*
The k-th element of this array contains the ratio N/D
multiplied by the scale factor Scale_factor_avg_frequency,
where N is the number of index entries without nulls
in the first k components, and D is the number of distinct
k-component prefixes among them
*/
ulong *avg_frequency;
public:
void init_avg_frequency(ulong *ptr) { avg_frequency= ptr; }
bool avg_frequency_is_set() { return avg_frequency != NULL; }
double get_avg_frequency(uint i)
{
return (double) avg_frequency[i] / Scale_factor_avg_frequency;
}
void set_avg_frequency(uint i, double val)
{
avg_frequency[i]= (ulong) (val * Scale_factor_avg_frequency);
}
};
/*
This structure is used for statistical data on the index
that has been read from the statistical table index_stat
*/
Index_statistics read_stat;
Index_statistics *read_stats;
/*
This structure is used for statistical data on the index that
is collected by the function collect_statistics_for_table
*/
Index_statistics write_stat;
Index_statistics *collected_stats;
union {
int bdb_return_if_eq;
@ -168,13 +142,8 @@ typedef struct st_key {
engine_option_value *option_list;
ha_index_option_struct *option_struct; /* structure with parsed options */
inline double real_rec_per_key(uint i)
{
if (rec_per_key == 0)
return 0;
return (is_statistics_from_stat_tables ?
read_stat.get_avg_frequency(i) : (double) rec_per_key[i]);
}
double real_rec_per_key(uint i);
} KEY;


@ -39,6 +39,7 @@
#include "my_bit.h"
#include "sql_select.h"
#include "sql_derived.h"
#include "sql_statistics.h"
#include "mdl.h" // MDL_wait_for_graph_visitor
/* INFORMATION_SCHEMA name */
@ -762,8 +763,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
ulong pos, record_offset;
ulong *rec_per_key= NULL;
ulong rec_buff_length;
ulong *read_avg_frequency= NULL;
ulong *write_avg_frequency= NULL;
handler *handler_file= 0;
KEY *keyinfo;
KEY_PART_INFO *key_part= NULL;
@ -946,14 +945,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (!(rec_per_key= (ulong*) alloc_root(&share->mem_root,
sizeof(ulong) * ext_key_parts)))
goto err;
if (!(read_avg_frequency= (ulong*) alloc_root(&share->mem_root,
sizeof(double) *
ext_key_parts)))
goto err;
if (!(write_avg_frequency= (ulong*) alloc_root(&share->mem_root,
sizeof(double) *
ext_key_parts)))
goto err;
first_key_part= key_part;
first_key_parts= first_keyinfo.key_parts;
keyinfo->flags= first_keyinfo.flags;
@ -966,13 +957,9 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
keyinfo->key_part= key_part;
keyinfo->rec_per_key= rec_per_key;
keyinfo->read_stat.init_avg_frequency(read_avg_frequency);
keyinfo->write_stat.init_avg_frequency(write_avg_frequency);
for (j=keyinfo->key_parts ; j-- ; key_part++)
{
*rec_per_key++=0;
*read_avg_frequency++= 0;
*write_avg_frequency++= 0;
key_part->fieldnr= (uint16) (uint2korr(strpos) & FIELD_NR_MASK);
key_part->offset= (uint) uint2korr(strpos+2)-1;
key_part->key_type= (uint) uint2korr(strpos+5);
@ -1019,8 +1006,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
{
*key_part++= first_key_part[j];
*rec_per_key++= 0;
*read_avg_frequency++= 0;
*write_avg_frequency++= 0;
keyinfo->ext_key_parts++;
keyinfo->ext_key_part_map|= 1 << j;
}
@ -2416,8 +2401,6 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
}
#endif
outparam->read_stat.cardinality_is_null= TRUE;
if (!(field_ptr = (Field **) alloc_root(&outparam->mem_root,
(uint) ((share->fields+1)*
sizeof(Field*)))))
@ -5965,7 +5948,8 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
if (!keyinfo->rec_per_key)
return TRUE;
bzero(keyinfo->rec_per_key, sizeof(ulong)*key_parts);
keyinfo->read_stat.init_avg_frequency(NULL);
keyinfo->read_stats= NULL;
keyinfo->collected_stats= NULL;
for (i= 0; i < key_parts; i++)
{
@ -6753,6 +6737,14 @@ uint TABLE_SHARE::actual_n_key_parts(THD *thd)
}
double KEY::real_rec_per_key(uint i)
{
if (rec_per_key == 0)
return 0;
return (is_statistics_from_stat_tables ?
read_stats->get_avg_frequency(i) : (double) rec_per_key[i]);
}
/*****************************************************************************
** Instansiate templates
*****************************************************************************/


@ -45,6 +45,7 @@ struct TABLE_LIST;
class ACL_internal_schema_access;
class ACL_internal_table_access;
class Field;
class Table_statistics;
/*
Used to identify NESTED_JOIN structures within a join (applicable only to
@ -577,6 +578,15 @@ struct TABLE_SHARE
KEY *key_info; /* data of keys in database */
uint *blob_field; /* Index to blobs in Field arrray*/
bool stats_can_be_read; /* Memory for statistical data is allocated */
bool stats_is_read; /* Statistical data for table has been read
from statistical tables */
/*
This structure is used for statistical data on the table
that has been read from the statistical table table_stat
*/
Table_statistics *read_stats;
uchar *default_values; /* row with default values */
LEX_STRING comment; /* Comment about table */
CHARSET_INFO *table_charset; /* Default charset of string fields */
@ -1007,24 +1017,11 @@ public:
*/
query_id_t query_id;
/* Statistical data on a table */
class Table_statistics
{
public:
my_bool cardinality_is_null; /* TRUE if the cardinality is unknown */
ha_rows cardinality; /* Number of rows in the table */
};
/*
This structure is used for statistical data on the table
that has been read from the statistical table table_stat
*/
Table_statistics read_stat;
/*
This structure is used for statistical data on the table that
is collected by the function collect_statistics_for_table
*/
Table_statistics write_stat;
Table_statistics *collected_stats;
/* The estimate of the number of records in the table used by optimizer */
ha_rows used_stat_records;