--source include/have_sequence.inc
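# (have_sequence.inc provides the seq_1_to_N sequence tables used by
# several tests below)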
SET SQL_WARNINGS=1;
#
# This failed for Elizabeth Mattijsen
#
CREATE TABLE t1 (
ID CHAR(32) NOT NULL,
name CHAR(32) NOT NULL,
value CHAR(255),
INDEX indexIDname (ID(8),name(8))
);
INSERT INTO t1 VALUES
('keyword','indexdir','/export/home/local/www/database/indexes/keyword');
INSERT INTO t1 VALUES ('keyword','urlprefix','text/ /text');
INSERT INTO t1 VALUES ('keyword','urlmap','/text/ /');
INSERT INTO t1 VALUES ('keyword','attr','personal employee company');
INSERT INTO t1 VALUES
('emailgids','indexdir','/export/home/local/www/database/indexes/emailgids');
INSERT INTO t1 VALUES ('emailgids','urlprefix','text/ /text');
INSERT INTO t1 VALUES ('emailgids','urlmap','/text/ /');
INSERT INTO t1 VALUES ('emailgids','attr','personal employee company');
SELECT value FROM t1 WHERE ID='emailgids' AND name='attr';
drop table t1;
#
# Problem with many key parts and many OR conditions
#
CREATE TABLE t1 (
price int(5) DEFAULT '0' NOT NULL,
area varchar(40) DEFAULT '' NOT NULL,
type varchar(40) DEFAULT '' NOT NULL,
transityes enum('Y','N') DEFAULT 'Y' NOT NULL,
shopsyes enum('Y','N') DEFAULT 'Y' NOT NULL,
schoolsyes enum('Y','N') DEFAULT 'Y' NOT NULL,
petsyes enum('Y','N') DEFAULT 'Y' NOT NULL,
KEY price (price,area,type,transityes,shopsyes,schoolsyes,petsyes)
);
INSERT INTO t1 VALUES (900,'Vancouver','Shared/Roomate','N','N','N','N');
INSERT INTO t1 VALUES (900,'Vancouver','Shared/Roomate','N','N','N','N');
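# '' is not a valid value for the ENUM columns; with IGNORE the special
# empty value is stored instead and a warning is raised.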
INSERT IGNORE INTO t1 VALUES (900,'Vancouver','Shared/Roomate','','','','');
INSERT INTO t1 VALUES (900,'Vancouver','Shared/Roomate','Y','Y','Y','Y');
INSERT INTO t1 VALUES (900,'Vancouver','Shared/Roomate','Y','Y','Y','Y');
INSERT INTO t1 VALUES (900,'Vancouver','Shared/Roomate','Y','Y','Y','Y');
INSERT INTO t1 VALUES (900,'Vancouver','Shared/Roomate','Y','Y','Y','Y');
INSERT INTO t1 VALUES (900,'Vancouver','Shared/Roomate','Y','Y','Y','Y');
SELECT * FROM t1
WHERE area='Vancouver' and transityes='y' and schoolsyes='y'
  and ( ((type='1 Bedroom' or type='Studio/Bach') and (price<=500)) or
        ((type='2 Bedroom') and (price<=550)) or
        ((type='Shared/Roomate') and (price<=300)) or
        ((type='Room and Board') and (price<=500)) )
  and price <= 400;
drop table t1;
#
# No longer a problem with primary key
#
CREATE TABLE t1 (program enum('signup','unique','sliding') not null, type enum('basic','sliding','signup'), sites set('mt'), PRIMARY KEY (program));
# This no longer gives an error for a wrong primary key
ALTER TABLE t1 modify program enum('signup','unique','sliding');
drop table t1;
#
# Test of compressed decimal index.
#
CREATE TABLE t1 (
name varchar(50) DEFAULT '' NOT NULL,
author varchar(50) DEFAULT '' NOT NULL,
category decimal(10,0) DEFAULT '0' NOT NULL,
email varchar(50),
password varchar(50),
proxy varchar(50),
bitmap varchar(20),
msg varchar(255),
urlscol varchar(127),
urlhttp varchar(127),
timeout decimal(10,0),
nbcnx decimal(10,0),
creation decimal(10,0),
livinguntil decimal(10,0),
lang decimal(10,0),
type decimal(10,0),
subcat decimal(10,0),
subtype decimal(10,0),
reg char(1),
scs varchar(255),
capacity decimal(10,0),
userISP varchar(50),
CCident varchar(50) DEFAULT '' NOT NULL,
PRIMARY KEY (name,author,category)
);
INSERT INTO t1 VALUES
('patnom','patauteur',0,'p.favre@cryo-networks.fr',NULL,NULL,'#p2sndnq6ae5g1u6t','essai salut','scol://195.242.78.119:patauteur.patnom',NULL,NULL,NULL,950036174,-882087474,NULL,3,0,3,'1','Pub/patnom/futur_divers.scs',NULL,'pat','CC1');
INSERT INTO t1 VALUES
('LeNomDeMonSite','Marc',0,'m.barilley@cryo-networks.fr',NULL,NULL,NULL,NULL,'scol://195.242.78.119:Marc.LeNomDeMonSite',NULL,NULL,NULL,950560434,-881563214,NULL,3,0,3,'1','Pub/LeNomDeMonSite/domus_hibere.scs',NULL,'Marq','CC1');
select * from t1 where name='patnom' and author='patauteur' and category=0;
drop table t1;
#
# Problem with search on partial index
#
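# ('[T,U]_axpy' and '[T,U]_axpby' share the same 5-character prefix
# '[T,U]', so a lookup through name_idx (name(5)) must re-check the full
# column value against the rows it finds)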
create table t1
(
name_id int not null auto_increment,
name blob,
INDEX name_idx (name(5)),
primary key (name_id)
);
INSERT t1 VALUES(NULL,'/');
INSERT t1 VALUES(NULL,'[T,U]_axpby');
SELECT * FROM t1 WHERE name='[T,U]_axpy';
SELECT * FROM t1 WHERE name='[T,U]_axpby';
create table t2
(
name_id int not null auto_increment,
name char(255) binary,
INDEX name_idx (name(5)),
primary key (name_id)
);
INSERT t2 select * from t1;
SELECT * FROM t2 WHERE name='[T,U]_axpy';
SELECT * FROM t2 WHERE name='[T,U]_axpby';
# Test possible problems with warnings in CREATE ... SELECT
CREATE TABLE t3 SELECT * FROM t2 WHERE name='[T,U]_axpby';
SELECT * FROM t2 WHERE name='[T,U]_axpby';
drop table t1,t2,t3;
#
# Test bug with long primary key
#
create table t1
(
SEQNO numeric(12) not null,
MOTYPEID numeric(12) not null,
MOINSTANCEID numeric(12) not null,
ATTRID numeric(12) not null,
VALUE varchar(120) not null,
primary key (SEQNO, MOTYPEID, MOINSTANCEID, ATTRID, VALUE )
);
INSERT INTO t1 VALUES (1, 1, 1, 1, 'a');
INSERT INTO t1 VALUES (1, 1, 1, 1, 'b');
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES (1, 1, 1, 1, 'a');
drop table t1;
#
# Test with blob + tinyint key
# (Failed for Greg Valure)
#
CREATE TABLE t1 (
a tinytext NOT NULL,
b tinyint(3) unsigned NOT NULL default '0',
PRIMARY KEY (a(32),b)
) ENGINE=MyISAM;
INSERT INTO t1 VALUES ('a',1),('a',2);
SELECT * FROM t1 WHERE a='a' AND b=2;
SELECT * FROM t1 WHERE a='a' AND b in (2);
SELECT * FROM t1 WHERE a='a' AND b in (1,2);
drop table t1;
#
# Test of create key order
#
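# Expected order in SHOW KEYS: the PRIMARY KEY first, then the unique
# keys, then the non-unique key, regardless of declaration order.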
create table t1 (a int not null unique, b int unique, c int, d int not null primary key, key(c), e int not null unique);
show keys from t1;
drop table t1;
#
# Problem with UNIQUE() with NULL parts and auto increment
#
CREATE TABLE t1 (c CHAR(10) NOT NULL,i INT NOT NULL AUTO_INCREMENT,
UNIQUE (c,i));
INSERT IGNORE INTO t1 (c) VALUES (NULL),(NULL);
SELECT * FROM t1;
INSERT INTO t1 (c) VALUES ('a'),('a');
SELECT * FROM t1;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (c CHAR(10) NULL, i INT NOT NULL AUTO_INCREMENT,
UNIQUE (c,i));
INSERT INTO t1 (c) VALUES (NULL),(NULL);
SELECT * FROM t1;
INSERT INTO t1 (c) VALUES ('a'),('a');
SELECT * FROM t1;
drop table t1;
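# A minimal sketch of the underlying rule (illustrative, not part of the
# original bug coverage): UNIQUE never treats two NULLs as duplicates,
# so both inserts below succeed.
create table t2 (c char(10) null, unique (c));
insert into t2 values (NULL),(NULL);
select count(*) from t2;
drop table t2;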
#
# longer keys
#
create table t1 (i int, a char(200), b text, unique (a), unique (b(300))) charset utf8;
insert ignore t1 values (1, repeat('a',210), repeat('b', 310));
insert ignore t1 values (2, repeat(0xD0B1,215), repeat(0xD0B1, 310));
select i, length(a), length(b), char_length(a), char_length(b) from t1;
select i from t1 where a=repeat(_utf8 'a',200);
select i from t1 where a=repeat(_utf8 0xD0B1,200);
select i from t1 where b=repeat(_utf8 'b',310);
drop table t1;
#
# Test of key read with primary key (Bug #3497)
#
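# ("key read" = index-only access: when every referenced column is in the
# index, EXPLAIN shows 'Using index' and the data file is never touched)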
CREATE TABLE t1 (id int unsigned auto_increment, name char(50), primary key (id)) engine=myisam;
insert into t1 (name) values ('a'), ('b'),('c'),('d'),('e'),('f'),('g');
explain select 1 from t1 where id =2;
explain select 1 from t1 where id =2 or id=3;
explain select name from t1 where id =2;
ALTER TABLE t1 DROP PRIMARY KEY, ADD INDEX (id);
explain select 1 from t1 where id =2;
drop table t1;
#
# Test of problem with key read (Bug #3666)
#
CREATE TABLE t1 (numeropost mediumint(8) unsigned NOT NULL default '0', numreponse int(10) unsigned NOT NULL auto_increment, PRIMARY KEY (numeropost,numreponse), UNIQUE KEY numreponse (numreponse));
INSERT INTO t1 (numeropost,numreponse) VALUES ('1','1'),('1','2'),('2','3'),('2','4');
SELECT numeropost FROM t1 WHERE numreponse='1';
# No 'Using index'
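# (numeropost must be fetched from the row; the numreponse unique key
# alone does not cover it)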
EXPLAIN SELECT numeropost FROM t1 WHERE numreponse='1';
FLUSH TABLES;
SELECT numeropost FROM t1 WHERE numreponse='1';
# This one will have 'Using index'
EXPLAIN SELECT numreponse+0 FROM t1 WHERE numreponse='1';
drop table t1;
#
# UNIQUE prefix keys and multi-byte charsets
#
# Enable view protocol again after MDEV-27945 is fixed
--disable_view_protocol
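# 0xD0B1 below is the two-byte UTF-8 encoding of a single Cyrillic
# character, so the c(2) and t(3) prefixes are measured in characters,
# not bytes.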
create table t1 (c varchar(30) character set utf8, t text character set utf8, unique (c(2)), unique (t(3))) engine=myisam;
show create table t1;
insert t1 values ('cccc', 'tttt'),
(0xD0B1212223D0B1D0B1D0B1D0B1D0B1, 0xD0B1D0B1212223D0B1D0B1D0B1D0B1),
(0xD0B1222123D0B1D0B1D0B1D0B1D0B1, 0xD0B1D0B1222123D0B1D0B1D0B1D0B1);
--error ER_DUP_ENTRY
insert t1 (c) values ('cc22');
--error ER_DUP_ENTRY
insert t1 (t) values ('ttt22');
--error ER_DUP_ENTRY
insert t1 (c) values (0xD0B1212322D0B1D0B1D0B1D0B1D0B1);
--error ER_DUP_ENTRY
insert t1 (t) values (0xD0B1D0B1212322D0B1D0B1D0B1D0B1);
select c from t1 where c='cccc';
select t from t1 where t='tttt';
select c from t1 where c=0xD0B1212223D0B1D0B1D0B1D0B1D0B1;
select t from t1 where t=0xD0B1D0B1212223D0B1D0B1D0B1D0B1;
drop table t1;
--enable_view_protocol
#
# BUG#6151 - myisam index corruption
#
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (
c1 int,
c2 varbinary(240),
UNIQUE KEY (c1),
KEY (c2)
) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,'\Z\Z\Z\Z');
INSERT INTO t1 VALUES (2,'\Z\Z\Z\Z\Z\Z');
INSERT INTO t1 VALUES (3,'\Z\Z\Z\Z');
select c1 from t1 where c2='\Z\Z\Z\Z';
DELETE FROM t1 WHERE (c1 = 1);
check table t1;
select c1 from t1 where c2='\Z\Z\Z\Z';
DELETE FROM t1 WHERE (c1 = 3);
check table t1;
select c1 from t1 where c2='\Z\Z\Z\Z';
#
# test delete of keys in a different order
#
truncate table t1;
insert into t1 values(1,"aaaa"),(2,"aaab"),(3,"aaac"),(4,"aaccc");
delete from t1 where c1=3;
delete from t1 where c1=1;
delete from t1 where c1=4;
check table t1;
drop table t1;
#
# Bug 6166: index prefix length of 0 not rejected
#
# This test should fail in 5.0.
# To fix it, remove the #ifdef in sql_yacc.yy (key_part),
# create a dedicated error code for this,
# and change my_printf_error() to my_error().
--error 1391
create table t1 (c char(10), index (c(0)));
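# For contrast, a one-character prefix is the shortest accepted form
# (an illustrative statement, not part of the original bug coverage):
create table t1 (c char(10), index (c(1)));
drop table t1;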
#
# Bug #6126: Duplicate columns in keys should fail
# Bug #6252: (dup)
#
--error 1060
create table t1 (c char(10), index (c,c));
--error 1060
create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1));
--error 1060
create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2));
--error 1060
create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1));
create table t1 (c1 char(10), c2 char(10));
--error 1060
alter table t1 add key (c1,c1);
--error 1060
alter table t1 add key (c2,c1,c1);
--error 1060
alter table t1 add key (c1,c2,c1);
--error 1060
alter table t1 add key (c1,c1,c2);
drop table t1;
#
# Bug#11228: DESC shows arbitrary column as "PRI"
#
create table t1 (
i1 INT NOT NULL,
i2 INT NOT NULL,
UNIQUE i1idx (i1),
UNIQUE i2idx (i2));
desc t1;
show create table t1;
drop table t1;
#
# Bug#12565 - ERROR 1034 when running simple UPDATE or DELETE
# on large MyISAM table
#
create table t1 (
c1 int,
c2 varchar(20) not null,
primary key (c1),
key (c2(10))
) engine=myisam;
insert into t1 values (1,'');
insert into t1 values (2,' \t\tTest String');
insert into t1 values (3,' \n\tTest String');
update t1 set c2 = 'New Test String' where c1 = 1;
select * from t1;
drop table t1;
#
# If a key uses a column prefix whose length equals the full column length,
# and the column is later extended, the key ends up covering the whole new
# length of the column.
#
create table t1 (a varchar(10), b varchar(10), key(a(10),b(10)));
show create table t1;
alter table t1 modify b varchar(20);
show create table t1;
alter table t1 modify a varchar(20);
show create table t1;
drop table t1;
#
# Bug #11227: Incorrectly reporting 'MUL' vs. 'UNI' on varchar
#
create table t1 (a int not null primary key, b varchar(20) not null unique);
desc t1;
drop table t1;
create table t1 (a int not null primary key, b int not null unique);
desc t1;
drop table t1;
create table t1 (a int not null primary key, b varchar(20) not null, unique (b(10)));
desc t1;
drop table t1;
create table t1 (a int not null primary key, b varchar(20) not null, c varchar(20) not null, unique(b(10),c(10)));
desc t1;
drop table t1;
# End of 4.1 tests
#
# WL#1563 - Modify MySQL to support on-line CREATE/DROP INDEX
# To test if this really works, you need to run with --debug
# and check the trace file.
#
# Create a table with named and unnamed indexes.
create table t1 (
c1 int,
c2 char(12),
c3 varchar(123),
c4 timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
index (c1),
index i1 (c1),
index i2 (c2),
index i3 (c3),
unique i4 (c4),
index i5 (c1, c2, c3, c4),
primary key (c2, c3),
index (c2, c4));
show create table t1;
# Some simple tests.
alter table t1 drop index c1;
alter table t1 add index (c1);
# This creates index 'c1_2'.
alter table t1 add index (c1);
alter table t1 drop index i3;
alter table t1 add index i3 (c3);
# Two indexes at the same time.
alter table t1 drop index i2, drop index i4;
alter table t1 add index i2 (c2), add index i4 (c4);
# Three indexes, one of them reversely.
alter table t1 drop index i2, drop index i4, add index i6 (c2, c4);
alter table t1 add index i2 (c2), add index i4 (c4), drop index i6;
# Include a unique index.
alter table t1 drop index i2, drop index i4, add unique i4 (c4);
alter table t1 add index i2 (c2), drop index i4, add index i4 (c4);
# Modify an index by changing its definition.
alter table t1 drop index c2, add index (c2(4),c3(7));
# Change nothing. The new key definition is the same as the old one.
alter table t1 drop index c2, add index (c2(4),c3(7));
# Test primary key handling.
alter table t1 add primary key (c1, c2), drop primary key;
alter table t1 drop primary key;
# Drop is checked first. Primary key must exist.
--error 1091
alter table t1 add primary key (c1, c2), drop primary key;
show create table t1;
# Insert non-unique values.
insert into t1 values(1, 'a', 'a', NULL);
insert into t1 values(1, 'b', 'b', NULL);
# Drop some indexes for new adds.
alter table t1 drop index i3, drop index i2, drop index i1;
# Add indexes, one is unique on non-unique values.
--error ER_DUP_ENTRY
alter table t1 add index i3 (c3), add index i2 (c2), add unique index i1 (c1);
drop table t1;
#
# Bug #20604: Test for disabled keys with aggregate functions and FORCE INDEX.
#
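# The EXPLAIN below must not pick the disabled key; MyISAM returns an
# error if the execution engine tries to use a disabled index.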
CREATE TABLE t1( a TINYINT, KEY(a) ) ENGINE=MyISAM;
INSERT INTO t1 VALUES( 1 );
ALTER TABLE t1 DISABLE KEYS;
EXPLAIN SELECT MAX(a) FROM t1 FORCE INDEX(a);
drop table t1;
#
# Bug #24778: Innodb: No result when using ORDER BY
#
CREATE TABLE t1 (
a INTEGER auto_increment PRIMARY KEY,
b INTEGER NOT NULL,
c INTEGER NOT NULL,
d CHAR(64)
);
CREATE TABLE t2 (
a INTEGER auto_increment PRIMARY KEY,
b INTEGER NOT NULL,
c SMALLINT NOT NULL,
d DATETIME NOT NULL,
e SMALLINT NOT NULL,
f INTEGER NOT NULL,
g INTEGER NOT NULL,
h SMALLINT NOT NULL,
i INTEGER NOT NULL,
j INTEGER NOT NULL,
UNIQUE INDEX (b),
INDEX (b, d, e, f, g, h, i, j, c),
INDEX (c)
);
INSERT INTO t2 VALUES
(NULL, 1, 254, '1000-01-01 00:00:00', 257, 0, 0, 0, 0, 0),
(NULL, 2, 1, '2004-11-30 12:00:00', 1, 0, 0, 0, 0, 0),
(NULL, 3, 1, '2004-11-30 12:00:00', 1, 0, 0, 2, -21600, 0),
(NULL, 4, 1, '2004-11-30 12:00:00', 1, 0, 0, 2, -10800, 0),
(NULL, 5, 1, '2004-11-30 12:00:00', 1, 0, 0, 5, -10800, 0),
(NULL, 6, 1, '2004-11-30 12:00:00', 102, 0, 0, 0, 0, 0),
(NULL, 7, 1, '2004-11-30 12:00:00', 105, 2, 0, 0, 0, 0),
(NULL, 8, 1, '2004-11-30 12:00:00', 105, 10, 0, 0, 0, 0);
INSERT INTO t1 (b, c, d) VALUES
(3388000, -553000, NULL),
(3388000, -553000, NULL);
SELECT *
FROM t2 c JOIN t1 pa ON c.b = pa.a
WHERE c.c = 1
ORDER BY c.b, c.d
;
DROP TABLE t1, t2;
#
# Bug #31137: Assertion failed: primary_key_no == -1 || primary_key_no == 0
#
create table t1(a int not null, key aa(a),
b char(10) not null, unique key bb(b(1)),
c char(4) not null, unique key cc(c));
desc t1;
show create table t1;
drop table t1;
create table t1(a int not null, key aa(a),
b char(10) not null, unique key bb(b(1)),
c char(4) not null);
desc t1;
alter table t1 add unique key cc(c);
desc t1;
show create table t1;
drop table t1;
--echo End of 5.0 tests
#
# Bug #31148: bool close_thread_table(THD*, TABLE**): Assertion
# `table->key_read == 0' failed.
#
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
CREATE TABLE t1 (a INT PRIMARY KEY AUTO_INCREMENT);
INSERT INTO t1 VALUES (), (), ();
SELECT 1 AS c1
FROM t1
ORDER BY (
SELECT 1 AS c2
FROM t1
GROUP BY GREATEST(LAST_INSERT_ID(), t1.a) ASC
LIMIT 1);
DROP TABLE t1;
#
# Bug #31974: Wrong EXPLAIN output
#
CREATE TABLE t1 (a INT, b INT, INDEX (a,b));
INSERT INTO t1 (a, b)
VALUES
(1,1), (1,2), (1,3), (1,4), (1,5),
(2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
ANALYZE table t1;
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
SELECT 1 as RES FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
DROP TABLE t1;
--echo #
--echo # Bug#18144: Cost with FORCE/USE index seems incorrect in some cases.
--echo #
--echo # We are interested in showing that the cost for the last plan is higher
--echo # than for the preceding two plans.
--echo #
CREATE TABLE t1( a INT, b INT, KEY( a ) );
INSERT INTO t1 values (1, 2), (1, 3), (2, 3), (2, 4), (3, 4), (3, 5);
EXPLAIN SELECT a, SUM( b ) FROM t1 GROUP BY a;
SHOW STATUS LIKE 'Last_query_cost';
EXPLAIN SELECT a, SUM( b ) FROM t1 USE INDEX( a ) GROUP BY a;
SHOW STATUS LIKE 'Last_query_cost';
EXPLAIN SELECT a, SUM( b ) FROM t1 FORCE INDEX( a ) GROUP BY a;
SHOW STATUS LIKE 'Last_query_cost';
DROP TABLE t1;
--echo #
--echo # MDEV-21480: Unique key using ref access though eq_ref access can be used
--echo #
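# With both parts of the unique key (b,c) bound by t2 columns, the join
# is expected to use eq_ref rather than ref.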
create table t1(a int, b int,c int, primary key(a), unique key(b,c));
insert into t1 select seq, seq, seq from seq_1_to_10;
create table t2(a int, b int,c int);
insert into t2 select seq, seq, seq+1 from seq_1_to_100;
EXPLAIN SELECT t1.c, t2.c FROM t1, t2 WHERE t1.b=t2.a and t1.c=t2.b;
SELECT t1.c, t2.c FROM t1, t2 WHERE t1.b=t2.a and t1.c=t2.b;
alter table t1 drop PRIMARY KEY;
alter table t1 add PRIMARY KEY(b,c);
EXPLAIN SELECT t1.c, t2.c FROM t1, t2 WHERE t1.b=t2.a and t1.c=t2.b;
SELECT t1.c, t2.c FROM t1, t2 WHERE t1.b=t2.a and t1.c=t2.b;
drop table t1,t2;
--echo #
--echo # MDEV-13756 Implement descending index: KEY (a DESC, b ASC)
--echo #
create table t1 (a int, b int, key(a), key(a desc));
drop table t1;
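# A mixed-direction key, as in the MDEV title, sketched for illustration
# (assumes the same server version that accepts the DESC key above):
create table t1 (a int, b int, key (a desc, b asc));
show create table t1;
drop table t1;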
--echo #
--echo # Check some issues with FORCE INDEX and full index scans
--echo # (does FORCE INDEX force an index scan?)
--echo #
create table t1 (a int primary key, b int, c int, d int,
key k1 (b) using BTREE, key k2 (c,d) using btree) engine=heap;
insert into t1 select seq as a, seq as b, seq as c, seq as d
from seq_1_to_100;
explain select sum(a+b) from t1 force index (k1) where b>0 and a=99;
explain select sum(a+b) from t1 force index (k1) where a>0;
explain select sum(a+b) from t1 force index (k1);
explain select sum(a+b) from t1 force index for join (k1);
explain select sum(a+b) from t1 force index for order by (k1);
explain select sum(a+b) from t1 force index (k1,k2);
select sum(a+b) from t1 force index (k1);
explain select sum(a+b) from t1 force index (primary);
select sum(a+b) from t1 force index (primary);
explain select straight_join sum(a+b) from seq_1_to_10 as s, t1 force index (k2) where t1.a=s.seq;
drop table t1;