Mirror of https://github.com/MariaDB/server.git
Commit bea12d761a (merge)
46 changed files with 840 additions and 247 deletions
@ -451,3 +451,4 @@ vio/test-ssl
vio/test-sslclient
vio/test-sslserver
vio/viotest-ssl
libmysqld/ha_innodb.cc

196  Docs/manual.texi
@ -750,7 +750,7 @@ Large server clusters using replication are in production use, with
good results. Work on enhanced replication features is continuing
in MySQL 4.0.

@item @code{InnoDB} tables -- Gamma
@item @code{InnoDB} tables -- Stable
While the @code{InnoDB} transactional table handler is a fairly recent
addition to @code{MySQL}, it appears to work well and is already being
used in some large, heavy load production systems.
@ -3698,9 +3698,6 @@ Allow users to change startup options without taking down the server.
@item
Fail safe replication.
@item
More functions for full-text search.
@xref{Fulltext TODO}.
@item
New key cache
@item
New table definition file format (@code{.frm} files) This will enable us
@ -3733,9 +3730,6 @@ When using @code{SET CHARACTER SET} we should translate the whole query
at once and not only strings. This will enable users to use the translated
characters in database, table and column names.
@item
Add a portable interface over @code{gethostbyaddr_r()} so that we can change
@code{ip_to_hostname()} to not block other threads while doing DNS lookups.
@item
Add @code{record_in_range()} method to @code{MERGE} tables to be
able to choose the right index when there is many to choose from. We should
also extend the info interface to get the key distribution for each index,
@ -3867,15 +3861,6 @@ Don't add automatic @code{DEFAULT} values to columns. Give an error when using
an @code{INSERT} that doesn't contain a column that doesn't have a
@code{DEFAULT}.
@item
Caching of queries and results. This should be done as a separated
module that examines each query and if this is query is in the cache
the cached result should be returned. When one updates a table one
should remove as few queries as possible from the cache.
This should give a big speed bost on machines with much RAM where
queries are often repeated (like WWW applications).
One idea would be to only cache queries of type:
@code{SELECT CACHED ...}
@item
Fix @file{libmysql.c} to allow two @code{mysql_query()} commands in a row
without reading results or give a nice error message when one does this.
@item
@ -3940,10 +3925,7 @@ ADD_TO_SET(value,set) and REMOVE_FROM_SET(value,set)
Add use of @code{t1 JOIN t2 ON ...} and @code{t1 JOIN t2 USING ...}
Currently, you can only use this syntax with @code{LEFT JOIN}.
@item
Add full support for @code{unsigned long long} type.
@item
Many more variables for @code{show status}. Counts for:
@code{INSERT}/@code{DELETE}/@code{UPDATE} statements. Records reads and
Many more variables for @code{show status}. Records reads and
updated. Selects on 1 table and selects with joins. Mean number of
tables in select. Number of @code{ORDER BY} and @code{GROUP BY} queries.
@item
@ -3958,7 +3940,7 @@ should be implemented.
@item
Add support for UNICODE.
@item
@code{NATURAL JOIN} and @code{UNION JOIN}
@code{NATURAL JOIN}.
@item
Allow @code{select a from crash_me left join crash_me2 using (a)}; In this
case @code{a} is assumed to come from the @code{crash_me} table.
@ -4075,8 +4057,6 @@ Use of full calculation names in the order part. (For ACCESS97)
@code{MINUS}, @code{INTERSECT} and @code{FULL OUTER JOIN}.
(Currently @code{UNION} (in 4.0) and @code{LEFT OUTER JOIN} are supported)
@item
Allow @code{UNIQUE} on fields that can be @code{NULL}.
@item
@code{SQL_OPTION MAX_SELECT_TIME=#} to put a time limit on a query.
@item
Make the update log to a database.
@ -24584,6 +24564,7 @@ great tool to find out if this is a problem with your query.
* Where optimisations::         How MySQL optimises @code{WHERE} clauses
* DISTINCT optimisation::       How MySQL Optimises @code{DISTINCT}
* LEFT JOIN optimisation::      How MySQL optimises @code{LEFT JOIN}
* ORDER BY optimisation::
* LIMIT optimisation::          How MySQL optimises @code{LIMIT}
* Insert speed::                Speed of @code{INSERT} queries
* Update speed::                Speed of @code{UPDATE} queries
@ -25177,7 +25158,7 @@ MySQL will stop reading from t2 (for that particular row in t1)
when the first row in t2 is found.


@node LEFT JOIN optimisation, LIMIT optimisation, DISTINCT optimisation, Query Speed
@node LEFT JOIN optimisation, ORDER BY optimisation, DISTINCT optimisation, Query Speed
@subsection How MySQL Optimises @code{LEFT JOIN} and @code{RIGHT JOIN}

@findex LEFT JOIN
@ -25243,7 +25224,119 @@ SELECT * FROM b,a LEFT JOIN c ON (c.key=a.key) LEFT JOIN d (d.key=a.key) WHERE b
@end example


@node LIMIT optimisation, Insert speed, LEFT JOIN optimisation, Query Speed
@node ORDER BY optimisation, LIMIT optimisation, LEFT JOIN optimisation, Query Speed
@subsection How MySQL Optimises @code{ORDER BY}

In some cases MySQL can uses index to satisfy an @code{ORDER BY} or
@code{GROUP BY} request without doing any extra sorting.

The index can also be used even if the @code{ORDER BY} doesn't match the
index exactly, as long as all the unused index parts and all the extra
are @code{ORDER BY} columns are constants in the @code{WHERE}
clause. The following queries will use the index to resolve the
@code{ORDER BY} / @code{GROUP BY} part:

@example
SELECT * FROM t1 ORDER BY key_part1,key_part2,...
SELECT * FROM t1 WHERE key_part1=constant ORDER BY key_part2
SELECT * FROM t1 WHERE key_part1=constant GROUP BY key_part2
SELECT * FROM t1 ORDER BY key_part1 DESC,key_part2 DESC
SELECT * FROM t1 WHERE key_part1=1 ORDER BY key_part1 DESC,key_part2 DESC
@end example

Some cases where MySQL can NOT use indexes to resolve the @code{ORDER
BY}: (Note that MySQL will still use indexes to find the rows that
matches the where clause):

@itemize @bullet
@item
You are doing an @code{ORDER BY} on different keys:

@code{SELECT * FROM t1 ORDER BY key1,key2}
@item
You are doing an @code{ORDER BY} on not following key parts.

@code{SELECT * FROM t1 WHERE key2=constant ORDER BY key_part2}

@item
You are mixing @code{ASC} and @code{DESC}.

@code{SELECT * FROM t1 ORDER BY key_part1 DESC,key_part2 ASC}

@item
The key used to fetch the rows are not the same one that is used to
do the @code{ORDER BY}:

@code{SELECT * FROM t1 WHERE key2=constant ORDER BY key1}

@item
You are joining many tables and the columns you are doing an @code{ORDER
BY} on are not all from the first not-const table that is used to
retrieve rows (This is the first table in the @code{EXPLAIN} output which
doesn't use a @code{const} row fetch method).

@item
You have different @code{ORDER BY} and @code{GROUP BY} expressions.

@item
The used table index is an index type that doesn't store rows in order.
(Like index in @code{HEAP} tables).
@end itemize


In the cases where MySQL have to sort the result, it uses the following
algorithm:

@itemize @bullet
@item
Read all rows according to key or by table scanning.
Rows that doesn't match the WHERE clause are skipped.
@item
Store the sort-key in a buffer (of size @code{sort_buffer}).
@item
When the buffer gets full, run a qsort on it and store the result
in a temporary file. Save a pointer to the sorted block.
(In the case where all rows fits into the sort buffer, no temporary
file is created)
@item
Repeat the above until all rows have been read.
@item
Do a multi-merge of up to @code{MERGEBUFF} (7) regions to one block in
another temporary file. Repeat until all blocks from the first file
are in the second file.
@item
Repeat the following until there is less than @code{MERGEBUFF2} (15)
blocks left.
@item
On the last multi-merge, only the pointer to the row (last part of
the sort-key) is written to a result file.
@item
Now the code in @file{sql/records.cc} will be used to read through them
in sorted order by using the row pointers in the result file. To
optimize this, we read in a big block of row pointers, sort these and
then we read the rows in the sorted order into a row buffer
(@code{record_rnd_buffer}) .
@end itemize
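
To make the buffer-and-merge scheme above concrete, here is a minimal, self-contained C sketch of the in-memory step only: collect fixed-size sort keys, @code{qsort()} a full buffer, and emit the sorted run. The key length, buffer size and helper names are illustrative assumptions rather than the actual @file{sql/filesort.cc} interfaces, and the spill to a temporary file and the @code{MERGEBUFF}-way merge are only indicated by comments.

@example
/* Sketch only: not the real filesort code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEY_LEN     16   /* assumed fixed sort-key length */
#define BUFFER_KEYS 4    /* a tiny stand-in for sort_buffer */

static int cmp_key(const void *a, const void *b)
{
  return memcmp(a, b, KEY_LEN);
}

static void flush_run(char (*buf)[KEY_LEN], size_t used)
{
  qsort(buf, used, KEY_LEN, cmp_key);
  /* Real code: write the sorted block to a temporary file in tmpdir and
     save a pointer to it for the later multi-merge. */
  for (size_t i = 0; i < used; i++)
    printf("sorted key: %s\n", buf[i]);
}

int main(void)
{
  const char *rows[] = { "delta", "alpha", "echo", "charlie", "bravo" };
  char buf[BUFFER_KEYS][KEY_LEN];
  size_t used = 0;

  for (size_t i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
  {
    memset(buf[used], 0, KEY_LEN);
    strncpy(buf[used], rows[i], KEY_LEN - 1);
    if (++used == BUFFER_KEYS)      /* buffer full: sort and start a new run */
    {
      flush_run(buf, used);
      used = 0;
    }
  }
  if (used)                         /* final, possibly partial run */
    flush_run(buf, used);
  return 0;
}
@end example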

You can with @code{EXPLAIN SELECT ... ORDER BY} check if MySQL can use
indexes to resolve the query. If you get @code{Using filesort} in the
@code{extra} column, then MySQL can't use indexes to resolve the
@code{ORDER BY}. @xref{EXPLAIN}.

If you want to have a higher @code{ORDER BY} speed, you should first
see if you can get MySQL to use indexes instead of having to do an extra
sorting phase. If this is not possible, then you can do:

@itemize @bullet
@item
Increase the size of the @code{sort_buffer} variable.
@item
Increase the size of the @code{record_rnd_buffer} variable.
@item
Change @code{tmpdir} to point to a dedicated disk with lots of empty space.
@end itemize

@node LIMIT optimisation, Insert speed, ORDER BY optimisation, Query Speed
@subsection How MySQL Optimises @code{LIMIT}

@findex LIMIT
@ -25992,19 +26085,9 @@ SELECT MIN(key_part2),MAX(key_part2) FROM table_name where key_part1=10

@item
Sort or group a table if the sorting or grouping is done on a leftmost
prefix of a usable key (for example, @code{ORDER BY key_part_1,key_part_2 }). The
key is read in reverse order if all key parts are followed by @code{DESC}.

The index can also be used even if the @code{ORDER BY} doesn't match the index
exactly, as long as all the unused index parts and all the extra
are @code{ORDER BY} columns are constants in the @code{WHERE} clause. The
following queries will use the index to resolve the @code{ORDER BY} part:

@example
SELECT * FROM foo ORDER BY key_part1,key_part2,key_part3;
SELECT * FROM foo WHERE column=constant ORDER BY column, key_part1;
SELECT * FROM foo WHERE key_part1=const GROUP BY key_part2;
@end example
prefix of a usable key (for example, @code{ORDER BY
key_part_1,key_part_2 }). The key is read in reverse order if all key
parts are followed by @code{DESC}. @xref{ORDER BY optimisation}.

@item
In some cases a query can be optimised to retrieve values without
@ -33370,7 +33453,12 @@ DELETE [LOW_PRIORITY | QUICK] FROM table_name

or

DELETE [LOW_PRIORITY | QUICK] table_name[.*] [table_name[.*] ...] FROM
DELETE [LOW_PRIORITY | QUICK] table_name[.*] [,table_name[.*] ...] FROM
table-references [WHERE where_definition]

or

DELETE [LOW_PRIORITY | QUICK] FROM table_name[.*], [table_name[.*] ...] USING
table-references [WHERE where_definition]
@end example
@ -33407,18 +33495,23 @@ TABLE} statement or the @code{myisamchk} utility to reorganise tables.
@code{OPTIMIZE TABLE} is easier, but @code{myisamchk} is faster. See
@ref{OPTIMIZE TABLE, , @code{OPTIMIZE TABLE}} and @ref{Optimisation}.

The multi table delete format is supported starting from MySQL 4.0.0.
The first multi table delete format is supported starting from MySQL 4.0.0.
The second multi table delete format is supported starting from MySQL 4.0.2.

The idea is that only matching rows from the tables listed @strong{before} the
@code{FROM} clause is deleted. The effect is that you can delete rows
from many tables at the same time and also have additional tables that
are used for searching.
The idea is that only matching rows from the tables listed
@strong{before} the @code{FROM} or before the @code{USING} clause is
deleted. The effect is that you can delete rows from many tables at the
same time and also have additional tables that are used for searching.

The @code{.*} after the table names is there just to be compatible with
@code{Access}:

@example
DELETE t1,t2 FROM t1,t2,t3 WHERE t1.id=t2.id AND t2.id=t3.id

or

DELETE FROM t1,t2 USING t1,t2,t3 WHERE t1.id=t2.id AND t2.id=t3.id
@end example

In the above case we delete matching rows just from tables @code{t1} and
@ -48044,10 +48137,16 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.
@item
Fixed bug with empty expression for boolean fulltext search.
@item
Fixed bug in updating fulltext key from/to @code{NULL}.
No coredump anymore.
Fixed core dump bug in updating fulltext key from/to @code{NULL}.
@item
ODBC compatibility: added @code{BIT_LENGTH()} function.
ODBC compatibility: Added @code{BIT_LENGTH()}
@item
Fixed core dump bug in @code{GROUP BY BINARY column}.
@item
Added support for @code{NULL} keys in HEAP tables.
@item
Use index for @code{ORDER BY} in queries of type:
@code{SELECT * FROM t WHERE key_part1=1 ORDER BY key_part1 DESC,key_part2 DESC}
@item
Fixed bug in @code{FLUSH QUERY CACHE}.
@item
@ -48056,6 +48155,9 @@ Added @code{CAST()} and @code{CONVERT()} functions. The @code{CAST} and
want to create a column with a specific type in a @code{CREATE ... SELECT}.
For more information, read @ref{Cast Functions}.
@item
@code{CREATE ... SELECT} on @code{DATE} and @code{TIME} functions now
create columns of the expected type.
@item
Changed order of how keys are created in tables.
@item
Added a new columns @code{Null} and @code{Index_type} to @code{SHOW INDEX}.

@ -79,9 +79,11 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records,
|
|||
}
|
||||
DBUG_PRINT("info",
|
||||
("records: %ld seeks: %d max links: %d hitrate: %.2f",
|
||||
records,seek,max_links,(float) seek / (float) (records ? records : 1)));
|
||||
records,seek,max_links,
|
||||
(float) seek / (float) (records ? records : 1)));
|
||||
if (print_status)
|
||||
printf("Key: %d records: %ld seeks: %d max links: %d hitrate: %.2f\n",
|
||||
keynr, records, seek, max_links, (float) seek / (float) records);
|
||||
keynr, records, seek, max_links,
|
||||
(float) seek / (float) (records ? records : 1));
|
||||
return error;
|
||||
}
|
||||
|
|
|
@ -70,6 +70,7 @@ extern int _hp_rec_key_cmp(HP_KEYDEF *keydef,const byte *rec1,
|
|||
extern int _hp_key_cmp(HP_KEYDEF *keydef,const byte *rec,
|
||||
const byte *key);
|
||||
extern void _hp_make_key(HP_KEYDEF *keydef,byte *key,const byte *rec);
|
||||
extern my_bool hp_if_null_in_key(HP_KEYDEF *keyinfo, const byte *record);
|
||||
extern int _hp_close(register HP_INFO *info);
|
||||
extern void _hp_clear(HP_SHARE *info);
|
||||
|
||||
|
|
|
@ -158,11 +158,22 @@ ulong _hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
{
uchar *pos=(uchar*) key;
key+=seg->length;
if (seg->null_bit)
{
key++; /* Skipp null byte */
if (*pos) /* Found null */
{
nr^= (nr << 1) | 1;
continue;
}
pos++;
}
if (seg->type == HA_KEYTYPE_TEXT)
{
for (; pos < (uchar*) key ; pos++)
{
nr^=(ulong) ((((uint) nr & 63)+nr2)*((uint) my_sort_order[(uint) *pos]))+ (nr << 8);
nr^=(ulong) ((((uint) nr & 63)+nr2) *
((uint) my_sort_order[(uint) *pos])) + (nr << 8);
nr2+=3;
}
}
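
As an illustration of the NULL handling added above (not part of the patch), the following stand-alone C program folds a NULL key part into the hash with the same nr^= (nr << 1) | 1 step and otherwise mixes the key bytes the way the hunk does. The hash_parts() helper and the seed values are assumptions made for this sketch.

/* Sketch only: stand-in for the _hp_hashnr() NULL handling shown above. */
#include <stdio.h>

static unsigned long hash_parts(const unsigned char **parts,
                                const unsigned *lengths, unsigned nparts)
{
  unsigned long nr = 0, nr2 = 4;          /* assumed seed values */
  for (unsigned i = 0; i < nparts; i++)
  {
    if (parts[i] == NULL)                 /* NULL key part: fold in a marker */
    {
      nr ^= (nr << 1) | 1;
      continue;
    }
    for (unsigned j = 0; j < lengths[i]; j++)
    {
      nr ^= (unsigned long) ((((unsigned) nr & 63) + nr2) * parts[i][j]) + (nr << 8);
      nr2 += 3;
    }
  }
  return nr;
}

int main(void)
{
  const unsigned char name[] = "monty";
  const unsigned char *key_with_null[] = { name, NULL };
  const unsigned char *key_without[]   = { name, (const unsigned char *) "1976" };
  const unsigned lengths[] = { 5, 4 };

  printf("with NULL part:    %lu\n", hash_parts(key_with_null, lengths, 2));
  printf("without NULL part: %lu\n", hash_parts(key_without, lengths, 2));
  return 0;
}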
@ -170,7 +181,7 @@ ulong _hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
|
|||
{
|
||||
for (; pos < (uchar*) key ; pos++)
|
||||
{
|
||||
nr^=(ulong) ((((uint) nr & 63)+nr2)*((uint) *pos))+ (nr << 8);
|
||||
nr^=(ulong) ((((uint) nr & 63)+nr2)*((uint) *pos)) + (nr << 8);
|
||||
nr2+=3;
|
||||
}
|
||||
}
|
||||
|
@ -188,11 +199,20 @@ ulong _hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
|
|||
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
|
||||
{
|
||||
uchar *pos=(uchar*) rec+seg->start,*end=pos+seg->length;
|
||||
if (seg->null_bit)
|
||||
{
|
||||
if (rec[seg->null_pos] & seg->null_bit)
|
||||
{
|
||||
nr^= (nr << 1) | 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (seg->type == HA_KEYTYPE_TEXT)
|
||||
{
|
||||
for (; pos < end ; pos++)
|
||||
{
|
||||
nr^=(ulong) ((((uint) nr & 63)+nr2)*((uint) my_sort_order[(uint) *pos]))+ (nr << 8);
|
||||
nr^=(ulong) ((((uint) nr & 63)+nr2)*
|
||||
((uint) my_sort_order[(uint) *pos]))+ (nr << 8);
|
||||
nr2+=3;
|
||||
}
|
||||
}
|
||||
|
@ -234,6 +254,16 @@ ulong _hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
|
|||
{
|
||||
uchar *pos=(uchar*) key;
|
||||
key+=seg->length;
|
||||
if (seg->null_bit)
|
||||
{
|
||||
key++;
|
||||
if (*pos)
|
||||
{
|
||||
nr^= (nr << 1) | 1;
|
||||
continue;
|
||||
}
|
||||
pos++;
|
||||
}
|
||||
if (seg->type == HA_KEYTYPE_TEXT)
|
||||
{
|
||||
for (; pos < (uchar*) key ; pos++)
|
||||
|
@ -264,6 +294,14 @@ ulong _hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
|
|||
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
|
||||
{
|
||||
uchar *pos=(uchar*) rec+seg->start,*end=pos+seg->length;
|
||||
if (seg->null_bit)
|
||||
{
|
||||
if (rec[seg->null_pos] & seg->null_bit)
|
||||
{
|
||||
nr^= (nr << 1) | 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (seg->type == HA_KEYTYPE_TEXT)
|
||||
{
|
||||
for ( ; pos < end ; pos++)
|
||||
|
@ -295,6 +333,14 @@ int _hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2)
|
|||
|
||||
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
|
||||
{
|
||||
if (seg->null_bit)
|
||||
{
|
||||
if ((rec1[seg->null_pos] & seg->null_bit) !=
|
||||
(rec2[seg->null_pos] & seg->null_bit))
|
||||
return 1;
|
||||
if (rec1[seg->null_pos] & seg->null_bit)
|
||||
continue;
|
||||
}
|
||||
if (seg->type == HA_KEYTYPE_TEXT)
|
||||
{
|
||||
if (my_sortcmp(rec1+seg->start,rec2+seg->start,seg->length))
|
||||
|
@ -309,14 +355,24 @@ int _hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Compare a key in a record to a hole key */
|
||||
/* Compare a key in a record to a whole key */
|
||||
|
||||
int _hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key)
|
||||
{
|
||||
HP_KEYSEG *seg,*endseg;
|
||||
|
||||
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
|
||||
for (seg=keydef->seg,endseg=seg+keydef->keysegs ;
|
||||
seg < endseg ;
|
||||
key+= (seg++)->length)
|
||||
{
|
||||
if (seg->null_bit)
|
||||
{
|
||||
int found_null=test(rec[seg->null_pos] & seg->null_bit);
|
||||
if (found_null != (int) *key++)
|
||||
return 1;
|
||||
if (found_null)
|
||||
continue;
|
||||
}
|
||||
if (seg->type == HA_KEYTYPE_TEXT)
|
||||
{
|
||||
if (my_sortcmp(rec+seg->start,key,seg->length))
|
||||
|
@ -327,7 +383,6 @@ int _hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key)
|
|||
if (bcmp(rec+seg->start,key,seg->length))
|
||||
return 1;
|
||||
}
|
||||
key+=seg->length;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -341,7 +396,28 @@ void _hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec)

for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
{
if (seg->null_bit)
*key++= test(rec[seg->null_pos] & seg->null_bit);
memcpy(key,rec+seg->start,(size_t) seg->length);
key+=seg->length;
}
}


/*
Test if any of the key parts are NULL.
Return:
1 if any of the key parts was NULL
0 otherwise
*/

my_bool hp_if_null_in_key(HP_KEYDEF *keydef, const byte *record)
{
HP_KEYSEG *seg,*endseg;
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
{
if (seg->null_bit && (record[seg->null_pos] & seg->null_bit))
return 1;
}
return 0;
}
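
Later in this commit, the hp_write.c hunk uses this predicate to skip the duplicate-key check when the key is marked HA_NULL_PART_KEY and the record actually contains a NULL in one of its key parts (heap_open() only sets that flag when HA_NULL_ARE_EQUAL is not requested). A compact stand-alone C sketch of that calling pattern, with invented stand-in types, flag values and helper names:

/* Sketch only: mirrors the condition added to _hp_write_key(). */
#include <stdbool.h>
#include <stdio.h>

#define HA_NOSAME        1
#define HA_NULL_PART_KEY 2

struct key_part { bool is_null; int value; };

static bool key_has_null(const struct key_part *parts, int nparts)
{
  for (int i = 0; i < nparts; i++)
    if (parts[i].is_null)
      return true;
  return false;
}

static bool must_check_duplicates(unsigned key_flag,
                                  const struct key_part *parts, int nparts)
{
  /* Enforce uniqueness only when the key is HA_NOSAME and either has no
     nullable parts or none of the parts in this record is NULL. */
  return (key_flag & HA_NOSAME) &&
         (!(key_flag & HA_NULL_PART_KEY) || !key_has_null(parts, nparts));
}

int main(void)
{
  struct key_part rec[] = { { false, 7 }, { true, 0 } };  /* second part is NULL */
  printf("enforce uniqueness: %s\n",
         must_check_duplicates(HA_NOSAME | HA_NULL_PART_KEY, rec, 2)
         ? "yes" : "no");
  return 0;
}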
@ -44,7 +44,12 @@ HP_INFO *heap_open(const char *name, int mode, uint keys, HP_KEYDEF *keydef,
|
|||
key_segs+= keydef[i].keysegs;
|
||||
bzero((char*) &keydef[i].block,sizeof(keydef[i].block));
|
||||
for (j=length=0 ; j < keydef[i].keysegs; j++)
|
||||
{
|
||||
length+=keydef[i].seg[j].length;
|
||||
if (keydef[i].seg[j].null_bit &&
|
||||
!(keydef[i].flag & HA_NULL_ARE_EQUAL))
|
||||
keydef[i].flag |= HA_NULL_PART_KEY;
|
||||
}
|
||||
keydef[i].length=length;
|
||||
if (length > max_length)
|
||||
max_length=length;
|
||||
|
|
|
@ -20,7 +20,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key)
|
|||
{
|
||||
byte *pos;
|
||||
HP_SHARE *share=info->s;
|
||||
DBUG_ENTER("hp_rkey");
|
||||
DBUG_ENTER("heap_rkey");
|
||||
DBUG_PRINT("enter",("base: %lx inx: %d",info,inx));
|
||||
|
||||
if ((uint) inx >= share->keys)
|
||||
|
|
|
@ -30,7 +30,7 @@
|
|||
#include <signal.h>
|
||||
|
||||
#define MAX_RECORDS 100000
|
||||
#define MAX_KEYS 3
|
||||
#define MAX_KEYS 4
|
||||
|
||||
static int get_options(int argc, char *argv[]);
|
||||
static int rnd(int max_value);
|
||||
|
@ -40,16 +40,20 @@ static uint flag=0,verbose=0,testflag=0,recant=10000,silent=0;
|
|||
static uint keys=MAX_KEYS;
|
||||
static uint16 key1[1001];
|
||||
static my_bool key3[MAX_RECORDS];
|
||||
static int reclength=39;
|
||||
|
||||
|
||||
static int calc_check(byte *buf,uint length);
|
||||
static void make_record(char *record, uint n1, uint n2, uint n3,
|
||||
const char *mark, uint count);
|
||||
|
||||
/* Huvudprogrammet */
|
||||
/* Main program */
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
register uint i,j;
|
||||
uint ant,n1,n2,n3;
|
||||
uint reclength,write_count,update,opt_delete,check2,dupp_keys,found_key;
|
||||
uint write_count,update,opt_delete,check2,dupp_keys,found_key;
|
||||
int error;
|
||||
ulong pos;
|
||||
unsigned long key_check;
|
||||
|
@ -66,7 +70,6 @@ int main(int argc, char *argv[])
|
|||
filename2= "test2_2";
|
||||
file=file2=0;
|
||||
get_options(argc,argv);
|
||||
reclength=37;
|
||||
|
||||
write_count=update=opt_delete=0;
|
||||
key_check=0;
|
||||
|
@ -77,21 +80,33 @@ int main(int argc, char *argv[])
|
|||
keyinfo[0].seg[0].type=HA_KEYTYPE_BINARY;
|
||||
keyinfo[0].seg[0].start=0;
|
||||
keyinfo[0].seg[0].length=6;
|
||||
keyinfo[0].seg[0].null_bit=0;
|
||||
keyinfo[1].seg=keyseg+1;
|
||||
keyinfo[1].keysegs=2;
|
||||
keyinfo[1].flag=0;
|
||||
keyinfo[1].seg[0].type=HA_KEYTYPE_BINARY;
|
||||
keyinfo[1].seg[0].start=7;
|
||||
keyinfo[1].seg[0].length=6;
|
||||
keyinfo[1].seg[0].null_bit=0;
|
||||
keyinfo[1].seg[1].type=HA_KEYTYPE_TEXT;
|
||||
keyinfo[1].seg[1].start=0; /* Tv}delad nyckel */
|
||||
keyinfo[1].seg[1].start=0; /* key in two parts */
|
||||
keyinfo[1].seg[1].length=6;
|
||||
keyinfo[1].seg[1].null_bit=0;
|
||||
keyinfo[2].seg=keyseg+3;
|
||||
keyinfo[2].keysegs=1;
|
||||
keyinfo[2].flag=HA_NOSAME;
|
||||
keyinfo[2].seg[0].type=HA_KEYTYPE_BINARY;
|
||||
keyinfo[2].seg[0].start=12;
|
||||
keyinfo[2].seg[0].length=8;
|
||||
keyinfo[2].seg[0].null_bit=0;
|
||||
keyinfo[3].keysegs=1;
|
||||
keyinfo[3].flag=HA_NOSAME;
|
||||
keyinfo[3].seg=keyseg+4;
|
||||
keyinfo[3].seg[0].type=HA_KEYTYPE_BINARY;
|
||||
keyinfo[3].seg[0].start=37;
|
||||
keyinfo[3].seg[0].length=1;
|
||||
keyinfo[3].seg[0].null_bit=1;
|
||||
keyinfo[3].seg[0].null_pos=38;
|
||||
|
||||
bzero((char*) key1,sizeof(key1));
|
||||
bzero((char*) key3,sizeof(key3));
|
||||
|
@ -110,7 +125,7 @@ int main(int argc, char *argv[])
|
|||
for (i=0 ; i < recant ; i++)
|
||||
{
|
||||
n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*5,MAX_RECORDS));
|
||||
sprintf(record,"%6d:%4d:%8d:Pos: %4d ",n1,n2,n3,write_count);
|
||||
make_record(record,n1,n2,n3,"Pos",write_count);
|
||||
|
||||
if (heap_write(file,record))
|
||||
{
|
||||
|
@ -191,7 +206,7 @@ int main(int argc, char *argv[])
|
|||
for (i=0 ; i < write_count/10 ; i++)
|
||||
{
|
||||
n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*2,MAX_RECORDS));
|
||||
sprintf(record2,"%6d:%4d:%8d:XXX: %4d ",n1,n2,n3,update);
|
||||
make_record(record2, n1, n2, n3, "XXX", update);
|
||||
if (rnd(2) == 1)
|
||||
{
|
||||
if (heap_scan_init(file))
|
||||
|
@ -654,3 +669,13 @@ static int calc_check(byte *buf, uint length)
|
|||
check+= (int) (uchar) *(buf++);
|
||||
return check;
|
||||
}
|
||||
|
||||
static void make_record(char *record, uint n1, uint n2, uint n3,
|
||||
const char *mark, uint count)
|
||||
{
|
||||
bfill(record,reclength,' ');
|
||||
sprintf(record,"%6d:%4d:%8d:%3.3s: %4d",
|
||||
n1,n2,n3,mark,count);
|
||||
record[37]='A'; /* Store A in null key */
|
||||
record[38]=1; /* set as null */
|
||||
}
|
||||
|
|
|
@ -238,8 +238,10 @@ int _hp_write_key(register HP_SHARE *info, HP_KEYDEF *keyinfo,
|
|||
_hp_movelink(pos,gpos,empty);
|
||||
}
|
||||
|
||||
/* Check if dupplicated keys */
|
||||
if ((keyinfo->flag & HA_NOSAME) && pos == gpos)
|
||||
/* Check if duplicated keys */
|
||||
if ((keyinfo->flag & HA_NOSAME) && pos == gpos &&
|
||||
(!(keyinfo->flag & HA_NULL_PART_KEY) ||
|
||||
!hp_if_null_in_key(keyinfo, record)))
|
||||
{
|
||||
pos=empty;
|
||||
do
|
||||
|
|
|
@ -255,6 +255,8 @@ inline double ulonglong2double(ulonglong value)
|
|||
#define HAVE_COMPRESS
|
||||
#define HAVE_CREATESEMAPHORE
|
||||
|
||||
#define HAVE_ISAM /* We want to have support for ISAM in 4.0 */
|
||||
|
||||
#ifdef NOT_USED
|
||||
#define HAVE_SNPRINTF /* Gave link error */
|
||||
#define _snprintf snprintf
|
||||
|
|
|
@ -78,11 +78,13 @@ typedef struct st_hp_keyseg /* Key-portion */
|
|||
uint start; /* Start of key in record (from 0) */
|
||||
uint length; /* Keylength */
|
||||
uint type;
|
||||
uint null_bit; /* bit set in row+null_pos */
|
||||
uint null_pos;
|
||||
} HP_KEYSEG;
|
||||
|
||||
typedef struct st_hp_keydef /* Key definition with open */
|
||||
{
|
||||
uint flag; /* NOSAME */
|
||||
uint flag; /* HA_NOSAME | HA_NULL_PART_KEY */
|
||||
uint keysegs; /* Number of key-segment */
|
||||
uint length; /* Length of key (automatic) */
|
||||
HP_KEYSEG *seg;
|
||||
|
|
|
@ -68,7 +68,8 @@ enum ha_rkey_function {
|
|||
|
||||
enum ha_key_alg {
|
||||
HA_KEY_ALG_BTREE=0, /* B-tree, default one */
|
||||
HA_KEY_ALG_RTREE=1 /* R-tree, for spatial searches */
|
||||
HA_KEY_ALG_RTREE=1, /* R-tree, for spatial searches */
|
||||
HA_KEY_ALG_HASH=2 /* HASH keys (HEAP tables) */
|
||||
};
|
||||
|
||||
/* The following is parameter to ha_extra() */
|
||||
|
@ -149,6 +150,7 @@ enum ha_base_keytype {
|
|||
#define HA_FULLTEXT 128 /* SerG: for full-text search */
|
||||
#define HA_UNIQUE_CHECK 256 /* Check the key for uniqueness */
|
||||
#define HA_SPATIAL 1024 /* Alex Barkov: for spatial search */
|
||||
#define HA_NULL_ARE_EQUAL 2048 /* NULL in key are cmp as equal */
|
||||
|
||||
|
||||
/* Automatic bits in key-flag */
|
||||
|
@ -259,6 +261,7 @@ enum ha_base_keytype {
|
|||
#define MBR_DISJOINT 4096
|
||||
#define MBR_EQUAL 8192
|
||||
#define MBR_DATA 16384
|
||||
#define SEARCH_NULL_ARE_EQUAL 32768 /* NULL in keys are equal */
|
||||
|
||||
/* bits in opt_flag */
|
||||
#define QUICK_USED 1
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/*
|
||||
Static variables for pisam library. All definied here for easy making of
|
||||
Static variables for ISAM library. All definied here for easy making of
|
||||
a shared library
|
||||
*/
|
||||
|
||||
|
|
|
@ -38,7 +38,7 @@ libmysqlsources = errmsg.c get_password.c password.c
|
|||
noinst_HEADERS = embedded_priv.h
|
||||
|
||||
sqlsources = convert.cc derror.cc field.cc field_conv.cc filesort.cc \
|
||||
ha_innobase.cc ha_berkeley.cc ha_heap.cc ha_isam.cc ha_isammrg.cc \
|
||||
ha_innodb.cc ha_berkeley.cc ha_heap.cc ha_isam.cc ha_isammrg.cc \
|
||||
ha_myisam.cc ha_myisammrg.cc handler.cc sql_handler.cc \
|
||||
hostname.cc init.cc \
|
||||
item.cc item_buff.cc item_cmpfunc.cc item_create.cc \
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/*
|
||||
Static variables for pisam library. All definied here for easy making of
|
||||
Static variables for MyISAM library. All definied here for easy making of
|
||||
a shared library
|
||||
*/
|
||||
|
||||
|
|
|
@ -25,7 +25,8 @@
|
|||
|
||||
/* Functions declared in this file */
|
||||
|
||||
static int w_search(MI_INFO *info,MI_KEYDEF *keyinfo,uchar *key,
|
||||
static int w_search(MI_INFO *info,MI_KEYDEF *keyinfo,
|
||||
uint comp_flag, uchar *key,
|
||||
uint key_length, my_off_t pos, uchar *father_buff,
|
||||
uchar *father_keypos, my_off_t father_page,
|
||||
my_bool insert_last);
|
||||
|
@ -245,10 +246,23 @@ int _mi_ck_write_btree(register MI_INFO *info, uint keynr, uchar *key,
|
|||
uint key_length)
|
||||
{
|
||||
int error;
|
||||
uint comp_flag;
|
||||
MI_KEYDEF *keyinfo=info->s->keyinfo+keynr;
|
||||
DBUG_ENTER("_mi_ck_write_btree");
|
||||
|
||||
if (keyinfo->flag & HA_SORT_ALLOWS_SAME)
|
||||
comp_flag=SEARCH_BIGGER; /* Put after same key */
|
||||
else if (keyinfo->flag & HA_NOSAME)
|
||||
{
|
||||
comp_flag=SEARCH_FIND | SEARCH_UPDATE; /* No dupplicates */
|
||||
if (keyinfo->flag & HA_NULL_ARE_EQUAL)
|
||||
comp_flag|= SEARCH_NULL_ARE_EQUAL;
|
||||
}
|
||||
else
|
||||
comp_flag=SEARCH_SAME; /* Keys in rec-pos order */
|
||||
|
||||
if (info->s->state.key_root[keynr] == HA_OFFSET_ERROR ||
|
||||
(error=w_search(info,info->s->keyinfo+keynr,key, key_length,
|
||||
(error=w_search(info, keyinfo, comp_flag, key, key_length,
|
||||
info->s->state.key_root[keynr], (uchar *) 0, (uchar*) 0,
|
||||
(my_off_t) 0, 1)) > 0)
|
||||
error=_mi_enlarge_root(info,keynr,key);
|
||||
|
@ -291,13 +305,12 @@ int _mi_enlarge_root(register MI_INFO *info, uint keynr, uchar *key)
|
|||
*/
|
||||
|
||||
static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
|
||||
uchar *key, uint key_length, my_off_t page,
|
||||
uchar *father_buff,
|
||||
uchar *father_keypos, my_off_t father_page,
|
||||
my_bool insert_last)
|
||||
uint comp_flag, uchar *key, uint key_length, my_off_t page,
|
||||
uchar *father_buff, uchar *father_keypos,
|
||||
my_off_t father_page, my_bool insert_last)
|
||||
{
|
||||
int error,flag;
|
||||
uint comp_flag,nod_flag, search_key_length;
|
||||
uint nod_flag, search_key_length;
|
||||
uchar *temp_buff,*keypos;
|
||||
uchar keybuff[MI_MAX_KEY_BUFF];
|
||||
my_bool was_last_key;
|
||||
|
@ -305,17 +318,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
|
|||
DBUG_ENTER("w_search");
|
||||
DBUG_PRINT("enter",("page: %ld",page));
|
||||
|
||||
search_key_length=USE_WHOLE_KEY;
|
||||
if (keyinfo->flag & HA_SORT_ALLOWS_SAME)
|
||||
comp_flag=SEARCH_BIGGER; /* Put after same key */
|
||||
else if (keyinfo->flag & HA_NOSAME)
|
||||
{
|
||||
comp_flag=SEARCH_FIND | SEARCH_UPDATE; /* No dupplicates */
|
||||
search_key_length= key_length;
|
||||
}
|
||||
else
|
||||
comp_flag=SEARCH_SAME; /* Keys in rec-pos order */
|
||||
|
||||
search_key_length= (comp_flag & SEARCH_FIND) ? key_length : USE_WHOLE_KEY;
|
||||
if (!(temp_buff= (uchar*) my_alloca((uint) keyinfo->block_length+
|
||||
MI_MAX_KEY_BUFF*2)))
|
||||
DBUG_RETURN(-1);
|
||||
|
@ -344,7 +347,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
|
|||
insert_last=0;
|
||||
next_page=_mi_kpos(nod_flag,keypos);
|
||||
if (next_page == HA_OFFSET_ERROR ||
|
||||
(error=w_search(info,keyinfo,key,key_length,next_page,
|
||||
(error=w_search(info, keyinfo, comp_flag, key, key_length, next_page,
|
||||
temp_buff, keypos, page, insert_last)) >0)
|
||||
{
|
||||
error=_mi_insert(info,keyinfo,key,temp_buff,keypos,keybuff,father_buff,
|
||||
|
@ -759,41 +762,44 @@ static int keys_compare(bulk_insert_param *param, uchar *key1, uchar *key2)
|
|||
{
|
||||
uint not_used;
|
||||
return _mi_key_cmp(param->info->s->keyinfo[param->keynr].seg,
|
||||
key1, key2, USE_WHOLE_KEY, SEARCH_SAME, &not_used);
|
||||
key1, key2, USE_WHOLE_KEY, SEARCH_SAME,
|
||||
&not_used);
|
||||
}
|
||||
|
||||
|
||||
static int keys_free(uchar *key, TREE_FREE mode, bulk_insert_param *param)
|
||||
{
|
||||
/* probably I can use info->lastkey here, but I'm not sure,
|
||||
and to be safe I'd better use local lastkey.
|
||||
Monty, feel free to comment on this */
|
||||
/*
|
||||
Probably I can use info->lastkey here, but I'm not sure,
|
||||
and to be safe I'd better use local lastkey.
|
||||
*/
|
||||
uchar lastkey[MI_MAX_KEY_BUFF];
|
||||
uint keylen;
|
||||
MI_KEYDEF *keyinfo;
|
||||
|
||||
switch (mode) {
|
||||
case free_init:
|
||||
if (param->info->s->concurrent_insert)
|
||||
{
|
||||
rw_wrlock(¶m->info->s->key_root_lock[param->keynr]);
|
||||
param->info->s->keyinfo[param->keynr].version++;
|
||||
}
|
||||
return 0;
|
||||
case free_free:
|
||||
keyinfo=param->info->s->keyinfo+param->keynr;
|
||||
keylen=_mi_keylength(keyinfo, key);
|
||||
memcpy(lastkey, key, keylen);
|
||||
return _mi_ck_write_btree(param->info,param->keynr,lastkey,
|
||||
keylen - param->info->s->rec_reflength);
|
||||
case free_end:
|
||||
if (param->info->s->concurrent_insert)
|
||||
rw_unlock(&param->info->s->key_root_lock[param->keynr]);
|
||||
return 0;
|
||||
case free_init:
|
||||
if (param->info->s->concurrent_insert)
|
||||
{
|
||||
rw_wrlock(¶m->info->s->key_root_lock[param->keynr]);
|
||||
param->info->s->keyinfo[param->keynr].version++;
|
||||
}
|
||||
return 0;
|
||||
case free_free:
|
||||
keyinfo=param->info->s->keyinfo+param->keynr;
|
||||
keylen=_mi_keylength(keyinfo, key);
|
||||
memcpy(lastkey, key, keylen);
|
||||
return _mi_ck_write_btree(param->info,param->keynr,lastkey,
|
||||
keylen - param->info->s->rec_reflength);
|
||||
case free_end:
|
||||
if (param->info->s->concurrent_insert)
|
||||
rw_unlock(&param->info->s->key_root_lock[param->keynr]);
|
||||
return 0;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
int _mi_init_bulk_insert(MI_INFO *info)
|
||||
{
|
||||
MYISAM_SHARE *share=info->s;
|
||||
|
|
|
@ -263,3 +263,84 @@ score count(*)
|
|||
2 1
|
||||
1 2
|
||||
drop table t1;
|
||||
CREATE TABLE t1 (a char(1));
|
||||
INSERT INTO t1 VALUES ('A'),('B'),('A'),('B'),('A'),('B'),(NULL),('a'),('b'),(NULL),('A'),('B'),(NULL);
|
||||
SELECT a FROM t1 GROUP BY a;
|
||||
a
|
||||
NULL
|
||||
A
|
||||
B
|
||||
SELECT a,count(*) FROM t1 GROUP BY a;
|
||||
a count(*)
|
||||
NULL 3
|
||||
A 5
|
||||
B 5
|
||||
SELECT a FROM t1 GROUP BY binary a;
|
||||
a
|
||||
NULL
|
||||
A
|
||||
B
|
||||
a
|
||||
b
|
||||
SELECT a,count(*) FROM t1 GROUP BY binary a;
|
||||
a count(*)
|
||||
NULL 3
|
||||
A 4
|
||||
B 4
|
||||
a 1
|
||||
b 1
|
||||
SELECT binary a FROM t1 GROUP BY 1;
|
||||
binary a
|
||||
NULL
|
||||
A
|
||||
B
|
||||
a
|
||||
b
|
||||
SELECT binary a,count(*) FROM t1 GROUP BY 1;
|
||||
binary a count(*)
|
||||
NULL 3
|
||||
A 4
|
||||
B 4
|
||||
a 1
|
||||
b 1
|
||||
SET SQL_BIG_TABLES=1;
|
||||
SELECT a FROM t1 GROUP BY a;
|
||||
a
|
||||
NULL
|
||||
A
|
||||
B
|
||||
SELECT a,count(*) FROM t1 GROUP BY a;
|
||||
a count(*)
|
||||
NULL 3
|
||||
A 5
|
||||
B 5
|
||||
SELECT a FROM t1 GROUP BY binary a;
|
||||
a
|
||||
NULL
|
||||
A
|
||||
B
|
||||
a
|
||||
b
|
||||
SELECT a,count(*) FROM t1 GROUP BY binary a;
|
||||
a count(*)
|
||||
NULL 3
|
||||
A 4
|
||||
B 4
|
||||
a 1
|
||||
b 1
|
||||
SELECT binary a FROM t1 GROUP BY 1;
|
||||
binary a
|
||||
NULL
|
||||
A
|
||||
B
|
||||
a
|
||||
b
|
||||
SELECT binary a,count(*) FROM t1 GROUP BY 1;
|
||||
binary a count(*)
|
||||
NULL 3
|
||||
A 4
|
||||
B 4
|
||||
a 1
|
||||
b 1
|
||||
SET SQL_BIG_TABLES=0;
|
||||
drop table t1;
|
||||
|
|
|
@ -165,3 +165,29 @@ explain select * from t1 where btn="a" and new_col="a";
|
|||
table type possible_keys key key_len ref rows Extra
|
||||
t1 ref btn btn 11 const,const 10 where used
|
||||
drop table t1;
|
||||
CREATE TABLE t1 (
|
||||
a int default NULL,
|
||||
b int default NULL,
|
||||
KEY a (a),
|
||||
UNIQUE b (b)
|
||||
) type=heap;
|
||||
INSERT INTO t1 VALUES (NULL,99),(99,NULL),(1,1),(2,2),(1,3);
|
||||
SELECT * FROM t1 WHERE a=NULL;
|
||||
a b
|
||||
explain SELECT * FROM t1 WHERE a IS NULL;
|
||||
table type possible_keys key key_len ref rows Extra
|
||||
t1 ref a a 5 const 10 where used
|
||||
SELECT * FROM t1 WHERE a<=>NULL;
|
||||
a b
|
||||
NULL 99
|
||||
SELECT * FROM t1 WHERE b=NULL;
|
||||
a b
|
||||
explain SELECT * FROM t1 WHERE b IS NULL;
|
||||
table type possible_keys key key_len ref rows Extra
|
||||
t1 ref b b 5 const 1 where used
|
||||
SELECT * FROM t1 WHERE b<=>NULL;
|
||||
a b
|
||||
99 NULL
|
||||
INSERT INTO t1 VALUES (1,3);
|
||||
Duplicate entry '3' for key 1
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -40,7 +40,6 @@ insert into t1 values (null);
|
|||
select * from t1 where x != 0;
|
||||
x
|
||||
drop table t1;
|
||||
DROP TABLE IF EXISTS t1;
|
||||
CREATE TABLE t1 (
|
||||
indexed_field int default NULL,
|
||||
KEY indexed_field (indexed_field)
|
||||
|
|
|
@ -286,15 +286,15 @@ a b c
|
|||
1 NULL NULL
|
||||
explain select * from t1 where a = 1 order by a desc, b desc;
|
||||
table type possible_keys key key_len ref rows Extra
|
||||
t1 ref a a 4 const 5 where used; Using index; Using filesort
|
||||
t1 ref a a 4 const 5 where used; Using index
|
||||
select * from t1 where a = 1 order by a desc, b desc;
|
||||
a b c
|
||||
1 3 b
|
||||
1 1 b
|
||||
1 1 b
|
||||
1 1 NULL
|
||||
1 1 b
|
||||
1 1 b
|
||||
1 NULL NULL
|
||||
1 NULL b
|
||||
1 NULL NULL
|
||||
explain select * from t1 where a = 1 and b is null order by a desc, b desc;
|
||||
table type possible_keys key key_len ref rows Extra
|
||||
t1 ref a a 9 const,const 2 where used; Using index; Using filesort
|
||||
|
|
|
@ -243,3 +243,26 @@ select sql_big_result spid,sum(userid) from t1 group by spid desc;
|
|||
explain select sql_big_result score,count(*) from t1 group by score desc;
|
||||
select sql_big_result score,count(*) from t1 group by score desc;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# Compare with hash keys
|
||||
#
|
||||
|
||||
CREATE TABLE t1 (a char(1));
|
||||
INSERT INTO t1 VALUES ('A'),('B'),('A'),('B'),('A'),('B'),(NULL),('a'),('b'),(NULL),('A'),('B'),(NULL);
|
||||
SELECT a FROM t1 GROUP BY a;
|
||||
SELECT a,count(*) FROM t1 GROUP BY a;
|
||||
SELECT a FROM t1 GROUP BY binary a;
|
||||
SELECT a,count(*) FROM t1 GROUP BY binary a;
|
||||
SELECT binary a FROM t1 GROUP BY 1;
|
||||
SELECT binary a,count(*) FROM t1 GROUP BY 1;
|
||||
# Do the same tests with MyISAM temporary tables
|
||||
SET SQL_BIG_TABLES=1;
|
||||
SELECT a FROM t1 GROUP BY a;
|
||||
SELECT a,count(*) FROM t1 GROUP BY a;
|
||||
SELECT a FROM t1 GROUP BY binary a;
|
||||
SELECT a,count(*) FROM t1 GROUP BY binary a;
|
||||
SELECT binary a FROM t1 GROUP BY 1;
|
||||
SELECT binary a,count(*) FROM t1 GROUP BY 1;
|
||||
SET SQL_BIG_TABLES=0;
|
||||
drop table t1;
|
||||
|
|
|
@ -100,3 +100,25 @@ update t1 set new_col=btn;
|
|||
explain select * from t1 where btn="a";
|
||||
explain select * from t1 where btn="a" and new_col="a";
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# Test of NULL keys
|
||||
#
|
||||
|
||||
CREATE TABLE t1 (
|
||||
a int default NULL,
|
||||
b int default NULL,
|
||||
KEY a (a),
|
||||
UNIQUE b (b)
|
||||
) type=heap;
|
||||
INSERT INTO t1 VALUES (NULL,99),(99,NULL),(1,1),(2,2),(1,3);
|
||||
SELECT * FROM t1 WHERE a=NULL;
|
||||
explain SELECT * FROM t1 WHERE a IS NULL;
|
||||
SELECT * FROM t1 WHERE a<=>NULL;
|
||||
SELECT * FROM t1 WHERE b=NULL;
|
||||
explain SELECT * FROM t1 WHERE b IS NULL;
|
||||
SELECT * FROM t1 WHERE b<=>NULL;
|
||||
|
||||
--error 1062
|
||||
INSERT INTO t1 VALUES (1,3);
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -25,7 +25,6 @@ drop table t1;
|
|||
# Test problem med index on NULL columns and testing with =NULL;
|
||||
#
|
||||
|
||||
DROP TABLE IF EXISTS t1;
|
||||
CREATE TABLE t1 (
|
||||
indexed_field int default NULL,
|
||||
KEY indexed_field (indexed_field)
|
||||
|
|
|
@ -51,7 +51,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
|
|||
sql_manager.h sql_map.h sql_string.h unireg.h \
|
||||
field.h handler.h \
|
||||
ha_isammrg.h ha_isam.h ha_myisammrg.h\
|
||||
ha_heap.h ha_myisam.h ha_berkeley.h ha_innobase.h \
|
||||
ha_heap.h ha_myisam.h ha_berkeley.h ha_innodb.h \
|
||||
opt_range.h opt_ft.h \
|
||||
sql_select.h structs.h table.h sql_udf.h hash_filo.h\
|
||||
lex.h lex_symbol.h sql_acl.h sql_crypt.h \
|
||||
|
@ -74,7 +74,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
|
|||
time.cc opt_range.cc opt_sum.cc opt_ft.cc \
|
||||
records.cc filesort.cc handler.cc \
|
||||
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
|
||||
ha_berkeley.cc ha_innobase.cc \
|
||||
ha_berkeley.cc ha_innodb.cc \
|
||||
ha_isam.cc ha_isammrg.cc \
|
||||
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
|
||||
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
|
||||
|
|
|
@ -1454,6 +1454,37 @@ int ha_berkeley::index_read(byte * buf, const byte * key,
|
|||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
/*
|
||||
Read last key is solved by reading the next key and then reading
|
||||
the previous key
|
||||
*/
|
||||
|
||||
int ha_berkeley::index_read_last(byte * buf, const byte * key, uint key_len)
|
||||
{
|
||||
DBT row;
|
||||
int error;
|
||||
KEY *key_info= &table->key_info[active_index];
|
||||
DBUG_ENTER("ha_berkeley::index_read");
|
||||
|
||||
statistic_increment(ha_read_key_count,&LOCK_status);
|
||||
bzero((char*) &row,sizeof(row));
|
||||
|
||||
/* read of partial key */
|
||||
pack_key(&last_key, active_index, key_buff, key, key_len);
|
||||
/* Store for compare */
|
||||
memcpy(key_buff2, key_buff, (key_len=last_key.size));
|
||||
key_info->handler.bdb_return_if_eq= 1;
|
||||
error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE),
|
||||
(char*) buf, active_index, &row, (DBT*) 0, 0);
|
||||
key_info->handler.bdb_return_if_eq= 0;
|
||||
bzero((char*) &row,sizeof(row));
|
||||
if (read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
|
||||
(char*) buf, active_index, &row, &last_key, 1) ||
|
||||
berkeley_key_cmp(table, key_info, key_buff2, key_len))
|
||||
error=HA_ERR_KEY_NOT_FOUND;
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
|
||||
int ha_berkeley::index_next(byte * buf)
|
||||
{
|
||||
|
|
|
@ -89,7 +89,7 @@ class ha_berkeley: public handler
|
|||
int_option_flag(HA_READ_NEXT | HA_READ_PREV |
|
||||
HA_REC_NOT_IN_SEQ |
|
||||
HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER | HA_LASTKEY_ORDER |
|
||||
HA_LONGLONG_KEYS | HA_NULL_KEY | HA_HAVE_KEY_READ_ONLY |
|
||||
HA_NULL_KEY | HA_HAVE_KEY_READ_ONLY |
|
||||
HA_BLOB_KEY | HA_NOT_EXACT_COUNT |
|
||||
HA_PRIMARY_KEY_IN_READ_INDEX | HA_DROP_BEFORE_CREATE |
|
||||
HA_AUTO_PART_KEY),
|
||||
|
@ -123,6 +123,7 @@ class ha_berkeley: public handler
|
|||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_idx(byte * buf, uint index, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_last(byte * buf, const byte * key, uint key_len);
|
||||
int index_next(byte * buf);
|
||||
int index_next_same(byte * buf, const byte *key, uint keylen);
|
||||
int index_prev(byte * buf);
|
||||
|
|
|
@ -33,7 +33,7 @@ const char **ha_heap::bas_ext() const
|
|||
|
||||
int ha_heap::open(const char *name, int mode, uint test_if_locked)
|
||||
{
|
||||
uint key,part,parts,mem_per_row=0;
|
||||
uint key,parts,mem_per_row=0;
|
||||
ulong max_rows;
|
||||
HP_KEYDEF *keydef;
|
||||
HP_KEYSEG *seg;
|
||||
|
@ -48,24 +48,38 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
|
|||
for (key=0 ; key < table->keys ; key++)
|
||||
{
|
||||
KEY *pos=table->key_info+key;
|
||||
KEY_PART_INFO *key_part= pos->key_part;
|
||||
KEY_PART_INFO *key_part_end= key_part+pos->key_parts;
|
||||
|
||||
mem_per_row += (pos->key_length + (sizeof(char*) * 2));
|
||||
|
||||
keydef[key].keysegs=(uint) pos->key_parts;
|
||||
keydef[key].flag = (pos->flags & HA_NOSAME);
|
||||
keydef[key].flag = (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
|
||||
keydef[key].seg=seg;
|
||||
|
||||
for (part=0 ; part < pos->key_parts ; part++)
|
||||
for (; key_part != key_part_end ; key_part++, seg++)
|
||||
{
|
||||
uint flag=pos->key_part[part].key_type;
|
||||
uint flag=key_part->key_type;
|
||||
Field *field=key_part->field;
|
||||
if (!f_is_packed(flag) &&
|
||||
f_packtype(flag) == (int) FIELD_TYPE_DECIMAL &&
|
||||
!(flag & FIELDFLAG_BINARY))
|
||||
seg->type= (int) HA_KEYTYPE_TEXT;
|
||||
else
|
||||
seg->type= (int) HA_KEYTYPE_BINARY;
|
||||
seg->start=(uint) pos->key_part[part].offset;
|
||||
seg->length=(uint) pos->key_part[part].length;
|
||||
seg++;
|
||||
seg->start=(uint) key_part->offset;
|
||||
seg->length=(uint) key_part->length;
|
||||
if (field->null_ptr)
|
||||
{
|
||||
seg->null_bit=field->null_bit;
|
||||
seg->null_pos= (uint) (field->null_ptr-
|
||||
(uchar*) table->record[0]);
|
||||
}
|
||||
else
|
||||
{
|
||||
seg->null_bit=0;
|
||||
seg->null_pos=0;
|
||||
}
|
||||
}
|
||||
}
|
||||
mem_per_row += MY_ALIGN(table->reclength+1, sizeof(char*));
|
||||
|
@ -77,7 +91,8 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
|
|||
table->max_rows : max_rows),
|
||||
table->min_rows);
|
||||
my_free((gptr) keydef,MYF(0));
|
||||
info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
|
||||
if (file)
|
||||
info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
|
||||
ref_length=sizeof(HEAP_PTR);
|
||||
return (!file ? errno : 0);
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ class ha_heap: public handler
|
|||
ulong option_flag() const
|
||||
{ return (HA_READ_RND_SAME | HA_NO_INDEX | HA_ONLY_WHOLE_INDEX |
|
||||
HA_WRONG_ASCII_ORDER | HA_KEYPOS_TO_RNDPOS | HA_NO_BLOBS |
|
||||
HA_REC_NOT_IN_SEQ); }
|
||||
HA_NULL_KEY | HA_REC_NOT_IN_SEQ | HA_NOT_READ_PREFIX_LAST); }
|
||||
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
|
||||
uint max_keys() const { return MAX_KEY; }
|
||||
uint max_key_parts() const { return MAX_REF_PARTS; }
|
||||
|
|
|
@ -35,7 +35,7 @@ InnoDB */
|
|||
|
||||
#define MAX_ULONG_BIT ((ulong) 1 << (sizeof(ulong)*8-1))
|
||||
|
||||
#include "ha_innobase.h"
|
||||
#include "ha_innodb.h"
|
||||
|
||||
/* We must declare this here because we undef SAFE_MUTEX below */
|
||||
pthread_mutex_t innobase_mutex;
|
||||
|
@ -2008,6 +2008,24 @@ ha_innobase::index_read(
|
|||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
The following functions works like index_read, but it find the last
|
||||
row with the current index prefix.
|
||||
This code is disabled until Heikki has verified that InnoDB support the
|
||||
HA_READ_PREFIX_LAST flag and removed the HA_NOT_READ_PREFIX_LAST
|
||||
flag from ha_innodb.h
|
||||
*/
|
||||
|
||||
int
|
||||
ha_innobase::index_read_last(mysql_byte *buf,
|
||||
const mysql_byte *key_ptr,
|
||||
uint key_len)
|
||||
{
|
||||
return index_read(buf, key_ptr, key_len, HA_READ_PREFIX_LAST);
|
||||
}
|
||||
|
||||
|
||||
/************************************************************************
|
||||
Changes the active index of a handle. */
|
||||
|
|
@ -1,7 +1,4 @@
|
|||
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
|
||||
&& Innobase Oy
|
||||
|
||||
-This file is modified from ha_berkeley.h of MySQL distribution-
|
||||
/* Copyright (C) 2000 MySQL AB && Innobase Oy
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -17,13 +14,17 @@
|
|||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/*
|
||||
This file is based on ha_berkeley.h of MySQL distribution
|
||||
|
||||
This file defines the Innodb handler: the interface between MySQL and
|
||||
Innodb
|
||||
*/
|
||||
|
||||
#ifdef __GNUC__
|
||||
#pragma interface /* gcc class implementation */
|
||||
#endif
|
||||
|
||||
/* This file defines the Innobase handler: the interface between MySQL and
|
||||
Innobase */
|
||||
|
||||
typedef struct st_innobase_share {
|
||||
THR_LOCK lock;
|
||||
pthread_mutex_t mutex;
|
||||
|
@ -32,11 +33,11 @@ typedef struct st_innobase_share {
|
|||
} INNOBASE_SHARE;
|
||||
|
||||
|
||||
/* The class defining a handle to an Innobase table */
|
||||
/* The class defining a handle to an Innodb table */
|
||||
class ha_innobase: public handler
|
||||
{
|
||||
void* innobase_prebuilt; /* (row_prebuilt_t*) prebuilt
|
||||
struct in Innobase, used to save
|
||||
struct in Innodb, used to save
|
||||
CPU */
|
||||
THD* user_thd; /* the thread handle of the user
|
||||
currently using the handle; this is
|
||||
|
@ -50,7 +51,7 @@ class ha_innobase: public handler
|
|||
byte* upd_buff; /* buffer used in updates */
|
||||
byte* key_val_buff; /* buffer used in converting
|
||||
search key values from MySQL format
|
||||
to Innobase format */
|
||||
to Innodb format */
|
||||
uint ref_stored_len; /* length of the key value stored to
|
||||
'ref' buffer of the handle, if any */
|
||||
ulong int_option_flag;
|
||||
|
@ -78,11 +79,11 @@ class ha_innobase: public handler
|
|||
HA_REC_NOT_IN_SEQ |
|
||||
HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER |
|
||||
HA_HAVE_KEY_READ_ONLY | HA_READ_NOT_EXACT_KEY |
|
||||
HA_LONGLONG_KEYS | HA_NULL_KEY |
|
||||
HA_NULL_KEY |
|
||||
HA_NOT_EXACT_COUNT |
|
||||
HA_NO_WRITE_DELAYED |
|
||||
HA_PRIMARY_KEY_IN_READ_INDEX |
|
||||
HA_DROP_BEFORE_CREATE |
|
||||
HA_DROP_BEFORE_CREATE | HA_NOT_READ_PREFIX_LAST |
|
||||
HA_NO_PREFIX_CHAR_KEYS),
|
||||
last_dup_key((uint) -1),
|
||||
start_of_scan(0)
|
||||
|
@ -122,9 +123,10 @@ class ha_innobase: public handler
|
|||
int index_init(uint index);
|
||||
int index_end();
|
||||
int index_read(byte * buf, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_idx(byte * buf, uint index, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_last(byte * buf, const byte * key, uint key_len);
|
||||
int index_next(byte * buf);
|
||||
int index_next_same(byte * buf, const byte *key, uint keylen);
|
||||
int index_prev(byte * buf);
|
|
@ -109,6 +109,15 @@ int ha_isam::index_read_idx(byte * buf, uint index, const byte * key,
|
|||
return !error ? 0 : my_errno ? my_errno : -1;
|
||||
}
|
||||
|
||||
int ha_isam::index_read_last(byte * buf, const byte * key, uint key_len)
|
||||
{
|
||||
statistic_increment(ha_read_key_count,&LOCK_status);
|
||||
int error=nisam_rkey(file, buf, active_index, key, key_len,
|
||||
HA_READ_PREFIX_LAST);
|
||||
table->status=error ? STATUS_NOT_FOUND: 0;
|
||||
return !error ? 0 : my_errno ? my_errno : -1;
|
||||
}
|
||||
|
||||
int ha_isam::index_next(byte * buf)
|
||||
{
|
||||
statistic_increment(ha_read_next_count,&LOCK_status);
|
||||
|
|
|
@ -33,7 +33,7 @@ class ha_isam: public handler
|
|||
int_option_flag(HA_READ_NEXT | HA_READ_PREV | HA_READ_RND_SAME |
|
||||
HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER | HA_LASTKEY_ORDER |
|
||||
HA_HAVE_KEY_READ_ONLY | HA_READ_NOT_EXACT_KEY |
|
||||
HA_LONGLONG_KEYS | HA_KEY_READ_WRONG_STR | HA_DUPP_POS |
|
||||
HA_KEY_READ_WRONG_STR | HA_DUPP_POS |
|
||||
HA_NOT_DELETE_WITH_CACHE)
|
||||
{}
|
||||
~ha_isam() {}
|
||||
|
@ -57,6 +57,7 @@ class ha_isam: public handler
|
|||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_idx(byte * buf, uint idx, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_last(byte * buf, const byte * key, uint key_len);
|
||||
int index_next(byte * buf);
|
||||
int index_prev(byte * buf);
|
||||
int index_first(byte * buf);
|
||||
|
|
|
@ -32,8 +32,9 @@ class ha_isammrg: public handler
|
|||
~ha_isammrg() {}
|
||||
const char *table_type() const { return "MRG_ISAM"; }
|
||||
const char **bas_ext() const;
|
||||
ulong option_flag() const { return HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS
|
||||
| HA_REC_NOT_IN_SEQ;}
|
||||
ulong option_flag() const { return (HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS |
|
||||
HA_NOT_READ_PREFIX_LAST |
|
||||
HA_REC_NOT_IN_SEQ); }
|
||||
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
|
||||
uint max_keys() const { return 0; }
|
||||
uint max_key_parts() const { return 0; }
|
||||
|
|
|
@ -777,6 +777,14 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key,
|
|||
return error;
|
||||
}
|
||||
|
||||
int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len)
|
||||
{
|
||||
statistic_increment(ha_read_key_count,&LOCK_status);
|
||||
int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST);
|
||||
table->status=error ? STATUS_NOT_FOUND: 0;
|
||||
return error;
|
||||
}
|
||||
|
||||
int ha_myisam::index_next(byte * buf)
|
||||
{
|
||||
statistic_increment(ha_read_next_count,&LOCK_status);
|
||||
|
@@ -973,7 +981,7 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
 }
 
 
-int ha_myisam::create(const char *name, register TABLE *form,
+int ha_myisam::create(const char *name, register TABLE *table,
                       HA_CREATE_INFO *info)
 {
   int error;

@@ -985,20 +993,20 @@ int ha_myisam::create(const char *name, register TABLE *form,
   MI_KEYDEF *keydef;
   MI_COLUMNDEF *recinfo,*recinfo_pos;
   MI_KEYSEG *keyseg;
-  uint options=form->db_options_in_use;
+  uint options=table->db_options_in_use;
   DBUG_ENTER("ha_myisam::create");
 
   type=HA_KEYTYPE_BINARY;   // Keep compiler happy
   if (!(my_multi_malloc(MYF(MY_WME),
-                        &recinfo,(form->fields*2+2)*sizeof(MI_COLUMNDEF),
-                        &keydef, form->keys*sizeof(MI_KEYDEF),
+                        &recinfo,(table->fields*2+2)*sizeof(MI_COLUMNDEF),
+                        &keydef, table->keys*sizeof(MI_KEYDEF),
                         &keyseg,
-                        ((form->key_parts + form->keys) * sizeof(MI_KEYSEG)),
+                        ((table->key_parts + table->keys) * sizeof(MI_KEYSEG)),
                         0)))
     DBUG_RETURN(1);
 
-  pos=form->key_info;
-  for (i=0; i < form->keys ; i++, pos++)
+  pos=table->key_info;
+  for (i=0; i < table->keys ; i++, pos++)
   {
     keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT));
     keydef[i].seg=keyseg;

@@ -1041,7 +1049,7 @@ int ha_myisam::create(const char *name, register TABLE *form,
       {
        keydef[i].seg[j].null_bit=field->null_bit;
        keydef[i].seg[j].null_pos= (uint) (field->null_ptr-
-                                          (uchar*) form->record[0]);
+                                          (uchar*) table->record[0]);
       }
       else
       {

@@ -1059,19 +1067,19 @@ int ha_myisam::create(const char *name, register TABLE *form,
          keydef[i].seg[j].flag|=HA_BLOB_PART;
          /* save number of bytes used to pack length */
          keydef[i].seg[j].bit_start= (uint) (field->pack_length() -
-                                             form->blob_ptr_size);
+                                             table->blob_ptr_size);
        }
       }
     keyseg+=pos->key_parts;
   }
 
   recpos=0; recinfo_pos=recinfo;
-  while (recpos < (uint) form->reclength)
+  while (recpos < (uint) table->reclength)
   {
     Field **field,*found=0;
-    minpos=form->reclength; length=0;
+    minpos=table->reclength; length=0;
 
-    for (field=form->field ; *field ; field++)
+    for (field=table->field ; *field ; field++)
     {
       if ((fieldpos=(*field)->offset()) >= recpos &&
          fieldpos <= minpos)

@@ -1117,7 +1125,7 @@ int ha_myisam::create(const char *name, register TABLE *form,
     {
       recinfo_pos->null_bit=found->null_bit;
       recinfo_pos->null_pos= (uint) (found->null_ptr-
-                                     (uchar*) form->record[0]);
+                                     (uchar*) table->record[0]);
     }
     else
     {

@@ -1132,20 +1140,23 @@ int ha_myisam::create(const char *name, register TABLE *form,
   }
   MI_CREATE_INFO create_info;
   bzero((char*) &create_info,sizeof(create_info));
-  create_info.max_rows=form->max_rows;
-  create_info.reloc_rows=form->min_rows;
+  create_info.max_rows=table->max_rows;
+  create_info.reloc_rows=table->min_rows;
   create_info.auto_increment=(info->auto_increment_value ?
                               info->auto_increment_value -1 :
                               (ulonglong) 0);
-  create_info.data_file_length=(ulonglong) form->max_rows*form->avg_row_length;
+  create_info.data_file_length= ((ulonglong) table->max_rows *
+                                 table->avg_row_length);
   create_info.raid_type=info->raid_type;
-  create_info.raid_chunks=info->raid_chunks ? info->raid_chunks : RAID_DEFAULT_CHUNKS;
-  create_info.raid_chunksize=info->raid_chunksize ? info->raid_chunksize : RAID_DEFAULT_CHUNKSIZE;
+  create_info.raid_chunks= (info->raid_chunks ? info->raid_chunks :
+                            RAID_DEFAULT_CHUNKS);
+  create_info.raid_chunksize=(info->raid_chunksize ? info->raid_chunksize :
+                              RAID_DEFAULT_CHUNKSIZE);
   create_info.data_file_name= info->data_file_name;
   create_info.index_file_name=info->index_file_name;
 
   error=mi_create(fn_format(buff,name,"","",2+4),
-                  form->keys,keydef,
+                  table->keys,keydef,
                   (uint) (recinfo_pos-recinfo), recinfo,
                   0, (MI_UNIQUEDEF*) 0,
                   &create_info,
@@ -47,7 +47,7 @@ class ha_myisam: public handler
     int_option_flag(HA_READ_NEXT | HA_READ_PREV | HA_READ_RND_SAME |
                     HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER | HA_LASTKEY_ORDER |
                     HA_HAVE_KEY_READ_ONLY | HA_READ_NOT_EXACT_KEY |
-                    HA_LONGLONG_KEYS | HA_NULL_KEY |
+                    HA_NULL_KEY |
                     HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
                     HA_DUPP_POS | HA_BLOB_KEY | HA_AUTO_PART_KEY),
     enable_activate_all_index(1)

@@ -71,6 +71,7 @@ class ha_myisam: public handler
                  uint key_len, enum ha_rkey_function find_flag);
   int index_read_idx(byte * buf, uint idx, const byte * key,
                      uint key_len, enum ha_rkey_function find_flag);
+  int index_read_last(byte * buf, const byte * key, uint key_len);
   int index_next(byte * buf);
   int index_prev(byte * buf);
   int index_first(byte * buf);

@@ -78,9 +79,15 @@ class ha_myisam: public handler
   int index_next_same(byte *buf, const byte *key, uint keylen);
   int index_end() { ft_handler=NULL; return 0; }
   int ft_init()
-  { if(!ft_handler) return 1; ft_handler->please->reinit_search(ft_handler); return 0; }
-  FT_INFO *ft_init_ext(uint mode, uint inx,const byte *key, uint keylen, bool presort)
-  { return ft_init_search(mode, file,inx,(byte*) key,keylen,presort); }
+  {
+    if (!ft_handler)
+      return 1;
+    ft_handler->please->reinit_search(ft_handler);
+    return 0;
+  }
+  FT_INFO *ft_init_ext(uint mode, uint inx,const byte *key, uint keylen,
+                       bool presort)
+  { return ft_init_search(mode, file,inx,(byte*) key,keylen,presort); }
   int ft_read(byte *buf);
   int rnd_init(bool scan=1);
   int rnd_next(byte *buf);
@@ -112,6 +112,15 @@ int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key,
   return error;
 }
 
+int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len)
+{
+  statistic_increment(ha_read_key_count,&LOCK_status);
+  int error=myrg_rkey(file,buf,active_index, key, key_len,
+                      HA_READ_PREFIX_LAST);
+  table->status=error ? STATUS_NOT_FOUND: 0;
+  return error;
+}
+
 int ha_myisammrg::index_next(byte * buf)
 {
   statistic_increment(ha_read_next_count,&LOCK_status);

@@ -38,7 +38,7 @@ class ha_myisammrg: public handler
                   HA_HAVE_KEY_READ_ONLY |
                   HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER |
                   HA_LASTKEY_ORDER | HA_READ_NOT_EXACT_KEY |
-                  HA_LONGLONG_KEYS | HA_NULL_KEY | HA_BLOB_KEY); }
+                  HA_NULL_KEY | HA_BLOB_KEY); }
   uint max_record_length() const { return HA_MAX_REC_LENGTH; }
   uint max_keys() const { return MI_MAX_KEY; }
   uint max_key_parts() const { return MAX_REF_PARTS; }
@@ -55,6 +55,7 @@ class ha_myisammrg: public handler
                  uint key_len, enum ha_rkey_function find_flag);
   int index_read_idx(byte * buf, uint idx, const byte * key,
                      uint key_len, enum ha_rkey_function find_flag);
+  int index_read_last(byte * buf, const byte * key, uint key_len);
   int index_next(byte * buf);
   int index_prev(byte * buf);
   int index_first(byte * buf);

@@ -33,7 +33,7 @@
 #include "ha_berkeley.h"
 #endif
 #ifdef HAVE_INNOBASE_DB
-#include "ha_innobase.h"
+#include "ha_innodb.h"
 #endif
 #include <myisampack.h>
 #include <errno.h>
@@ -55,12 +55,11 @@
 #define HA_REC_NOT_IN_SEQ       64  /* ha_info don't return recnumber;
                                        It returns a position to ha_r_rnd */
 #define HA_ONLY_WHOLE_INDEX     128 /* Can't use part key searches */
-#define HA_RSAME_NO_INDEX       256 /* RSAME can't restore index */
+#define HA_NOT_READ_PREFIX_LAST 256 /* RSAME can't restore index */
 #define HA_WRONG_ASCII_ORDER    512 /* Can't use sorting through key */
 #define HA_HAVE_KEY_READ_ONLY   1024 /* Can read only keys (no record) */
 #define HA_READ_NOT_EXACT_KEY   2048 /* Can read record after/before key */
 #define HA_NO_INDEX             4096 /* No index needed for next/prev */
-#define HA_LONGLONG_KEYS        8192 /* Can have longlong as key */
 #define HA_KEY_READ_WRONG_STR   16384 /* keyread returns converted strings */
 #define HA_NULL_KEY             32768 /* One can have keys with NULL */
 #define HA_DUPP_POS             65536 /* ha_position() gives dupp row */

@@ -256,6 +255,10 @@ public:
   virtual int index_first(byte * buf)=0;
   virtual int index_last(byte * buf)=0;
   virtual int index_next_same(byte *buf, const byte *key, uint keylen);
+  virtual int index_read_last(byte * buf, const byte * key, uint key_len)
+  {
+    return (my_errno=HA_ERR_WRONG_COMMAND);
+  }
   virtual int ft_init()
     { return -1; }
   virtual FT_INFO *ft_init_ext(uint mode,uint inx,const byte *key, uint keylen,
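The base handler gets a non-abstract index_read_last() that simply fails with HA_ERR_WRONG_COMMAND, so only engines that can position on the last key prefix need to override it. A minimal sketch of that default-to-unsupported pattern; the class and return values below are illustrative, not the real handler interface.

    // Default virtual returns "not supported"; capable engines override it.
    #include <iostream>

    struct base_handler
    {
      virtual ~base_handler() {}
      virtual int index_read_last() { return -1; }    // "wrong command" by default
    };

    struct myisam_like : public base_handler
    {
      int index_read_last() { return 0; }             // engine that supports it
    };

    int main()
    {
      base_handler plain;
      myisam_like  myisam;
      std::cout << plain.index_read_last() << " "
                << myisam.index_read_last() << "\n";  // prints: -1 0
      return 0;
    }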
@@ -435,7 +435,8 @@ class Item_func_binary :public Item_str_func
 public:
   Item_func_binary(Item *a) :Item_str_func(a) {}
   const char *func_name() const { return "binary"; }
-  String *val_str(String *a) { return (args[0]->val_str(a)); }
+  String *val_str(String *a)
+  { a=args[0]->val_str(a); null_value=args[0]->null_value; return a; }
   void fix_length_and_dec() { binary=1; max_length=args[0]->max_length; }
   void print(String *str) { print_op(str); }
 };

@@ -418,7 +418,8 @@ class Item_typecast :public Item_str_func
 {
 public:
   Item_typecast(Item *a) :Item_str_func(a) {}
-  String *val_str(String *a) { return (args[0]->val_str(a)); }
+  String *val_str(String *a)
+  { a=args[0]->val_str(a); null_value=args[0]->null_value; return a; }
   void fix_length_and_dec() { max_length=args[0]->max_length; }
   void print(String *str);
 };
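Both val_str() changes above make the wrapping item copy the argument's null_value flag instead of only forwarding the String pointer. A small standalone sketch of why the flag has to be copied; the producer and wrapper types are invented for the example, only the idea of propagating the argument's NULL flag comes from the diff.

    #include <iostream>
    #include <string>

    struct producer
    {
      bool null_value;
      producer() : null_value(false) {}
      const std::string *val_str(const std::string *s)
      { null_value=true; return s; }                  // pretend the value was NULL
    };

    struct wrapper
    {
      producer arg;
      bool null_value;
      wrapper() : null_value(false) {}
      const std::string *val_str(const std::string *s)
      {
        const std::string *res=arg.val_str(s);
        null_value=arg.null_value;                    // the line the diff adds, in effect
        return res;
      }
    };

    int main()
    {
      std::string buf="x";
      wrapper w;
      w.val_str(&buf);
      std::cout << (w.null_value ? "NULL" : "NOT NULL") << "\n";  // prints: NULL
      return 0;
    }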
@@ -27,7 +27,7 @@
 #include "ha_berkeley.h"
 #endif
 #ifdef HAVE_INNOBASE_DB
-#include "ha_innobase.h"
+#include "ha_innodb.h"
 #endif
 #include "ha_myisam.h"
 #include <nisam.h>

@@ -24,7 +24,7 @@
 
 
 #include "mysql_priv.h"
-#include "ha_innobase.h"
+#include "ha_innodb.h"
 #include "sql_select.h"
 
 int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order,

@@ -557,9 +557,9 @@ pthread_handler_decl(handle_one_connection,arg)
 
   pthread_detach_this_thread();
 
-#if !defined( __WIN__) && !defined(OS2)  /* Win32 calls this in pthread_create */
-  if (my_thread_init())  // needed to be called first before we call
-                         // DBUG_ macros
+#if !defined( __WIN__) && !defined(OS2)  // Win32 calls this in pthread_create
+  // The following calls needs to be done before we call DBUG_ macros
+  if (my_thread_init())
   {
     close_connection(&thd->net,ER_OUT_OF_RESOURCES);
     statistic_increment(aborted_connects,&LOCK_thread_count);
@@ -568,13 +568,13 @@ pthread_handler_decl(handle_one_connection,arg)
   }
 #endif
 
-  // handle_one_connection() is the only way a thread would start
-  // and would always be on top of the stack
-  // therefore, the thread stack always starts at the address of the first
-  // local variable of handle_one_connection, which is thd
-  // we need to know the start of the stack so that we could check for
-  // stack overruns
-
+  /*
+    handle_one_connection() is the only way a thread would start
+    and would always be on top of the stack, therefore, the thread
+    stack always starts at the address of the first local variable
+    of handle_one_connection, which is thd. We need to know the
+    start of the stack so that we could check for stack overruns.
+  */
   DBUG_PRINT("info", ("handle_one_connection called by thread %d\n",
                       thd->thread_id));
   // now that we've called my_thread_init(), it is safe to call DBUG_*
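The reworded comment explains that the address of handle_one_connection()'s first local variable is taken as the start of the thread's stack, so later code can estimate how much stack has been used. A standalone sketch of that idea; it assumes a downward-growing stack and the pointer arithmetic is only meant to illustrate the comment, not to be portable production code.

    #include <cstdio>

    static const char *stack_start;

    static void deep(int n)
    {
      char here;                                  // a local further down the stack
      std::printf("roughly %ld bytes of stack used\n",
                  (long) (stack_start - &here));
      if (n > 0)
        deep(n-1);
    }

    int main()
    {
      char top;                                   // first local of the entry function
      stack_start= &top;
      deep(2);
      return 0;
    }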
@@ -634,12 +634,12 @@ pthread_handler_decl(handle_one_connection,arg)
   if (net->error && net->vio != 0)
   {
     if (!thd->killed && opt_warnings)
-      sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
-                      thd->thread_id,(thd->db ? thd->db : "unconnected"),
-                      thd->user ? thd->user : "unauthenticated",
-                      thd->host_or_ip,
-                      (net->last_errno ? ER(net->last_errno) :
-                       ER(ER_UNKNOWN_ERROR)));
+      sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
+                      thd->thread_id,(thd->db ? thd->db : "unconnected"),
+                      thd->user ? thd->user : "unauthenticated",
+                      thd->host_or_ip,
+                      (net->last_errno ? ER(net->last_errno) :
+                       ER(ER_UNKNOWN_ERROR)));
     send_error(net,net->last_errno,NullS);
     thread_safe_increment(aborted_threads,&LOCK_thread_count);
   }

@@ -89,15 +89,18 @@ static int join_read_system(JOIN_TAB *tab);
 static int join_read_const(JOIN_TAB *tab);
 static int join_read_key(JOIN_TAB *tab);
 static int join_read_always_key(JOIN_TAB *tab);
+static int join_read_last_key(JOIN_TAB *tab);
 static int join_no_more_records(READ_RECORD *info);
 static int join_read_next(READ_RECORD *info);
 static int join_init_quick_read_record(JOIN_TAB *tab);
 static int test_if_quick_select(JOIN_TAB *tab);
 static int join_init_read_record(JOIN_TAB *tab);
-static int join_init_read_first_with_key(JOIN_TAB *tab);
-static int join_init_read_next_with_key(READ_RECORD *info);
-static int join_init_read_last_with_key(JOIN_TAB *tab);
-static int join_init_read_prev_with_key(READ_RECORD *info);
+static int join_read_first(JOIN_TAB *tab);
+static int join_read_next(READ_RECORD *info);
+static int join_read_next_same(READ_RECORD *info);
+static int join_read_last(JOIN_TAB *tab);
+static int join_read_prev_same(READ_RECORD *info);
+static int join_read_prev(READ_RECORD *info);
 static int join_ft_read_first(JOIN_TAB *tab);
 static int join_ft_read_next(READ_RECORD *info);
 static COND *make_cond_for_table(COND *cond,table_map table,

@@ -180,7 +183,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
              ulong select_options,select_result *result)
 {
   TABLE *tmp_table;
-  int error,tmp;
+  int error, tmp_error, tmp;
   bool need_tmp,hidden_group_fields;
   bool simple_order,simple_group,no_order, skip_sort_order;
   Item::cond_result cond_value;
@@ -675,8 +678,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
 
     /* Copy data to the temporary table */
     thd->proc_info="Copying to tmp table";
-    if (do_select(&join,(List<Item> *) 0,tmp_table,0))
+    if ((tmp_error=do_select(&join,(List<Item> *) 0,tmp_table,0)))
+    {
+      error=tmp_error;
       goto err;                                 /* purecov: inspected */
+    }
     if (join.having)
       join.having=having=0;                     // Allready done
 

@@ -749,9 +755,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
       group=0;
     }
     thd->proc_info="Copying to group table";
+    tmp_error= -1;
     if (make_sum_func_list(&join,all_fields) ||
-        do_select(&join,(List<Item> *) 0,tmp_table2,0))
+        (tmp_error=do_select(&join,(List<Item> *) 0,tmp_table2,0)))
     {
+      error=tmp_error;
       free_tmp_table(thd,tmp_table2);
       goto err;                                 /* purecov: inspected */
     }

@@ -2510,7 +2518,7 @@ make_join_readinfo(JOIN *join,uint options)
       tab->quick=0;
       table->file->index_init(tab->ref.key);
       tab->read_first_record= join_read_always_key;
-      tab->read_record.read_record= join_read_next;
+      tab->read_record.read_record= join_read_next_same;
       if (table->used_keys & ((key_map) 1 << tab->ref.key) &&
           !table->no_keyread)
       {

@@ -2585,7 +2593,7 @@ make_join_readinfo(JOIN *join,uint options)
         {                                       // Only read index tree
           tab->index=find_shortest_key(table, table->used_keys);
           tab->table->file->index_init(tab->index);
-          tab->read_first_record= join_init_read_first_with_key;
+          tab->read_first_record= join_read_first;
           tab->type=JT_NEXT;            // Read with index_first / index_next
         }
       }

@@ -3641,6 +3649,10 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
     {
       if (field->flags & GROUP_FLAG && !using_unique_constraint)
       {
+        /*
+          We have to reserve one byte here for NULL bits,
+          as this is updated by 'end_update()'
+        */
         *pos++=0;                               // Null is stored here
         recinfo->length=1;
         recinfo->type=FIELD_NORMAL;
@@ -3729,14 +3741,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
       if (maybe_null)
       {
         /*
-          To be able to group on NULL, we move the null bit to be
-          just before the column and extend the key to cover the null bit
+          To be able to group on NULL, we reserve place in group_buff
+          for the NULL flag just before the column.
+          The field data is after this flag.
+          The NULL flag is updated by 'end_update()' and 'end_write()'
         */
+        *group_buff= 0;                         // Init null byte
-        key_part_info->offset--;
-        key_part_info->length++;
-        group->field->move_field((char*) group_buff+1, (uchar*) group_buff,
-                                 1);
-        keyinfo->flags|= HA_NULL_ARE_EQUAL;     // def. that NULL == NULL
+        key_part_info->null_bit=field->null_bit;
+        key_part_info->null_offset= (uint) (field->null_ptr -
+                                            (uchar*) table->record[0]);
+        group->field->move_field((char*) ++group->buff);
       }
       else
        group->field->move_field((char*) group_buff);
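The change above reserves one byte in group_buff in front of each nullable GROUP BY column and stores the field's NULL flag there, so NULL and non-NULL values fall into different groups while two NULLs still compare equal. A rough sketch of such a (flag byte, value) key layout; the buffer format is simplified and is not the server's exact one.

    #include <cstring>
    #include <iostream>

    static void pack(unsigned char *buff, bool is_null, int value)
    {
      buff[0]= is_null ? 1 : 0;                 // flag byte comes first
      if (is_null)
        std::memset(buff+1, 0, sizeof(value));  // keep NULL keys comparable
      else
        std::memcpy(buff+1, &value, sizeof(value));
    }

    int main()
    {
      unsigned char a[1+sizeof(int)], b[1+sizeof(int)];
      pack(a, false, 42);
      pack(b, true,  0);
      std::cout << (std::memcmp(a, b, sizeof(a)) != 0) << "\n";  // 1: 42 and NULL differ
      pack(b, false, 42);
      std::cout << (std::memcmp(a, b, sizeof(a)) == 0) << "\n";  // 1: equal values group together
      return 0;
    }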
@@ -3892,10 +3906,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
     for (uint i=0; i < keyinfo->key_parts ; i++,seg++)
     {
       Field *field=keyinfo->key_part[i].field;
-      seg->flag=0;
-      seg->language=MY_CHARSET_CURRENT;
-      seg->length=keyinfo->key_part[i].length;
-      seg->start=keyinfo->key_part[i].offset;
+      seg->flag= 0;
+      seg->language= MY_CHARSET_CURRENT;
+      seg->length= keyinfo->key_part[i].length;
+      seg->start= keyinfo->key_part[i].offset;
       if (field->flags & BLOB_FLAG)
       {
         seg->type=

@@ -3916,11 +3930,17 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
             keyinfo->key_part[i].length > 4)
           seg->flag|=HA_SPACE_PACK;
       }
-      if (using_unique_constraint &&
-          !(field->flags & NOT_NULL_FLAG))
+      if (!(field->flags & NOT_NULL_FLAG))
       {
         seg->null_bit= field->null_bit;
         seg->null_pos= (uint) (field->null_ptr - (uchar*) table->record[0]);
+        /*
+          We are using a GROUP BY on something that contains NULL
+          In this case we have to tell MyISAM that two NULL should
+          on INSERT be compared as equal
+        */
+        if (!using_unique_constraint)
+          keydef.flag|= HA_NULL_ARE_EQUAL;
       }
     }
   }

@@ -4058,9 +4078,12 @@ bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
 }
 
 
-/*****************************************************************************
-** Make a join of all tables and write it on socket or to table
-*****************************************************************************/
+/****************************************************************************
+  Make a join of all tables and write it on socket or to table
+  Return:  0 if ok
+           1 if error is sent
+          -1 if error should be sent
+****************************************************************************/
 
 static int
 do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
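The rewritten comment block documents do_select()'s return convention: 0 on success, 1 when an error has already been sent to the client, and -1 when the caller still has to send it. A tiny sketch of a caller honouring that convention; the functions below are invented, only the convention itself is taken from the diff.

    #include <iostream>

    static int do_work(int pretend_result) { return pretend_result; }

    static void caller(int what)
    {
      int res=do_work(what);
      if (res == 0)
        std::cout << "ok\n";
      else if (res == 1)
        std::cout << "error already sent, nothing more to do\n";
      else                                            // res == -1
        std::cout << "sending the error to the client now\n";
    }

    int main() { caller(0); caller(1); caller(-1); return 0; }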
@@ -4137,15 +4160,21 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
     if (error == -3)
       error=0;                                  /* select_limit used */
   }
 
+  /* Return 1 if error is sent;  -1 if error should be sent */
   if (error < 0)
-    join->result->send_error(0,NullS); /* purecov: inspected */
+  {
+    join->result->send_error(0,NullS);          /* purecov: inspected */
+    error=1;                                    // Error sent
+  }
   else
   {
-    if (!table)                                 // If sending data to client
      error=0;
+    if (!table)                                 // If sending data to client
+    {
       join_free(join);                          // Unlock all cursors
       if (join->result->send_eof())
-        error= -1;
+        error= 1;                               // Don't send error
+    }
     DBUG_PRINT("info",("%ld records output",join->send_records));
   }

@@ -4162,10 +4191,10 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
       my_errno=tmp;
       error= -1;
     }
-    if (error != old_error)
+    if (error == -1)
       table->file->print_error(my_errno,MYF(0));
   }
-  DBUG_RETURN(error < 0);
+  DBUG_RETURN(error);
 }
 
 
@@ -4497,6 +4526,35 @@ join_read_always_key(JOIN_TAB *tab)
   return 0;
 }
 
+/*
+  This function is used when optimizing away ORDER BY in
+  SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
+*/
+
+static int
+join_read_last_key(JOIN_TAB *tab)
+{
+  int error;
+  TABLE *table= tab->table;
+
+  if (cp_buffer_from_ref(&tab->ref))
+    return -1;
+  if ((error=table->file->index_read_last(table->record[0],
+                                          tab->ref.key_buff,
+                                          tab->ref.key_length)))
+  {
+    if (error != HA_ERR_KEY_NOT_FOUND)
+    {
+      sql_print_error("read_const: Got error %d when reading table %s",error,
+                      table->path);
+      table->file->print_error(error,MYF(0));
+      return 1;
+    }
+    return -1;                                  /* purecov: inspected */
+  }
+  return 0;
+}
+
 
 /* ARGSUSED */
 static int
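join_read_last_key() positions on the last index entry matching the ref key, and join_read_prev_same() (added further down) then steps backwards while the key still matches, which is how SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC can run without a filesort. A standalone sketch of that reverse walk, with std::multimap standing in for the index; none of this is server code.

    #include <iostream>
    #include <map>
    #include <utility>

    int main()
    {
      std::multimap<int,int> idx;
      idx.insert(std::make_pair(1,10));
      idx.insert(std::make_pair(1,20));
      idx.insert(std::make_pair(1,30));
      idx.insert(std::make_pair(2,40));

      std::pair<std::multimap<int,int>::iterator,
                std::multimap<int,int>::iterator> range=idx.equal_range(1);
      // Reverse walk over all rows with a=1, i.e. ORDER BY ... DESC without a sort.
      for (std::multimap<int,int>::iterator it=range.second; it != range.first; )
      {
        --it;
        std::cout << it->first << "," << it->second << "\n";   // 1,30  1,20  1,10
      }
      return 0;
    }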
@@ -4507,7 +4565,7 @@ join_no_more_records(READ_RECORD *info __attribute__((unused)))
 
 
 static int
-join_read_next(READ_RECORD *info)
+join_read_next_same(READ_RECORD *info)
 {
   int error;
   TABLE *table= info->table;

@@ -4530,6 +4588,37 @@ join_read_next(READ_RECORD *info)
   return 0;
 }
 
+static int
+join_read_prev_same(READ_RECORD *info)
+{
+  int error;
+  TABLE *table= info->table;
+  JOIN_TAB *tab=table->reginfo.join_tab;
+
+  if ((error=table->file->index_prev(table->record[0])))
+  {
+    if (error != HA_ERR_END_OF_FILE)
+    {
+      sql_print_error("read_next: Got error %d when reading table %s",error,
+                      table->path);
+      table->file->print_error(error,MYF(0));
+      error= 1;
+    }
+    else
+    {
+      table->status= STATUS_GARBAGE;
+      error= -1;
+    }
+  }
+  else if (key_cmp(table, tab->ref.key_buff, tab->ref.key,
+                   tab->ref.key_length))
+  {
+    table->status=STATUS_NOT_FOUND;
+    error= 1;
+  }
+  return error;
+}
+
 
 static int
 join_init_quick_read_record(JOIN_TAB *tab)

@@ -4560,7 +4649,7 @@ join_init_read_record(JOIN_TAB *tab)
 }
 
 static int
-join_init_read_first_with_key(JOIN_TAB *tab)
+join_read_first(JOIN_TAB *tab)
 {
   int error;
   TABLE *table=tab->table;

@@ -4571,7 +4660,7 @@ join_init_read_first_with_key(JOIN_TAB *tab)
     table->file->extra(HA_EXTRA_KEYREAD);
   }
   tab->table->status=0;
-  tab->read_record.read_record=join_init_read_next_with_key;
+  tab->read_record.read_record=join_read_next;
   tab->read_record.table=table;
   tab->read_record.file=table->file;
   tab->read_record.index=tab->index;

@@ -4591,8 +4680,9 @@ join_init_read_first_with_key(JOIN_TAB *tab)
   return 0;
 }
 
+
 static int
-join_init_read_next_with_key(READ_RECORD *info)
+join_read_next(READ_RECORD *info)
 {
   int error=info->file->index_next(info->record);
   if (error)

@@ -4609,9 +4699,8 @@ join_init_read_next_with_key(READ_RECORD *info)
   return 0;
 }
 
-
 static int
-join_init_read_last_with_key(JOIN_TAB *tab)
+join_read_last(JOIN_TAB *tab)
 {
   TABLE *table=tab->table;
   int error;

@@ -4621,7 +4710,7 @@ join_init_read_last_with_key(JOIN_TAB *tab)
     table->file->extra(HA_EXTRA_KEYREAD);
   }
   tab->table->status=0;
-  tab->read_record.read_record=join_init_read_prev_with_key;
+  tab->read_record.read_record=join_read_prev;
   tab->read_record.table=table;
   tab->read_record.file=table->file;
   tab->read_record.index=tab->index;

@@ -4641,8 +4730,9 @@ join_init_read_last_with_key(JOIN_TAB *tab)
   return 0;
 }
 
+
 static int
-join_init_read_prev_with_key(READ_RECORD *info)
+join_read_prev(READ_RECORD *info)
 {
   int error=info->file->index_prev(info->record);
   if (error)

@@ -4659,6 +4749,7 @@ join_init_read_prev_with_key(READ_RECORD *info)
   return 0;
 }
 
+
 static int
 join_ft_read_first(JOIN_TAB *tab)
 {
@@ -4734,7 +4825,8 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     if (join->select_options & OPTION_FOUND_ROWS)
     {
       JOIN_TAB *jt=join->join_tab;
-      if ((join->tables == 1) && !join->tmp_table && !join->sort_and_group && !join->send_group_parts && !join->having && !jt->select_cond )
+      if ((join->tables == 1) && !join->tmp_table && !join->sort_and_group
+          && !join->send_group_parts && !join->having && !jt->select_cond)
       {
         join->select_options ^= OPTION_FOUND_ROWS;
         join->send_records = jt->records;

@@ -4856,6 +4948,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     copy_fields(&join->tmp_table_param);
     copy_funcs(join->tmp_table_param.funcs);
 
+#ifdef TO_BE_DELETED
     if (!table->uniques)                        // If not unique handling
     {
       /* Copy null values from group to row */

@@ -4866,10 +4959,11 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
           if (item->maybe_null)
           {
             Field *field=item->tmp_table_field();
-            field->ptr[-1]= (byte) (field->is_null() ? 0 : 1);
+            field->ptr[-1]= (byte) (field->is_null() ? 1 : 0);
           }
         }
       }
+#endif
     if (!join->having || join->having->val_int())
     {
       join->found_records++;

@@ -4924,8 +5018,9 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   {
     Item *item= *group->item;
     item->save_org_in_field(group->field);
+    /* Store in the used key if the field was 0 */
     if (item->maybe_null)
-      group->buff[0]=item->null_value ? 0: 1;   // Save reversed value
+      group->buff[-1]=item->null_value ? 1 : 0;
   }
   // table->file->index_init(0);
   if (!table->file->index_read(table->record[1],
@@ -5315,6 +5410,9 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
 
   if (ref_key >= 0)
   {
+    /*
+      We come here when there is a REF key.
+    */
     int order_direction;
     uint used_key_parts;
     /* Check if we get the rows in requested sorted order by using the key */

@@ -5322,11 +5420,11 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
         (order_direction = test_if_order_by_key(order,table,ref_key,
                                                 &used_key_parts)))
     {
-      if (order_direction == -1)
+      if (order_direction == -1)                // If ORDER BY ... DESC
       {
         if (select && select->quick)
         {
-          // ORDER BY ref_key DESC
+          // ORDER BY range_key DESC
           QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick,
                                                        used_key_parts);
           if (!tmp || tmp->error)

@@ -5341,11 +5439,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
         {
           /*
             SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
-            TODO:
-            Add a new traversal function to read last matching row and
-            traverse backwards.
+
+            Use a traversal function that starts by reading the last row
+            with key part (A) and then traverse the index backwards.
           */
-          DBUG_RETURN(0);
+          if (table->file->option_flag() & HA_NOT_READ_PREFIX_LAST)
+            DBUG_RETURN(1);
+          tab->read_first_record= join_read_last_key;
+          tab->read_record.read_record= join_read_prev_same;
+          /* fall through */
         }
       }
       DBUG_RETURN(1);                   /* No need to sort */
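The ORDER BY ... DESC shortcut above is only taken when the handler does not set HA_NOT_READ_PREFIX_LAST in its option flags; otherwise the optimizer keeps the sort. A minimal sketch of that bit-flag capability test; only the 256 value is taken from the define in this diff, the helper function is invented for the illustration.

    #include <iostream>

    static const unsigned HA_NOT_READ_PREFIX_LAST_FLAG= 256;

    static bool can_optimize_desc(unsigned option_flags)
    {
      return !(option_flags & HA_NOT_READ_PREFIX_LAST_FLAG);
    }

    int main()
    {
      std::cout << can_optimize_desc(0)   << " "     // 1: read the index backwards
                << can_optimize_desc(256) << "\n";   // 0: keep the filesort
      return 0;
    }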
@@ -5377,8 +5479,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
       if (!no_changes)
       {
         tab->index=nr;
-        tab->read_first_record= (flag > 0 ? join_init_read_first_with_key:
-                                 join_init_read_last_with_key);
+        tab->read_first_record= (flag > 0 ? join_read_first:
+                                 join_read_last);
         table->file->index_init(nr);
         tab->type=JT_NEXT;      // Read with index_first(), index_next()
         if (table->used_keys & ((key_map) 1 << nr))

@@ -6369,7 +6471,8 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables)
 static void
 calc_group_buffer(JOIN *join,ORDER *group)
 {
-  uint key_length=0,parts=0;
+  uint key_length=0, parts=0, null_parts=0;
+
   if (group)
     join->group= 1;
   for (; group ; group=group->next)

@@ -6390,10 +6493,11 @@ calc_group_buffer(JOIN *join,ORDER *group)
       key_length+=(*group->item)->max_length;
     parts++;
     if ((*group->item)->maybe_null)
-      key_length++;
+      null_parts++;
   }
-  join->tmp_table_param.group_length=key_length;
+  join->tmp_table_param.group_length=key_length+null_parts;
   join->tmp_table_param.group_parts=parts;
+  join->tmp_table_param.group_null_parts=null_parts;
 }
 
 
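calc_group_buffer() now counts one extra byte per nullable GROUP BY item, so the group buffer becomes key_length + null_parts bytes. A worked example with made-up column sizes:

    #include <iostream>

    int main()
    {
      unsigned key_length= 4+8;                  // sum of max_length of the GROUP BY items
      unsigned parts= 2;                         // number of GROUP BY items
      unsigned null_parts= 1;                    // nullable items get one flag byte each
      std::cout << "group_length=" << key_length+null_parts   // 13
                << " group_parts=" << parts
                << " group_null_parts=" << null_parts << "\n";
      return 0;
    }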
@@ -127,12 +127,13 @@ class TMP_TABLE_PARAM {
   ha_rows end_write_records;
   uint field_count,sum_func_count,func_count;
   uint hidden_field_count;
-  uint group_parts,group_length;
+  uint group_parts,group_length,group_null_parts;
   uint quick_group;
   bool using_indirect_summary_function;
 
   TMP_TABLE_PARAM()
-    :copy_funcs_it(copy_funcs), copy_field(0), group_parts(0), group_length(0)
+    :copy_funcs_it(copy_funcs), copy_field(0), group_parts(0),
+     group_length(0), group_null_parts(0)
   {}
   ~TMP_TABLE_PARAM()
   {