Mirror of https://github.com/MariaDB/server.git (synced 2025-01-29 02:05:57 +01:00)
Fix for FLUSH QUERY CACHE
Fix for new bug in CREATE TABLE when sorting keys.

Docs/manual.texi:    Added FOUND_ROWS() section.
include/my_base.h:   Spatial index
include/myisam.h:    Spatial index
include/mysql_com.h: Spatial index
myisam/ft_dump.c:    Remove warnings.
mysys/hash.c:        Fix for SUNPRO_C (Sun's Forte compiler)
sql/sql_cache.cc:    Fix for FLUSH QUERY CACHE.
sql/sql_cache.h:     Fix for FLUSH QUERY CACHE.
sql/sql_table.cc:    Fix for new bug when sorting keys.
Parent: f06d80f679    Commit: e9b750d1a6
9 changed files with 195 additions and 37 deletions
Docs/manual.texi
@@ -5393,6 +5393,10 @@ You will need the following:
A 32-bit Windows operating system from the Win9x, Me, NT, or
Windows 2000 family. The NT family permits running the MySQL server
as a service. @xref{NT start}.

If you want to use tables bigger than 4G, you should install MySQL
on NTFS or a newer file system. Don't forget to use @code{MAX_ROWS} and
@code{AVG_ROW_LENGTH} when you create the table. @xref{CREATE TABLE}.
@item
TCP/IP protocol support.
@item
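As an illustration of the @code{MAX_ROWS} / @code{AVG_ROW_LENGTH} advice above, here is a minimal sketch of creating such a table through the MySQL C API. The table and column names and the connection handle are assumptions for the example, not part of this change.

    #include <stdio.h>
    #include <mysql.h>

    /* Create a table that may grow past 4G: giving MAX_ROWS and
       AVG_ROW_LENGTH up front lets MyISAM size its row pointers for it.
       Assumes 'conn' is an already connected MYSQL handle. */
    int create_big_table(MYSQL *conn)
    {
      const char *sql =
        "CREATE TABLE big_log ("
        "  id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,"
        "  msg TEXT"
        ") MAX_ROWS=1000000000 AVG_ROW_LENGTH=200";
      if (mysql_query(conn, sql))
      {
        fprintf(stderr, "CREATE TABLE failed: %s\n", mysql_error(conn));
        return 1;
      }
      return 0;
    }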
@@ -32218,6 +32222,25 @@ returns immediately. The return value is the number of log events it had to
wait to get to the specified position, or NULL in case of error. Useful for
control of master-slave synchronisation, but was originally written to
facilitate replication testing.

@findex FOUND_ROWS()
@findex LIMIT
@item FOUND_ROWS()
Returns the number of rows the last @code{SELECT CALC_FOUND_ROWS ...}
statement would have returned if it had not been restricted with
@code{LIMIT}.

@example
SELECT CALC_FOUND_ROWS * FROM table_name WHERE id > 100 LIMIT 10;
SELECT FOUND_ROWS();
@end example

The second @code{SELECT} returns how many rows the first @code{SELECT}
would have returned without the @code{LIMIT} clause.

Note that if you are using @code{SELECT CALC_FOUND_ROWS ...}, MySQL has
to calculate all rows in the result set. This is, however, faster than
running the same query without @code{LIMIT}, because the result set does
not have to be sent to the client.
@end table

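A minimal C API sketch of the two-statement pattern documented above: run the @code{CALC_FOUND_ROWS} query for one page of rows, then ask @code{FOUND_ROWS()} for the unrestricted count. The table name and the connected handle are assumptions for the example.

    #include <stdio.h>
    #include <mysql.h>

    /* Fetch one page of rows, then read how many rows the query would
       have matched without the LIMIT. Assumes 'conn' is connected. */
    int page_with_total(MYSQL *conn)
    {
      MYSQL_RES *res;
      MYSQL_ROW row;

      if (mysql_query(conn,
            "SELECT CALC_FOUND_ROWS * FROM table_name WHERE id > 100 LIMIT 10"))
        return 1;
      res = mysql_store_result(conn);     /* the 10 (or fewer) page rows */
      if (res)
        mysql_free_result(res);

      if (mysql_query(conn, "SELECT FOUND_ROWS()"))
        return 1;
      res = mysql_store_result(conn);
      if (res && (row = mysql_fetch_row(res)))
        printf("Rows without LIMIT: %s\n", row[0]);  /* total match count */
      if (res)
        mysql_free_result(res);
      return 0;
    }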
@@ -32404,7 +32427,7 @@ mysql> SELECT id,FLOOR(value/100) FROM tbl_name ORDER BY RAND();
@c help SELECT
@example
SELECT [STRAIGHT_JOIN] [SQL_SMALL_RESULT] [SQL_BIG_RESULT] [SQL_BUFFER_RESULT]
       [SQL_CACHE | SQL_NO_CACHE] [HIGH_PRIORITY]
       [SQL_CACHE | SQL_NO_CACHE] [CALC_FOUND_ROWS] [HIGH_PRIORITY]
       [DISTINCT | DISTINCTROW | ALL]
    select_expression,...
    [INTO @{OUTFILE | DUMPFILE@} 'file_name' export_options]
@@ -32561,6 +32584,12 @@ result set will be small. In this case, MySQL will use fast
temporary tables to store the resulting table instead of using sorting. In
MySQL Version 3.23 this shouldn't normally be needed.

@item
@code{CALC_FOUND_ROWS} tells MySQL to calculate how many rows there
would be in the result, disregarding any @code{LIMIT} clause. The number
of rows can be obtained with @code{SELECT
FOUND_ROWS()}. @xref{Miscellaneous functions}.

@item
@code{SQL_CACHE} tells MySQL to store the query result in the query cache
if you are using @code{SQL_QUERY_CACHE_TYPE=2} (@code{DEMAND}).
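For illustration, a short C API sketch of issuing a query with the @code{SQL_CACHE} hint so that it is stored in the query cache under the on-demand caching mode described above. The table and column names are assumptions.

    #include <stdio.h>
    #include <mysql.h>

    /* With the query cache in DEMAND mode, only statements carrying the
       SQL_CACHE hint are cached; everything else bypasses the cache.
       Assumes 'conn' is an already connected MYSQL handle. */
    int cached_lookup(MYSQL *conn)
    {
      MYSQL_RES *res;
      if (mysql_query(conn,
            "SELECT SQL_CACHE name, price FROM products WHERE id = 42"))
      {
        fprintf(stderr, "query failed: %s\n", mysql_error(conn));
        return 1;
      }
      res = mysql_store_result(conn);
      if (res)
        mysql_free_result(res);           /* drain the result */
      return 0;
    }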
@@ -34493,12 +34522,14 @@ original tables, MySQL will not allow concurrent inserts during
@item
The @code{RAID_TYPE} option will help you to break the 2G/4G limit for
the MyISAM data file (not the index file) on operating systems that
don't support big files.
You can get more speed from the I/O bottleneck by putting
@code{RAID} directories on different physical disks. @code{RAID_TYPE}
will work on any OS, as long as you have configured MySQL with
@code{--with-raid}. For now the only allowed @code{RAID_TYPE} is
@code{STRIPED} (@code{1} and @code{RAID0} are aliases for this).
don't support big files. Note that this option is not recommended for
file systems that support big files!

You can get more speed from the I/O bottleneck by putting @code{RAID}
directories on different physical disks. @code{RAID_TYPE} will work on
any OS, as long as you have configured MySQL with @code{--with-raid}.
For now the only allowed @code{RAID_TYPE} is @code{STRIPED} (@code{1}
and @code{RAID0} are aliases for this).

If you specify @code{RAID_TYPE=STRIPED} for a @code{MyISAM} table,
@code{MyISAM} will create @code{RAID_CHUNKS} subdirectories named 00,
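A hedged sketch of creating a striped MyISAM table as described above, sent through the C API. The table definition and the chunk count are illustrative assumptions, and the server must have been built with @code{--with-raid}.

    #include <stdio.h>
    #include <mysql.h>

    /* Spread the data file over RAID_CHUNKS subdirectories so the table
       can grow past a 2G/4G file-size limit. Assumes 'conn' is an
       already connected MYSQL handle and the server was configured with
       --with-raid. */
    int create_striped_table(MYSQL *conn)
    {
      const char *sql =
        "CREATE TABLE big_data ("
        "  id INT NOT NULL,"
        "  payload BLOB"
        ") RAID_TYPE=STRIPED RAID_CHUNKS=4";
      if (mysql_query(conn, sql))
      {
        fprintf(stderr, "CREATE TABLE failed: %s\n", mysql_error(conn));
        return 1;
      }
      return 0;
    }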
@@ -43274,7 +43305,8 @@ No UDF functions.
@item
No stack trace on core dump.
@item
No internal RAID support.
No internal RAID support. (This is not normally needed, as most
operating systems nowadays support big files.)
@item
You can set this up as a server or a master (no replication).
@item
@@ -47996,6 +48028,8 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.

@itemize @bullet
@item
Fixed bug in @code{FLUSH QUERY CACHE}.
@item
Added @code{CAST()} and @code{CONVERT()} functions.
@item
Changed order of how keys are created in tables.
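Since the changelog entry above refers to @code{FLUSH QUERY CACHE}, here is a minimal C API sketch of issuing it from a client. The statement defragments the query cache rather than emptying it, per its documented behaviour; whether that description matches this exact source version is an assumption.

    #include <stdio.h>
    #include <mysql.h>

    /* Ask the server to defragment its query cache. Requires a user with
       the RELOAD privilege; assumes 'conn' is already connected. */
    int flush_query_cache(MYSQL *conn)
    {
      if (mysql_query(conn, "FLUSH QUERY CACHE"))
      {
        fprintf(stderr, "FLUSH QUERY CACHE failed: %s\n", mysql_error(conn));
        return 1;
      }
      return 0;
    }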
include/my_base.h
@@ -56,7 +56,19 @@ enum ha_rkey_function {
  HA_READ_AFTER_KEY,    /* Find next rec. after key-record */
  HA_READ_BEFORE_KEY,   /* Find next rec. before key-record */
  HA_READ_PREFIX,       /* Key which as same prefix */
  HA_READ_PREFIX_LAST   /* Last key with the same prefix */
  HA_READ_PREFIX_LAST,  /* Last key with the same prefix */
  HA_READ_MBR_CONTAIN,
  HA_READ_MBR_INTERSECT,
  HA_READ_MBR_WITHIN,
  HA_READ_MBR_DISJOINT,
  HA_READ_MBR_EQUAL
};

        /* Key algorithm types */

enum ha_key_alg {
  HA_KEY_ALG_BTREE=0,   /* B-tree, default one */
  HA_KEY_ALG_RTREE=1    /* R-tree, for spatial searches */
};

        /* The following is parameter to ha_extra() */
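The new @code{HA_READ_MBR_*} read modes name relations between minimum bounding rectangles (MBRs). As a rough standalone illustration of what those relations mean (this is not the MyISAM R-tree code):

    #include <stdio.h>

    /* A 2-D minimum bounding rectangle: the smallest axis-aligned box
       that encloses a geometry. Illustrative only. */
    typedef struct { double xmin, ymin, xmax, ymax; } mbr_t;

    /* b lies completely inside a (the CONTAIN relation from a's view). */
    static int mbr_contains(const mbr_t *a, const mbr_t *b)
    {
      return a->xmin <= b->xmin && a->ymin <= b->ymin &&
             a->xmax >= b->xmax && a->ymax >= b->ymax;
    }

    /* a and b share at least one point (the INTERSECT relation). */
    static int mbr_intersects(const mbr_t *a, const mbr_t *b)
    {
      return a->xmin <= b->xmax && b->xmin <= a->xmax &&
             a->ymin <= b->ymax && b->ymin <= a->ymax;
    }

    /* a and b share no point at all (the DISJOINT relation). */
    static int mbr_disjoint(const mbr_t *a, const mbr_t *b)
    {
      return !mbr_intersects(a, b);
    }

    int main(void)
    {
      mbr_t big = {0, 0, 10, 10}, small = {2, 2, 3, 3};
      printf("contain=%d intersect=%d disjoint=%d\n",
             mbr_contains(&big, &small), mbr_intersects(&big, &small),
             mbr_disjoint(&big, &small));
      return 0;
    }

WITHIN is the same test as CONTAIN with the arguments swapped, and EQUAL holds when all four coordinates match.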
@@ -136,6 +148,8 @@ enum ha_base_keytype {
#define HA_BINARY_PACK_KEY 32 /* Packing of all keys to prev key */
#define HA_FULLTEXT 128 /* SerG: for full-text search */
#define HA_UNIQUE_CHECK 256 /* Check the key for uniqueness */
#define HA_SPATIAL 1024 /* Alex Barkov: for spatial search */


        /* Automatic bits in key-flag */

@@ -239,6 +253,12 @@ enum ha_base_keytype {
#define SEARCH_UPDATE 64
#define SEARCH_PREFIX 128
#define SEARCH_LAST 256
#define MBR_CONTAIN 512
#define MBR_INTERSECT 1024
#define MBR_WITHIN 2048
#define MBR_DISJOINT 4096
#define MBR_EQUAL 8192
#define MBR_DATA 16384

        /* bits in opt_flag */
#define QUICK_USED 1
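The @code{SEARCH_*} and @code{MBR_*} constants are powers of two, so a search mode can be built by OR-ing several of them into one flag word and tested with a bitwise AND. A small standalone illustration (not code from this tree):

    #include <stdio.h>

    /* Same power-of-two style as the SEARCH_* / MBR_* defines above. */
    #define MBR_CONTAIN    512
    #define MBR_INTERSECT 1024
    #define MBR_DATA     16384

    int main(void)
    {
      unsigned int search_flag = MBR_INTERSECT | MBR_DATA;  /* combine modes */

      if (search_flag & MBR_INTERSECT)        /* test one bit */
        printf("intersect search requested\n");
      if (!(search_flag & MBR_CONTAIN))
        printf("containment not requested\n");
      return 0;
    }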
include/myisam.h
@@ -126,6 +126,7 @@ typedef struct st_mi_keydef /* Key definition with open & info */
  uint16 keysegs;                 /* Number of key-segment */
  uint16 flag;                    /* NOSAME, PACK_USED */

  uint8  key_alg;                 /* BTREE, RTREE */
  uint16 block_length;            /* Length of keyblock (auto) */
  uint16 underflow_block_length;  /* When to execute underflow */
  uint16 keylength;               /* Tot length of keyparts (auto) */
include/mysql_com.h
@@ -150,7 +150,8 @@ enum enum_field_types { FIELD_TYPE_DECIMAL, FIELD_TYPE_TINY,
  FIELD_TYPE_LONG_BLOB=251,
  FIELD_TYPE_BLOB=252,
  FIELD_TYPE_VAR_STRING=253,
  FIELD_TYPE_STRING=254
  FIELD_TYPE_STRING=254,
  FIELD_TYPE_GEOMETRY=255
};

#define FIELD_TYPE_CHAR FIELD_TYPE_TINY /* For compability */
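Since @code{FIELD_TYPE_GEOMETRY} lives in the client protocol header, a client can recognise such columns in result metadata. A minimal sketch, assuming @code{res} came from an already executed query; the reporting itself is illustrative.

    #include <stdio.h>
    #include <mysql.h>

    /* Walk the result metadata and report which columns are geometry
       columns. Assumes 'res' was returned by mysql_store_result(). */
    void report_geometry_columns(MYSQL_RES *res)
    {
      MYSQL_FIELD *field;
      while ((field = mysql_fetch_field(res)))
      {
        if (field->type == FIELD_TYPE_GEOMETRY)
          printf("column '%s' holds geometry data\n", field->name);
      }
    }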
myisam/ft_dump.c
@@ -33,7 +33,7 @@ static uint lengths[256];
int main(int argc,char *argv[])
{
  int error=0;
  uint keylen, keylen2, inx, doc_cnt=0;
  uint keylen, keylen2=0, inx, doc_cnt=0;
  float weight;
  double gws, min_gws=0, avg_gws=0;
  MI_INFO *info;
@@ -151,7 +151,7 @@ int main(int argc,char *argv[])
  for (inx=0;inx<256;inx++)
  {
    count+=lengths[inx];
    if (count >= total/2)
    if ((ulong) count >= total/2)
      break;
  }
  printf("Total rows: %qu\nTotal words: %lu\n"
@@ -170,7 +170,8 @@ int main(int argc,char *argv[])
      count+=lengths[inx];
      if (count && lengths[inx])
        printf("%3u: %10lu %5.2f%% %20lu %4.1f%%\n", inx,
               lengths[inx],100.0*lengths[inx]/total,count, 100.0*count/total);
               (ulong) lengths[inx],100.0*lengths[inx]/total,(ulong) count,
               100.0*count/total);
    }
  }
}
mysys/hash.c
@@ -177,7 +177,7 @@ uint calc_hashnr_caseup(const byte *key, uint len)
#endif


#ifndef _FORTREC_
#ifndef __SUNPRO_C /* SUNPRO can't handle this */
inline
#endif
uint rec_hashnr(HASH *hash,const byte *record)
128  sql/sql_cache.cc
@@ -615,8 +615,8 @@ void query_cache_abort(NET *net)
      query_cache.free_query(query_block);
    }
    net->query_cache_query=0;
    DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
    STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
    DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0););
  }
  DBUG_VOID_RETURN;
}
@@ -1153,6 +1153,8 @@ void Query_cache::flush()
    flush_cache();
    DUMP(this);
  }

  DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
  STRUCT_UNLOCK(&structure_guard_mutex);
  DBUG_VOID_RETURN;
}
@@ -2407,13 +2409,15 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len,

void Query_cache::pack_cache()
{
  DBUG_ENTER("Query_cache::pack_cache");
  STRUCT_LOCK(&structure_guard_mutex);
  DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););

  byte *border = 0;
  Query_cache_block *before = 0;
  ulong gap = 0;
  my_bool ok = 1;
  Query_cache_block *block = first_block;
  DBUG_ENTER("Query_cache::pack_cache");
  DUMP(this);

  if (first_block)
@@ -2438,6 +2442,8 @@ void Query_cache::pack_cache()
    }
    DUMP(this);
  }

  DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
  STRUCT_UNLOCK(&structure_guard_mutex);
  DBUG_VOID_RETURN;
}
@@ -2502,22 +2508,14 @@ my_bool Query_cache::move_by_type(byte **border,

      Query_cache_block_table *nlist_root = new_block->table(0);
      nlist_root->n = 0;
      if (tnext == list_root)
      {
        nlist_root->next = nlist_root;
        nlist_root->prev = nlist_root;
      }
      else
      {
        nlist_root->next = tnext;
        tnext->prev = nlist_root;
      }
      if (tprev != list_root)
      {
        nlist_root->prev = tnext;
        tprev->next = nlist_root;
      }

      nlist_root->next = tnext;
      tnext->prev = nlist_root;
      nlist_root->prev = tprev;
      tprev->next = nlist_root;
      DBUG_PRINT("qcache",
                 ("list_root: 0x%lx tnext 0x%lx tprev 0x%lx tprev->next 0x%lx tnext->prev 0x%lx",
                  (ulong) list_root, (ulong) tnext, (ulong) tprev,
                  (ulong)tprev->next, (ulong)tnext->prev));
      /*
        Go through all queries that uses this table and change them to
        point to the new table object
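The rewritten block above puts a new table node in the place of an old one in a circular, doubly linked table list by unconditionally rewiring the four neighbouring pointers, replacing the earlier branchy version. A standalone sketch of the same relinking idea on a generic node type (not the query-cache structures themselves):

    #include <stddef.h>

    /* Minimal circular doubly linked node, illustrative only. */
    typedef struct node { struct node *next, *prev; int id; } node_t;

    /* Put new_node in the position old_node occupied, assuming the list
       holds at least one other node: take over the old node's neighbours
       and point them back at the new node. */
    static void replace_node(node_t *old_node, node_t *new_node)
    {
      node_t *tnext = old_node->next;
      node_t *tprev = old_node->prev;

      new_node->next = tnext;
      tnext->prev    = new_node;
      new_node->prev = tprev;
      tprev->next    = new_node;
    }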
@@ -3076,10 +3074,23 @@ my_bool Query_cache::check_integrity(bool not_locked)
    case Query_cache_block::TABLE:
      if (in_list(tables_blocks[block->table()->type()], block, "tables"))
        result = 1;
      if (in_table_list(block->table(0), block->table(0), "table list root"))
        result = 1;
      break;
    case Query_cache_block::QUERY:
      if (in_list(queries_blocks, block, "query"))
        result = 1;
      for (TABLE_COUNTER_TYPE j=0; j < block->n_tables; j++)
      {
        Query_cache_block_table *block_table = block->table(j);
        Query_cache_block_table *block_table_root =
          (Query_cache_block_table *)
          (((byte*)block_table->parent) -
           ALIGN_SIZE(sizeof(Query_cache_block_table)));

        if (in_table_list(block_table, block_table_root, "table list"))
          result = 1;
      }
      break;
    case Query_cache_block::RES_INCOMPLETE:
      // This type of block can be not lincked yet (in multithread environment)
@@ -3341,4 +3352,85 @@ err2:
  return result;
}


void dump_node(Query_cache_block_table * node,
               const char * call, const char * descr)
{
  DBUG_PRINT("qcache", ("%s: %s: node: 0x%lx", call, descr, (ulong) node));
  DBUG_PRINT("qcache", ("%s: %s: node block: 0x%lx",
                        call, descr, (ulong) node->block()));
  DBUG_PRINT("qcache", ("%s: %s: next: 0x%lx", call, descr,
                        (ulong) node->next));
  DBUG_PRINT("qcache", ("%s: %s: prev: 0x%lx", call, descr,
                        (ulong) node->prev));
}

my_bool Query_cache::in_table_list(Query_cache_block_table * root,
                                   Query_cache_block_table * point,
                                   const char *name)
{
  my_bool result = 0;
  Query_cache_block_table *table = point;
  dump_node(root, name, "parameter root");
  // back
  do
  {
    dump_node(table, name, "list element << ");
    if (table->prev->next != table)
    {
      DBUG_PRINT("error",
                 ("table 0x%lx(0x%lx) in list '%s' 0x%lx(0x%lx) is incorrectly linked, prev table 0x%lx(0x%lx) referred as next to 0x%lx(0x%lx) (check from 0x%lx(0x%lx))",
                  (ulong) table, (ulong) table->block(), name,
                  (ulong) root, (ulong) root->block(),
                  (ulong) table->prev, (ulong) table->prev->block(),
                  (ulong) table->prev->next,
                  (ulong) table->prev->next->block(),
                  (ulong) point, (ulong) point->block()));
      // back trace
      for (; table != point; table = table->next)
        DBUG_PRINT("error", ("back trace 0x%lx(0x%lx)",
                             (ulong) table, (ulong) table->block()));
      result = 1;
      goto err1;
    }
    table = table->prev;
  } while (table != root && table != point);
  if (table != root)
  {
    DBUG_PRINT("error",
               ("table 0x%lx(0x%lx) (0x%lx(0x%lx)<-->0x%lx(0x%lx)) not owned by list '%s' 0x%lx(0x%lx)",
                (ulong) table, (ulong) table->block(),
                (ulong) table->prev, (ulong) table->prev->block(),
                (ulong) table->next, (ulong) table->next->block(),
                name, (ulong) root, (ulong) root->block()));
    return 1;
  }
err1:
  // forward
  table = point;
  do
  {
    dump_node(table, name, "list element >> ");
    if (table->next->prev != table)
    {
      DBUG_PRINT("error",
                 ("table 0x%lx(0x%lx) in list '%s' 0x%lx(0x%lx) is incorrectly linked, next table 0x%lx(0x%lx) referred as prev to 0x%lx(0x%lx) (check from 0x%lx(0x%lx))",
                  (ulong) table, (ulong) table->block(),
                  name, (ulong) root, (ulong) root->block(),
                  (ulong) table->next, (ulong) table->next->block(),
                  (ulong) table->next->prev,
                  (ulong) table->next->prev->block(),
                  (ulong) point, (ulong) point->block()));
      // back trace
      for (; table != point; table = table->prev)
        DBUG_PRINT("error", ("back trace 0x%lx(0x%lx)",
                             (ulong) table, (ulong) table->block()));
      result = 1;
      goto err2;
    }
    table = table->next;
  } while (table != root);
err2:
  return result;
}

#endif /* DBUG_OFF */
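The new in_table_list() above is a debug-build consistency walker for that circular table list. The invariant it checks is generic: for every node, prev->next and next->prev must point back at the node. A compact standalone sketch of the same check (illustrative, not the query-cache code):

    #include <stdio.h>

    typedef struct node { struct node *next, *prev; int id; } node_t;

    /* Walk a circular doubly linked list starting at 'root' and verify
       that every neighbour link is mirrored. Returns 0 when consistent. */
    static int check_ring(node_t *root)
    {
      node_t *cur = root;
      do
      {
        if (cur->next->prev != cur || cur->prev->next != cur)
        {
          fprintf(stderr, "node %d is incorrectly linked\n", cur->id);
          return 1;
        }
        cur = cur->next;
      } while (cur != root);
      return 0;
    }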
sql/sql_cache.h
@@ -394,6 +394,9 @@ protected:
  my_bool check_integrity(bool not_locked);
  my_bool in_list(Query_cache_block * root, Query_cache_block * point,
                  const char *name);
  my_bool in_table_list(Query_cache_block_table * root,
                        Query_cache_block_table * point,
                        const char *name);
  my_bool in_blocks(Query_cache_block * point);
};

sql/sql_table.cc
@@ -212,8 +212,6 @@ int quick_rm_table(enum db_type base,const char *db,

static int sort_keys(KEY *a, KEY *b)
{
  if (a == b) // Safety
    return 0;
  if (a->flags & HA_NOSAME)
  {
    if (!(b->flags & HA_NOSAME))
|
|||
{
|
||||
return (a->flags & HA_FULLTEXT) ? 1 : -1;
|
||||
}
|
||||
return a < b ? -1 : 1; // Prefer original key order
|
||||
/*
|
||||
Prefer original key order. usable_key_parts contains here
|
||||
the original key position.
|
||||
*/
|
||||
return ((a->usable_key_parts < b->usable_key_parts) ? -1 :
|
||||
(a->usable_key_parts > b->usable_key_parts) ? 1 :
|
||||
0);
|
||||
}
|
||||
|
||||
|
||||
|
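The comment in the hunk above explains the fix: qsort() gives no ordering guarantee for elements that compare equal, so comparing a saved original position (usable_key_parts, filled in later in this patch) keeps keys of the same class in their declaration order. Comparing element addresses, as the removed line did, stops reflecting the original order once qsort starts moving entries around. A small standalone illustration of the technique (struct and field names are assumptions):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy key descriptor: 'unique' decides the class, 'orig_pos' remembers
       the position the key had before sorting. */
    struct toy_key { int unique; unsigned int orig_pos; const char *name; };

    /* Unique keys first; inside each class fall back to the original
       position so the sort behaves as if it were stable. */
    static int cmp_keys(const void *pa, const void *pb)
    {
      const struct toy_key *a = pa, *b = pb;
      if (a->unique != b->unique)
        return a->unique ? -1 : 1;
      return (a->orig_pos < b->orig_pos) ? -1 :
             (a->orig_pos > b->orig_pos) ? 1 : 0;
    }

    int main(void)
    {
      struct toy_key keys[] = {
        {0, 0, "idx_a"}, {1, 1, "uniq_b"}, {0, 2, "idx_c"}, {1, 3, "uniq_d"}
      };
      size_t i, n = sizeof(keys) / sizeof(keys[0]);
      qsort(keys, n, sizeof(keys[0]), cmp_keys);
      for (i = 0; i < n; i++)
        printf("%s\n", keys[i].name);  /* uniq_b, uniq_d, idx_a, idx_c */
      return 0;
    }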
@@ -396,7 +400,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
  List<Key> keys_in_order; // Add new keys here
  bool primary_key=0,unique_key=0;
  Key *key;
  uint tmp;
  uint tmp, key_number;
  tmp=min(file->max_keys(), MAX_KEY);
  if (key_count > tmp)
  {
@@ -428,7 +432,8 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
    DBUG_RETURN(-1); // Out of memory

  key_iterator.rewind();
  for (; (key=key_iterator++) ; key_info++)
  key_number=0;
  for (; (key=key_iterator++) ; key_info++, key_number++)
  {
    uint key_length=0;
    key_part_spec *column;
@@ -437,6 +442,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
      (key->type == Key::FULLTEXT) ? HA_FULLTEXT : HA_NOSAME;
    key_info->key_parts=(uint8) key->columns.elements;
    key_info->key_part=key_part_info;
    key_info->usable_key_parts= key_number;

    if (key->type == Key::FULLTEXT)
    {