Automatic merge

Michael Widenius 2013-07-03 22:57:13 +03:00
commit 8ad34c8f9e
19 changed files with 850 additions and 679 deletions

View file

@ -76,18 +76,18 @@ t1.par
SET innodb_strict_mode = OFF;
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
Warnings:
Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
t1.frm
t1.par
ALTER TABLE t1 REBUILD PARTITION p0;
Warnings:
Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
UNLOCK TABLES;
SHOW CREATE TABLE t1;
Table Create Table

View file

@ -1,5 +1,25 @@
drop table if exists t1,t2;
drop view if exists v1;
#
# test basic creation of temporary tables together with normal table
#
create table t1 (a int);
create temporary table t1 AS SELECT 1;
create temporary table t1 AS SELECT 1;
ERROR 42S01: Table 't1' already exists
create temporary table t1 (a int);
ERROR 42S01: Table 't1' already exists
drop temporary table t1;
drop table t1;
create temporary table t1 AS SELECT 1;
create temporary table t1 AS SELECT 1;
ERROR 42S01: Table 't1' already exists
create temporary table t1 (a int);
ERROR 42S01: Table 't1' already exists
drop temporary table t1;
#
# Test with rename
#
CREATE TABLE t1 (c int not null, d char (10) not null);
insert into t1 values(1,""),(2,"a"),(3,"b");
CREATE TEMPORARY TABLE t1 (a int not null, b char (10) not null);
@ -145,7 +165,7 @@ DROP TABLE t1;
CREATE TABLE t1 (i INT);
CREATE TEMPORARY TABLE t2 (i INT);
DROP TEMPORARY TABLE t2, t1;
ERROR 42S02: Unknown table 't1'
ERROR 42S02: Unknown table 'test.t1'
SELECT * FROM t2;
ERROR 42S02: Table 'test.t2' doesn't exist
SELECT * FROM t1;

View file

@ -1,5 +1,6 @@
# mysqltest should be fixed
-- source include/not_embedded.inc
#
# Test of temporary tables
#
@ -9,6 +10,30 @@ drop table if exists t1,t2;
drop view if exists v1;
--enable_warnings
--echo #
--echo # test basic creation of temporary tables together with normal table
--echo #
create table t1 (a int);
create temporary table t1 AS SELECT 1;
--error 1050
create temporary table t1 AS SELECT 1;
--error 1050
create temporary table t1 (a int);
drop temporary table t1;
drop table t1;
create temporary table t1 AS SELECT 1;
--error 1050
create temporary table t1 AS SELECT 1;
--error 1050
create temporary table t1 (a int);
drop temporary table t1;
--echo #
--echo # Test with rename
--echo #
CREATE TABLE t1 (c int not null, d char (10) not null);
insert into t1 values(1,""),(2,"a"),(3,"b");
CREATE TEMPORARY TABLE t1 (a int not null, b char (10) not null);

View file

@ -8114,23 +8114,33 @@ uint8 ha_ndbcluster::table_cache_type()
}
uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
/**
Retrieve the commit count for the table object.
@param thd Thread context.
@param norm_name Normalized path to the table.
@param[out] commit_count Commit count for the table.
@return 0 on success.
@return 1 if an error occurred.
*/
uint ndb_get_commitcount(THD *thd, char *norm_name,
Uint64 *commit_count)
{
char name[FN_REFLEN + 1];
char dbname[NAME_LEN + 1];
NDB_SHARE *share;
DBUG_ENTER("ndb_get_commitcount");
build_table_filename(name, sizeof(name) - 1,
dbname, tabname, "", 0);
DBUG_PRINT("enter", ("name: %s", name));
mysql_mutex_lock(&ndbcluster_mutex);
DBUG_PRINT("enter", ("name: %s", norm_name));
pthread_mutex_lock(&ndbcluster_mutex);
if (!(share=(NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
(uchar*) name,
strlen(name))))
(const uchar*) norm_name,
strlen(norm_name))))
{
mysql_mutex_unlock(&ndbcluster_mutex);
DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", name));
pthread_mutex_unlock(&ndbcluster_mutex);
DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables",
norm_name));
DBUG_RETURN(1);
}
/* ndb_share reference temporary, free below */
@ -8162,6 +8172,8 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
Ndb *ndb;
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(1);
ha_ndbcluster::set_dbname(norm_name, dbname);
if (ndb->setDatabaseName(dbname))
{
ERR_RETURN(ndb->getNdbError());
@ -8171,7 +8183,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
struct Ndb_statistics stat;
{
Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
char tblname[NAME_LEN + 1];
ha_ndbcluster::set_tabname(norm_name, tblname);
Ndb_table_guard ndbtab_g(ndb->getDictionary(), tblname);
if (ndbtab_g.get_table() == 0
|| ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat))
{
@ -8221,10 +8235,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
@param thd thread handle
@param full_name concatenation of database name,
the null character '\\0', and the table name
@param full_name_len length of the full name,
i.e. len(dbname) + len(tablename) + 1
@param full_name normalized path to the table in the canonical
format.
@param full_name_len length of the normalized path to the table.
@param engine_data parameter retrieved when query was first inserted into
the cache. If the value of engine_data is changed,
all queries for this table should be invalidated.
@ -8243,11 +8256,15 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
ulonglong *engine_data)
{
Uint64 commit_count;
char *dbname= full_name;
char *tabname= dbname+strlen(dbname)+1;
char dbname[NAME_LEN + 1];
char tabname[NAME_LEN + 1];
#ifndef DBUG_OFF
char buff[22], buff2[22];
#endif
ha_ndbcluster::set_dbname(full_name, dbname);
ha_ndbcluster::set_tabname(full_name, tabname);
DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
DBUG_PRINT("enter", ("dbname: %s, tabname: %s", dbname, tabname));
@ -8257,7 +8274,7 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
DBUG_RETURN(FALSE);
}
if (ndb_get_commitcount(thd, dbname, tabname, &commit_count))
if (ndb_get_commitcount(thd, full_name, &commit_count))
{
*engine_data= 0; /* invalidate */
DBUG_PRINT("exit", ("No, could not retrieve commit_count"));
@ -8292,10 +8309,9 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
the cached query is reused.
@param thd thread handle
@param full_name concatenation of database name,
the null character '\\0', and the table name
@param full_name_len length of the full name,
i.e. len(dbname) + len(tablename) + 1
@param full_name normalized path to the table in the
canonical format.
@param full_name_len length of the normalized path to the table.
@param engine_callback function to be called before using cache on
this table
@param[out] engine_data commit_count for this table
@ -8325,7 +8341,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
DBUG_RETURN(FALSE);
}
if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
if (ndb_get_commitcount(thd, full_name, &commit_count))
{
*engine_data= 0;
DBUG_PRINT("exit", ("Error, could not get commitcount"));

View file

@ -2304,26 +2304,27 @@ uint ha_partition::count_query_cache_dependant_tables(uint8 *tables_type)
DBUG_RETURN(type == HA_CACHE_TBL_ASKTRANSACT ? m_tot_parts : 0);
}
my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
char *key, uint key_len,
uint8 type,
Query_cache *cache,
Query_cache_block_table **block_table,
handler *file,
uint *n)
my_bool ha_partition::
reg_query_cache_dependant_table(THD *thd,
char *engine_key, uint engine_key_len,
char *cache_key, uint cache_key_len,
uint8 type,
Query_cache *cache,
Query_cache_block_table **block_table,
handler *file,
uint *n)
{
DBUG_ENTER("ha_partition::reg_query_cache_dependant_table");
qc_engine_callback engine_callback;
ulonglong engine_data;
/* ask underlying engine */
if (!file->register_query_cache_table(thd, key,
key_len,
if (!file->register_query_cache_table(thd, engine_key,
engine_key_len,
&engine_callback,
&engine_data))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
key,
key + table_share->db.length + 1));
DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s",
engine_key_len, engine_key));
/*
As this can change from call to call, don't reset
thd->lex->safe_to_cache_query
@ -2332,9 +2333,11 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
DBUG_RETURN(TRUE);
}
(++(*block_table))->n= ++(*n);
if (!cache->insert_table(key_len,
key, (*block_table),
if (!cache->insert_table(cache_key_len,
cache_key, (*block_table),
table_share->db.length,
(uint8) (cache_key_len -
table_share->table_cache_key.length),
type,
engine_callback, engine_data,
FALSE))
@ -2343,19 +2346,19 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
}
my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
Query_cache *cache,
Query_cache_block_table **block_table,
uint *n)
my_bool ha_partition::
register_query_cache_dependant_tables(THD *thd,
Query_cache *cache,
Query_cache_block_table **block_table,
uint *n)
{
char *name;
uint prefix_length= table_share->table_cache_key.length + 3;
char *engine_key_end, *query_cache_key_end;
uint i;
uint num_parts= m_part_info->num_parts;
uint num_subparts= m_part_info->num_subparts;
uint i= 0;
int diff_length;
List_iterator<partition_element> part_it(m_part_info->partitions);
char key[FN_REFLEN];
char engine_key[FN_REFLEN], query_cache_key[FN_REFLEN];
DBUG_ENTER("ha_partition::register_query_cache_dependant_tables");
/* see ha_partition::count_query_cache_dependant_tables */
@ -2363,36 +2366,51 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
DBUG_RETURN(FALSE); // nothing to register
/* prepare static part of the key */
memmove(key, table_share->table_cache_key.str,
table_share->table_cache_key.length);
memcpy(engine_key, table_share->normalized_path.str,
table_share->normalized_path.length);
memcpy(query_cache_key, table_share->table_cache_key.str,
table_share->table_cache_key.length);
name= key + table_share->table_cache_key.length - 1;
name[0]= name[2]= '#';
name[1]= 'P';
name+= 3;
diff_length= ((int) table_share->table_cache_key.length -
(int) table_share->normalized_path.length -1);
engine_key_end= engine_key + table_share->normalized_path.length;
query_cache_key_end= query_cache_key + table_share->table_cache_key.length -1;
engine_key_end[0]= engine_key_end[2]= query_cache_key_end[0]=
query_cache_key_end[2]= '#';
query_cache_key_end[1]= engine_key_end[1]= 'P';
engine_key_end+= 3;
query_cache_key_end+= 3;
i= 0;
do
{
partition_element *part_elem= part_it++;
uint part_len= strmov(name, part_elem->partition_name) - name;
char *engine_pos= strmov(engine_key_end, part_elem->partition_name);
if (m_is_sub_partitioned)
{
List_iterator<partition_element> subpart_it(part_elem->subpartitions);
partition_element *sub_elem;
char *sname= name + part_len;
uint j= 0, part;
sname[0]= sname[3]= '#';
sname[1]= 'S';
sname[2]= 'P';
sname += 4;
engine_pos[0]= engine_pos[3]= '#';
engine_pos[1]= 'S';
engine_pos[2]= 'P';
engine_pos += 4;
do
{
char *end;
uint length;
sub_elem= subpart_it++;
part= i * num_subparts + j;
uint spart_len= strmov(sname, sub_elem->partition_name) - name + 1;
if (reg_query_cache_dependant_table(thd, key,
prefix_length + part_len + 4 +
spart_len,
/* we store the end \0 as part of the key */
end= strmov(engine_pos, sub_elem->partition_name);
length= end - engine_key;
/* Copy the suffix also to query cache key */
memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
if (reg_query_cache_dependant_table(thd, engine_key, length,
query_cache_key,
length + diff_length,
m_file[part]->table_cache_type(),
cache,
block_table, m_file[part],
@ -2402,8 +2420,13 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
}
else
{
if (reg_query_cache_dependant_table(thd, key,
prefix_length + part_len + 1,
char *end= engine_pos+1; // copy end \0
uint length= end - engine_key;
/* Copy the suffix also to query cache key */
memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
if (reg_query_cache_dependant_table(thd, engine_key, length,
query_cache_key,
length + diff_length,
m_file[i]->table_cache_type(),
cache,
block_table, m_file[i],
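
After this rewrite each partition is registered under two keys: an engine key built from TABLE_SHARE::normalized_path plus a "#P#<part>" (and, for subpartitions, "#SP#<subpart>") suffix, which is what the underlying handler's register_query_cache_table() sees, and a query cache key that applies the same suffix to table_cache_key. A small standalone sketch of the two layouts, with made-up names and the embedded '\0' of the cache key modelled explicitly:

#include <iostream>
#include <string>

int main()
{
  // Hypothetical values for a table test.t1; the real cache key really does
  // contain an embedded '\0' between database and table name.
  std::string normalized_path= "./test/t1";
  std::string cache_key= std::string("test") + '\0' + "t1";

  std::string suffix= "#P#p0#SP#p0sp0";              // built by the loop above

  std::string engine_key= normalized_path + suffix;  // handed to the engine
  std::string query_cache_key= cache_key + suffix;   // inserted into the cache

  std::cout << engine_key << '\n';                   // ./test/t1#P#p0#SP#p0sp0
  std::cout << query_cache_key.size() << '\n';       // length counts the '\0' too
  return 0;
}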

View file

@ -643,7 +643,10 @@ public:
private:
my_bool reg_query_cache_dependant_table(THD *thd,
char *key, uint key_len, uint8 type,
char *engine_key,
uint engine_key_len,
char *query_key, uint query_key_len,
uint8 type,
Query_cache *cache,
Query_cache_block_table
**block_table,

View file

@ -26,7 +26,7 @@ int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
bool schema_table_store_record(THD *thd, TABLE *table);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);
bool check_global_access(THD *thd, ulong want_access);
uint strconvert(CHARSET_INFO *from_cs, const char *from,
uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length,
uint *errors);
void sql_print_error(const char *format, ...);

View file

@ -417,8 +417,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
if (((fuzzy_date & TIME_NO_ZERO_IN_DATE) &&
(l_time->year == 0 || l_time->month == 0 || l_time->day == 0)) ||
(fuzzy_date & TIME_NO_ZERO_DATE) &&
(l_time->year == 0 && l_time->month == 0 && l_time->day == 0))
((fuzzy_date & TIME_NO_ZERO_DATE) &&
(l_time->year == 0 && l_time->month == 0 && l_time->day == 0)))
goto err;
if (val != val_end)
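
The extra parentheses added here do not change what the condition computes: && binds tighter than ||, so the old and the new expression group identically, and the rewrite mainly makes the grouping explicit (and quiets compilers that warn about && inside ||). A tiny self-contained check of that equivalence:

#include <cassert>

int main()
{
  // Exhaustively compare the implicit grouping with the explicit one.
  for (int a= 0; a < 2; a++)
    for (int b= 0; b < 2; b++)
      for (int c= 0; c < 2; c++)
        for (int d= 0; d < 2; d++)
          assert(((a && b) || c && d) == ((a && b) || (c && d)));
  return 0;
}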

View file

@ -687,15 +687,15 @@ void create_logfile_name_with_suffix(char *res_file_name, uint length,
{
const char *info_file_end= info_file + (p - res_file_name);
const char *ext= append ? info_file_end : fn_ext2(info_file);
size_t res_length, ext_pos;
size_t res_length, ext_pos, from_length;
uint errors;
/* Create null terminated string */
strmake(buff, suffix->str, suffix->length);
from_length= strmake(buff, suffix->str, suffix->length) - buff;
/* Convert to lower case */
my_casedn_str(system_charset_info, buff);
/* Convert to characters usable in a file name */
res_length= strconvert(system_charset_info, buff,
res_length= strconvert(system_charset_info, buff, from_length,
&my_charset_filename, res, sizeof(res), &errors);
ext_pos= (size_t) (ext - info_file);
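
The new from_length is taken from strmake()'s return value: strmake() returns a pointer to the terminating NUL it writes, so subtracting the buffer start gives the number of bytes actually copied, which is then passed on to strconvert(). A small standalone illustration of the idiom, using a stand-in for strmake() (the real helper lives in the MySQL string library):

#include <cassert>
#include <cstring>

// Stand-in for strmake(): copy at most n bytes, always NUL-terminate,
// and return a pointer to the terminating NUL, like the MySQL helper.
static char *my_strmake(char *dst, const char *src, size_t n)
{
  size_t len= std::strlen(src);
  if (len > n)
    len= n;
  std::memcpy(dst, src, len);
  dst[len]= '\0';
  return dst + len;
}

int main()
{
  char buff[64];
  size_t from_length= my_strmake(buff, "relay-log", sizeof(buff) - 1) - buff;
  assert(from_length == 9);           // length later handed to strconvert()
  return 0;
}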

View file

@ -4575,9 +4575,24 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
tables->db, tables->table_name, tables)); //psergey: invalid read of size 1 here
(*counter)++;
/* Not a placeholder: must be a base table or a view. Let us open it. */
DBUG_ASSERT(!tables->table);
/* Check if we are trying to create a temporary table */
if (tables->open_type == OT_TEMPORARY_ONLY)
{
/*
OT_TEMPORARY_ONLY means that we are in CREATE TEMPORARY TABLE statement.
Also such table list element can't correspond to prelocking placeholder
or to underlying table of merge table.
So existing temporary table should have been preopened by this moment
and we can simply continue without trying to open temporary or base
table.
*/
DBUG_ASSERT(tables->open_strategy);
DBUG_ASSERT(!tables->prelocking_placeholder);
DBUG_ASSERT(!tables->parent_l);
DBUG_RETURN(0);
}
/* Not a placeholder: must be a base table or a view. Let us open it. */
if (tables->prelocking_placeholder)
{
/*

View file

@ -336,6 +336,7 @@ TODO list:
#include "sql_acl.h" // SELECT_ACL
#include "sql_base.h" // TMP_TABLE_KEY_EXTRA
#include "debug_sync.h" // DEBUG_SYNC
#include "sql_table.h"
#ifdef HAVE_QUERY_CACHE
#include <m_ctype.h>
#include <my_dir.h>
@ -345,6 +346,7 @@ TODO list:
#include "probes_mysql.h"
#include "log_slow.h"
#include "transaction.h"
#include "strfunc.h"
const uchar *query_state_map;
@ -1636,6 +1638,41 @@ send_data_in_chunks(NET *net, const uchar *packet, ulong len)
#endif
/**
Build a normalized table name suitable for the query cache engine callback.
This consists of normalized directory '/' normalized_file_name,
followed by a suffix.
The suffix is needed for partitioned tables.
*/
size_t build_normalized_name(char *buff, size_t bufflen,
const char *db, size_t db_len,
const char *table_name, size_t table_len,
size_t suffix_len)
{
uint errors;
size_t length;
char *pos= buff, *end= buff+bufflen;
DBUG_ENTER("build_normalized_name");
(*pos++)= FN_LIBCHAR;
length= strconvert(system_charset_info, db, db_len,
&my_charset_filename, pos, bufflen - 3,
&errors);
pos+= length;
(*pos++)= FN_LIBCHAR;
length= strconvert(system_charset_info, table_name, table_len,
&my_charset_filename, pos, (uint) (end - pos),
&errors);
pos+= length;
if (pos + suffix_len < end)
pos= strmake(pos, table_name + table_len, suffix_len);
DBUG_RETURN((size_t) (pos - buff));
}
/*
Check if the query is in the cache. If it was cached, send it
to the user.
@ -2011,35 +2048,50 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
engine_data= table->engine_data();
if (table->callback() &&
!(*table->callback())(thd, table->db(),
table->key_length(),
&engine_data))
if (table->callback())
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
table_list.db, table_list.alias));
BLOCK_UNLOCK_RD(query_block);
if (engine_data != table->engine_data())
char qcache_se_key_name[FN_REFLEN + 10];
uint qcache_se_key_len, db_length= strlen(table->db());
engine_data= table->engine_data();
qcache_se_key_len= build_normalized_name(qcache_se_key_name,
sizeof(qcache_se_key_name),
table->db(),
db_length,
table->table(),
table->key_length() -
db_length - 2 -
table->suffix_length(),
table->suffix_length());
if (!(*table->callback())(thd, qcache_se_key_name,
qcache_se_key_len, &engine_data))
{
DBUG_PRINT("qcache",
("Handler require invalidation queries of %s.%s %lu-%lu",
table_list.db, table_list.alias,
(ulong) engine_data, (ulong) table->engine_data()));
invalidate_table_internal(thd,
(uchar *) table->db(),
table->key_length());
DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s",
qcache_se_key_len, qcache_se_key_name));
BLOCK_UNLOCK_RD(query_block);
if (engine_data != table->engine_data())
{
DBUG_PRINT("qcache",
("Handler require invalidation queries of %.*s %lu-%lu",
qcache_se_key_len, qcache_se_key_name,
(ulong) engine_data, (ulong) table->engine_data()));
invalidate_table_internal(thd,
(uchar *) table->db(),
table->key_length());
}
else
{
/*
As this can change from call to call, don't reset
thd->lex->safe_to_cache_query
*/
thd->query_cache_is_applicable= 0; // Query can't be cached
}
/* End the statement transaction potentially started by engine. */
trans_rollback_stmt(thd);
goto err_unlock; // Parse query
}
else
{
/*
As this can change from call to call, don't reset
thd->lex->safe_to_cache_query
*/
thd->query_cache_is_applicable= 0; // Query can't be cached
}
/* End the statement transaction potentially started by engine. */
trans_rollback_stmt(thd);
goto err_unlock; // Parse query
}
else
DBUG_PRINT("qcache", ("handler allow caching %s,%s",
@ -3257,7 +3309,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
There is no callback function for VIEWs
*/
if (!insert_table(key_length, key, (*block_table),
tables_used->view_db.length + 1,
tables_used->view_db.length + 1, 0,
HA_CACHE_TBL_NONTRANSACT, 0, 0, TRUE))
DBUG_RETURN(0);
/*
@ -3278,7 +3330,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
if (!insert_table(tables_used->table->s->table_cache_key.length,
tables_used->table->s->table_cache_key.str,
(*block_table),
tables_used->db_length,
tables_used->db_length, 0,
tables_used->table->file->table_cache_type(),
tables_used->callback_func,
tables_used->engine_data,
@ -3343,7 +3395,8 @@ my_bool Query_cache::register_all_tables(THD *thd,
my_bool
Query_cache::insert_table(uint key_len, char *key,
Query_cache_block_table *node,
uint32 db_length, uint8 cache_type,
uint32 db_length, uint8 suffix_length_arg,
uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data,
my_bool hash)
@ -3418,6 +3471,7 @@ Query_cache::insert_table(uint key_len, char *key,
char *db= header->db();
header->table(db + db_length + 1);
header->key_length(key_len);
header->suffix_length(suffix_length_arg);
header->type(cache_type);
header->callback(callback);
header->engine_data(engine_data);
@ -4041,13 +4095,13 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
continue;
handler= table->file;
if (!handler->register_query_cache_table(thd,
table->s->table_cache_key.str,
table->s->table_cache_key.length,
table->s->normalized_path.str,
table->s->normalized_path.length,
&tables_used->callback_func,
&tables_used->engine_data))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
tables_used->db, tables_used->alias));
DBUG_PRINT("qcache", ("Handler does not allow caching for %s",
table->s->normalized_path.str));
/*
As this can change from call to call, don't reset
thd->lex->safe_to_cache_query

View file

@ -190,6 +190,7 @@ struct Query_cache_table
Query_cache_table() {} /* Remove gcc warning */
char *tbl;
uint32 key_len;
uint8 suffix_len; /* For partitioned tables */
uint8 table_type;
/* unique for every engine reference */
qc_engine_callback callback_func;
@ -210,6 +211,8 @@ struct Query_cache_table
inline void table(char *table_arg) { tbl= table_arg; }
inline uint32 key_length() { return key_len; }
inline void key_length(uint32 len) { key_len= len; }
inline uint8 suffix_length() { return suffix_len; }
inline void suffix_length(uint8 len) { suffix_len= len; }
inline uint8 type() { return table_type; }
inline void type(uint8 t) { table_type= t; }
inline qc_engine_callback callback() { return callback_func; }
@ -490,7 +493,8 @@ protected:
unsigned pkt_nr);
my_bool insert_table(uint key_len, char *key,
Query_cache_block_table *node,
uint32 db_length, uint8 cache_type,
uint32 db_length, uint8 suffix_length_arg,
uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data,
my_bool hash);

View file

@ -1,5 +1,5 @@
/* Copyright (c) 2005, 2011, Oracle and/or its affiliates.
Copyright (c) 2009-2011, Monty Program Ab
/* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2009-2013, Monty Program Ab & SkySQL Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/*
This file is a container for general functionality related
@ -70,6 +70,7 @@
#include "sql_analyse.h" // append_escaped
#include "sql_alter.h" // Alter_table_ctx
#include <algorithm>
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@ -190,7 +191,7 @@ static int cmp_rec_and_tuple_prune(part_column_list_val *val,
item New converted item
*/
Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
Item* convert_charset_partition_constant(Item *item, const CHARSET_INFO *cs)
{
THD *thd= current_thd;
Name_resolution_context *context= &thd->lex->current_select->context;
@ -208,21 +209,18 @@ Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
}
/*
A support function to check if a name is in a list of strings
/**
A support function to check if a name is in a list of strings.
SYNOPSIS
is_name_in_list()
name String searched for
list_names A list of names searched in
@param name String searched for
@param list_names A list of names searched in
RETURN VALUES
TRUE String found
FALSE String not found
@return True if the name is in the list.
@retval true String found
@retval false String not found
*/
bool is_name_in_list(char *name,
List<char> list_names)
static bool is_name_in_list(char *name, List<char> list_names)
{
List_iterator<char> names_it(list_names);
uint num_names= list_names.elements;
@ -288,61 +286,6 @@ bool partition_default_handling(TABLE *table, partition_info *part_info,
}
/*
Check that the reorganized table will not have duplicate partitions.
SYNOPSIS
check_reorganise_list()
new_part_info New partition info
old_part_info Old partition info
list_part_names The list of partition names that will go away and
can be reused in the new table.
RETURN VALUES
TRUE Unacceptable name conflict detected.
FALSE New names are OK.
DESCRIPTION
Can handle that the 'new_part_info' and 'old_part_info' the same
in which case it checks that the list of names in the partitions
doesn't contain any duplicated names.
*/
bool check_reorganise_list(partition_info *new_part_info,
partition_info *old_part_info,
List<char> list_part_names)
{
uint new_count, old_count;
uint num_new_parts= new_part_info->partitions.elements;
uint num_old_parts= old_part_info->partitions.elements;
List_iterator<partition_element> new_parts_it(new_part_info->partitions);
bool same_part_info= (new_part_info == old_part_info);
DBUG_ENTER("check_reorganise_list");
new_count= 0;
do
{
List_iterator<partition_element> old_parts_it(old_part_info->partitions);
char *new_name= (new_parts_it++)->partition_name;
new_count++;
old_count= 0;
do
{
char *old_name= (old_parts_it++)->partition_name;
old_count++;
if (same_part_info && old_count == new_count)
break;
if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
{
if (!is_name_in_list(old_name, list_part_names))
DBUG_RETURN(TRUE);
}
} while (old_count < num_old_parts);
} while (new_count < num_new_parts);
DBUG_RETURN(FALSE);
}
/*
A useful routine used by update_row for partition handlers to calculate
the partition ids of the old and the new record.
@ -888,8 +831,7 @@ static bool handle_list_of_fields(List_iterator<char> it,
uint primary_key= table->s->primary_key;
if (primary_key != MAX_KEY)
{
uint num_key_parts= table->key_info[primary_key].user_defined_key_parts;
uint i;
uint num_key_parts= table->key_info[primary_key].user_defined_key_parts, i;
/*
In the case of an empty list we use primary key as partition key.
*/
@ -1265,7 +1207,7 @@ void check_range_capable_PF(TABLE *table)
and initialise it.
*/
static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
static bool set_up_partition_bitmaps(THD *thd, partition_info *part_info)
{
uint32 *bitmap_buf;
uint bitmap_bits= part_info->num_subparts?
@ -1576,7 +1518,7 @@ bool field_is_partition_charset(Field *field)
!(field->type() == MYSQL_TYPE_VARCHAR))
return FALSE;
{
CHARSET_INFO *cs= ((Field_str*)field)->charset();
const CHARSET_INFO *cs= field->charset();
if (!(field->type() == MYSQL_TYPE_STRING) ||
!(cs->state & MY_CS_BINSORT))
return TRUE;
@ -1619,7 +1561,7 @@ bool check_part_func_fields(Field **ptr, bool ok_with_charsets)
*/
if (field_is_partition_charset(field))
{
CHARSET_INFO *cs= ((Field_str*)field)->charset();
const CHARSET_INFO *cs= field->charset();
if (!ok_with_charsets ||
cs->mbmaxlen > 1 ||
cs->strxfrm_multiply > 1)
@ -1808,7 +1750,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
(table->s->db_type()->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
check_unique_keys(table)))
goto end;
if (unlikely(set_up_partition_bitmap(thd, part_info)))
if (unlikely(set_up_partition_bitmaps(thd, part_info)))
goto end;
if (unlikely(part_info->set_up_charset_field_preps()))
{
@ -1824,6 +1766,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
set_up_partition_key_maps(table, part_info);
set_up_partition_func_pointers(part_info);
set_up_range_analysis_info(part_info);
table->file->set_part_info(part_info);
result= FALSE;
end:
thd->mark_used_columns= save_mark_used_columns;
@ -2030,35 +1973,40 @@ void truncate_partition_filename(char *path)
/**
@brief Output a filepath. Similar to add_keyword_string except it
also converts \ to / on Windows and skips the partition file name at
the end if found.
also converts \ to / on Windows and skips the partition file name at
the end if found.
@note
When Mysql sends a DATA DIRECTORY from SQL for partitions it does
not use a file name, but it does for DATA DIRECTORY on a non-partitioned
table. So when the storage engine is asked for the DATA DIRECTORY string
after a restart through Handler::update_create_options(), the storage
engine may include the filename.
@note When Mysql sends a DATA DIRECTORY from SQL for partitions it does
not use a file name, but it does for DATA DIRECTORY on a non-partitioned
table. So when the storage engine is asked for the DATA DIRECTORY string
after a restart through Handler::update_create_options(), the storage
engine may include the filename.
*/
static int add_keyword_path(File fptr, const char *keyword,
const char *path)
{
char temp_path[FN_REFLEN];
int err= add_string(fptr, keyword);
err+= add_space(fptr);
err+= add_equal(fptr);
err+= add_space(fptr);
strmake(temp_path, path, sizeof(temp_path)-1);
char temp_path[FN_REFLEN];
strcpy(temp_path, path);
#ifdef __WIN__
/* Convert \ to / to be able to create table on unix */
to_unix_path(temp_path);
char *pos, *end;
uint length= strlen(temp_path);
for (pos= temp_path, end= pos+length ; pos < end ; pos++)
{
if (*pos == '\\')
*pos = '/';
}
#endif
/*
If the partition file name with its "#P#" identifier
is found after the last slash, truncate that filename.
If the partition file name with its "#P#" identifier
is found after the last slash, truncate that filename.
*/
truncate_partition_filename(temp_path);
@ -2067,9 +2015,8 @@ static int add_keyword_path(File fptr, const char *keyword,
return err + add_space(fptr);
}
static int add_keyword_string(File fptr, const char *keyword,
bool should_use_quotes,
bool should_use_quotes,
const char *keystr)
{
int err= add_string(fptr, keyword);
@ -2269,7 +2216,7 @@ static int add_column_list_values(File fptr, partition_info *part_info,
else
{
String *res;
CHARSET_INFO *field_cs;
const CHARSET_INFO *field_cs;
bool need_cs_check= FALSE;
Item_result result_type= STRING_RESULT;
@ -2728,7 +2675,7 @@ static inline int part_val_int(Item *item_expr, longlong *result)
We have a set of support functions for these 14 variants. There are 4
variants of hash functions and there is a function for each. The KEY
partitioning uses the function calculate_key_value to calculate the hash
partitioning uses the function calculate_key_hash_value to calculate the hash
value based on an array of fields. The linear hash variants uses the
method get_part_id_from_linear_hash to get the partition id using the
hash value and some parameters calculated from the number of partitions.
@ -2850,20 +2797,20 @@ static int get_part_id_linear_hash(partition_info *part_info,
}
/*
/**
Calculate part_id for (SUB)PARTITION BY KEY
SYNOPSIS
get_part_id_key()
field_array Array of fields for PARTITION KEY
num_parts Number of KEY partitions
@param file Handler to storage engine
@param field_array Array of fields for PARTITION KEY
@param num_parts Number of KEY partitions
@param func_value[out] Returns calculated hash value
RETURN VALUE
Calculated partition id
@return Calculated partition id
*/
inline
static uint32 get_part_id_key(Field **field_array,
static uint32 get_part_id_key(handler *file,
Field **field_array,
uint num_parts,
longlong *func_value)
{
@ -2931,7 +2878,7 @@ static void copy_to_part_field_buffers(Field **ptr,
restore_ptr++;
if (!field->maybe_null() || !field->is_null())
{
CHARSET_INFO *cs= ((Field_str*)field)->charset();
const CHARSET_INFO *cs= field->charset();
uint max_len= field->pack_length();
uint data_len= field->data_length();
uchar *field_buf= *field_bufs;
@ -3583,7 +3530,8 @@ int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
*part_id= get_part_id_key(part_info->part_field_array,
*part_id= get_part_id_key(part_info->table->file,
part_info->part_field_array,
part_info->num_parts, func_value);
return 0;
}
@ -3673,7 +3621,8 @@ int get_partition_id_key_sub(partition_info *part_info,
uint32 *part_id)
{
longlong func_value;
*part_id= get_part_id_key(part_info->subpart_field_array,
*part_id= get_part_id_key(part_info->table->file,
part_info->subpart_field_array,
part_info->num_subparts, &func_value);
return FALSE;
}
@ -4298,9 +4247,11 @@ bool mysql_unpack_partition(THD *thd,
{
bool result= TRUE;
partition_info *part_info;
CHARSET_INFO *old_character_set_client= thd->variables.character_set_client;
const CHARSET_INFO *old_character_set_client=
thd->variables.character_set_client;
LEX *old_lex= thd->lex;
LEX lex;
PSI_statement_locker *parent_locker= thd->m_statement_psi;
DBUG_ENTER("mysql_unpack_partition");
thd->variables.character_set_client= system_charset_info;
@ -4330,12 +4281,16 @@ bool mysql_unpack_partition(THD *thd,
}
part_info= lex.part_info;
DBUG_PRINT("info", ("Parse: %s", part_buf));
thd->m_statement_psi= NULL;
if (parse_sql(thd, & parser_state, NULL) ||
part_info->fix_parser_data(thd))
{
thd->free_items();
thd->m_statement_psi= parent_locker;
goto end;
}
thd->m_statement_psi= parent_locker;
/*
The parsed syntax residing in the frm file can still contain defaults.
The reason is that the frm file is sometimes saved outside of this
@ -4672,7 +4627,7 @@ bool compare_partition_options(HA_CREATE_INFO *table_create_info,
}
/**
/*
Prepare for ALTER TABLE of partition structure
@param[in] thd Thread object
@ -4704,7 +4659,6 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
bool *partition_changed,
bool *fast_alter_table)
{
TABLE *new_table= NULL;
DBUG_ENTER("prep_alter_part_table");
/* Foreign keys on partitioned tables are not supported, waits for WL#148 */
@ -4764,15 +4718,21 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
alter_ctx->table_name,
MDL_INTENTION_EXCLUSIVE));
new_table->use_all_columns();
tab_part_info= new_table->part_info;
tab_part_info= table->part_info;
if (alter_info->flags & Alter_info::ALTER_TABLE_REORG)
{
uint new_part_no, curr_part_no;
/*
'ALTER TABLE t REORG PARTITION' only allowed with auto partition
if default partitioning is used.
*/
if (tab_part_info->part_type != HASH_PARTITION ||
tab_part_info->use_default_num_partitions)
((table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
!tab_part_info->use_default_num_partitions) ||
((!(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)) &&
tab_part_info->use_default_num_partitions))
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
goto err;
@ -4786,7 +4746,23 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
after the change as before. Thus we can reply ok immediately
without any changes at all.
*/
*fast_alter_table= true;
flags= table->file->alter_table_flags(alter_info->flags);
if (flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE))
{
*fast_alter_table= true;
/* Force table re-open for consistency with the main case. */
table->m_needs_reopen= true;
}
else
{
/*
Create copy of partition_info to avoid modifying original
TABLE::part_info, to keep it safe for later use.
*/
if (!(tab_part_info= tab_part_info->get_clone()))
DBUG_RETURN(TRUE);
}
thd->work_part_info= tab_part_info;
DBUG_RETURN(FALSE);
}
@ -4814,6 +4790,30 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
goto err;
}
if ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0)
{
/*
"Fast" change of partitioning is supported in this case.
We will change TABLE::part_info (as this is how we pass
information to storage engine in this case), so the table
must be reopened.
*/
*fast_alter_table= true;
table->m_needs_reopen= true;
}
else
{
/*
"Fast" changing of partitioning is not supported. Create
a copy of TABLE::part_info object, so we can modify it safely.
Modifying original TABLE::part_info will cause problems when
we read data from old version of table using this TABLE object
while copying them to new version of table.
*/
if (!(tab_part_info= tab_part_info->get_clone()))
DBUG_RETURN(TRUE);
}
DBUG_PRINT("info", ("*fast_alter_table flags: 0x%x", flags));
if ((alter_info->flags & Alter_info::ALTER_ADD_PARTITION) ||
(alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION))
{
@ -5349,6 +5349,8 @@ state of p1.
alt_part_info->subpart_type= tab_part_info->subpart_type;
alt_part_info->num_subparts= tab_part_info->num_subparts;
DBUG_ASSERT(!alt_part_info->use_default_partitions);
/* We specified partitions explicitly so don't use defaults anymore. */
tab_part_info->use_default_partitions= FALSE;
if (alt_part_info->set_up_defaults_for_partitioning(table->file,
ULL(0),
0))
@ -5572,7 +5574,9 @@ the generated partition syntax in a correct manner.
There was no partitioning before and no partitioning defined.
Obviously no work needed.
*/
if (table->part_info)
partition_info *tab_part_info= table->part_info;
if (tab_part_info)
{
if (alter_info->flags & Alter_info::ALTER_REMOVE_PARTITIONING)
{
@ -5580,7 +5584,7 @@ the generated partition syntax in a correct manner.
if (!(create_info->used_fields & HA_CREATE_USED_ENGINE))
{
DBUG_PRINT("info", ("No explicit engine used"));
create_info->db_type= table->part_info->default_engine_type;
create_info->db_type= tab_part_info->default_engine_type;
}
DBUG_PRINT("info", ("New engine type: %s",
ha_resolve_storage_engine_name(create_info->db_type)));
@ -5592,16 +5596,20 @@ the generated partition syntax in a correct manner.
/*
Retain partitioning but possibly with a new storage engine
beneath.
Create a copy of TABLE::part_info to be able to modify it freely.
*/
thd->work_part_info= table->part_info;
if (!(tab_part_info= tab_part_info->get_clone()))
DBUG_RETURN(TRUE);
thd->work_part_info= tab_part_info;
if (create_info->used_fields & HA_CREATE_USED_ENGINE &&
create_info->db_type != table->part_info->default_engine_type)
create_info->db_type != tab_part_info->default_engine_type)
{
/*
Make sure change of engine happens to all partitions.
*/
DBUG_PRINT("info", ("partition changed"));
if (table->part_info->is_auto_partitioned)
if (tab_part_info->is_auto_partitioned)
{
/*
If the user originally didn't specify partitioning to be
@ -5629,7 +5637,7 @@ the generated partition syntax in a correct manner.
Need to cater for engine types that can handle partition without
using the partition handler.
*/
if (thd->work_part_info != table->part_info)
if (thd->work_part_info != tab_part_info)
{
DBUG_PRINT("info", ("partition changed"));
*partition_changed= TRUE;
@ -5646,8 +5654,8 @@ the generated partition syntax in a correct manner.
part_info->default_engine_type= create_info->db_type;
else
{
if (table->part_info)
part_info->default_engine_type= table->part_info->default_engine_type;
if (tab_part_info)
part_info->default_engine_type= tab_part_info->default_engine_type;
else
part_info->default_engine_type= create_info->db_type;
}
@ -5708,9 +5716,7 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
build_table_filename(path, sizeof(path) - 1, lpt->db, lpt->table_name, "", 0);
/* Disable transactions for all new tables */
if (mysql_trans_prepare_alter_copy_data(thd))
if(mysql_trans_prepare_alter_copy_data(thd))
DBUG_RETURN(TRUE);
/* TODO: test if bulk_insert would increase the performance */
@ -5725,7 +5731,6 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
if (mysql_trans_commit_alter_copy_data(thd))
error= 1; /* The error has been reported */
DBUG_RETURN(test(error));
}
@ -6382,7 +6387,8 @@ static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
if (write_log_changed_partitions(lpt, &next_entry, (const char*)path))
goto error;
if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
lpt->alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION))
lpt->alter_info->flags &
Alter_info::ALTER_REORGANIZE_PARTITION))
goto error;
if (write_log_replace_delete_frm(lpt, next_entry, shadow_path, path, TRUE))
goto error;
@ -6574,7 +6580,6 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
part_info= lpt->part_info->get_clone();
close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
}
else
{
err_exclusive_lock:
@ -6743,6 +6748,7 @@ static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket,
@param table_list List of the table involved
@param db Database name of new table
@param table_name Table name of new table
@return Operation status
@retval TRUE Error
@retval FALSE Success
@ -6873,7 +6879,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
even in the presence of a MySQL Server crash (the log is executed
before any other threads are started, so there are no locking issues).
4) Close the table that have already been opened but didn't stumble on
the abort locked previously. This is done as part of the
the abort locked previously. This is done as part of the
alter_close_table call.
5) Write the bin log
@ -7048,10 +7053,10 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
use a lower lock level. This can be handled inside store_lock in the
respective handler.
0) Write an entry that removes the shadow frm file if crash occurs
1) Write the shadow frm file of new partitioning
0) Write an entry that removes the shadow frm file if crash occurs.
1) Write the shadow frm file of new partitioning.
2) Log such that temporary partitions added in change phase are
removed in a crash situation
removed in a crash situation.
3) Add the new partitions.
Copy from the reorganised partitions to the new partitions.
4) Get an exclusive metadata lock on the table (waits for all active
@ -7069,7 +7074,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
10) Install the shadow frm file.
11) Reopen the table if under lock tables.
12) Complete query.
*/
*/
if (write_log_drop_shadow_frm(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_1") ||
ERROR_INJECT_ERROR("fail_change_partition_1") ||
@ -8075,8 +8080,7 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
while (part_iter->field_vals.cur != part_iter->field_vals.end)
{
longlong dummy;
field->store(part_iter->field_vals.cur++,
((Field_num*)field)->unsigned_flag);
field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG);
if ((part_iter->part_info->is_sub_partitioned() &&
!part_iter->part_info->get_part_partition_id(part_iter->part_info,
&part_id, &dummy)) ||
@ -8100,12 +8104,11 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
part_iter->field_vals.cur= part_iter->field_vals.start;
return NOT_A_PARTITION_ID;
}
field->store(part_iter->field_vals.cur++, FALSE);
field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG);
if (part_iter->part_info->get_subpartition_id(part_iter->part_info,
&res))
return NOT_A_PARTITION_ID;
return res;
}
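
The two get_next_*partition_via_walking() fixes above pass the column's UNSIGNED flag to Field::store() instead of hard-coding it (the subpartition variant previously passed FALSE). For an unsigned column the difference shows once a walked value has the sign bit set: read as a signed longlong it is negative, and storing a negative value into an unsigned field clamps it. A rough standalone model of that clamping, assuming out-of-range values are clipped to the nearest bound as the server does:

#include <cstdint>
#include <iostream>

// Model of Field::store(longlong nr, bool unsigned_val) for an unsigned
// BIGINT column (assumption: out-of-range values are clamped).
static uint64_t store_unsigned_bigint(int64_t nr, bool unsigned_val)
{
  if (!unsigned_val && nr < 0)
    return 0;                        // negative signed value clamped to 0
  return static_cast<uint64_t>(nr);  // otherwise keep the bit pattern
}

int main()
{
  // Bit pattern of 18446744073709551615 (UINT64_MAX) seen as a signed longlong:
  int64_t raw= -1;

  std::cout << store_unsigned_bigint(raw, false) << '\n'; // 0 (old behaviour)
  std::cout << store_unsigned_bigint(raw, true)  << '\n'; // 18446744073709551615
  return 0;
}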

File diff suppressed because it is too large.

View file

@ -265,27 +265,22 @@ uint check_word(TYPELIB *lib, const char *val, const char *end,
*/
uint strconvert(CHARSET_INFO *from_cs, const char *from,
uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors)
{
int cnvres;
my_wc_t wc;
char *to_start= to;
uchar *to_end= (uchar*) to + to_length - 1;
const uchar *from_end= (const uchar*) from + from_length;
my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
uint error_count= 0;
while (1)
{
/*
Using 'from + 10' is safe:
- it is enough to scan a single character in any character set.
- if remaining string is shorter than 10, then mb_wc will return
with error because of unexpected '\0' character.
*/
if ((cnvres= (*mb_wc)(from_cs, &wc,
(uchar*) from, (uchar*) from + 10)) > 0)
(uchar*) from, from_end)) > 0)
{
if (!wc)
break;
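
strconvert() now receives the source length explicitly and scans up to from + from_length instead of probing 'from + 10' for every character, so callers can convert strings that are not NUL-terminated; NUL-terminated callers (for example the InnoDB wrappers further down, which pass FN_REFLEN) can keep passing a generous upper bound because the "if (!wc) break;" check still stops at the terminator. A toy model of that contract, ignoring real charset conversion:

#include <cassert>
#include <cstddef>
#include <string>

// Toy model of the new strconvert() contract: consume at most from_length
// bytes, stop early at a terminating NUL, never read past from_end.
static size_t toy_strconvert(const char *from, size_t from_length,
                             char *to, size_t to_length)
{
  const char *from_end= from + from_length;
  char *to_start= to, *to_end= to + to_length - 1;
  while (from < from_end && to < to_end)
  {
    if (*from == '\0')      // mirrors "if (!wc) break;"
      break;
    *to++= *from++;
  }
  *to= '\0';
  return to - to_start;
}

int main()
{
  char buf[16];
  // Source not NUL-terminated within the first 2 bytes: the length is honoured.
  assert(toy_strconvert("t1#P#p0", 2, buf, sizeof(buf)) == 2);
  assert(std::string(buf) == "t1");
  // NUL-terminated source with a generous upper bound (like FN_REFLEN).
  assert(toy_strconvert("t1", 512, buf, sizeof(buf)) == 2);
  return 0;
}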

View file

@ -43,7 +43,7 @@ char *set_to_string(THD *thd, LEX_STRING *result, ulonglong set,
/*
These functions were protected by INNODB_COMPATIBILITY_HOOKS
*/
uint strconvert(CHARSET_INFO *from_cs, const char *from,
uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors);
#endif /* STRFUNC_INCLUDED */

View file

@ -6022,7 +6022,7 @@ dict_fs2utf8(
db[db_len] = '\0';
strconvert(
&my_charset_filename, db,
&my_charset_filename, db, db_len,
system_charset_info, db_utf8, db_utf8_size,
&errors);
@ -6049,7 +6049,7 @@ dict_fs2utf8(
errors = 0;
strconvert(
&my_charset_filename, buf,
&my_charset_filename, buf, (uint) (buf_p - buf),
system_charset_info, table_utf8, table_utf8_size,
&errors);

View file

@ -1592,7 +1592,7 @@ innobase_convert_from_table_id(
{
uint errors;
strconvert(cs, from, &my_charset_filename, to, (uint) len, &errors);
strconvert(cs, from, FN_REFLEN, &my_charset_filename, to, (uint) len, &errors);
}
/******************************************************************//**
@ -1608,7 +1608,7 @@ innobase_convert_from_id(
{
uint errors;
strconvert(cs, from, system_charset_info, to, (uint) len, &errors);
strconvert(cs, from, FN_REFLEN, system_charset_info, to, (uint) len, &errors);
}
/******************************************************************//**

View file

@ -1672,7 +1672,7 @@ my_bool ha_myisammrg::register_query_cache_dependant_tables(THD *thd
There is no callback function for MyISAM, and no engine data
*/
if (!cache->insert_table(key_length, key, (*block_table),
db_length,
db_length, 0,
table_cache_type(),
0, 0, TRUE))
DBUG_RETURN(TRUE);