Merge from mysql-5.1-bugteam to mysql-5.1-security

Jimmy Yang 2010-09-01 17:43:02 -07:00
commit edbae904ff
43 changed files with 708 additions and 240 deletions

View file

@ -6179,8 +6179,10 @@ get_one_option(int optid, const struct my_option *opt,
print_version();
exit(0);
case OPT_MYSQL_PROTOCOL:
#ifndef EMBEDDED_LIBRARY
opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib,
opt->name);
#endif
break;
case '?':
usage();

View file

@ -26,7 +26,8 @@ pkginclude_HEADERS = $(HEADERS_ABI) my_dbug.h m_string.h my_sys.h \
decimal.h errmsg.h my_global.h my_net.h \
my_getopt.h sslopt-longopts.h my_dir.h \
sslopt-vars.h sslopt-case.h sql_common.h keycache.h \
m_ctype.h my_attribute.h $(HEADERS_GEN_CONFIGURE) \
m_ctype.h my_attribute.h my_compiler.h \
$(HEADERS_GEN_CONFIGURE) \
$(HEADERS_GEN_MAKE)
noinst_HEADERS = config-win.h config-netware.h my_bit.h \
@ -37,7 +38,7 @@ noinst_HEADERS = config-win.h config-netware.h my_bit.h \
my_aes.h my_tree.h my_trie.h hash.h thr_alarm.h \
thr_lock.h t_ctype.h violite.h my_md5.h base64.h \
my_handler.h my_time.h my_vle.h my_user.h \
my_libwrap.h my_stacktrace.h my_compiler.h
my_libwrap.h my_stacktrace.h
EXTRA_DIST = mysql.h.pp mysql/plugin.h.pp

View file

@ -12,6 +12,8 @@ funcs_1.ndb* # joro : NDB tests marked as experiment
funcs_2.ndb_charset # joro : NDB tests marked as experimental as agreed with bochklin
innodb_plugin.* @solaris # Bug#56063 InnoDB Plugin mysql-tests fail on Solaris
main.ctype_gbk_binlog @solaris # Bug#46010: main.ctype_gbk_binlog fails sporadically : Table 't2' already exists
main.func_str @solaris # joro: Bug#40928
main.sp @solaris # joro : Bug#54138

View file

@ -358,4 +358,13 @@ INDEX(a), INDEX(b), INDEX(c));
INSERT INTO t1 VALUES (1,2,3), (4,5,6), (7,8,9);
DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1;
DROP TABLE t1;
#
# Bug #53034: Multiple-table DELETE statements not accepting
# "Access compatibility" syntax
#
CREATE TABLE t1 (id INT);
CREATE TABLE t2 LIKE t1;
CREATE TABLE t3 LIKE t1;
DELETE FROM t1.*, test.t2.*, a.* USING t1, t2, t3 AS a;
DROP TABLE t1, t2, t3;
End of 5.1 tests

View file

@ -1713,4 +1713,15 @@ f1 f2 f3 f4 f1 = f2
NULL NULL NULL NULL NULL
drop table t1;
#
# Bug #54465: assert: field_types == 0 || field_types[field_pos] ==
# MYSQL_TYPE_LONGLONG
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2);
SELECT MAX((SELECT 1 FROM t1 ORDER BY @var LIMIT 1)) m FROM t1 t2, t1
ORDER BY t1.a;
m
1
DROP TABLE t1;
#
End of 5.1 tests

View file

@ -1057,4 +1057,11 @@ NULL
SELECT Polygon(12345123,'');
Polygon(12345123,'')
NULL
#
# BUG#51875: crash when loading data into geometry function polyfromwkb
#
SET @a=0x00000000030000000100000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
SET @a=POLYFROMWKB(@a);
SET @a=0x00000000030000000000000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
SET @a=POLYFROMWKB(@a);
End of 5.1 tests

View file

@ -1653,4 +1653,17 @@ a b
0 0
1 1
DROP TABLE t1;
#
# Bug #54802: 'NOT BETWEEN' evaluation is incorrect
#
CREATE TABLE t1 (c_key INT, c_notkey INT, KEY(c_key));
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
EXPLAIN SELECT * FROM t1 WHERE 2 NOT BETWEEN c_notkey AND c_key;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c_key NULL NULL NULL 3 Using where
SELECT * FROM t1 WHERE 2 NOT BETWEEN c_notkey AND c_key;
c_key c_notkey
1 1
3 3
DROP TABLE t1;
End of 5.1 tests

View file

@ -2541,4 +2541,62 @@ SELECT * FROM t1 FOR UPDATE;
SELECT * FROM t1 GROUP BY (SELECT a FROM t2 LIMIT 1 FOR UPDATE) + t1.a;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1,t2;
#
# Bug#55656: mysqldump can be slower after bug #39653 fix
#
CREATE TABLE t1 (a INT , b INT, c INT, d INT,
KEY (b), PRIMARY KEY (a,b)) ENGINE=INNODB;
INSERT INTO t1 VALUES (1,1,1,1), (2,2,2,2), (3,3,3,3);
EXPLAIN SELECT COUNT(*) FROM t1;
id 1
select_type SIMPLE
table t1
type index
possible_keys NULL
key b
key_len 4
ref NULL
rows 3
Extra Using index
DROP INDEX b ON t1;
CREATE INDEX b ON t1(a,b);
EXPLAIN SELECT COUNT(*) FROM t1;
id 1
select_type SIMPLE
table t1
type index
possible_keys NULL
key b
key_len 8
ref NULL
rows 3
Extra Using index
DROP INDEX b ON t1;
CREATE INDEX b ON t1(a,b,c);
EXPLAIN SELECT COUNT(*) FROM t1;
id 1
select_type SIMPLE
table t1
type index
possible_keys NULL
key b
key_len 13
ref NULL
rows 3
Extra Using index
DROP INDEX b ON t1;
CREATE INDEX b ON t1(a,b,c,d);
EXPLAIN SELECT COUNT(*) FROM t1;
id 1
select_type SIMPLE
table t1
type index
possible_keys NULL
key PRIMARY
key_len 8
ref NULL
rows 3
Extra Using index
DROP TABLE t1;
#
End of 5.1 tests

View file

@ -782,9 +782,14 @@ START TRANSACTION;
SELECT * FROM t1 LOCK IN SHARE MODE;
connection con1;
let $conn_id= `SELECT CONNECTION_ID()`;
--send SELECT * FROM t1 FOR UPDATE
connection con2;
let $wait_timeout= 2;
let $wait_condition= SELECT 1 FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE ID=$conn_id AND STATE='Sending data';
--source include/wait_condition.inc
--echo # should not crash
--error ER_LOCK_DEADLOCK
SELECT * FROM t1 GROUP BY (SELECT a FROM t2 LIMIT 1 FOR UPDATE) + t1.a;
@ -795,5 +800,30 @@ disconnect con2;
DROP TABLE t1,t2;
--echo #
--echo # Bug#55656: mysqldump can be slower after bug #39653 fix
--echo #
CREATE TABLE t1 (a INT , b INT, c INT, d INT,
KEY (b), PRIMARY KEY (a,b)) ENGINE=INNODB;
INSERT INTO t1 VALUES (1,1,1,1), (2,2,2,2), (3,3,3,3);
--query_vertical EXPLAIN SELECT COUNT(*) FROM t1
DROP INDEX b ON t1;
CREATE INDEX b ON t1(a,b);
--query_vertical EXPLAIN SELECT COUNT(*) FROM t1
DROP INDEX b ON t1;
CREATE INDEX b ON t1(a,b,c);
--query_vertical EXPLAIN SELECT COUNT(*) FROM t1
DROP INDEX b ON t1;
CREATE INDEX b ON t1(a,b,c,d);
--query_vertical EXPLAIN SELECT COUNT(*) FROM t1
DROP TABLE t1;
--echo #
--echo End of 5.1 tests

View file

@ -387,4 +387,17 @@ DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1;
DROP TABLE t1;
--echo #
--echo # Bug #53034: Multiple-table DELETE statements not accepting
--echo # "Access compatibility" syntax
--echo #
CREATE TABLE t1 (id INT);
CREATE TABLE t2 LIKE t1;
CREATE TABLE t3 LIKE t1;
DELETE FROM t1.*, test.t2.*, a.* USING t1, t2, t3 AS a;
DROP TABLE t1, t2, t3;
--echo End of 5.1 tests

View file

@ -1082,6 +1082,20 @@ select a.f1 as a, b.f4 as b, a.f1 > b.f4 as gt,
from t1 a, t1 b;
select *, f1 = f2 from t1;
drop table t1;
--echo #
--echo # Bug #54465: assert: field_types == 0 || field_types[field_pos] ==
--echo # MYSQL_TYPE_LONGLONG
--echo #
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2);
SELECT MAX((SELECT 1 FROM t1 ORDER BY @var LIMIT 1)) m FROM t1 t2, t1
ORDER BY t1.a;
DROP TABLE t1;
--echo #
--echo End of 5.1 tests

View file

@ -722,4 +722,14 @@ SELECT Polygon(123451,'');
SELECT Polygon(1234512,'');
SELECT Polygon(12345123,'');
--echo #
--echo # BUG#51875: crash when loading data into geometry function polyfromwkb
--echo #
SET @a=0x00000000030000000100000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
SET @a=POLYFROMWKB(@a);
SET @a=0x00000000030000000000000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
SET @a=POLYFROMWKB(@a);
--echo End of 5.1 tests

View file

@ -16,6 +16,7 @@ let $MYSQLD_DATADIR= `SELECT @@datadir`;
--copy_file std_data/parts/t1_blackhole.frm $MYSQLD_DATADIR/test/t1.frm
--copy_file std_data/parts/t1_blackhole.par $MYSQLD_DATADIR/test/t1.par
SHOW TABLES;
--replace_result $MYSQLD_DATADIR ./
--error ER_NOT_FORM_FILE
SHOW CREATE TABLE t1;
--error ER_BAD_TABLE_ERROR

View file

@ -1313,4 +1313,16 @@ SELECT * FROM t1 FORCE INDEX (PRIMARY)
DROP TABLE t1;
--echo #
--echo # Bug #54802: 'NOT BETWEEN' evaluation is incorrect
--echo #
CREATE TABLE t1 (c_key INT, c_notkey INT, KEY(c_key));
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
EXPLAIN SELECT * FROM t1 WHERE 2 NOT BETWEEN c_notkey AND c_key;
SELECT * FROM t1 WHERE 2 NOT BETWEEN c_notkey AND c_key;
DROP TABLE t1;
--echo End of 5.1 tests

View file

@ -263,6 +263,7 @@ cp include/mysql.h \
include/keycache.h \
include/m_ctype.h \
include/my_attribute.h \
include/my_compiler.h \
include/mysqld_error.h \
include/sql_state.h \
include/mysqld_ername.h \

View file

@ -417,26 +417,6 @@ void Item_sum::mark_as_sum_func()
}
void Item_sum::make_field(Send_field *tmp_field)
{
if (args[0]->type() == Item::FIELD_ITEM && keep_field_type())
{
((Item_field*) args[0])->field->make_field(tmp_field);
/* For expressions only col_name should be non-empty string. */
char *empty_string= (char*)"";
tmp_field->db_name= empty_string;
tmp_field->org_table_name= empty_string;
tmp_field->table_name= empty_string;
tmp_field->org_col_name= empty_string;
tmp_field->col_name= name;
if (maybe_null)
tmp_field->flags&= ~NOT_NULL_FLAG;
}
else
init_make_field(tmp_field, field_type());
}
void Item_sum::print(String *str, enum_query_type query_type)
{
/* orig_args is not filled with valid values until fix_fields() */

View file

@ -339,7 +339,6 @@ public:
forced_const= TRUE;
}
virtual bool const_item() const { return forced_const; }
void make_field(Send_field *field);
virtual void print(String *str, enum_query_type query_type);
void fix_num_length_and_dec();

View file

@ -5063,70 +5063,93 @@ void sql_perror(const char *message)
}
#ifdef __WIN__
extern "C" my_bool reopen_fstreams(const char *filename,
FILE *outstream, FILE *errstream)
{
int handle_fd;
int stream_fd;
HANDLE osfh;
DBUG_ASSERT(filename && (outstream || errstream));
if ((osfh= CreateFile(filename, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE |
FILE_SHARE_DELETE, NULL,
OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
NULL)) == INVALID_HANDLE_VALUE)
return TRUE;
if ((handle_fd= _open_osfhandle((intptr_t)osfh,
_O_APPEND | _O_TEXT)) == -1)
{
CloseHandle(osfh);
return TRUE;
}
if (outstream)
{
stream_fd= _fileno(outstream);
if (_dup2(handle_fd, stream_fd) < 0)
{
CloseHandle(osfh);
return TRUE;
}
}
if (errstream)
{
stream_fd= _fileno(errstream);
if (_dup2(handle_fd, stream_fd) < 0)
{
CloseHandle(osfh);
return TRUE;
}
}
_close(handle_fd);
return FALSE;
}
#else
extern "C" my_bool reopen_fstreams(const char *filename,
FILE *outstream, FILE *errstream)
{
if (outstream && !freopen(filename, "a+", outstream))
return TRUE;
if (errstream && !freopen(filename, "a+", errstream))
return TRUE;
return FALSE;
}
#endif
/*
Unfortunately, there seems to be no good way
to restore the original streams upon failure.
*/
static bool redirect_std_streams(const char *file)
{
if (freopen(file, "a+", stdout) && freopen(file, "a+", stderr))
{
setbuf(stderr, NULL);
return FALSE;
}
if (reopen_fstreams(file, stdout, stderr))
return TRUE;
return TRUE;
setbuf(stderr, NULL);
return FALSE;
}
bool flush_error_log()
{
bool result=0;
bool result= 0;
if (opt_error_log)
{
char err_renamed[FN_REFLEN], *end;
end= strmake(err_renamed,log_error_file,FN_REFLEN-5);
strmov(end, "-old");
VOID(pthread_mutex_lock(&LOCK_error_log));
#ifdef __WIN__
char err_temp[FN_REFLEN+5];
/*
On Windows a temporary file is necessary in order to rename
the current error file.
*/
strxmov(err_temp, err_renamed,"-tmp",NullS);
(void) my_delete(err_temp, MYF(0));
if (freopen(err_temp,"a+",stdout))
{
int fd;
size_t bytes;
uchar buf[IO_SIZE];
freopen(err_temp,"a+",stderr);
setbuf(stderr, NULL);
(void) my_delete(err_renamed, MYF(0));
my_rename(log_error_file,err_renamed,MYF(0));
redirect_std_streams(log_error_file);
if ((fd = my_open(err_temp, O_RDONLY, MYF(0))) >= 0)
{
while ((bytes= my_read(fd, buf, IO_SIZE, MYF(0))) &&
bytes != MY_FILE_ERROR)
my_fwrite(stderr, buf, bytes, MYF(0));
my_close(fd, MYF(0));
}
(void) my_delete(err_temp, MYF(0));
}
else
result= 1;
#else
my_rename(log_error_file,err_renamed,MYF(0));
if (redirect_std_streams(log_error_file))
result= 1;
#endif
if (redirect_std_streams(log_error_file))
result= 1;
VOID(pthread_mutex_unlock(&LOCK_error_log));
}
return result;
return result;
}
void MYSQL_BIN_LOG::signal_update()
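
The hunk above centralizes error-log reopening in reopen_fstreams() and
redirect_std_streams(), used both at startup and when the log is flushed.
A hedged illustration of the SQL-level behaviour this serves (the file path
is an assumption for the sketch): with the server started with
--log-error=/var/log/mysql/err.log, flushing the logs makes flush_error_log()
rename the current file to err.log-old and reopen err.log through
redirect_std_streams().

FLUSH LOGS;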

View file

@ -199,6 +199,9 @@ typedef fp_except fp_except_t;
# endif
#endif
extern "C" my_bool reopen_fstreams(const char *filename,
FILE *outstream, FILE *errstream);
inline void setup_fpu()
{
#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H)
@ -3821,13 +3824,15 @@ static int init_server_components()
opt_error_log= 1; // Too long file name
else
{
my_bool res;
#ifndef EMBEDDED_LIBRARY
if (freopen(log_error_file, "a+", stdout))
res= reopen_fstreams(log_error_file, stdout, stderr);
#else
res= reopen_fstreams(log_error_file, NULL, stderr);
#endif
{
if (freopen(log_error_file, "a+", stderr))
setbuf(stderr, NULL);
}
if (!res)
setbuf(stderr, NULL);
}
}
@ -4475,8 +4480,8 @@ we force server id to 2, but this MySQL server will not act as a slave.");
#ifdef __WIN__
if (!opt_console)
{
freopen(log_error_file,"a+",stdout);
freopen(log_error_file,"a+",stderr);
if (reopen_fstreams(log_error_file, stdout, stderr))
unireg_abort(1);
setbuf(stderr, NULL);
FreeConsole(); // Remove window
}

View file

@ -5526,7 +5526,11 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond)
SEL_TREE *tmp= get_full_func_mm_tree(param, cond_func,
field_item, (Item*)(intptr)i, inv);
if (inv)
{
tree= !tree ? tmp : tree_or(param, tree, tmp);
if (tree == NULL)
break;
}
else
tree= tree_and(param, tree, tmp);
}
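
In the inverted (NOT BETWEEN) branch above, the predicate expands to a
disjunction, so the per-endpoint SEL_TREEs must be OR-ed and the loop has to
stop as soon as one endpoint yields no tree. A short SQL sketch of that
equivalence, reusing the table from the Bug #54802 test in this commit:

# 2 NOT BETWEEN c_notkey AND c_key is the same predicate as:
SELECT * FROM t1 WHERE 2 < c_notkey OR 2 > c_key;
# c_notkey is not indexed, so no usable range tree exists for the OR and the
# expected plan is a full scan with "Using where", not a (wrong) range read
# on c_key alone.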

View file

@ -528,7 +528,7 @@ uint Gis_line_string::init_from_wkb(const char *wkb, uint len,
n_points= wkb_get_uint(wkb, bo);
proper_length= 4 + n_points * POINT_DATA_SIZE;
if (len < proper_length || res->reserve(proper_length))
if (!n_points || len < proper_length || res->reserve(proper_length))
return 0;
res->q_append(n_points);
@ -746,7 +746,9 @@ uint Gis_polygon::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo,
if (len < 4)
return 0;
n_linear_rings= wkb_get_uint(wkb, bo);
if (!(n_linear_rings= wkb_get_uint(wkb, bo)))
return 0;
if (res->reserve(4, 512))
return 0;
wkb+= 4;

View file

@ -13025,6 +13025,34 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
}
/**
Find shortest key suitable for full table scan.
@param table Table to scan
@param usable_keys Allowed keys
@note
Since
1) a clustered primary key entry data set is a set of all record
fields (key fields and not key fields) and
2) secondary index entry data is a union of its key fields and
primary key fields (at least InnoDB and its derivatives don't
duplicate primary key fields there, even if the primary and
the secondary keys have a common subset of key fields),
secondary index entry data is always a subset of the primary key entry.
Unfortunately, key_info[nr].key_length doesn't show the length
of the key/pointer pair but only a sum of key field lengths, so
we can't estimate index IO volume by comparing only this key_length
value of secondary keys and the clustered PK.
So, try secondary keys first, and choose the PK only if there are no
usable covering secondary keys, or the best secondary key found includes
all table fields (i.e. is the same as the PK):
@return
MAX_KEY no suitable key found
key index otherwise
*/
uint find_shortest_key(TABLE *table, const key_map *usable_keys)
{
uint best= MAX_KEY;
@ -13037,23 +13065,6 @@ uint find_shortest_key(TABLE *table, const key_map *usable_keys)
uint min_length= (uint) ~0;
for (uint nr=0; nr < table->s->keys ; nr++)
{
/*
As far as
1) clustered primary key entry data set is a set of all record
fields (key fields and not key fields) and
2) secondary index entry data is a union of its key fields and
primary key fields (at least InnoDB and its derivatives don't
duplicate primary key fields there, even if the primary and
the secondary keys have a common subset of key fields),
then secondary index entry data is always a subset of primary key
entry, and the PK is always longer.
Unfortunately, key_info[nr].key_length doesn't show the length
of key/pointer pair but a sum of key field lengths only, thus
we can't estimate index IO volume comparing only this key_length
value of secondary keys and clustered PK.
So, try secondary keys first, and choose PK only if there are no
usable secondary covering keys:
*/
if (nr == usable_clustered_pk)
continue;
if (usable_keys->is_set(nr))
@ -13066,7 +13077,20 @@ uint find_shortest_key(TABLE *table, const key_map *usable_keys)
}
}
}
return best != MAX_KEY ? best : usable_clustered_pk;
if (usable_clustered_pk != MAX_KEY)
{
/*
If the primary key is clustered and found shorter key covers all table
fields then primary key scan normally would be faster because amount of
data to scan is the same but PK is clustered.
It's safe to compare key parts with table fields since duplicate key
parts aren't allowed.
*/
if (best == MAX_KEY ||
table->key_info[best].key_parts >= table->s->fields)
best= usable_clustered_pk;
}
return best;
}
/**

View file

@ -6930,13 +6930,16 @@ int finalize_schema_table(st_plugin_int *plugin)
ST_SCHEMA_TABLE *schema_table= (ST_SCHEMA_TABLE *)plugin->data;
DBUG_ENTER("finalize_schema_table");
if (schema_table && plugin->plugin->deinit)
if (schema_table)
{
DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str));
if (plugin->plugin->deinit(NULL))
if (plugin->plugin->deinit)
{
DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.",
plugin->name.str));
DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str));
if (plugin->plugin->deinit(NULL))
{
DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.",
plugin->name.str));
}
}
my_free(schema_table, MYF(0));
}
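
The restructuring above frees the ST_SCHEMA_TABLE copy whether or not the
plugin provides a deinit hook; previously it was only freed inside the
deinit branch. A hypothetical sequence that reaches this code (plugin and
library names are placeholders):

INSTALL PLUGIN my_is_plugin SONAME 'my_is_plugin.so';
UNINSTALL PLUGIN my_is_plugin;
# If my_is_plugin defines an INFORMATION_SCHEMA table but no deinit
# function, the UNINSTALL (or server shutdown) previously leaked the
# schema_table allocation; it is now released in both cases.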

View file

@ -1297,6 +1297,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <table>
table_ident table_ident_nodb references xid
table_ident_opt_wild
%type <simple_string>
remember_name remember_end opt_ident opt_db text_or_password
@ -9622,7 +9623,7 @@ table_alias_ref_list:
;
table_alias_ref:
table_ident
table_ident_opt_wild
{
if (!Select->add_table_to_list(YYTHD, $1, NULL,
TL_OPTION_UPDATING | TL_OPTION_ALIAS,
@ -11405,6 +11406,21 @@ table_ident:
}
;
table_ident_opt_wild:
ident opt_wild
{
$$= new Table_ident($1);
if ($$ == NULL)
MYSQL_YYABORT;
}
| ident '.' ident opt_wild
{
$$= new Table_ident(YYTHD, $1,$3,0);
if ($$ == NULL)
MYSQL_YYABORT;
}
;
table_ident_nodb:
ident
{
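
The new table_ident_opt_wild rule lets each table named between DELETE FROM
and USING carry an optional database qualifier and a trailing .*, i.e. the
"Access compatibility" spelling from Bug #53034. Both multi-table forms now
parse the same way (statements taken from, or modelled on, the test added in
this commit):

DELETE FROM t1.*, test.t2.*, a.* USING t1, t2, t3 AS a;
DELETE t1.*, test.t2.*, a.* FROM t1, t2, t3 AS a;
# The second form already accepted the wildcard; the first one did not
# before this change.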

View file

@ -616,8 +616,7 @@ dict_table_get_on_id(
{
dict_table_t* table;
if (ut_dulint_cmp(table_id, DICT_FIELDS_ID) <= 0
|| trx->dict_operation_lock_mode == RW_X_LATCH) {
if (trx->dict_operation_lock_mode == RW_X_LATCH) {
/* Note: An X latch implies that the transaction
already owns the dictionary mutex. */
@ -2140,7 +2139,7 @@ dict_foreign_add_to_cache(
mem_heap_free(foreign->heap);
}
return(DB_CANNOT_ADD_CONSTRAINT);
return(DB_FOREIGN_NO_INDEX);
}
for_in_cache->referenced_table = ref_table;
@ -2184,7 +2183,7 @@ dict_foreign_add_to_cache(
mem_heap_free(foreign->heap);
}
return(DB_CANNOT_ADD_CONSTRAINT);
return(DB_REFERENCING_NO_INDEX);
}
for_in_cache->foreign_table = for_table;
@ -3754,7 +3753,6 @@ dict_update_statistics_low(
dictionary mutex */
{
dict_index_t* index;
ulint size;
ulint sum_of_index_sizes = 0;
if (table->ibd_file_missing) {
@ -3770,14 +3768,6 @@ dict_update_statistics_low(
return;
}
/* If we have set a high innodb_force_recovery level, do not calculate
statistics, as a badly corrupted index can cause a crash in it. */
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
return;
}
/* Find out the sizes of the indexes and how many different values
for the key they approximately have */
@ -3789,26 +3779,48 @@ dict_update_statistics_low(
return;
}
while (index) {
size = btr_get_size(index, BTR_TOTAL_SIZE);
index->stat_index_size = size;
do {
if (UNIV_LIKELY
(srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
|| (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
&& (index->type & DICT_CLUSTERED)))) {
ulint size;
size = btr_get_size(index, BTR_TOTAL_SIZE);
sum_of_index_sizes += size;
index->stat_index_size = size;
size = btr_get_size(index, BTR_N_LEAF_PAGES);
sum_of_index_sizes += size;
if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
size = btr_get_size(index, BTR_N_LEAF_PAGES);
if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
}
index->stat_n_leaf_pages = size;
btr_estimate_number_of_different_key_vals(index);
} else {
/* If we have set a high innodb_force_recovery
level, do not calculate statistics, as a badly
corrupted index can cause a crash in it.
Initialize some bogus index cardinality
statistics, so that the data can be queried by
various means, including via secondary indexes. */
ulint i;
sum_of_index_sizes++;
index->stat_index_size = index->stat_n_leaf_pages = 1;
for (i = dict_index_get_n_unique(index); i; ) {
index->stat_n_diff_key_vals[i--] = 1;
}
}
index->stat_n_leaf_pages = size;
btr_estimate_number_of_different_key_vals(index);
index = dict_table_get_next_index(index);
}
} while (index);
index = dict_table_get_first_index(table);
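
With the change above, dict_update_statistics_low() no longer returns early
when innodb_force_recovery is high; indexes that cannot be scanned safely are
instead given dummy statistics (index size, leaf pages and all cardinalities
set to 1), which is the Bug #55832 fix listed in the ChangeLog further down.
A hypothetical recovery-mode session (table, index and column names are
placeholders, and the option value is assumed):

# my.cnf: innodb_force_recovery = 4
SELECT * FROM damaged_table FORCE INDEX (idx_sec) WHERE sec_col = 42;
# Before the fix, statistics were skipped entirely and such selects could
# crash the server; with the bogus cardinalities the query is planned and
# runs through the secondary index.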

View file

@ -966,6 +966,8 @@ try_again:
HASH_SEARCH(name_hash, system->name_hash, ut_fold_string(name), space,
0 == strcmp(name, space->name));
if (space != NULL) {
ibool success;
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Warning: trying to init to the"
@ -1002,9 +1004,10 @@ try_again:
namesake_id = space->id;
mutex_exit(&(system->mutex));
success = fil_space_free(namesake_id, FALSE);
ut_a(success);
fil_space_free(namesake_id);
mutex_exit(&(system->mutex));
goto try_again;
}
@ -1127,6 +1130,33 @@ fil_assign_new_space_id(void)
return(id);
}
/***********************************************************************
Check if the space id exists in the cache, complain to stderr if the
space id cannot be found. */
static
fil_space_t*
fil_space_search(
/*=============*/
/* out: file space instance*/
ulint id) /* in: space id */
{
fil_space_t* space;
ut_ad(mutex_own(&fil_system->mutex));
HASH_SEARCH(hash, fil_system->spaces, id, space, space->id == id);
if (space == NULL) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: trying to remove tablespace %lu"
" from the cache but\n"
"InnoDB: it is not there.\n", (ulong) id);
}
return(space);
}
/***********************************************************************
Frees a space object from the tablespace memory cache. Closes the files in
the chain but does not delete them. There must not be any pending i/o's or
@ -1135,27 +1165,21 @@ flushes on the files. */
ibool
fil_space_free(
/*===========*/
/* out: TRUE if success */
ulint id) /* in: space id */
/* out: TRUE if success */
ulint id, /* in: space id */
ibool x_latched) /* in: TRUE if caller has space->latch
in X mode */
{
fil_system_t* system = fil_system;
fil_space_t* space;
fil_space_t* namespace;
fil_node_t* fil_node;
mutex_enter(&(system->mutex));
ut_ad(mutex_own(&fil_system->mutex));
HASH_SEARCH(hash, system->spaces, id, space, space->id == id);
if (!space) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: trying to remove tablespace %lu"
" from the cache but\n"
"InnoDB: it is not there.\n", (ulong) id);
mutex_exit(&(system->mutex));
space = fil_space_search(id);
if (space == NULL) {
return(FALSE);
}
@ -1191,7 +1215,9 @@ fil_space_free(
ut_a(0 == UT_LIST_GET_LEN(space->chain));
mutex_exit(&(system->mutex));
if (x_latched) {
rw_lock_x_unlock(&space->latch);
}
rw_lock_free(&(space->latch));
@ -2048,6 +2074,19 @@ try_again:
path = mem_strdup(space->name);
mutex_exit(&(system->mutex));
/* Important: We rely on the data dictionary mutex to ensure
that a race is not possible here. It should serialize the tablespace
drop/free. We acquire an X latch only to avoid a race condition
when accessing the tablespace instance via:
fsp_get_available_space_in_free_extents().
Our main motivation there is to reduce contention on the
dictionary mutex, not correctness. */
rw_lock_x_lock(&space->latch);
#ifndef UNIV_HOTBACKUP
/* Invalidate in the buffer pool all pages belonging to the
tablespace. Since we have set space->is_being_deleted = TRUE, readahead
@ -2060,7 +2099,11 @@ try_again:
#endif
/* printf("Deleting tablespace %s id %lu\n", space->name, id); */
success = fil_space_free(id);
mutex_enter(&system->mutex);
success = fil_space_free(id, TRUE);
mutex_exit(&system->mutex);
if (success) {
success = os_file_delete(path);
@ -2068,6 +2111,8 @@ try_again:
if (!success) {
success = os_file_delete_if_exists(path);
}
} else {
rw_lock_x_unlock(&space->latch);
}
if (success) {
@ -4569,3 +4614,28 @@ fil_page_get_type(
return(mach_read_from_2(page + FIL_PAGE_TYPE));
}
/***********************************************************************
Returns TRUE if a single-table tablespace is being deleted. */
ibool
fil_tablespace_is_being_deleted(
/*============================*/
/* out: TRUE if space is being deleted */
ulint id) /* in: space id */
{
fil_space_t* space;
ibool is_being_deleted;
mutex_enter(&fil_system->mutex);
HASH_SEARCH(hash, fil_system->spaces, id, space, space->id == id);
ut_a(space != NULL);
is_being_deleted = space->is_being_deleted;
mutex_exit(&fil_system->mutex);
return(is_being_deleted);
}

View file

@ -2842,12 +2842,61 @@ fsp_get_available_space_in_free_extents(
ut_ad(!mutex_own(&kernel_mutex));
/* The convoluted mutex acquire is to overcome latching order
issues: The problem is that the fil_mutex is at a lower level
than the tablespace latch and the buffer pool mutex. We have to
first prevent any operations on the file system by acquiring the
dictionary mutex. Then acquire the tablespace latch to obey the
latching order and then release the dictionary mutex. That way we
ensure that the tablespace instance can't be freed while we are
examining its contents (see fil_space_free()).
However, there is one further complication: we release the fil_mutex
when we need to invalidate the pages in the buffer pool, and we
reacquire the fil_mutex when deleting and freeing the tablespace
instance in fil0fil.c. Here we need to account for that situation
too. */
dict_mutex_enter_for_mysql();
/* At this stage there is no guarantee that the tablespace even
exists in the cache. */
if (fil_tablespace_deleted_or_being_deleted_in_mem(space, -1)) {
dict_mutex_exit_for_mysql();
return(ULLINT_UNDEFINED);
}
mtr_start(&mtr);
latch = fil_space_get_latch(space);
/* This should ensure that the tablespace instance can't be freed
by another thread. However, the tablespace pages can still be freed
from the buffer pool. We need to check for that again. */
mtr_x_lock(latch, &mtr);
dict_mutex_exit_for_mysql();
/* At this point it is possible for the tablespace to be deleted and
its pages removed from the buffer pool. We need to check for that
situation. However, the tablespace instance can't be deleted because
our latching above should ensure that. */
if (fil_tablespace_is_being_deleted(space)) {
mtr_commit(&mtr);
return(ULLINT_UNDEFINED);
}
/* From here on even if the user has dropped the tablespace, the
pages _must_ still exist in the buffer pool and the tablespace
instance _must_ be in the file system hash table. */
space_header = fsp_get_space_header(space, &mtr);
size = mtr_read_ulint(space_header + FSP_SIZE, MLOG_4BYTES, &mtr);
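
The comment block above explains the latching order that protects
fsp_get_available_space_in_free_extents() against a concurrent tablespace
drop. At the SQL level the race it guards is roughly the following two
sessions (a hypothetical sketch; the table name is a placeholder):

# session 1
DROP TABLE big_table;
# session 2, concurrently (reads Data_free via ha_innobase::info())
SHOW TABLE STATUS LIKE 'big_table';
# With the checks above, session 2 gets ULLINT_UNDEFINED back and reports
# Data_free = 0 instead of dereferencing a freed fil_space_t.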

View file

@ -707,7 +707,9 @@ convert_error_code_to_mysql(
return(HA_ERR_ROW_IS_REFERENCED);
} else if (error == (int) DB_CANNOT_ADD_CONSTRAINT) {
} else if (error == (int) DB_CANNOT_ADD_CONSTRAINT
|| error == (int) DB_FOREIGN_NO_INDEX
|| error == (int) DB_REFERENCING_NO_INDEX) {
return(HA_ERR_CANNOT_ADD_FOREIGN);
@ -6116,6 +6118,8 @@ ha_innobase::rename_table(
innobase_commit_low(trx);
trx_free_for_mysql(trx);
switch (error) {
case DB_DUPLICATE_KEY:
/* Add a special case to handle the Duplicated Key error
and return DB_ERROR instead.
This is to avoid a possible SIGSEGV error from mysql error
@ -6128,10 +6132,28 @@ ha_innobase::rename_table(
the dup key error here is due to an existing table whose name
is the one we are trying to rename to) and return the generic
error code. */
if (error == (int) DB_DUPLICATE_KEY) {
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), to);
error = DB_ERROR;
break;
case DB_FOREIGN_NO_INDEX:
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
HA_ERR_CANNOT_ADD_FOREIGN,
"Alter or rename of table '%s' failed"
" because the new table is a child table"
" in a FK relationship and it does not"
" have an index that contains foreign"
" keys as its prefix columns.", norm_to);
break;
case DB_REFERENCING_NO_INDEX:
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
HA_ERR_CANNOT_ADD_FOREIGN,
"Alter or rename of table '%s' failed"
" because the new table is a parent table"
" in a FK relationship and it does not"
" have an index that contains foreign"
" keys as its prefix columns.", norm_to);
break;
}
error = convert_error_code_to_mysql(error, NULL);
@ -6360,8 +6382,6 @@ ha_innobase::info(
dict_index_t* index;
ha_rows rec_per_key;
ib_longlong n_rows;
ulong j;
ulong i;
char path[FN_REFLEN];
os_file_stat_t stat_info;
@ -6371,16 +6391,6 @@ ha_innobase::info(
statistics calculation on tables, because that may crash the
server if an index is badly corrupted. */
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
/* We return success (0) instead of HA_ERR_CRASHED,
because we want MySQL to process this query and not
stop, like it would do if it received the error code
HA_ERR_CRASHED. */
DBUG_RETURN(0);
}
/* We do not know if MySQL can call this function before calling
external_lock(). To be safe, update the thd of the current table
handle. */
@ -6475,25 +6485,24 @@ ha_innobase::info(
acquiring latches inside InnoDB, we do not call it if we
are asked by MySQL to avoid locking. Another reason to
avoid the call is that it uses quite a lot of CPU.
See Bug#38185.
We do not update delete_length if no locking is requested
so the "old" value can remain. delete_length is initialized
to 0 in the ha_statistics' constructor. */
if (!(flag & HA_STATUS_NO_LOCK)) {
See Bug#38185. */
if (flag & HA_STATUS_NO_LOCK) {
/* We do not update delete_length if no
locking is requested so the "old" value can
remain. delete_length is initialized to 0 in
the ha_statistics' constructor. */
} else if (UNIV_UNLIKELY
(srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE)) {
/* Avoid accessing the tablespace if
innodb_crash_recovery is set to a high value. */
stats.delete_length = 0;
} else {
ullint avail_space;
/* lock the data dictionary to avoid races with
ibd_file_missing and tablespace_discarded */
row_mysql_lock_data_dictionary(prebuilt->trx);
/* ib_table->space must be an existent tablespace */
if (!ib_table->ibd_file_missing
&& !ib_table->tablespace_discarded) {
stats.delete_length =
fsp_get_available_space_in_free_extents(
ib_table->space) * 1024;
} else {
avail_space = fsp_get_available_space_in_free_extents(
ib_table->space);
if (avail_space == ULLINT_UNDEFINED) {
THD* thd;
thd = ha_thd();
@ -6510,9 +6519,9 @@ ha_innobase::info(
ib_table->name);
stats.delete_length = 0;
} else {
stats.delete_length = avail_space * 1024;
}
row_mysql_unlock_data_dictionary(prebuilt->trx);
}
stats.check_time = 0;
@ -6525,6 +6534,7 @@ ha_innobase::info(
}
if (flag & HA_STATUS_CONST) {
ulong i = 0;
index = dict_table_get_first_index_noninline(ib_table);
if (prebuilt->clust_index_was_generated) {
@ -6532,6 +6542,8 @@ ha_innobase::info(
}
for (i = 0; i < table->s->keys; i++) {
ulong j;
if (index == NULL) {
sql_print_error("Table %s contains fewer "
"indexes inside InnoDB than "
@ -6588,6 +6600,11 @@ ha_innobase::info(
}
}
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
goto func_exit;
}
if (flag & HA_STATUS_ERRKEY) {
ut_a(prebuilt->trx);
ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
@ -6600,6 +6617,7 @@ ha_innobase::info(
stats.auto_increment_value = innobase_peek_autoinc();
}
func_exit:
prebuilt->trx->op_info = (char*)"";
DBUG_RETURN(0);
@ -7831,16 +7849,17 @@ ha_innobase::store_lock(
&& (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
&& (sql_command == SQLCOM_INSERT_SELECT
|| sql_command == SQLCOM_UPDATE
|| sql_command == SQLCOM_CREATE_TABLE)) {
|| sql_command == SQLCOM_CREATE_TABLE
|| sql_command == SQLCOM_SET_OPTION)) {
/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
isolation level and isolation level of the transaction
is not set to serializable and MySQL is doing
INSERT INTO...SELECT or UPDATE ... = (SELECT ...) or
CREATE ... SELECT... without FOR UPDATE or
IN SHARE MODE in select, then we use consistent
read for select. */
CREATE ... SELECT... or SET ... = (SELECT ...)
without FOR UPDATE or IN SHARE MODE in select,
then we use consistent read for select. */
prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
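
The SQLCOM_SET_OPTION case added above is the Bug #55382 fix from the
ChangeLog: a SET whose right-hand side is a SELECT no longer takes shared
row locks under READ COMMITTED (or with innobase_locks_unsafe_for_binlog)
and uses a consistent read, like INSERT ... SELECT already did. A hedged
illustration (table and variable names are placeholders):

SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
# The subquery is now evaluated with a consistent, non-locking read instead
# of acquiring S locks on the rows it scans.
SET @max_id = (SELECT MAX(id) FROM t1);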

View file

@ -76,6 +76,12 @@ Created 5/24/1996 Heikki Tuuri
#define DB_FOREIGN_EXCEED_MAX_CASCADE 50/* Foreign key constraint related
cascading delete/update exceeds
maximum allowed depth */
#define DB_FOREIGN_NO_INDEX 51 /* the child (foreign) table does not
have an index that contains the
foreign keys as its prefix columns */
#define DB_REFERENCING_NO_INDEX 52 /* the parent (referencing) table does
not have an index that contains the
foreign keys as its prefix columns */
/* The following are partial failure codes */
#define DB_FAIL 1000

View file

@ -202,8 +202,10 @@ the chain but does not delete them. */
ibool
fil_space_free(
/*===========*/
/* out: TRUE if success */
ulint id); /* in: space id */
/* out: TRUE if success */
ulint id, /* in: space id */
ibool x_latched); /* in: TRUE if caller has space->latch
in X mode */
/***********************************************************************
Returns the size of the space in pages. The tablespace must be cached in the
memory cache. */
@ -710,6 +712,14 @@ fil_page_get_type(
written to page, the return value not defined */
byte* page); /* in: file page */
/***********************************************************************
Returns TRUE if a single-table tablespace is being deleted. */
ibool
fil_tablespace_is_being_deleted(
/*============================*/
/* out: TRUE if space is being deleted */
ulint id); /* in: space id */
typedef struct fil_space_struct fil_space_t;

View file

@ -234,6 +234,12 @@ typedef unsigned long long int ullint;
/* Maximum value for a ulint */
#define ULINT_MAX ((ulint)(-2))
/* The 'undefined' value for ullint */
#define ULLINT_UNDEFINED ((ullint)(-1))
/* Maximum value for a ullint */
#define ULLINT_MAX ((ullint)(-2))
/* This 'ibool' type is used within Innobase. Remember that different included
headers may define 'bool' differently. Do not assume that 'bool' is a ulint! */
#define ibool ulint

View file

@ -1,3 +1,8 @@
2010-08-24 The InnoDB Team
* handler/ha_innodb.c, dict/dict0dict.c:
Fix Bug #55832 selects crash too easily when innodb_force_recovery>3
2010-08-03 The InnoDB Team
* include/dict0dict.h, include/dict0dict.ic, row/row0mysql.c:
@ -11,6 +16,19 @@
Fix Bug#54582 stack overflow when opening many tables linked
with foreign keys at once
2010-08-03 The InnoDB Team
* include/ut0mem.h, ut/ut0mem.c:
Fix Bug #55627 segv in ut_free pars_lexer_close innobase_shutdown
innodb-use-sys-malloc=0
2010-08-01 The InnoDB Team
* handler/ha_innodb.cc
Fix Bug #55382 Assignment with SELECT expressions takes unexpected
S locks in READ COMMITTED
>>>>>>> MERGE-SOURCE
2010-07-27 The InnoDB Team
* include/mem0pool.h, mem/mem0mem.c, mem/mem0pool.c, srv/srv0start.c:

View file

@ -1734,6 +1734,7 @@ function_exit:
}
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return TRUE if ok */
@ -1897,3 +1898,4 @@ btr_search_validate(void)
return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */

View file

@ -568,8 +568,7 @@ dict_table_get_on_id(
{
dict_table_t* table;
if (ut_dulint_cmp(table_id, DICT_FIELDS_ID) <= 0
|| trx->dict_operation_lock_mode == RW_X_LATCH) {
if (trx->dict_operation_lock_mode == RW_X_LATCH) {
/* Note: An X latch implies that the transaction
already owns the dictionary mutex. */
@ -4192,7 +4191,6 @@ dict_update_statistics_low(
dictionary mutex */
{
dict_index_t* index;
ulint size;
ulint sum_of_index_sizes = 0;
if (table->ibd_file_missing) {
@ -4207,14 +4205,6 @@ dict_update_statistics_low(
return;
}
/* If we have set a high innodb_force_recovery level, do not calculate
statistics, as a badly corrupted index can cause a crash in it. */
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
return;
}
/* Find out the sizes of the indexes and how many different values
for the key they approximately have */
@ -4226,26 +4216,48 @@ dict_update_statistics_low(
return;
}
while (index) {
size = btr_get_size(index, BTR_TOTAL_SIZE);
index->stat_index_size = size;
do {
if (UNIV_LIKELY
(srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
|| (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
&& dict_index_is_clust(index)))) {
ulint size;
size = btr_get_size(index, BTR_TOTAL_SIZE);
sum_of_index_sizes += size;
index->stat_index_size = size;
size = btr_get_size(index, BTR_N_LEAF_PAGES);
sum_of_index_sizes += size;
if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
size = btr_get_size(index, BTR_N_LEAF_PAGES);
if (size == 0) {
/* The root node of the tree is a leaf */
size = 1;
}
index->stat_n_leaf_pages = size;
btr_estimate_number_of_different_key_vals(index);
} else {
/* If we have set a high innodb_force_recovery
level, do not calculate statistics, as a badly
corrupted index can cause a crash in it.
Initialize some bogus index cardinality
statistics, so that the data can be queried by
various means, including via secondary indexes. */
ulint i;
sum_of_index_sizes++;
index->stat_index_size = index->stat_n_leaf_pages = 1;
for (i = dict_index_get_n_unique(index); i; ) {
index->stat_n_diff_key_vals[i--] = 1;
}
}
index->stat_n_leaf_pages = size;
btr_estimate_number_of_different_key_vals(index);
index = dict_table_get_next_index(index);
}
} while (index);
index = dict_table_get_first_index(table);

View file

@ -354,6 +354,7 @@ ha_remove_all_nodes_to_page(
#endif
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
@ -400,6 +401,7 @@ ha_validate(
return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
/*************************************************************//**
Prints info of a hash table. */

View file

@ -7531,28 +7531,15 @@ ha_innobase::info(
dict_index_t* index;
ha_rows rec_per_key;
ib_int64_t n_rows;
ulong j;
ulong i;
char path[FN_REFLEN];
os_file_stat_t stat_info;
DBUG_ENTER("info");
/* If we are forcing recovery at a high level, we will suppress
statistics calculation on tables, because that may crash the
server if an index is badly corrupted. */
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
/* We return success (0) instead of HA_ERR_CRASHED,
because we want MySQL to process this query and not
stop, like it would do if it received the error code
HA_ERR_CRASHED. */
DBUG_RETURN(0);
}
/* We do not know if MySQL can call this function before calling
external_lock(). To be safe, update the thd of the current table
handle. */
@ -7647,12 +7634,18 @@ ha_innobase::info(
acquiring latches inside InnoDB, we do not call it if we
are asked by MySQL to avoid locking. Another reason to
avoid the call is that it uses quite a lot of CPU.
See Bug#38185.
We do not update delete_length if no locking is requested
so the "old" value can remain. delete_length is initialized
to 0 in the ha_statistics' constructor. */
if (!(flag & HA_STATUS_NO_LOCK)) {
See Bug#38185. */
if (flag & HA_STATUS_NO_LOCK) {
/* We do not update delete_length if no
locking is requested so the "old" value can
remain. delete_length is initialized to 0 in
the ha_statistics' constructor. */
} else if (UNIV_UNLIKELY
(srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE)) {
/* Avoid accessing the tablespace if
innodb_crash_recovery is set to a high value. */
stats.delete_length = 0;
} else {
/* lock the data dictionary to avoid races with
ibd_file_missing and tablespace_discarded */
row_mysql_lock_data_dictionary(prebuilt->trx);
@ -7697,6 +7690,7 @@ ha_innobase::info(
}
if (flag & HA_STATUS_CONST) {
ulong i;
/* Verify that the number of indexes in InnoDB and MySQL
matches up. If prebuilt->clust_index_was_generated
holds, InnoDB defines GEN_CLUST_INDEX internally */
@ -7713,6 +7707,7 @@ ha_innobase::info(
}
for (i = 0; i < table->s->keys; i++) {
ulong j;
/* We could get index quickly through internal
index mapping with the index translation table.
The identity of index (match up index name with
@ -7778,6 +7773,11 @@ ha_innobase::info(
}
}
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
goto func_exit;
}
if (flag & HA_STATUS_ERRKEY) {
const dict_index_t* err_index;
@ -7798,6 +7798,7 @@ ha_innobase::info(
stats.auto_increment_value = innobase_peek_autoinc();
}
func_exit:
prebuilt->trx->op_info = (char*)"";
DBUG_RETURN(0);
@ -9255,7 +9256,8 @@ ha_innobase::store_lock(
&& (sql_command == SQLCOM_INSERT_SELECT
|| sql_command == SQLCOM_REPLACE_SELECT
|| sql_command == SQLCOM_UPDATE
|| sql_command == SQLCOM_CREATE_TABLE)) {
|| sql_command == SQLCOM_CREATE_TABLE
|| sql_command == SQLCOM_SET_OPTION)) {
/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
@ -9263,9 +9265,9 @@ ha_innobase::store_lock(
is not set to serializable and MySQL is doing
INSERT INTO...SELECT or REPLACE INTO...SELECT
or UPDATE ... = (SELECT ...) or CREATE ...
SELECT... without FOR UPDATE or IN SHARE
MODE in select, then we use consistent read
for select. */
SELECT... or SET ... = (SELECT ...) without
FOR UPDATE or IN SHARE MODE in select,
then we use consistent read for select. */
prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;

View file

@ -180,6 +180,7 @@ btr_search_update_hash_on_delete(
btr_cur_t* cursor);/*!< in: cursor which was positioned on the
record to delete using btr_cur_search_...,
the record is not yet deleted */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return TRUE if ok */
@ -187,6 +188,9 @@ UNIV_INTERN
ibool
btr_search_validate(void);
/*======================*/
#else
# define btr_search_validate() TRUE
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */

View file

@ -186,6 +186,7 @@ ha_remove_all_nodes_to_page(
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: fold value */
const page_t* page); /*!< in: buffer page */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
@ -196,6 +197,7 @@ ha_validate(
hash_table_t* table, /*!< in: hash table */
ulint start_index, /*!< in: start index */
ulint end_index); /*!< in: end index */
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
/*************************************************************//**
Prints info of a hash table. */
UNIV_INTERN

View file

@ -46,7 +46,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 0
#define INNODB_VERSION_BUGFIX 11
#define INNODB_VERSION_BUGFIX 12
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;

View file

@ -113,7 +113,8 @@ ut_test_malloc(
ulint n); /*!< in: try to allocate this many bytes */
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
Frees a memory block allocated with ut_malloc. */
Frees a memory block allocated with ut_malloc. Freeing a NULL pointer is
a nop. */
UNIV_INTERN
void
ut_free(

View file

@ -2690,7 +2690,6 @@ row_sel_store_mysql_rec(
ut_ad(prebuilt->mysql_template);
ut_ad(prebuilt->default_rec);
ut_ad(rec_offs_validate(rec, NULL, offsets));
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
if (UNIV_LIKELY_NULL(prebuilt->blob_heap)) {
mem_heap_free(prebuilt->blob_heap);
@ -3611,6 +3610,7 @@ row_search_for_mysql(
row_sel_try_search_shortcut_for_mysql().
The latch will not be released until
mtr_commit(&mtr). */
ut_ad(!rec_get_deleted_flag(rec, comp));
if (!row_sel_store_mysql_rec(buf, prebuilt,
rec, offsets)) {
@ -4238,7 +4238,7 @@ no_gap_lock:
rec = old_vers;
}
} else if (!lock_sec_rec_cons_read_sees(rec, trx->read_view)) {
} else {
/* We are looking into a non-clustered index,
and to get the right version of the record we
have to look also into the clustered index: this
@ -4246,8 +4246,12 @@ no_gap_lock:
information via the clustered index record. */
ut_ad(index != clust_index);
ut_ad(!dict_index_is_clust(index));
goto requires_clust_rec;
if (!lock_sec_rec_cons_read_sees(
rec, trx->read_view)) {
goto requires_clust_rec;
}
}
}
@ -4370,8 +4374,13 @@ requires_clust_rec:
ULINT_UNDEFINED, &heap);
result_rec = rec;
}
/* result_rec can legitimately be delete-marked
now that it has been established that it points to a
clustered index record that exists in the read view. */
} else {
result_rec = rec;
ut_ad(!rec_get_deleted_flag(rec, comp));
}
/* We found a qualifying record 'result_rec'. At this point,

View file

@ -1938,7 +1938,8 @@ trx_undo_update_cleanup(
UT_LIST_ADD_FIRST(undo_list, rseg->update_undo_cached, undo);
} else {
ut_ad(undo->state == TRX_UNDO_TO_PURGE);
ut_ad(undo->state == TRX_UNDO_TO_PURGE
|| undo->state == TRX_UNDO_TO_FREE);
trx_undo_mem_free(undo);
}

View file

@ -290,7 +290,8 @@ ut_test_malloc(
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
Frees a memory block allocated with ut_malloc. */
Frees a memory block allocated with ut_malloc. Freeing a NULL pointer is
a nop. */
UNIV_INTERN
void
ut_free(
@ -300,7 +301,9 @@ ut_free(
#ifndef UNIV_HOTBACKUP
ut_mem_block_t* block;
if (UNIV_LIKELY(srv_use_sys_malloc)) {
if (ptr == NULL) {
return;
} else if (UNIV_LIKELY(srv_use_sys_malloc)) {
free(ptr);
return;
}