auto-merge
commit 3449f393e0
41 changed files with 763 additions and 127 deletions
@@ -1165,6 +1165,7 @@ void free_used_memory()
  mysql_server_end();

  /* Don't use DBUG after mysql_server_end() */
  DBUG_VIOLATION_HELPER_LEAVE;
  return;
}

@@ -2487,7 +2488,7 @@ void do_source(struct st_command *command)
  }

  dynstr_free(&ds_filename);
  return;
  DBUG_VOID_RETURN;
}

@@ -7507,6 +7508,8 @@ static void init_signal_handling(void)
#endif
  sigaction(SIGILL, &sa, NULL);
  sigaction(SIGFPE, &sa, NULL);

  DBUG_VOID_RETURN;
}

#endif /* !__WIN__ */

@@ -8121,6 +8124,8 @@ void do_get_replace_column(struct st_command *command)
  }
  my_free(start, MYF(0));
  command->last_argument= command->end;

  DBUG_VOID_RETURN;
}
@@ -16,6 +16,29 @@
#ifndef _dbug_h
#define _dbug_h

#if defined(__cplusplus) && !defined(DBUG_OFF)
class Dbug_violation_helper
{
public:
  inline Dbug_violation_helper() :
    _entered(TRUE)
  { }

  inline ~Dbug_violation_helper()
  {
    assert(!_entered);
  }

  inline void leave()
  {
    _entered= FALSE;
  }

private:
  bool _entered;
};
#endif /* C++ */

#ifdef __cplusplus
extern "C" {
#endif

@@ -47,11 +70,31 @@ extern void _db_lock_file_(void);
extern void _db_unlock_file_(void);
extern FILE *_db_fp_(void);

#define DBUG_ENTER(a) const char *_db_func_, *_db_file_; uint _db_level_; \
        char **_db_framep_; \
        _db_enter_ (a,__FILE__,__LINE__,&_db_func_,&_db_file_,&_db_level_, \
                    &_db_framep_)
#ifdef __cplusplus

#define DBUG_ENTER(a) \
        const char *_db_func_, *_db_file_; \
        uint _db_level_; \
        char **_db_framep_; \
        Dbug_violation_helper dbug_violation_helper; \
        _db_enter_ (a, __FILE__, __LINE__, &_db_func_, &_db_file_, \
                    &_db_level_, &_db_framep_)
#define DBUG_VIOLATION_HELPER_LEAVE dbug_violation_helper.leave()

#else /* C */

#define DBUG_ENTER(a) \
        const char *_db_func_, *_db_file_; \
        uint _db_level_; \
        char **_db_framep_; \
        _db_enter_ (a, __FILE__, __LINE__, &_db_func_, &_db_file_, \
                    &_db_level_, &_db_framep_)
#define DBUG_VIOLATION_HELPER_LEAVE do { } while(0)

#endif /* C++ */

#define DBUG_LEAVE \
        DBUG_VIOLATION_HELPER_LEAVE; \
        _db_return_ (__LINE__, &_db_func_, &_db_file_, &_db_level_)
#define DBUG_RETURN(a1) do {DBUG_LEAVE; return(a1);} while(0)
#define DBUG_VOID_RETURN do {DBUG_LEAVE; return;} while(0)

@@ -85,6 +128,7 @@ extern FILE *_db_fp_(void);

#define DBUG_ENTER(a1)
#define DBUG_LEAVE
#define DBUG_VIOLATION_HELPER_LEAVE
#define DBUG_RETURN(a1) do { return(a1); } while(0)
#define DBUG_VOID_RETURN do { return; } while(0)
#define DBUG_EXECUTE(keyword,a1) do { } while(0)
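The Dbug_violation_helper above is a plain RAII leave-guard: DBUG_ENTER plants one on the stack, DBUG_VIOLATION_HELPER_LEAVE (invoked from DBUG_LEAVE) disarms it, and a bare `return` that bypasses DBUG_RETURN trips the assert in the destructor. A minimal standalone sketch of the same idea (the names here are illustrative, not the ones in the DBUG header):

#include <cassert>

// Sketch of an RAII "paired-exit" guard, assuming the same contract as
// Dbug_violation_helper: armed on scope entry, disarmed on sanctioned exit.
class Leave_guard
{
public:
  Leave_guard() : entered_(true) {}
  ~Leave_guard() { assert(!entered_); }   // fires on a bare `return`
  void leave() { entered_ = false; }      // what DBUG_VIOLATION_HELPER_LEAVE does
private:
  bool entered_;
};

#define ENTER()   Leave_guard leave_guard_
#define RETURN(v) do { leave_guard_.leave(); return (v); } while (0)

int good(int x)
{
  ENTER();
  RETURN(x + 1);      // disarms the guard, then returns
}

int bad(int x)
{
  ENTER();
  return x + 1;       // skips the macro: the destructor's assert aborts (debug build)
}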
@@ -163,5 +163,81 @@ show create table t1;
connection master;
drop table t1;

# End cleanup
#
# BUG#45999 Row based replication fails when auto_increment field = 0.
# The slave's storage engine auto-generates a new sequence number for an
# auto_increment field whose value is 0, which makes slave and master
# inconsistent when the master runs with MODE_NO_AUTO_VALUE_ON_ZERO.
#
source include/master-slave-reset.inc;

connection master;
--disable_warnings
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
--enable_warnings

eval CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=$engine_type;
eval CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=$engine_type2;
SET SQL_MODE='';
# Value of the id will be 1
INSERT INTO t1 VALUES(NULL);
INSERT INTO t2 VALUES(NULL);
SELECT * FROM t1;
SELECT * FROM t2;
# Value of the id will be 2
INSERT INTO t1 VALUES();
INSERT INTO t2 VALUES();
SELECT * FROM t1;
SELECT * FROM t2;
# Value of the id will be 3. The master treats 0 as NULL or empty because
# NO_AUTO_VALUE_ON_ZERO is not assigned to SQL_MODE.
INSERT INTO t1 VALUES(0);
INSERT INTO t2 VALUES(0);
SELECT * FROM t1;
SELECT * FROM t2;

SET SQL_MODE=NO_AUTO_VALUE_ON_ZERO;
# Value of the id will be 0. The master does not treat 0 as NULL or empty
# because NO_AUTO_VALUE_ON_ZERO has been assigned to SQL_MODE.
INSERT INTO t1 VALUES(0);
INSERT INTO t2 VALUES(0);
SELECT * FROM t1;
SELECT * FROM t2;

INSERT INTO t1 VALUES(4);
INSERT INTO t2 VALUES(4);
FLUSH LOGS;
sync_slave_with_master;

let $diff_table_1= master:test.t1;
let $diff_table_2= slave:test.t1;
source include/diff_tables.inc;

let $diff_table_1= master:test.t2;
let $diff_table_2= slave:test.t2;
source include/diff_tables.inc;

connection master;
DROP TABLE t1;
DROP TABLE t2;
sync_slave_with_master;

connection master;
let $MYSQLD_DATADIR= `SELECT @@DATADIR`;
--exec $MYSQL_BINLOG $MYSQLD_DATADIR/master-bin.000001 | $MYSQL test
sync_slave_with_master;

let $diff_table_1= master:test.t1;
let $diff_table_2= slave:test.t1;
source include/diff_tables.inc;

let $diff_table_1= master:test.t2;
let $diff_table_2= slave:test.t2;
source include/diff_tables.inc;

# End cleanup
DROP TABLE t1;
DROP TABLE t2;
SET SQL_MODE='';
sync_slave_with_master;
@@ -12695,3 +12695,25 @@ a b
1 NULL
2 NULL
DROP TABLE t1;
CREATE TABLE t1(a INT, b BLOB) ENGINE=archive;
SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
DATA_LENGTH AVG_ROW_LENGTH
8666 15
INSERT INTO t1 VALUES(1, 'sampleblob1'),(2, 'sampleblob2');
SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
DATA_LENGTH AVG_ROW_LENGTH
8700 4350
DROP TABLE t1;
SET @save_join_buffer_size= @@join_buffer_size;
SET @@join_buffer_size= 8228;
CREATE TABLE t1(a CHAR(255)) ENGINE=archive;
INSERT INTO t1 VALUES('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
SELECT COUNT(t1.a) FROM t1, t1 a, t1 b, t1 c, t1 d, t1 e;
COUNT(t1.a)
729
DROP TABLE t1;
SET @@join_buffer_size= @save_join_buffer_size;
@@ -763,4 +763,34 @@ a b d c
1 2 0 2
1 2 0 3
DROP TABLE t1;
#
# Bug #46159: simple query that never returns
#
SET @old_max_heap_table_size = @@max_heap_table_size;
SET @@max_heap_table_size = 16384;
SET @old_sort_buffer_size = @@sort_buffer_size;
SET @@sort_buffer_size = 32804;
CREATE TABLE t1(c1 int, c2 VARCHAR(20));
INSERT INTO t1 VALUES (1, '1'), (1, '1'), (2, '2'), (3, '1'), (3, '1'), (4, '4');
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
SELECT c1, c2, COUNT(*) FROM t1 GROUP BY c1 LIMIT 4;
c1 c2 COUNT(*)
1 1 2
2 2 1
3 1 2
4 4 1
SELECT DISTINCT c2 FROM t1 GROUP BY c1 HAVING COUNT(*) > 1;
c2
1
5
DROP TABLE t1;
SET @@sort_buffer_size = @old_sort_buffer_size;
SET @@max_heap_table_size = @old_max_heap_table_size;
End of 5.1 tests
@@ -2534,6 +2534,15 @@ SELECT LOAD_FILE(a) FROM t1;
LOAD_FILE(a)
NULL
DROP TABLE t1;
CREATE TABLE t1 (f2 VARCHAR(20));
CREATE TABLE t2 (f2 VARCHAR(20));
INSERT INTO t1 VALUES ('MIN'),('MAX');
INSERT INTO t2 VALUES ('LOAD');
SELECT CONCAT_WS('_', (SELECT t2.f2 FROM t2), t1.f2) AS concat_name FROM t1;
concat_name
LOAD_MIN
LOAD_MAX
DROP TABLE t1, t2;
End of 5.0 tests
drop table if exists t1;
create table t1(f1 tinyint default null)engine=myisam;
mysql-test/r/lowercase_mixed_tmpdir_innodb.result (new executable file, 6 lines)
@@ -0,0 +1,6 @@
drop table if exists t1;
create table t1 (id int) engine=InnoDB;
insert into t1 values (1);
create temporary table t2 engine=InnoDB select * from t1;
drop temporary table t2;
drop table t1;
@@ -270,3 +270,7 @@ SUBPARTITION BY KEY (char_column)
SUBPARTITIONS 2
(PARTITION p1 VALUES LESS THAN (5) ENGINE = MyISAM) */
drop table t1;
CREATE TABLE t1 (a INT) ENGINE=InnoDB
PARTITION BY list(a) (PARTITION p1 VALUES IN (1));
CREATE INDEX i1 ON t1 (a);
DROP TABLE t1;
mysql-test/r/subselect4.result (new file, 30 lines)
@@ -0,0 +1,30 @@
#
# Bug #46791: Assertion failed:(table->key_read==0),function unknown
# function,file sql_base.cc
#
CREATE TABLE t1 (a INT, b INT, KEY(a));
INSERT INTO t1 VALUES (1,1),(2,2);
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,1),(2,2);
CREATE TABLE t3 LIKE t1;
# should have 1 impossible where and 2 dependent subqueries
EXPLAIN
SELECT 1 FROM t1
WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
ORDER BY count(*);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 5 NULL 2 Using index; Using temporary
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
# should not crash the next statement
SELECT 1 FROM t1
WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
ORDER BY count(*);
1
1
# should not crash: the crash is caused by the previous statement
SELECT 1;
1
1
DROP TABLE t1,t2,t3;
End of 5.0 tests.
@@ -313,4 +313,9 @@ ERROR 22001: Data too long for column 'c_tinytext' at row 1
insert into t2 values(@q);
ERROR 22001: Data too long for column 'c_tinyblob' at row 1
drop table t1, t2;
DROP TABLE t1;
ERROR 42S02: Unknown table 't1'
SHOW ERRORS;
Level Code Message
Error 1051 Unknown table 't1'
End of 5.0 tests
@@ -244,3 +244,71 @@ t1 CREATE TABLE `t1` (
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
drop table t1;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=innodb;
CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=myisam;
SET SQL_MODE='';
INSERT INTO t1 VALUES(NULL);
INSERT INTO t2 VALUES(NULL);
SELECT * FROM t1;
id
1
SELECT * FROM t2;
id
1
INSERT INTO t1 VALUES();
INSERT INTO t2 VALUES();
SELECT * FROM t1;
id
1
2
SELECT * FROM t2;
id
1
2
INSERT INTO t1 VALUES(0);
INSERT INTO t2 VALUES(0);
SELECT * FROM t1;
id
1
2
3
SELECT * FROM t2;
id
1
2
3
SET SQL_MODE=NO_AUTO_VALUE_ON_ZERO;
INSERT INTO t1 VALUES(0);
INSERT INTO t2 VALUES(0);
SELECT * FROM t1;
id
0
1
2
3
SELECT * FROM t2;
id
0
1
2
3
INSERT INTO t1 VALUES(4);
INSERT INTO t2 VALUES(4);
FLUSH LOGS;
Comparing tables master:test.t1 and slave:test.t1
Comparing tables master:test.t2 and slave:test.t2
DROP TABLE t1;
DROP TABLE t2;
Comparing tables master:test.t1 and slave:test.t1
Comparing tables master:test.t2 and slave:test.t2
DROP TABLE t1;
DROP TABLE t2;
SET SQL_MODE='';
@@ -129,6 +129,9 @@ CREATE DATABASE bug42217_db;
GRANT CREATE ROUTINE ON bug42217_db.* TO 'create_rout_db'@'localhost'
IDENTIFIED BY 'create_rout_db' WITH GRANT OPTION;

-- sync_slave_with_master
-- connection master

connect (create_rout_db_master, localhost, create_rout_db, create_rout_db, bug42217_db,$MASTER_MYPORT,);
connect (create_rout_db_slave, localhost, create_rout_db, create_rout_db, bug42217_db, $SLAVE_MYPORT,);
@@ -23,6 +23,8 @@ disconnect con_temp;
--source include/wait_until_disconnected.inc

connection master;
-- let $wait_binlog_event= DROP
-- source include/wait_for_binlog_event.inc
sync_slave_with_master;

connection slave;
@@ -1599,3 +1599,27 @@ INSERT INTO t1 VALUES (NULL, NULL),(NULL, NULL);
FLUSH TABLE t1;
SELECT * FROM t1 ORDER BY a;
DROP TABLE t1;

#
# BUG#29203 - archive tables have weird values in show table status
#
CREATE TABLE t1(a INT, b BLOB) ENGINE=archive;
SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
INSERT INTO t1 VALUES(1, 'sampleblob1'),(2, 'sampleblob2');
SELECT DATA_LENGTH, AVG_ROW_LENGTH FROM
INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test';
DROP TABLE t1;

#
# BUG#46961 - archive engine loses rows during self joining select!
#
SET @save_join_buffer_size= @@join_buffer_size;
SET @@join_buffer_size= 8228;
CREATE TABLE t1(a CHAR(255)) ENGINE=archive;
INSERT INTO t1 VALUES('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
SELECT COUNT(t1.a) FROM t1, t1 a, t1 b, t1 c, t1 d, t1 e;
DROP TABLE t1;
SET @@join_buffer_size= @save_join_buffer_size;
@@ -573,4 +573,44 @@ SELECT DISTINCT a, b, d, c FROM t1;

DROP TABLE t1;

--echo #
--echo # Bug #46159: simple query that never returns
--echo #

# Set max_heap_table_size to the minimum value so that the GROUP BY table in
# the SELECT query below gets converted to MyISAM
SET @old_max_heap_table_size = @@max_heap_table_size;
SET @@max_heap_table_size = 16384;

# Set sort_buffer_size to the minimum value so that remove_duplicates() calls
# remove_dup_with_compare()
SET @old_sort_buffer_size = @@sort_buffer_size;
SET @@sort_buffer_size = 32804;

CREATE TABLE t1(c1 int, c2 VARCHAR(20));
INSERT INTO t1 VALUES (1, '1'), (1, '1'), (2, '2'), (3, '1'), (3, '1'), (4, '4');
# Now we just need to pad the table with random data so we have enough unique
# values to force conversion of the GROUP BY table to MyISAM
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;
INSERT INTO t1 SELECT 5 + 10000 * RAND(), '5' FROM t1;

# First rows of the GROUP BY table that will be processed by
# remove_dup_with_compare()
SELECT c1, c2, COUNT(*) FROM t1 GROUP BY c1 LIMIT 4;

# The actual test case
SELECT DISTINCT c2 FROM t1 GROUP BY c1 HAVING COUNT(*) > 1;

# Cleanup
DROP TABLE t1;
SET @@sort_buffer_size = @old_sort_buffer_size;
SET @@max_heap_table_size = @old_max_heap_table_size;

--echo End of 5.1 tests
@@ -1291,6 +1291,19 @@ INSERT INTO t1 VALUES ('aaaaaaaa');
SELECT LOAD_FILE(a) FROM t1;
DROP TABLE t1;

#
# Bug#46815 CONCAT_WS returning wrong data
#
CREATE TABLE t1 (f2 VARCHAR(20));
CREATE TABLE t2 (f2 VARCHAR(20));

INSERT INTO t1 VALUES ('MIN'),('MAX');
INSERT INTO t2 VALUES ('LOAD');

SELECT CONCAT_WS('_', (SELECT t2.f2 FROM t2), t1.f2) AS concat_name FROM t1;

DROP TABLE t1, t2;

--echo End of 5.0 tests
mysql-test/t/lowercase_mixed_tmpdir_innodb-master.opt (new file, 2 lines)
@@ -0,0 +1,2 @@
--lower-case-table-names=2
--tmpdir=$MYSQLTEST_VARDIR/tmp/MixedCase
mysql-test/t/lowercase_mixed_tmpdir_innodb-master.sh (new file, 6 lines)
@@ -0,0 +1,6 @@
# This test requires a non-lowercase tmpdir directory on a case-sensitive
# filesystem.

d="$MYSQLTEST_VARDIR/tmp/MixedCase"
test -d "$d" || mkdir "$d"
rm -f "$d"/*
mysql-test/t/lowercase_mixed_tmpdir_innodb.test (new file, 12 lines)
@@ -0,0 +1,12 @@
--source include/have_lowercase2.inc
--source include/have_innodb.inc

--disable_warnings
drop table if exists t1;
--enable_warnings

create table t1 (id int) engine=InnoDB;
insert into t1 values (1);
create temporary table t2 engine=InnoDB select * from t1;
drop temporary table t2;
drop table t1;
@@ -287,3 +287,15 @@ PARTITION BY RANGE (int_column)
(PARTITION p1 VALUES LESS THAN (5));
show create table t1;
drop table t1;

#
# BUG#46483 - drop table of partitioned table may leave extraneous file
# Note: was only repeatable with the InnoDB plugin
#
CREATE TABLE t1 (a INT) ENGINE=InnoDB
PARTITION BY list(a) (PARTITION p1 VALUES IN (1));
CREATE INDEX i1 ON t1 (a);
DROP TABLE t1;
let $MYSQLD_DATADIR= `SELECT @@datadir`;
# Before the fix this would show an extra file like #sql-2405_2.par
--list_files $MYSQLD_DATADIR/test/ *
mysql-test/t/subselect4.test (new file, 32 lines)
@@ -0,0 +1,32 @@
# General purpose bug fix tests go here: subselect.test is too large


--echo #
--echo # Bug #46791: Assertion failed:(table->key_read==0),function unknown
--echo # function,file sql_base.cc
--echo #

CREATE TABLE t1 (a INT, b INT, KEY(a));
INSERT INTO t1 VALUES (1,1),(2,2);
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,1),(2,2);
CREATE TABLE t3 LIKE t1;

--echo # should have 1 impossible where and 2 dependent subqueries
EXPLAIN
SELECT 1 FROM t1
WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
ORDER BY count(*);

--echo # should not crash the next statement
SELECT 1 FROM t1
WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3))
ORDER BY count(*);

--echo # should not crash: the crash is caused by the previous statement
SELECT 1;

DROP TABLE t1,t2,t3;


--echo End of 5.0 tests.
@@ -225,4 +225,11 @@ insert into t2 values(@q);

drop table t1, t2;

#
# Bug#42364 SHOW ERRORS returns empty resultset after dropping non-existent table
#
--error ER_BAD_TABLE_ERROR
DROP TABLE t1;
SHOW ERRORS;

--echo End of 5.0 tests
@@ -239,6 +239,7 @@ void ha_partition::init_handler_variables()
  m_curr_key_info[0]= NULL;
  m_curr_key_info[1]= NULL;
  is_clone= FALSE,
  m_part_func_monotonicity_info= NON_MONOTONIC;
  auto_increment_lock= FALSE;
  auto_increment_safe_stmt_log_lock= FALSE;
  /*

@@ -2465,11 +2466,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
    }
  }

  /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
  if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
    DBUG_RETURN(1);
  bitmap_clear_all(&m_bulk_insert_started);
  /* Initialize the bitmap we use to determine what partitions are used */
  if (!is_clone)
  {
    if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
    {
      bitmap_free(&m_bulk_insert_started);
      DBUG_RETURN(1);
    }
    bitmap_set_all(&(m_part_info->used_partitions));
  }

@@ -2553,12 +2561,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
    calling open on all individual handlers.
  */
  m_handler_status= handler_opened;
  if (m_part_info->part_expr)
    m_part_func_monotonicity_info=
      m_part_info->part_expr->get_monotonicity_info();
  else if (m_part_info->list_of_part_fields)
    m_part_func_monotonicity_info= MONOTONIC_STRICT_INCREASING;
  info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
  DBUG_RETURN(0);

err_handler:
  while (file-- != m_file)
    (*file)->close();
  bitmap_free(&m_bulk_insert_started);
  if (!is_clone)
    bitmap_free(&(m_part_info->used_partitions));

@@ -2606,6 +2620,7 @@ int ha_partition::close(void)

  DBUG_ASSERT(table->s == table_share);
  delete_queue(&m_queue);
  bitmap_free(&m_bulk_insert_started);
  if (!is_clone)
    bitmap_free(&(m_part_info->used_partitions));
  file= m_file;

@@ -3022,6 +3037,8 @@ int ha_partition::write_row(uchar * buf)
  }
  m_last_part= part_id;
  DBUG_PRINT("info", ("Insert in partition %d", part_id));
  start_part_bulk_insert(part_id);

  tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
  error= m_file[part_id]->ha_write_row(buf);
  if (have_auto_increment && !table->s->next_number_keypart)

@@ -3084,6 +3101,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
  }

  m_last_part= new_part_id;
  start_part_bulk_insert(new_part_id);
  if (new_part_id == old_part_id)
  {
    DBUG_PRINT("info", ("Update in partition %d", new_part_id));

@@ -3248,22 +3266,65 @@ int ha_partition::delete_all_rows()
  DESCRIPTION
    rows == 0 means we will probably insert many rows
*/

void ha_partition::start_bulk_insert(ha_rows rows)
{
  handler **file;
  DBUG_ENTER("ha_partition::start_bulk_insert");

  rows= rows ? rows/m_tot_parts + 1 : 0;
  file= m_file;
  do
  {
    (*file)->ha_start_bulk_insert(rows);
  } while (*(++file));
  m_bulk_inserted_rows= 0;
  bitmap_clear_all(&m_bulk_insert_started);
  /* use the last bit for marking if bulk_insert_started was called */
  bitmap_set_bit(&m_bulk_insert_started, m_tot_parts);
  DBUG_VOID_RETURN;
}


/*
  Check if start_bulk_insert has been called for this partition;
  if not, call it and mark it as called.
*/
void ha_partition::start_part_bulk_insert(uint part_id)
{
  if (!bitmap_is_set(&m_bulk_insert_started, part_id) &&
      bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
  {
    m_file[part_id]->ha_start_bulk_insert(guess_bulk_insert_rows());
    bitmap_set_bit(&m_bulk_insert_started, part_id);
  }
  m_bulk_inserted_rows++;
}


/*
  Try to predict the number of inserts into this partition.

  If fewer than 10 rows (including 0, which means unknown),
  just give that as the guess.
  If a monotonic partitioning function was used,
  guess that 50% of the inserts go to the first partition.
  For all other cases, guess an equal distribution between the partitions.
*/
ha_rows ha_partition::guess_bulk_insert_rows()
{
  DBUG_ENTER("guess_bulk_insert_rows");

  if (estimation_rows_to_insert < 10)
    DBUG_RETURN(estimation_rows_to_insert);

  /* If first insert/partition and monotonic partition function, guess 50%. */
  if (!m_bulk_inserted_rows &&
      m_part_func_monotonicity_info != NON_MONOTONIC &&
      m_tot_parts > 1)
    DBUG_RETURN(estimation_rows_to_insert / 2);

  /* Else guess on equal distribution (+1 is to avoid returning 0/Unknown) */
  if (m_bulk_inserted_rows < estimation_rows_to_insert)
    DBUG_RETURN(((estimation_rows_to_insert - m_bulk_inserted_rows)
                / m_tot_parts) + 1);
  /* The estimation was wrong, must say 'Unknown' */
  DBUG_RETURN(0);
}


/*
  Finish a large batch of insert rows

@@ -3273,21 +3334,29 @@ void ha_partition::start_bulk_insert(ha_rows rows)
  RETURN VALUE
    >0 Error code
    0  Success

  Note: end_bulk_insert can be called without start_bulk_insert
        being called; see bug#44108.
*/

int ha_partition::end_bulk_insert()
{
  int error= 0;
  handler **file;
  uint i;
  DBUG_ENTER("ha_partition::end_bulk_insert");

  file= m_file;
  do
  if (!bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
    DBUG_RETURN(error);

  for (i= 0; i < m_tot_parts; i++)
  {
    int tmp;
    if ((tmp= (*file)->ha_end_bulk_insert()))
    if (bitmap_is_set(&m_bulk_insert_started, i) &&
        (tmp= m_file[i]->ha_end_bulk_insert()))
      error= tmp;
  } while (*(++file));
  }
  bitmap_clear_all(&m_bulk_insert_started);
  DBUG_RETURN(error);
}
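The pattern above defers ha_start_bulk_insert on each underlying partition handler until the first row actually lands in that partition, using one bitmap bit per partition plus a sentinel bit ("the last bit") meaning a bulk insert is active at all. A minimal standalone sketch of that bookkeeping, with std::vector<bool> standing in for MY_BITMAP and the monotonic-function 50% branch omitted for brevity (all names here are illustrative):

#include <algorithm>
#include <cstdint>
#include <vector>

// Sketch of lazy per-partition bulk-insert bookkeeping, assuming the same
// scheme as ha_partition: one flag per partition plus a sentinel flag that
// records whether a bulk insert is currently active.
class BulkInsertTracker
{
public:
  explicit BulkInsertTracker(unsigned tot_parts)
    : started_(tot_parts + 1, false), inserted_rows_(0), estimated_rows_(0) {}

  void start_bulk_insert(uint64_t estimated_rows)
  {
    std::fill(started_.begin(), started_.end(), false);
    started_.back() = true;            // sentinel: bulk insert is active
    inserted_rows_ = 0;
    estimated_rows_ = estimated_rows;
  }

  // Called per row. Returns the row estimate to pass down if this partition
  // still needs its own start_bulk_insert call, or 0 when nothing to do.
  uint64_t row_lands_in(unsigned part_id)
  {
    uint64_t guess = 0;
    if (started_.back() && !started_[part_id])
    {
      guess = guess_rows();
      started_[part_id] = true;
    }
    ++inserted_rows_;
    return guess;
  }

private:
  uint64_t guess_rows() const
  {
    unsigned parts = static_cast<unsigned>(started_.size() - 1);
    if (estimated_rows_ < 10)                 // tiny/unknown: pass through
      return estimated_rows_;
    if (inserted_rows_ < estimated_rows_)     // assume even spread; +1 avoids 0
      return (estimated_rows_ - inserted_rows_) / parts + 1;
    return 0;                                 // estimate exhausted: "unknown"
  }

  std::vector<bool> started_;
  uint64_t inserted_rows_;
  uint64_t estimated_rows_;
};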
@@ -176,6 +176,11 @@ private:
    This is to ensure it will work with statement based replication.
  */
  bool auto_increment_safe_stmt_log_lock;
  /** For optimizing ha_start_bulk_insert calls */
  MY_BITMAP m_bulk_insert_started;
  ha_rows m_bulk_inserted_rows;
  /** used for prediction of start_bulk_insert rows */
  enum_monotonicity_info m_part_func_monotonicity_info;
public:
  handler *clone(MEM_ROOT *mem_root);
  virtual void set_part_info(partition_info *part_info)

@@ -353,7 +358,6 @@ public:
    Bulk inserts are supported if all underlying handlers support it.
    start_bulk_insert and end_bulk_insert is called before and after a
    number of calls to write_row.
    Not yet though.
  */
  virtual int write_row(uchar * buf);
  virtual int update_row(const uchar * old_data, uchar * new_data);

@@ -361,6 +365,10 @@ public:
  virtual int delete_all_rows(void);
  virtual void start_bulk_insert(ha_rows rows);
  virtual int end_bulk_insert();
private:
  ha_rows guess_bulk_insert_rows();
  void start_part_bulk_insert(uint part_id);
public:

  virtual bool is_fatal_error(int error, uint flags)
  {
@@ -1885,12 +1885,42 @@ bool ha_flush_logs(handlerton *db_type)
  return FALSE;
}


/**
  @brief Make a canonical filename.

  @param[in]  file      table handler
  @param[in]  path      original path
  @param[out] tmp_path  buffer for the canonized path

  @details Lower-case the db name and table name parts of the path for
           non file-based tables when lower_case_table_names is 2
           (store as-is, compare in lower case). The filesystem path
           prefix (mysql_data_home or tmpdir) is left intact.

  @note tmp_path may be left intact if no conversion was performed.

  @retval canonized path

  @todo This may be done more efficiently when the table path gets
        built. Convert this function to something like
        ASSERT_CANONICAL_FILENAME.
*/
const char *get_canonical_filename(handler *file, const char *path,
                                   char *tmp_path)
{
  uint i;
  if (lower_case_table_names != 2 || (file->ha_table_flags() & HA_FILE_BASED))
    return path;

  for (i= 0; i <= mysql_tmpdir_list.max; i++)
  {
    if (is_prefix(path, mysql_tmpdir_list.list[i]))
      return path;
  }

  /* Ensure that the table handler gets the path in lower case */
  if (tmp_path != path)
    strmov(tmp_path, path);
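In outline, the conversion leaves any recognised filesystem prefix untouched and lower-cases only the db/table portion that follows it. A hedged standalone sketch of that string transformation (this is not the server's implementation, which works in place on char buffers with my_casedn_str; the "last two separators" heuristic below is an assumption made for the sketch):

#include <algorithm>
#include <cctype>
#include <string>
#include <vector>

// Sketch: canonicalize a table path by lower-casing everything after a
// known filesystem prefix, mirroring the intent of get_canonical_filename.
std::string canonical_filename(const std::string &path,
                               const std::vector<std::string> &skip_prefixes)
{
  // Paths under tmpdir-like prefixes are returned unchanged.
  for (const std::string &p : skip_prefixes)
    if (path.compare(0, p.size(), p) == 0)
      return path;

  std::string out = path;
  // Assumption: the data-home prefix ends before the trailing .../db/table
  // components; the real code tracks the prefix explicitly instead.
  std::size_t db_start = out.rfind('/', out.rfind('/') - 1);
  if (db_start == std::string::npos)
    db_start = 0;
  std::transform(out.begin() + db_start, out.end(), out.begin() + db_start,
                 [](unsigned char c) { return std::tolower(c); });
  return out;
}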
@@ -631,6 +631,7 @@ String *Item_func_concat_ws::val_str(String *str)
  String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff),default_charset_info),
         *sep_str, *res, *res2,*use_as_buff;
  uint i;
  bool is_const= 0;

  null_value=0;
  if (!(sep_str= args[0]->val_str(&tmp_sep_str)))

@@ -644,7 +645,11 @@ String *Item_func_concat_ws::val_str(String *str)
  // If not, return the empty string
  for (i=1; i < arg_count; i++)
    if ((res= args[i]->val_str(str)))
    {
      is_const= args[i]->const_item() || !args[i]->used_tables();
      break;
    }

  if (i == arg_count)
    return &my_empty_string;

@@ -662,7 +667,7 @@ String *Item_func_concat_ws::val_str(String *str)
                     current_thd->variables.max_allowed_packet);
    goto null;
  }
  if (res->alloced_length() >=
  if (!is_const && res->alloced_length() >=
      res->length() + sep_str->length() + res2->length())
  { // Use old buffer
    res->append(*sep_str); // res->length() > 0 always
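The Bug#46815 fix guards the in-place append: when the first non-NULL argument is a constant (here the scalar subquery (SELECT t2.f2 FROM t2)), res points into that item's own cached buffer, and appending the separator and the next argument into it would corrupt the cached value for every subsequent row, producing LOAD_MIN and then LOAD_MIN_MAX instead of LOAD_MAX. A toy illustration of the aliasing hazard, with plain std::string standing in for the String buffers (illustrative only, not the server's classes):

#include <cassert>
#include <string>

// Toy model of the CONCAT_WS buffer-reuse bug: a "const item" hands out a
// pointer to its cached value, and appending in place mutates the cache.
struct ConstItem
{
  std::string cached = "LOAD";
  std::string *val_str() { return &cached; }   // caller must not mutate
};

std::string concat_ws_buggy(ConstItem &c, const std::string &next)
{
  std::string *res = c.val_str();
  *res += "_" + next;        // BUG: corrupts the cache for later rows
  return *res;
}

std::string concat_ws_fixed(ConstItem &c, const std::string &next)
{
  const std::string *res = c.val_str();
  return *res + "_" + next;  // copy into a fresh buffer instead
}

int main()
{
  ConstItem c;
  assert(concat_ws_buggy(c, "MIN") == "LOAD_MIN");
  assert(concat_ws_buggy(c, "MAX") == "LOAD_MIN_MAX"); // wrong second row
  ConstItem c2;
  assert(concat_ws_fixed(c2, "MIN") == "LOAD_MIN");
  assert(concat_ws_fixed(c2, "MAX") == "LOAD_MAX");    // correct
  return 0;
}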
@@ -1024,14 +1024,10 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command,
  Log_event_handler **current_handler= general_log_handler_list;
  char user_host_buff[MAX_USER_HOST_SIZE + 1];
  Security_context *sctx= thd->security_ctx;
  ulong id;
  uint user_host_len= 0;
  time_t current_time;

  if (thd)
    id= thd->thread_id; /* Normal thread */
  else
    id= 0; /* Log from connect handler */
  DBUG_ASSERT(thd);

  lock_shared();
  if (!opt_log)

@@ -1050,7 +1046,7 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command,
  while (*current_handler)
    error|= (*current_handler++)->
      log_general(thd, current_time, user_host_buff,
                  user_host_len, id,
                  user_host_len, thd->thread_id,
                  command_name[(uint) command].str,
                  command_name[(uint) command].length,
                  query, query_length,
@@ -8312,6 +8312,16 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability

  /* Honor next number column if present */
  m_table->next_number_field= m_table->found_next_number_field;
  /*
    Fix for Bug#45999: in RBR, the slave's storage engine generated new
    sequence numbers for auto_increment fields whose value is 0. Whether
    a sequence number is generated is decided by
    table->auto_increment_field_not_null and by SQL_MODE (whether it
    includes MODE_NO_AUTO_VALUE_ON_ZERO) in the update_auto_increment
    function. The SQL_MODE of the slave SQL thread is always consistent
    with the master's, and in RBR auto_increment fields are never NULL.
  */
  m_table->auto_increment_field_not_null= TRUE;
  return error;
}

@@ -8321,6 +8331,7 @@ Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability *
{
  int local_error= 0;
  m_table->next_number_field=0;
  m_table->auto_increment_field_not_null= FALSE;
  if (bit_is_set(slave_exec_mode, SLAVE_EXEC_MODE_IDEMPOTENT) == 1 ||
      m_table->s->db_type()->db_type == DB_TYPE_NDBCLUSTER)
  {
@@ -4789,10 +4789,10 @@ static bool read_init_file(char *file_name)
  DBUG_ENTER("read_init_file");
  DBUG_PRINT("enter",("name: %s",file_name));
  if (!(file=my_fopen(file_name,O_RDONLY,MYF(MY_WME))))
    return(1);
    DBUG_RETURN(TRUE);
  bootstrap(file);
  (void) my_fclose(file,MYF(MY_WME));
  return 0;
  DBUG_RETURN(FALSE);
}
@@ -350,6 +350,7 @@ Rpl_filter::add_do_db(const char* table_spec)
  DBUG_ENTER("Rpl_filter::add_do_db");
  i_string *db = new i_string(table_spec);
  do_db.push_back(db);
  DBUG_VOID_RETURN;
}

@@ -359,6 +360,7 @@ Rpl_filter::add_ignore_db(const char* table_spec)
  DBUG_ENTER("Rpl_filter::add_ignore_db");
  i_string *db = new i_string(table_spec);
  ignore_db.push_back(db);
  DBUG_VOID_RETURN;
}

extern "C" uchar *get_table_key(const uchar *, size_t *, my_bool);
@@ -1238,6 +1238,7 @@ void fix_slave_exec_mode(enum_var_type type)
  }
  if (bit_is_set(slave_exec_mode_options, SLAVE_EXEC_MODE_IDEMPOTENT) == 0)
    bit_do_set(slave_exec_mode_options, SLAVE_EXEC_MODE_STRICT);
  DBUG_VOID_RETURN;
}
@@ -399,6 +399,31 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length,
  return buffer;
}


/**
  Implementation of Drop_table_error_handler::handle_error().
  The reason for having this implementation is to silence technical,
  low-level warnings during a DROP TABLE operation. Currently we don't
  want to expose the following warnings during DROP TABLE:
  - Some of the table's files are missing or invalid (the table is going
    to be deleted anyway, so why complain that something was missing);
  - A trigger associated with the table does not have a DEFINER (one of
    the MySQL specifics is that triggers are loaded for the table being
    dropped, so we may get a warning that a trigger is missing its
    DEFINER attribute during the DROP TABLE operation).

  @return TRUE if the condition is handled.
*/
bool Drop_table_error_handler::handle_error(uint sql_errno,
                                            const char *message,
                                            MYSQL_ERROR::enum_warning_level level,
                                            THD *thd)
{
  return ((sql_errno == EE_DELETE && my_errno == ENOENT) ||
          sql_errno == ER_TRG_NO_DEFINER);
}


/**
  Clear this diagnostics area.
@@ -1091,6 +1091,31 @@ public:
};


/**
  This class is an internal error handler implementation for
  DROP TABLE statements. The thing is that there may be warnings during
  execution of these statements, which should not be exposed to the user.
  This class is intended to silence such warnings.
*/

class Drop_table_error_handler : public Internal_error_handler
{
public:
  Drop_table_error_handler(Internal_error_handler *err_handler)
    :m_err_handler(err_handler)
  { }

public:
  bool handle_error(uint sql_errno,
                    const char *message,
                    MYSQL_ERROR::enum_warning_level level,
                    THD *thd);

private:
  Internal_error_handler *m_err_handler;
};


/**
  Stores status of the currently executed statement.
  Cleared at the beginning of the statement, and then
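The handler is used bracket-style around the operation that may emit the noise: the sql_table.cc hunk further down pushes it around mysql_rm_table_part2 and pops it afterwards. A hedged sketch of the same chain-of-responsibility pattern, plus an RAII wrapper so the pop cannot be forgotten (the names are illustrative, not THD's actual API, and the error codes are stand-ins):

#include <vector>

// Sketch of the internal-error-handler pattern: handlers form a stack and
// the topmost one gets first chance to swallow a condition.
struct ErrorHandler
{
  virtual ~ErrorHandler() = default;
  virtual bool handle_error(unsigned errno_code) = 0;  // true = swallowed
};

struct Session
{
  std::vector<ErrorHandler *> handlers;
  void push_handler(ErrorHandler *h) { handlers.push_back(h); }
  void pop_handler() { handlers.pop_back(); }

  void raise(unsigned errno_code)
  {
    if (!handlers.empty() && handlers.back()->handle_error(errno_code))
      return;                       // silenced, e.g. during DROP TABLE
    // ... otherwise surface a user-visible warning/error ...
  }
};

// Swallow only "file already gone" style conditions, analogous to what
// Drop_table_error_handler does for EE_DELETE/ER_TRG_NO_DEFINER.
struct DropTableNoise : ErrorHandler
{
  bool handle_error(unsigned errno_code) override
  {
    const unsigned FILE_MISSING = 1, TRIGGER_NO_DEFINER = 2;  // stand-ins
    return errno_code == FILE_MISSING || errno_code == TRIGGER_NO_DEFINER;
  }
};

// RAII guard so push/pop always pair, even on early returns.
struct ScopedHandler
{
  Session &s;
  ScopedHandler(Session &s_, ErrorHandler *h) : s(s_) { s.push_handler(h); }
  ~ScopedHandler() { s.pop_handler(); }
};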
@@ -2274,44 +2274,9 @@ void kill_delayed_threads(void)
}


/*
 * Create a new delayed insert thread
 */

pthread_handler_t handle_delayed_insert(void *arg)
static void handle_delayed_insert_impl(THD *thd, Delayed_insert *di)
{
  Delayed_insert *di=(Delayed_insert*) arg;
  THD *thd= &di->thd;

  pthread_detach_this_thread();
  /* Add thread to THD list so that it's visible in 'show processlist' */
  pthread_mutex_lock(&LOCK_thread_count);
  thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
  thd->set_current_time();
  threads.append(thd);
  thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED;
  pthread_mutex_unlock(&LOCK_thread_count);

  /*
    Wait until the client runs into pthread_cond_wait(),
    where we free it after the table is opened and di is linked in the list.
    If we did not wait here, the client might detect the opened table
    before it is linked to the list. It would release LOCK_delayed_create
    and allow another thread to create another handler for the same table,
    since it does not find one in the list.
  */
  pthread_mutex_lock(&di->mutex);
#if !defined( __WIN__) /* Win32 calls this in pthread_create */
  if (my_thread_init())
  {
    /* Can't use my_error since store_globals has not yet been called */
    thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES,
                                  ER(ER_OUT_OF_RESOURCES));
    goto end;
  }
#endif

  DBUG_ENTER("handle_delayed_insert");
  DBUG_ENTER("handle_delayed_insert_impl");
  thd->thread_stack= (char*) &thd;
  if (init_thr_lock() || thd->store_globals())
  {

@@ -2500,6 +2465,49 @@ err:
  */
  ha_autocommit_or_rollback(thd, 1);

  DBUG_VOID_RETURN;
}


/*
 * Create a new delayed insert thread
 */

pthread_handler_t handle_delayed_insert(void *arg)
{
  Delayed_insert *di=(Delayed_insert*) arg;
  THD *thd= &di->thd;

  pthread_detach_this_thread();
  /* Add thread to THD list so that it's visible in 'show processlist' */
  pthread_mutex_lock(&LOCK_thread_count);
  thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
  thd->set_current_time();
  threads.append(thd);
  thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED;
  pthread_mutex_unlock(&LOCK_thread_count);

  /*
    Wait until the client runs into pthread_cond_wait(),
    where we free it after the table is opened and di is linked in the list.
    If we did not wait here, the client might detect the opened table
    before it is linked to the list. It would release LOCK_delayed_create
    and allow another thread to create another handler for the same table,
    since it does not find one in the list.
  */
  pthread_mutex_lock(&di->mutex);
#if !defined( __WIN__) /* Win32 calls this in pthread_create */
  if (my_thread_init())
  {
    /* Can't use my_error since store_globals has not yet been called */
    thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES,
                                  ER(ER_OUT_OF_RESOURCES));
    goto end;
  }
#endif

  handle_delayed_insert_impl(thd, di);

#ifndef __WIN__
end:
#endif

@@ -2523,7 +2531,8 @@ end:

  my_thread_end();
  pthread_exit(0);
  DBUG_RETURN(0);

  return 0;
}
@@ -408,29 +408,12 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var,
}


/**
  Execute commands from bootstrap_file.

  Used when creating the initial grant tables.
*/

pthread_handler_t handle_bootstrap(void *arg)
static void handle_bootstrap_impl(THD *thd)
{
  THD *thd=(THD*) arg;
  FILE *file=bootstrap_file;
  char *buff;
  const char* found_semicolon= NULL;

  /* The following must be called before DBUG_ENTER */
  thd->thread_stack= (char*) &thd;
  if (my_thread_init() || thd->store_globals())
  {
#ifndef EMBEDDED_LIBRARY
    close_connection(thd, ER_OUT_OF_RESOURCES, 1);
#endif
    thd->fatal_error();
    goto end;
  }
  DBUG_ENTER("handle_bootstrap");

#ifndef EMBEDDED_LIBRARY

@@ -525,6 +508,33 @@ pthread_handler_t handle_bootstrap(void *arg)
#endif
  }

  DBUG_VOID_RETURN;
}


/**
  Execute commands from bootstrap_file.

  Used when creating the initial grant tables.
*/

pthread_handler_t handle_bootstrap(void *arg)
{
  THD *thd=(THD*) arg;

  /* The following must be called before DBUG_ENTER */
  thd->thread_stack= (char*) &thd;
  if (my_thread_init() || thd->store_globals())
  {
#ifndef EMBEDDED_LIBRARY
    close_connection(thd, ER_OUT_OF_RESOURCES, 1);
#endif
    thd->fatal_error();
    goto end;
  }

  handle_bootstrap_impl(thd);

end:
  net_end(&thd->net);
  thd->cleanup();

@@ -539,7 +549,8 @@ end:
  my_thread_end();
  pthread_exit(0);
#endif
  DBUG_RETURN(0);

  return 0;
}
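Both thread entry points (handle_delayed_insert and handle_bootstrap) get the same treatment: the goto-laden setup and teardown stay in a thin wrapper with no DBUG_ENTER, and the body moves into an _impl function whose DBUG_ENTER is always paired with DBUG_VOID_RETURN, so the new violation checker stays quiet. A minimal sketch of the shape, with stand-in macros rather than the real DBUG ones (illustrative names):

#include <cstdio>

// Stand-ins for the DBUG pairing macros; the point is only the shape:
// ENTER/RETURN must pair within a single function body.
#define DBUG_ENTER_(name)   std::puts(">" name)
#define DBUG_VOID_RETURN_   do { std::puts("<"); return; } while (0)

// All real work lives here, with ENTER/RETURN properly paired.
static void worker_impl(void *state)
{
  DBUG_ENTER_("worker_impl");
  (void) state;                 // ... run the event loop ...
  DBUG_VOID_RETURN_;
}

// The thread entry point keeps the goto-based setup/cleanup and therefore
// deliberately has no DBUG_ENTER of its own.
extern "C" void *worker(void *state)
{
  if (state == nullptr)         // failed init: jump straight to cleanup
    goto end;
  worker_impl(state);
end:
  // ... per-thread teardown (net_end/cleanup/my_thread_end in the server) ...
  return nullptr;
}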
@@ -1520,7 +1520,7 @@ error:

void plugin_shutdown(void)
{
  uint i, count= plugin_array.elements, free_slots= 0;
  uint i, count= plugin_array.elements;
  struct st_plugin_int **plugins, *plugin;
  struct st_plugin_dl **dl;
  DBUG_ENTER("plugin_shutdown");

@@ -1541,18 +1541,13 @@ void plugin_shutdown(void)
  while (reap_needed && (count= plugin_array.elements))
  {
    reap_plugins();
    for (i= free_slots= 0; i < count; i++)
    for (i= 0; i < count; i++)
    {
      plugin= *dynamic_element(&plugin_array, i, struct st_plugin_int **);
      switch (plugin->state) {
      case PLUGIN_IS_READY:
      if (plugin->state == PLUGIN_IS_READY)
      {
        plugin->state= PLUGIN_IS_DELETED;
        reap_needed= true;
        break;
      case PLUGIN_IS_FREED:
      case PLUGIN_IS_UNINITIALIZED:
        free_slots++;
        break;
      }
    }
    if (!reap_needed)

@@ -1565,9 +1560,6 @@ void plugin_shutdown(void)
    }
  }

  if (count > free_slots)
    sql_print_warning("Forcing shutdown of %d plugins", count - free_slots);

  plugins= (struct st_plugin_int **) my_alloca(sizeof(void*) * (count+1));

  /*

@@ -1589,8 +1581,8 @@ void plugin_shutdown(void)
    if (!(plugins[i]->state & (PLUGIN_IS_UNINITIALIZED | PLUGIN_IS_FREED |
                               PLUGIN_IS_DISABLED)))
    {
      sql_print_information("Plugin '%s' will be forced to shutdown",
                            plugins[i]->name.str);
      sql_print_warning("Plugin '%s' will be forced to shutdown",
                        plugins[i]->name.str);
      /*
        We are forcing deinit on plugins so we don't want to do a ref_count
        check until we have processed all the plugins.
@@ -1523,12 +1523,8 @@ JOIN::optimize()
    }
  }

  /*
    If this join belongs to an uncacheable subquery save
    the original join
  */
  if (select_lex->uncacheable && !is_top_level_join() &&
      init_save_join_tab())
  /* If this join belongs to an uncacheable query save the original join */
  if (select_lex->uncacheable && init_save_join_tab())
    DBUG_RETURN(-1); /* purecov: inspected */
}

@@ -13682,7 +13678,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
    if (error)
    {
      if (error == HA_ERR_RECORD_DELETED)
        continue;
      {
        error= file->rnd_next(record);
        continue;
      }
      if (error == HA_ERR_END_OF_FILE)
        break;
      goto err;
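The remove_dup_with_compare change is what stops the never-returning query of Bug #46159: on HA_ERR_RECORD_DELETED the old code looped back and re-read the same deleted row forever, while the fix advances the cursor before continuing. A sketch of the corrected scan-loop shape, using a hypothetical cursor API that mirrors rnd_next (not the handler interface itself):

// Sketch of the corrected table-scan loop: a deleted row must not leave the
// cursor parked, or the scan re-reads the same hole forever (the hang).
enum ScanStatus { SCAN_OK, SCAN_ROW_DELETED, SCAN_EOF, SCAN_ERROR };

template <typename Cursor>
int scan_all(Cursor &cur)
{
  ScanStatus st = cur.next();          // position on the first row
  while (true)
  {
    if (st == SCAN_ROW_DELETED)
    {
      st = cur.next();                 // the fix: step past the hole first
      continue;
    }
    if (st == SCAN_EOF)
      break;
    if (st == SCAN_ERROR)
      return -1;
    // ... process the current row ...
    st = cur.next();
  }
  return 0;
}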
@@ -1772,6 +1772,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
                    my_bool drop_temporary)
{
  bool error= FALSE, need_start_waiters= FALSE;
  Drop_table_error_handler err_handler(thd->get_internal_handler());
  DBUG_ENTER("mysql_rm_table");

  /* mark for close and remove all cached entries */

@@ -1792,7 +1793,10 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
    LOCK_open during wait_if_global_read_lock(), other threads could not
    close their tables. This would make a pretty deadlock.
  */
  thd->push_internal_handler(&err_handler);
  error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0);
  thd->pop_internal_handler();

  if (need_start_waiters)
    start_waiting_global_read_lock(thd);

@@ -1894,9 +1898,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
    DBUG_RETURN(1);
  }

  /* Don't give warnings for not found errors, as we already generate notes */
  thd->no_warnings_for_error= 1;

  for (table= tables; table; table= table->next_local)
  {
    char *db=table->db;

@@ -2145,7 +2146,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
err_with_placeholders:
  unlock_table_names(thd, tables, (TABLE_LIST*) 0);
  pthread_mutex_unlock(&LOCK_open);
  thd->no_warnings_for_error= 0;
  DBUG_RETURN(error);
}
@@ -412,10 +412,10 @@ int rea_create_table(THD *thd, const char *path,
  DBUG_ASSERT(*fn_rext(frm_name));
  if (thd->variables.keep_files_on_create)
    create_info->options|= HA_CREATE_KEEP_FILES;
  if (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info))
    goto err_handler;
  if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
                                                create_info,0))
  if (!create_info->frm_only &&
      (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG,
                                     create_info) ||
       ha_create_table(thd, path, db, table_name, create_info, 0)))
    goto err_handler;
  DBUG_RETURN(0);
@@ -993,6 +993,7 @@ int ha_archive::rnd_init(bool scan)
  /* We rewind the file so that we can read from the beginning if scan */
  if (scan)
  {
    scan_rows= stats.records;
    DBUG_PRINT("info", ("archive will retrieve %llu rows",
                        (unsigned long long) scan_rows));

@@ -1461,7 +1462,6 @@ int ha_archive::info(uint flag)
  stats.records= share->rows_recorded;
  pthread_mutex_unlock(&share->mutex);

  scan_rows= stats.records;
  stats.deleted= 0;

  DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));

@@ -1472,11 +1472,12 @@ int ha_archive::info(uint flag)

  VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));

  stats.mean_rec_length= table->s->reclength + buffer.alloced_length();
  stats.data_file_length= file_stat.st_size;
  stats.create_time= (ulong) file_stat.st_ctime;
  stats.update_time= (ulong) file_stat.st_mtime;
  stats.max_data_file_length= share->rows_recorded * stats.mean_rec_length;
  stats.mean_rec_length= stats.records ?
    stats.data_file_length / stats.records : table->s->reclength;
  stats.max_data_file_length= MAX_FILE_SIZE;
}
stats.delete_length= 0;
stats.index_file_length=0;
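The BUG#29203 fix changes how ha_archive::info derives AVG_ROW_LENGTH: instead of reclength plus the scratch buffer's allocated length (the source of the odd SHOW TABLE STATUS values), it divides the actual on-disk data file length by the row count, falling back to the defined record length for an empty table. The corrected rule as a tiny guarded helper (illustrative, not the handler code):

#include <cstdint>

// Sketch of the corrected AVG_ROW_LENGTH rule: derive it from the real
// on-disk size, and fall back to the table's defined record length when
// there are no rows yet (also avoids a divide-by-zero).
uint64_t mean_rec_length(uint64_t data_file_length,
                         uint64_t records,
                         uint64_t reclength_fallback)
{
  return records ? data_file_length / records : reclength_fallback;
}

// With the values from the result file above: 8700 bytes holding 2 rows
// gives AVG_ROW_LENGTH 4350; an empty table reports the fallback (15).
// mean_rec_length(8700, 2, 15) == 4350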
@@ -274,7 +274,7 @@ Suma::execSTTOR(Signal* signal) {
    jam();

    send_start_me_req(signal);
    return;
    DBUG_VOID_RETURN;
  }
}

@@ -322,7 +322,7 @@ Suma::execSTTOR(Signal* signal) {
  if (ERROR_INSERTED(13030))
  {
    ndbout_c("Dont start handover");
    return;
    DBUG_VOID_RETURN;
  }
}//if

@@ -332,7 +332,7 @@ Suma::execSTTOR(Signal* signal) {
   * Allow APIs to connect
   */
  sendSTTORRY(signal);
  return;
  DBUG_VOID_RETURN;
}

if(startphase == 101)

@@ -345,7 +345,7 @@ Suma::execSTTOR(Signal* signal) {
  */
  c_startup.m_wait_handover= true;
  check_start_handover(signal);
  return;
  DBUG_VOID_RETURN;
}
}
sendSTTORRY(signal);

@@ -575,19 +575,19 @@ void Suma::execAPI_FAILREQ(Signal* signal)
    jam();
    sendSignalWithDelay(reference(), GSN_API_FAILREQ, signal,
                        200, signal->getLength());
    return;
    DBUG_VOID_RETURN;
  }

  if (c_failedApiNodes.get(failedApiNode))
  {
    jam();
    return;
    DBUG_VOID_RETURN;
  }

  if (!c_subscriber_nodes.get(failedApiNode))
  {
    jam();
    return;
    DBUG_VOID_RETURN;
  }

  c_failedApiNodes.set(failedApiNode);

@@ -2453,7 +2453,7 @@ Suma::execSUB_START_REQ(Signal* signal){
    jam();
    c_subscriberPool.release(subbPtr);
    sendSubStartRef(signal, SubStartRef::PartiallyConnected);
    return;
    DBUG_VOID_RETURN;
  }

  DBUG_PRINT("info",("c_subscriberPool size: %d free: %d",

@@ -4289,7 +4289,7 @@ Suma::Restart::runSUMA_START_ME_REQ(Signal* signal, Uint32 sumaRef)
  ref->errorCode = SumaStartMeRef::Busy;
  suma.sendSignal(sumaRef, GSN_SUMA_START_ME_REF, signal,
                  SumaStartMeRef::SignalLength, JBB);
  return;
  DBUG_VOID_RETURN;
}

nodeId = refToNode(sumaRef);