Mirror of https://github.com/MariaDB/server.git (synced 2025-01-19 13:32:33 +01:00)

commit 8d78cd3e3c

Merge bk-internal:/home/bk/mysql-5.1-new
into neptunus.(none):/home/msvensson/mysql/mysql-5.1

100 changed files with 1872 additions and 313 deletions
@@ -157,7 +157,7 @@
 InlineFunctionExpansion="1"
 OptimizeForProcessor="2"
 AdditionalIncludeDirectories="../include,../libmysqld,../sql,../regex,../extra/yassl/include,../storage/bdb/build_win32,../zlib"
-PreprocessorDefinitions="WIN32;_WINDOWS;_MBCS;NDEBUG;DBUG_OFF;USE_SYMDIR;SIGNAL_WITH_VIO_CLOSE;HAVE_DLOPEN;EMBEDDED_LIBRARY;MYSQL_SERVER;HAVE_INNOBASE_DB;WITH_INNOBASE_STORAGE_ENGINE;USE_TLS;__WIN__"/>
+PreprocessorDefinitions="WIN32;_WINDOWS;_MBCS;NDEBUG;DBUG_OFF;USE_SYMDIR;SIGNAL_WITH_VIO_CLOSE;HAVE_DLOPEN;EMBEDDED_LIBRARY;MYSQL_SERVER;HAVE_INNOBASE_DB;WITH_INNOBASE_STORAGE_ENGINE;USE_TLS;__WIN__"
 StringPooling="TRUE"
 RuntimeLibrary="0"
 EnableFunctionLevelLinking="TRUE"

@@ -1497,7 +1497,7 @@ int main(int argc, char** argv)
 the server
 */
 
-#ifdef __WIN__
+#if defined(__WIN__) && !defined(USING_CMAKE)
 #include "my_decimal.h"
 #include "decimal.c"
 #include "my_decimal.cpp"

@@ -2,7 +2,7 @@
 -- source include/have_binlog_format_row.inc
 
 CREATE TABLE t1 (word CHAR(20) NOT NULL);
-LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1;
+LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
 --replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
 eval LOAD DATA LOCAL INFILE '$MYSQL_TEST_DIR/std_data/words.dat' INTO TABLE t1;
 SELECT * FROM t1 ORDER BY word LIMIT 10;

@@ -56,8 +56,8 @@ SHOW CREATE TABLE test.t1;
 # then LOAD DATA INFILE in slave, and use a query to compare.
 # This would have the advantage that it would not assume
 # the system has a 'diff'
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/rpl_row_UUID_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/rpl_row_UUID_slave.sql
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/rpl_row_UUID_master.sql
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/rpl_row_UUID_slave.sql
 
 connection master;
 # Let's cleanup
@@ -72,7 +72,7 @@ DROP TABLE test.t2;
 # will be created. You will need to go to the mysql-test dir and diff
 # the files your self to see what is not matching :-)
 
---exec diff ./var/tmp/rpl_row_UUID_master.sql ./var/tmp/rpl_row_UUID_slave.sql;
+--exec diff $MYSQLTEST_VARDIR/tmp/rpl_row_UUID_master.sql $MYSQLTEST_VARDIR/tmp/rpl_row_UUID_slave.sql;
 
 # Cleanup dump files.
 # Long-term "system rm" is not portable; we could live without

@@ -176,10 +176,10 @@ connection master;
 --echo
 
 # Post test clean up section
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/rpl_row_blob_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/rpl_row_blob_slave.sql
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/rpl_row_blob_master.sql
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/rpl_row_blob_slave.sql
 
---exec diff ./var/tmp/rpl_row_blob_master.sql ./var/tmp/rpl_row_blob_slave.sql
+--exec diff $MYSQLTEST_VARDIR/tmp/rpl_row_blob_master.sql $MYSQLTEST_VARDIR/tmp/rpl_row_blob_slave.sql
 
 DROP TABLE IF EXISTS test.t1;
 DROP TABLE IF EXISTS test.t2;

@@ -162,8 +162,8 @@ select hex(c1), hex(c2) from t1;
 
 connection master;
 # Let's have a look at generated SETs.
---replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
-#--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000001
+--replace_result $MYSQLTEST_VARDIR MYSQL_TEST_DIR/var
+#--exec $MYSQL_BINLOG --short-form $MYSQLTEST_VARDIR/log/master-bin.000001
 drop table t1;
 sync_slave_with_master;
 

@@ -77,8 +77,8 @@ SET AUTOCOMMIT=1;
 
 # time to dump the databases and so we can see if they match
 
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/func003_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/func003_slave.sql
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/func003_master.sql
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/func003_slave.sql
 
 # First lets cleanupi
 DROP FUNCTION test.f1;
@@ -88,9 +88,9 @@ DROP TABLE test.t1;
 # the test will show that the diff statement failed and no reject file
 # will be created. You will need to go to the mysql-test dir and diff
 # the files yourself to see what is not matching :-) File are located
-# in mysql-test/var/tmp
+# in $MYSQLTEST_VARDIR/tmp
 
-exec diff ./var/tmp/func003_master.sql ./var/tmp/func003_slave.sql;
+exec diff $MYSQLTEST_VARDIR/tmp/func003_master.sql $MYSQLTEST_VARDIR/tmp/func003_slave.sql;
 
 
 # End of 5.0 test case

@@ -68,8 +68,8 @@ sync_slave_with_master;
 
 connection master;
 
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > ./var/tmp/sp006_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > ./var/tmp/sp006_slave.sql
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/sp006_master.sql
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/sp006_slave.sql
 
 
 DROP PROCEDURE IF EXISTS mysqltest1.p1;
@@ -81,9 +81,9 @@ DROP TABLE IF EXISTS mysqltest1.t2;
 # the test will show that the diff statement failed and not reject file
 # will be created. You will need to go to the mysql-test dir and diff
 # the files your self to see what is not matching :-) Failed test
-# Dump files will be located in mysql-test/var/tmp.
+# Dump files will be located in $MYSQLTEST_VARDIR/tmp.
 
-exec diff ./var/tmp/sp006_master.sql ./var/tmp/sp006_slave.sql;
+exec diff $MYSQLTEST_VARDIR/tmp/sp006_master.sql $MYSQLTEST_VARDIR/tmp/sp006_slave.sql;
 
 sync_slave_with_master;
 

@@ -6,13 +6,13 @@
 
 # there is no neat way to find the backupid, this is a hack to find it...
 
---exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > var/tmp.dat
+--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
 
 CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
 
 DELETE FROM test.backup_info;
 
-LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
+LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
 
 --replace_column 1 <the_backup_id>
 

@@ -36,7 +36,7 @@ let $fixed_bug16370= 0;
 
 ##### Option, for displaying files #####
 #
-# Attention: Displaying the directory content via "ls var/master-data/test/t*"
+# Attention: Displaying the directory content via "ls $MYSQLTEST_VARDIR/master-data/test/t*"
 # is probably not portable.
 # let $ls= 0; disables the execution of "ls ....."
 let $ls= 0;

@@ -9,5 +9,5 @@ eval SHOW CREATE TABLE t1;
 # listing of files belonging to the table t1
 if ($ls)
 {
---exec ls var/master-data/test/t1*
+--exec ls $MYSQLTEST_VARDIR/master-data/test/t1*
 }

@@ -983,16 +983,25 @@ sub executable_setup () {
 {
 $path_client_bindir= mtr_path_exists("$glob_basedir/client_release",
 "$glob_basedir/client_debug",
-"$glob_basedir/bin",);
+"$glob_basedir/bin",
+# New CMake locations.
+"$glob_basedir/client/release",
+"$glob_basedir/client/debug");
 $exe_mysqld= mtr_exe_exists ("$path_client_bindir/mysqld-max",
 "$path_client_bindir/mysqld-nt",
 "$path_client_bindir/mysqld",
-"$path_client_bindir/mysqld-debug",);
-$path_language= mtr_path_exists("$glob_basedir/share/english/");
-$path_charsetsdir= mtr_path_exists("$glob_basedir/share/charsets");
+"$path_client_bindir/mysqld-debug",
+"$glob_basedir/sql/release/mysqld",
+"$glob_basedir/sql/debug/mysqld");
+$path_language= mtr_path_exists("$glob_basedir/share/english/",
+"$glob_basedir/sql/share/english/");
+$path_charsetsdir= mtr_path_exists("$glob_basedir/share/charsets",
+"$glob_basedir/sql/share/charsets");
 
 $exe_my_print_defaults=
-mtr_exe_exists("$path_client_bindir/my_print_defaults");
+mtr_exe_exists("$path_client_bindir/my_print_defaults",
+"$glob_basedir/extra/release/my_print_defaults",
+"$glob_basedir/extra/debug/my_print_defaults");
 }
 else
 {

@@ -1,4 +1,6 @@
 
+BUILT_SOURCES = ndbcluster
+
 benchdir_root= $(prefix)
 testdir = $(benchdir_root)/mysql-test/ndb
 

@@ -5,7 +5,7 @@ insert into t1 values (3);
 update t1 set a=a+2 where a=2;
 update t1 set a=a+2 where a=3;
 create table t2 (word varchar(20));
-load data infile '../../std_data/words.dat' into table t2;
+load data infile '../std_data_ln/words.dat' into table t2;
 drop table t1;
 drop table t2;
 select * from t1;

@ -3,10 +3,19 @@ create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
|
|||
engine=ndb;
|
||||
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
|
||||
create index c on t1(c);
|
||||
show indexes from t1;
|
||||
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
|
||||
t1 0 PRIMARY 1 a A 3 NULL NULL BTREE
|
||||
t1 1 b 1 b A 3 NULL NULL YES BTREE
|
||||
t1 1 c 1 c A 3 NULL NULL YES BTREE
|
||||
select * from t1 where c = 'two';
|
||||
a b c
|
||||
2 two two
|
||||
alter table t1 drop index c;
|
||||
show indexes from t1;
|
||||
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
|
||||
t1 0 PRIMARY 1 a A 3 NULL NULL BTREE
|
||||
t1 1 b 1 b A 3 NULL NULL YES BTREE
|
||||
select * from t1 where c = 'two';
|
||||
a b c
|
||||
2 two two
|
||||
|
|
|
@ -29,7 +29,7 @@ pk1 c2 c3 hex(c4)
|
|||
5 Sweden 496 1
|
||||
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
|
||||
DELETE FROM test.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
SELECT @the_backup_id:=backup_id FROM test.backup_info;
|
||||
@the_backup_id:=backup_id
|
||||
<the_backup_id>
|
||||
|
@ -97,7 +97,7 @@ LENGTH(data)
|
|||
16384
|
||||
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
|
||||
DELETE FROM test.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
SELECT @the_backup_id:=backup_id FROM test.backup_info;
|
||||
@the_backup_id:=backup_id
|
||||
<the_backup_id>
|
||||
|
@ -310,7 +310,7 @@ pk1 c2 c3 hex(c4)
|
|||
246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1
|
||||
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
|
||||
DELETE FROM test.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
SELECT @the_backup_id:=backup_id FROM test.backup_info;
|
||||
@the_backup_id:=backup_id
|
||||
<the_backup_id>
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
DROP TABLE IF EXISTS t1;
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL PRIMARY KEY) ENGINE=NDB;
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1 ;
|
||||
ERROR 23000: Can't write; duplicate key in table 't1'
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL) ENGINE=NDB;
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1 ;
|
||||
SELECT * FROM t1 ORDER BY word;
|
||||
word
|
||||
Aarhus
|
||||
|
|
|
@ -125,6 +125,13 @@ create table t6 engine=myisam as select * from t6_c;
|
|||
create table t7 engine=myisam as select * from t7_c;
|
||||
create table t8 engine=myisam as select * from t8_c;
|
||||
create table t9 engine=myisam as select * from t9_c;
|
||||
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
|
||||
DELETE FROM test.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
SELECT @the_backup_id:=backup_id FROM test.backup_info;
|
||||
@the_backup_id:=backup_id
|
||||
<the_backup_id>
|
||||
DROP TABLE test.backup_info;
|
||||
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
|
@ -241,6 +248,13 @@ PARTITION BY LINEAR HASH (`relatta`)
|
|||
PARTITIONS 4;
|
||||
ALTER TABLE t7_c
|
||||
PARTITION BY LINEAR KEY (`dardtestard`);
|
||||
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
|
||||
DELETE FROM test.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
|
||||
SELECT @the_backup_id:=backup_id FROM test.backup_info;
|
||||
@the_backup_id:=backup_id
|
||||
<the_backup_id>
|
||||
DROP TABLE test.backup_info;
|
||||
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
|
@ -445,4 +459,4 @@ drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
|||
Create table test/def/t2_c failed: Translate frm error
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||
520093696,2
|
||||
520093696,<the_backup_id>
|
||||
|
|
|
@ -91,7 +91,7 @@ PARTITION BY KEY (a)
|
|||
ALTER TABLE t1 ADD PARTITION PARTITIONS 0;
|
||||
ERROR HY000: At least one partition must be added
|
||||
ALTER TABLE t1 ADD PARTITION PARTITIONS 1024;
|
||||
ERROR HY000: Too many partitions were defined
|
||||
ERROR HY000: Too many partitions (including subpartitions) were defined
|
||||
ALTER TABLE t1 DROP PARTITION x0;
|
||||
ERROR HY000: DROP PARTITION can only be used on RANGE/LIST partitions
|
||||
ALTER TABLE t1 COALESCE PARTITION 1;
|
||||
|
|
|
@ -6,10 +6,10 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
|||
start slave;
|
||||
DROP TABLE IF EXISTS test.t1;
|
||||
CREATE TABLE test.t1 (a VARCHAR(255), PRIMARY KEY(a));
|
||||
LOAD DATA INFILE '../../std_data/words2.dat' INTO TABLE test.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words2.dat' INTO TABLE test.t1;
|
||||
DELETE FROM test.t1 WHERE a = 'abashed';
|
||||
DELETE FROM test.t1;
|
||||
LOAD DATA INFILE '../../std_data/words2.dat' INTO TABLE test.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words2.dat' INTO TABLE test.t1;
|
||||
SELECT * FROM test.t1 ORDER BY a DESC;
|
||||
a
|
||||
aberration
|
||||
|
|
|
@ -29,9 +29,9 @@ drop table t1;
|
|||
drop table t1;
|
||||
set SQL_LOG_BIN=0;
|
||||
create table t1 (word char(20) not null, index(word))ENGINE=MyISAM;
|
||||
load data infile '../../std_data/words.dat' into table t1;
|
||||
load data infile '../std_data_ln/words.dat' into table t1;
|
||||
create table t2 (word char(20) not null)ENGINE=MyISAM;
|
||||
load data infile '../../std_data/words.dat' into table t2;
|
||||
load data infile '../std_data_ln/words.dat' into table t2;
|
||||
create table t3 (word char(20) not null primary key)ENGINE=MyISAM;
|
||||
load table t1 from master;
|
||||
load table t2 from master;
|
||||
|
|
|
@ -5,7 +5,7 @@ reset slave;
|
|||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL);
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
SELECT * FROM t1 ORDER BY word;
|
||||
word
|
||||
Aarhus
|
||||
|
|
|
@ -7,14 +7,14 @@ start slave;
|
|||
drop database if exists mysqltest;
|
||||
USE test;
|
||||
CREATE TABLE t1(a INT, b INT, UNIQUE(b));
|
||||
LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE test.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/rpl_loaddata.dat' INTO TABLE test.t1;
|
||||
SELECT COUNT(*) FROM test.t1;
|
||||
COUNT(*)
|
||||
2
|
||||
CREATE DATABASE mysqltest;
|
||||
USE mysqltest;
|
||||
CREATE TABLE t1(a INT, b INT, UNIQUE(b));
|
||||
LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE mysqltest.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/rpl_loaddata.dat' INTO TABLE mysqltest.t1;
|
||||
SELECT COUNT(*) FROM mysqltest.t1;
|
||||
COUNT(*)
|
||||
2
|
||||
|
|
|
@ -8,11 +8,11 @@ DROP PROCEDURE IF EXISTS test.p1;
|
|||
DROP TABLE IF EXISTS test.t1;
|
||||
CREATE TABLE test.t1 (a INT, blob_column LONGBLOB, PRIMARY KEY(a));
|
||||
INSERT INTO test.t1 VALUES(1,'test');
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../../std_data/words2.dat') WHERE a=1;
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../std_data_ln/words2.dat') WHERE a=1;
|
||||
create procedure test.p1()
|
||||
begin
|
||||
INSERT INTO test.t1 VALUES(2,'test');
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../../std_data/words2.dat') WHERE a=2;
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../std_data_ln/words2.dat') WHERE a=2;
|
||||
end|
|
||||
CALL test.p1();
|
||||
SELECT * FROM test.t1 ORDER BY blob_column;
|
||||
|
|
|
@ -49,7 +49,7 @@ CREATE DATABASE BANK;
|
|||
RESET MASTER;
|
||||
CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT) ENGINE = HEAP;
|
||||
DELETE FROM cluster_replication.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
|
||||
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
|
||||
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
|
||||
@the_backup_id:=backup_id
|
||||
<the_backup_id>
|
||||
|
|
|
@ -5,7 +5,7 @@ reset slave;
|
|||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL);
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
LOAD DATA LOCAL INFILE 'MYSQL_TEST_DIR/std_data/words.dat' INTO TABLE t1;
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 10;
|
||||
word
|
||||
|
|
|
@ -27,7 +27,7 @@ hex(c2) hex(c3) c1
|
|||
0 0 DEFGHIJKL
|
||||
CREATE TEMPORARY TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT)ENGINE=HEAP;
|
||||
DELETE FROM cluster_replication.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
|
||||
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
|
||||
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
|
||||
@the_backup_id:=backup_id
|
||||
<the_backup_id>
|
||||
|
|
|
@ -5,7 +5,7 @@ reset slave;
|
|||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL);
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
LOAD DATA LOCAL INFILE 'MYSQL_TEST_DIR/std_data/words.dat' INTO TABLE t1;
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 10;
|
||||
word
|
||||
|
|
|
@ -41,12 +41,6 @@ t1
|
|||
t2
|
||||
**** On Master ****
|
||||
DROP TABLE t1,t2;
|
||||
SHOW BINLOG EVENTS;
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4
|
||||
master-bin.000001 102 Query 1 188 use `test`; CREATE TABLE t1 (a int)
|
||||
master-bin.000001 188 Query 1 274 use `test`; CREATE TABLE t2 (a int)
|
||||
master-bin.000001 274 Query 1 378 use `test`; DROP TABLE `t1` /* generated by server */
|
||||
SHOW TABLES;
|
||||
Tables_in_test
|
||||
t2
|
||||
|
|
|
@ -9,7 +9,7 @@ stop slave;
|
|||
create database mysqltest;
|
||||
use mysqltest;
|
||||
create table t1(a int, b int, unique(b));
|
||||
load data infile '../../std_data/rpl_loaddata.dat' into table t1;
|
||||
load data infile '../std_data_ln/rpl_loaddata.dat' into table t1;
|
||||
show binlog events from 102;
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 # Query 1 # #
|
||||
|
|
mysql-test/r/rpl_row_mysqlbinlog.result (new file, 266 lines)

@@ -0,0 +1,266 @@
stop slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
reset master;
|
||||
reset slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
|
||||
---Setup Section --
|
||||
set timestamp=1000000000;
|
||||
DROP TABLE IF EXISTS t1,t2,t3;
|
||||
CREATE TABLE t1(word VARCHAR(20));
|
||||
CREATE TABLE t2(id INT AUTO_INCREMENT NOT NULL PRIMARY KEY);
|
||||
CREATE TABLE t3(c1 INT NOT NULL PRIMARY KEY, c2 LONGBLOB, c3 TIMESTAMP, c4 TEXT, c5 FLOAT);
|
||||
|
||||
---Test1 check table load --
|
||||
SELECT COUNT(*) from t1;
|
||||
COUNT(*)
|
||||
351
|
||||
SELECT COUNT(*) from t2;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT COUNT(*) from t3;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
word
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
id
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
c1 c3 c4 c5
|
||||
1 2006-02-22 00:00:00 Tested in Texas 2.2
|
||||
2 2006-02-22 00:00:00 Tested in Texas 4.4
|
||||
3 2006-02-22 00:00:00 Tested in Texas 6.6
|
||||
4 2006-02-22 00:00:00 Tested in Texas 8.8
|
||||
5 2006-02-22 00:00:00 Tested in Texas 11
|
||||
SELECT COUNT(*) from t1;
|
||||
COUNT(*)
|
||||
351
|
||||
SELECT COUNT(*) from t2;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT COUNT(*) from t3;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
word
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
id
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
c1 c3 c4 c5
|
||||
1 2006-02-22 00:00:00 Tested in Texas 2.2
|
||||
2 2006-02-22 00:00:00 Tested in Texas 4.4
|
||||
3 2006-02-22 00:00:00 Tested in Texas 6.6
|
||||
4 2006-02-22 00:00:00 Tested in Texas 8.8
|
||||
5 2006-02-22 00:00:00 Tested in Texas 11
|
||||
insert into t1 values ("Alas");
|
||||
flush logs;
|
||||
|
||||
--- Test 1 Dump binlog to file --
|
||||
|
||||
--- Test 1 delete tables, clean master and slave --
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t2;
|
||||
DROP TABLE t3;
|
||||
stop slave;
|
||||
reset master;
|
||||
reset slave;
|
||||
start slave;
|
||||
|
||||
--- Test 1 Load from Dump binlog file --
|
||||
|
||||
--- Test 1 Check Load Results --
|
||||
SELECT COUNT(*) from t1;
|
||||
COUNT(*)
|
||||
352
|
||||
SELECT COUNT(*) from t2;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT COUNT(*) from t3;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
word
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
id
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
c1 c3 c4 c5
|
||||
1 2006-02-22 00:00:00 Tested in Texas 2.2
|
||||
2 2006-02-22 00:00:00 Tested in Texas 4.4
|
||||
3 2006-02-22 00:00:00 Tested in Texas 6.6
|
||||
4 2006-02-22 00:00:00 Tested in Texas 8.8
|
||||
5 2006-02-22 00:00:00 Tested in Texas 11
|
||||
SELECT COUNT(*) from t1;
|
||||
COUNT(*)
|
||||
352
|
||||
SELECT COUNT(*) from t2;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT COUNT(*) from t3;
|
||||
COUNT(*)
|
||||
500
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
word
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aarhus
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
id
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
c1 c3 c4 c5
|
||||
1 2006-02-22 00:00:00 Tested in Texas 2.2
|
||||
2 2006-02-22 00:00:00 Tested in Texas 4.4
|
||||
3 2006-02-22 00:00:00 Tested in Texas 6.6
|
||||
4 2006-02-22 00:00:00 Tested in Texas 8.8
|
||||
5 2006-02-22 00:00:00 Tested in Texas 11
|
||||
|
||||
--- Test 2 position test --
|
||||
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
|
||||
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
|
||||
use test;
|
||||
SET TIMESTAMP=1000000000;
|
||||
SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1;
|
||||
SET @@session.sql_mode=0;
|
||||
/*!\C latin1 */;
|
||||
SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8;
|
||||
CREATE TABLE t3(c1 INT NOT NULL PRIMARY KEY, c2 LONGBLOB, c3 TIMESTAMP, c4 TEXT, c5 FLOAT);
|
||||
# End of log file
|
||||
ROLLBACK /* added by mysqlbinlog */;
|
||||
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
|
||||
|
||||
--- Test 3 First Remote test --
|
||||
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
|
||||
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
|
||||
ROLLBACK;
|
||||
use test;
|
||||
SET TIMESTAMP=1000000000;
|
||||
SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1;
|
||||
SET @@session.sql_mode=0;
|
||||
/*!\C latin1 */;
|
||||
SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8;
|
||||
DROP TABLE IF EXISTS t1,t2,t3;
|
||||
SET TIMESTAMP=1000000000;
|
||||
CREATE TABLE t1(word VARCHAR(20));
|
||||
SET TIMESTAMP=1000000000;
|
||||
CREATE TABLE t2(id INT AUTO_INCREMENT NOT NULL PRIMARY KEY);
|
||||
SET TIMESTAMP=1000000000;
|
||||
CREATE TABLE t3(c1 INT NOT NULL PRIMARY KEY, c2 LONGBLOB, c3 TIMESTAMP, c4 TEXT, c5 FLOAT);
|
||||
# End of log file
|
||||
ROLLBACK /* added by mysqlbinlog */;
|
||||
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
|
||||
|
||||
--- Test 5 LOAD DATA --
|
||||
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
|
||||
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
|
||||
# End of log file
|
||||
ROLLBACK /* added by mysqlbinlog */;
|
||||
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
|
||||
|
||||
--- Test 6 reading stdin --
|
||||
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
|
||||
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
|
||||
ROLLBACK;
|
||||
use test;
|
||||
SET TIMESTAMP=1000000000;
|
||||
SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1;
|
||||
SET @@session.sql_mode=0;
|
||||
/*!\C latin1 */;
|
||||
SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8;
|
||||
DROP TABLE IF EXISTS t1,t2,t3;
|
||||
SET TIMESTAMP=1000000000;
|
||||
CREATE TABLE t1(word VARCHAR(20));
|
||||
SET TIMESTAMP=1000000000;
|
||||
CREATE TABLE t2(id INT AUTO_INCREMENT NOT NULL PRIMARY KEY);
|
||||
SET TIMESTAMP=1000000000;
|
||||
CREATE TABLE t3(c1 INT NOT NULL PRIMARY KEY, c2 LONGBLOB, c3 TIMESTAMP, c4 TEXT, c5 FLOAT);
|
||||
# End of log file
|
||||
ROLLBACK /* added by mysqlbinlog */;
|
||||
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
|
||||
|
||||
--- Test 7 reading stdin w/position --
|
||||
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
|
||||
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
|
||||
use test;
|
||||
SET TIMESTAMP=1000000000;
|
||||
SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1;
|
||||
SET @@session.sql_mode=0;
|
||||
/*!\C latin1 */;
|
||||
SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8;
|
||||
CREATE TABLE t3(c1 INT NOT NULL PRIMARY KEY, c2 LONGBLOB, c3 TIMESTAMP, c4 TEXT, c5 FLOAT);
|
||||
# End of log file
|
||||
ROLLBACK /* added by mysqlbinlog */;
|
||||
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
|
||||
|
||||
--- Test 8 switch internal charset --
|
||||
stop slave;
|
||||
reset master;
|
||||
reset slave;
|
||||
start slave;
|
||||
create table t4 (f text character set utf8);
|
||||
create table t5 (f text character set cp932);
|
||||
flush logs;
|
||||
rename table t4 to t04, t5 to t05;
|
||||
select HEX(f) from t04;
|
||||
HEX(f)
|
||||
E382BD
|
||||
select HEX(f) from t4;
|
||||
HEX(f)
|
||||
E382BD
|
||||
select HEX(f) from t05;
|
||||
HEX(f)
|
||||
835C
|
||||
select HEX(f) from t5;
|
||||
HEX(f)
|
||||
835C
|
||||
select HEX(f) from t04;
|
||||
HEX(f)
|
||||
E382BD
|
||||
select HEX(f) from t4;
|
||||
HEX(f)
|
||||
E382BD
|
||||
select HEX(f) from t05;
|
||||
HEX(f)
|
||||
835C
|
||||
select HEX(f) from t5;
|
||||
HEX(f)
|
||||
835C
|
||||
|
||||
--- Test cleanup --
|
||||
DROP TABLE IF EXISTS t1, t2, t3, t04, t05, t4, t5;
|
|
@ -22,7 +22,7 @@ INSERT INTO test.t1 VALUES(1);
|
|||
CALL test.p1();
|
||||
END|
|
||||
CALL test.p2();
|
||||
SELECT * FROM test.t1;
|
||||
SELECT * FROM test.t1 ORDER BY a;
|
||||
a
|
||||
1
|
||||
2
|
||||
|
@ -40,11 +40,11 @@ INSERT INTO test.t2 VALUES(6);
|
|||
CALL test.p3();
|
||||
END|
|
||||
CALL test.p4();
|
||||
SELECT * FROM test.t2;
|
||||
SELECT * FROM test.t2 ORDER BY a;
|
||||
a
|
||||
6
|
||||
7
|
||||
SELECT * FROM test.t2;
|
||||
SELECT * FROM test.t2 ORDER BY a;
|
||||
a
|
||||
6
|
||||
7
|
||||
|
|
|
@ -5,7 +5,7 @@ reset slave;
|
|||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
create table t1 (words varchar(20)) engine=myisam;
|
||||
load data infile '../../std_data/words.dat' into table t1 (words);
|
||||
load data infile '../std_data_ln/words.dat' into table t1 (words);
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
70
|
||||
|
|
|
@ -1163,3 +1163,6 @@ end|
|
|||
call bug15091();
|
||||
ERROR 42S02: Unknown table 'c' in field list
|
||||
drop procedure bug15091;
|
||||
drop function if exists bug16896;
|
||||
create aggregate function bug16896() returns int return 1;
|
||||
ERROR 42000: AGGREGATE is not supported for stored functions
|
||||
|
|
|
@ -314,3 +314,12 @@ select * from db_bug14533.t1;
|
|||
ERROR 42000: SELECT command denied to user 'user_bug14533'@'localhost' for table 't1'
|
||||
drop user user_bug14533@localhost;
|
||||
drop database db_bug14533;
|
||||
CREATE DATABASE db_bug7787;
|
||||
use db_bug7787;
|
||||
CREATE PROCEDURE p1()
|
||||
SHOW INNODB STATUS;
|
||||
Warnings:
|
||||
Warning 1287 'SHOW INNODB STATUS' is deprecated; use 'SHOW ENGINE INNODB STATUS' instead
|
||||
GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost;
|
||||
DROP DATABASE db_bug7787;
|
||||
use test;
|
||||
|
|
|
@ -1413,8 +1413,6 @@ select `foo` ()|
|
|||
5
|
||||
drop function `foo`|
|
||||
drop function if exists t1max|
|
||||
Warnings:
|
||||
Note 1305 FUNCTION t1max does not exist
|
||||
create function t1max() returns int
|
||||
begin
|
||||
declare x int;
|
||||
|
@ -1470,6 +1468,339 @@ zip 3
|
|||
foo 1
|
||||
drop table t3|
|
||||
drop function getcount|
|
||||
drop table if exists t3|
|
||||
drop procedure if exists h_ee|
|
||||
drop procedure if exists h_es|
|
||||
drop procedure if exists h_en|
|
||||
drop procedure if exists h_ew|
|
||||
drop procedure if exists h_ex|
|
||||
drop procedure if exists h_se|
|
||||
drop procedure if exists h_ss|
|
||||
drop procedure if exists h_sn|
|
||||
drop procedure if exists h_sw|
|
||||
drop procedure if exists h_sx|
|
||||
drop procedure if exists h_ne|
|
||||
drop procedure if exists h_ns|
|
||||
drop procedure if exists h_nn|
|
||||
drop procedure if exists h_we|
|
||||
drop procedure if exists h_ws|
|
||||
drop procedure if exists h_ww|
|
||||
drop procedure if exists h_xe|
|
||||
drop procedure if exists h_xs|
|
||||
drop procedure if exists h_xx|
|
||||
create table t3 (a smallint primary key)|
|
||||
insert into t3 (a) values (1)|
|
||||
create procedure h_ee()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Outer (bad)' as 'h_ee';
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Inner (good)' as 'h_ee';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_es()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Outer (good)' as 'h_es';
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Inner (bad)' as 'h_es';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_en()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1329 -- ER_SP_FETCH_NO_DATA
|
||||
select 'Outer (good)' as 'h_en';
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for sqlstate '02000' -- no data
|
||||
select 'Inner (bad)' as 'h_en';
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
create procedure h_ew()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1264 -- ER_WARN_DATA_OUT_OF_RANGE
|
||||
select 'Outer (good)' as 'h_ew';
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Inner (bad)' as 'h_ew';
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
create procedure h_ex()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Outer (good)' as 'h_ex';
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Inner (bad)' as 'h_ex';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_se()
|
||||
deterministic
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Outer (bad)' as 'h_se';
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Inner (good)' as 'h_se';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_ss()
|
||||
deterministic
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Outer (bad)' as 'h_ss';
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Inner (good)' as 'h_ss';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_sn()
|
||||
deterministic
|
||||
begin
|
||||
-- Note: '02000' is more specific than NOT FOUND ;
|
||||
-- there might be other not found states
|
||||
declare continue handler for sqlstate '02000' -- no data
|
||||
select 'Outer (good)' as 'h_sn';
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for not found
|
||||
select 'Inner (bad)' as 'h_sn';
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
create procedure h_sw()
|
||||
deterministic
|
||||
begin
|
||||
-- data exception - numeric value out of range
|
||||
declare continue handler for sqlstate '22003'
|
||||
select 'Outer (good)' as 'h_sw';
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Inner (bad)' as 'h_sw';
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
create procedure h_sx()
|
||||
deterministic
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Outer (good)' as 'h_sx';
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Inner (bad)' as 'h_sx';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_ne()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for not found
|
||||
select 'Outer (bad)' as 'h_ne';
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for 1329 -- ER_SP_FETCH_NO_DATA
|
||||
select 'Inner (good)' as 'h_ne';
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
create procedure h_ns()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for not found
|
||||
select 'Outer (bad)' as 'h_ns';
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for sqlstate '02000' -- no data
|
||||
select 'Inner (good)' as 'h_ns';
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
create procedure h_nn()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for not found
|
||||
select 'Outer (bad)' as 'h_nn';
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for not found
|
||||
select 'Inner (good)' as 'h_nn';
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
create procedure h_we()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Outer (bad)' as 'h_we';
|
||||
begin
|
||||
declare continue handler for 1264 -- ER_WARN_DATA_OUT_OF_RANGE
|
||||
select 'Inner (good)' as 'h_we';
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
create procedure h_ws()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Outer (bad)' as 'h_ws';
|
||||
begin
|
||||
-- data exception - numeric value out of range
|
||||
declare continue handler for sqlstate '22003'
|
||||
select 'Inner (good)' as 'h_ws';
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
create procedure h_ww()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Outer (bad)' as 'h_ww';
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Inner (good)' as 'h_ww';
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
create procedure h_xe()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Outer (bad)' as 'h_xe';
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Inner (good)' as 'h_xe';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_xs()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Outer (bad)' as 'h_xs';
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Inner (good)' as 'h_xs';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
create procedure h_xx()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Outer (bad)' as 'h_xx';
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Inner (good)' as 'h_xx';
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
call h_ee()|
|
||||
h_ee
|
||||
Inner (good)
|
||||
call h_es()|
|
||||
h_es
|
||||
Outer (good)
|
||||
call h_en()|
|
||||
h_en
|
||||
Outer (good)
|
||||
call h_ew()|
|
||||
h_ew
|
||||
Outer (good)
|
||||
call h_ex()|
|
||||
h_ex
|
||||
Outer (good)
|
||||
call h_se()|
|
||||
h_se
|
||||
Inner (good)
|
||||
call h_ss()|
|
||||
h_ss
|
||||
Inner (good)
|
||||
call h_sn()|
|
||||
h_sn
|
||||
Outer (good)
|
||||
call h_sw()|
|
||||
h_sw
|
||||
Outer (good)
|
||||
call h_sx()|
|
||||
h_sx
|
||||
Outer (good)
|
||||
call h_ne()|
|
||||
h_ne
|
||||
Inner (good)
|
||||
call h_ns()|
|
||||
h_ns
|
||||
Inner (good)
|
||||
call h_nn()|
|
||||
h_nn
|
||||
Inner (good)
|
||||
call h_we()|
|
||||
h_we
|
||||
Inner (good)
|
||||
call h_ws()|
|
||||
h_ws
|
||||
Inner (good)
|
||||
call h_ww()|
|
||||
h_ww
|
||||
Inner (good)
|
||||
call h_xe()|
|
||||
h_xe
|
||||
Inner (good)
|
||||
call h_xs()|
|
||||
h_xs
|
||||
Inner (good)
|
||||
call h_xx()|
|
||||
h_xx
|
||||
Inner (good)
|
||||
drop table t3|
|
||||
drop procedure h_ee|
|
||||
drop procedure h_es|
|
||||
drop procedure h_en|
|
||||
drop procedure h_ew|
|
||||
drop procedure h_ex|
|
||||
drop procedure h_se|
|
||||
drop procedure h_ss|
|
||||
drop procedure h_sn|
|
||||
drop procedure h_sw|
|
||||
drop procedure h_sx|
|
||||
drop procedure h_ne|
|
||||
drop procedure h_ns|
|
||||
drop procedure h_nn|
|
||||
drop procedure h_we|
|
||||
drop procedure h_ws|
|
||||
drop procedure h_ww|
|
||||
drop procedure h_xe|
|
||||
drop procedure h_xs|
|
||||
drop procedure h_xx|
|
||||
drop procedure if exists bug822|
|
||||
create procedure bug822(a_id char(16), a_data int)
|
||||
begin
|
||||
|
|
mysql-test/t/binlog_stm_mix_innodb_myisam-master.opt (new file, 1 line)

@@ -0,0 +1 @@
+--loose-innodb_lock_wait_timeout=2

@ -28,11 +28,10 @@ rpl_ndb_charset : Bug#17246
|
|||
rpl_ndb_ddl : Bug#17400: delete & update of rows in table without pk fails
|
||||
rpl_ndb_delete_nowhere : Bug#17400: delete & update of rows in table without pk fails
|
||||
rpl_ndb_insert_ignore : Bugs: #17431: INSERT IGNORE INTO returns failed: 1296
|
||||
#rpl_ndb_log : result not deterministic
|
||||
rpl_ndb_log : result not deterministic
|
||||
rpl_ndb_relay_space : Bug#16993
|
||||
rpl_ndb_multi_update3 : Bug#17400: delete & update of rows in table without pk fails
|
||||
rpl_ndb_sp007 : Bug #17290
|
||||
rpl_sp : Bug#16456
|
||||
rpl_until : Unstable test case, bug#15886
|
||||
sp-goto : GOTO is currently is disabled - will be fixed in the future
|
||||
rpl_ndb_log : results are not deterministic
|
||||
|
|
|
@ -135,6 +135,6 @@ select @@character_set_filesystem;
|
|||
LOAD DATA INFILE 't@002d1' INTO TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
--exec rm $MYSQL_TEST_DIR/var/master-data/test/t@002d1
|
||||
--exec rm $MYSQLTEST_VARDIR/master-data/test/t@002d1
|
||||
SET character_set_filesystem=default;
|
||||
select @@character_set_filesystem;
|
||||
|
|
|
@ -10,19 +10,19 @@ update t1 set a=a+2 where a=2;
|
|||
update t1 set a=a+2 where a=3;
|
||||
|
||||
create table t2 (word varchar(20));
|
||||
load data infile '../../std_data/words.dat' into table t2;
|
||||
load data infile '../std_data_ln/words.dat' into table t2;
|
||||
|
||||
#
|
||||
# Save binlog
|
||||
#
|
||||
--exec $MYSQL_BINLOG --hexdump $MYSQL_TEST_DIR/var/log/master-bin.000001 > $MYSQL_TEST_DIR/var/tmp/mysqlbinlog_base64.sql
|
||||
--exec $MYSQL_BINLOG --hexdump $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_base64.sql
|
||||
|
||||
#
|
||||
# Clear database and restore from binlog
|
||||
#
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
--exec $MYSQL test < $MYSQL_TEST_DIR/var/tmp/mysqlbinlog_base64.sql
|
||||
--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_base64.sql
|
||||
|
||||
#
|
||||
# Verify that all binlog events have been executed
|
||||
|
@ -33,6 +33,6 @@ select * from t2;
|
|||
#
|
||||
# Test cleanup
|
||||
#
|
||||
--exec rm $MYSQL_TEST_DIR/var/tmp/mysqlbinlog_base64.sql
|
||||
--exec rm $MYSQLTEST_VARDIR/tmp/mysqlbinlog_base64.sql
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
|
|
|
@ -13,10 +13,12 @@ engine=ndb;
|
|||
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
|
||||
create index c on t1(c);
|
||||
connection server2;
|
||||
show indexes from t1;
|
||||
select * from t1 where c = 'two';
|
||||
connection server1;
|
||||
alter table t1 drop index c;
|
||||
connection server2;
|
||||
show indexes from t1;
|
||||
select * from t1 where c = 'two';
|
||||
connection server1;
|
||||
drop table t1;
|
||||
|
|
|
@ -177,7 +177,7 @@ INSERT INTO t2 VALUES
|
|||
INSERT INTO t3 VALUES
|
||||
(1,1,1);
|
||||
|
||||
--exec $MYSQL_DUMP --skip-comments --compact -Y test > var/tmp/ndb_dd_dump.sql
|
||||
--exec $MYSQL_DUMP --skip-comments --compact -Y test > $MYSQLTEST_VARDIR/tmp/ndb_dd_dump.sql
|
||||
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t2;
|
||||
|
@ -206,7 +206,7 @@ DROP TABLESPACE ts3 ENGINE = NDB;
|
|||
DROP LOGFILE GROUP lg1 ENGINE = NDB;
|
||||
DROP LOGFILE GROUP lg2 ENGINE = NDB;
|
||||
|
||||
--exec $MYSQL test < var/tmp/ndb_dd_dump.sql
|
||||
--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/ndb_dd_dump.sql
|
||||
|
||||
SELECT DISTINCT
|
||||
LOGFILE_GROUP_NAME,
|
||||
|
|
|
@ -12,12 +12,12 @@ DROP TABLE IF EXISTS t1;
|
|||
# should give duplicate key
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL PRIMARY KEY) ENGINE=NDB;
|
||||
--error 1022
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1 ;
|
||||
DROP TABLE t1;
|
||||
|
||||
# now without a primary key we should be ok
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL) ENGINE=NDB;
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1 ;
|
||||
SELECT * FROM t1 ORDER BY word;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
|
|
@ -143,10 +143,10 @@ create table t8 engine=myisam as select * from t8_c;
|
|||
create table t9 engine=myisam as select * from t9_c;
|
||||
|
||||
|
||||
--exec $NDB_MGM --no-defaults -e "start backup" >> $NDB_TOOLS_OUTPUT
|
||||
--source include/ndb_backup.inc
|
||||
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||
|
||||
# random output order??
|
||||
#show tables;
|
||||
|
@ -230,10 +230,10 @@ PARTITIONS 4;
|
|||
ALTER TABLE t7_c
|
||||
PARTITION BY LINEAR KEY (`dardtestard`);
|
||||
|
||||
--exec $NDB_MGM --no-defaults -e "start backup" >> $NDB_TOOLS_OUTPUT
|
||||
--source include/ndb_backup.inc
|
||||
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||
|
||||
select count(*) from t1;
|
||||
select count(*) from t1_c;
|
||||
|
@ -290,8 +290,8 @@ select count(*)
|
|||
select * from t9_c) a;
|
||||
|
||||
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
|
||||
|
||||
select count(*) from t1;
|
||||
select count(*) from t1_c;
|
||||
|
@ -348,7 +348,7 @@ select count(*)
|
|||
select * from t9_c) a;
|
||||
|
||||
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --core=0 -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-2 2>&1 | grep Translate || true
|
||||
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --core=0 -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id 2>&1 | grep Translate || true
|
||||
|
||||
#
|
||||
# Cleanup
|
||||
|
@ -363,6 +363,6 @@ drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
|
|||
# Test BUG#10287
|
||||
#
|
||||
|
||||
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696
|
||||
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696, | sed "s/,$the_backup_id/,<the_backup_id>/"
|
||||
|
||||
# End of 4.1 tests
|
||||
|
|
|
@ -16,10 +16,10 @@ DROP TABLE IF EXISTS test.t1;
|
|||
|
||||
# Section 1 test
|
||||
CREATE TABLE test.t1 (a VARCHAR(255), PRIMARY KEY(a));
|
||||
LOAD DATA INFILE '../../std_data/words2.dat' INTO TABLE test.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words2.dat' INTO TABLE test.t1;
|
||||
DELETE FROM test.t1 WHERE a = 'abashed';
|
||||
DELETE FROM test.t1;
|
||||
LOAD DATA INFILE '../../std_data/words2.dat' INTO TABLE test.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words2.dat' INTO TABLE test.t1;
|
||||
|
||||
|
||||
SELECT * FROM test.t1 ORDER BY a DESC;
|
||||
|
|
|
@ -46,7 +46,7 @@ show tables;
|
|||
use test;
|
||||
select * from t1;
|
||||
|
||||
system rm var/master-data/mysqltest1/f1.txt;
|
||||
system rm $MYSQLTEST_VARDIR/master-data/mysqltest1/f1.txt;
|
||||
connection master;
|
||||
DROP DATABASE mysqltest1;
|
||||
sync_slave_with_master;
|
||||
|
|
|
@ -71,9 +71,9 @@ sync_with_master;
|
|||
connection master;
|
||||
set SQL_LOG_BIN=0;
|
||||
create table t1 (word char(20) not null, index(word))ENGINE=MyISAM;
|
||||
load data infile '../../std_data/words.dat' into table t1;
|
||||
load data infile '../std_data_ln/words.dat' into table t1;
|
||||
create table t2 (word char(20) not null)ENGINE=MyISAM;
|
||||
load data infile '../../std_data/words.dat' into table t2;
|
||||
load data infile '../std_data_ln/words.dat' into table t2;
|
||||
create table t3 (word char(20) not null primary key)ENGINE=MyISAM;
|
||||
connection slave;
|
||||
load table t1 from master;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
--source include/master-slave.inc
|
||||
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL);
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
SELECT * FROM t1 ORDER BY word;
|
||||
sync_slave_with_master;
|
||||
|
||||
|
|
|
@ -17,14 +17,14 @@ connection master;
|
|||
# 'test' database should be ignored by the slave
|
||||
USE test;
|
||||
CREATE TABLE t1(a INT, b INT, UNIQUE(b));
|
||||
LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE test.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/rpl_loaddata.dat' INTO TABLE test.t1;
|
||||
SELECT COUNT(*) FROM test.t1;
|
||||
|
||||
# 'mysqltest' database should NOT be ignored by the slave
|
||||
CREATE DATABASE mysqltest;
|
||||
USE mysqltest;
|
||||
CREATE TABLE t1(a INT, b INT, UNIQUE(b));
|
||||
LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE mysqltest.t1;
|
||||
LOAD DATA INFILE '../std_data_ln/rpl_loaddata.dat' INTO TABLE mysqltest.t1;
|
||||
SELECT COUNT(*) FROM mysqltest.t1;
|
||||
|
||||
# Now lets check the slave to see what we have :-)
|
||||
|
|
|
@ -24,12 +24,12 @@ DROP TABLE IF EXISTS test.t1;
|
|||
|
||||
CREATE TABLE test.t1 (a INT, blob_column LONGBLOB, PRIMARY KEY(a));
|
||||
INSERT INTO test.t1 VALUES(1,'test');
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../../std_data/words2.dat') WHERE a=1;
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../std_data_ln/words2.dat') WHERE a=1;
|
||||
delimiter |;
|
||||
create procedure test.p1()
|
||||
begin
|
||||
INSERT INTO test.t1 VALUES(2,'test');
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../../std_data/words2.dat') WHERE a=2;
|
||||
UPDATE test.t1 SET blob_column=LOAD_FILE('../std_data_ln/words2.dat') WHERE a=2;
|
||||
end|
|
||||
delimiter ;|
|
||||
|
||||
|
|
|
@ -117,10 +117,10 @@ RESET MASTER;
|
|||
--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT
|
||||
|
||||
# there is no neat way to find the backupid, this is a hack to find it...
|
||||
--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > var/tmp.dat
|
||||
--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
|
||||
CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT) ENGINE = HEAP;
|
||||
DELETE FROM cluster_replication.backup_info;
|
||||
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
|
||||
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
|
||||
--replace_column 1 <the_backup_id>
|
||||
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
|
||||
let the_backup_id=`select @the_backup_id`;
|
||||
|
@ -191,17 +191,17 @@ while ($1)
|
|||
# 1. dump database BANK on both master and slave
|
||||
# 2. compare, there should be no difference
|
||||
#
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/master_BANK.sql
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > $MYSQLTEST_VARDIR/tmp/master_BANK.sql
|
||||
--connection master
|
||||
use test;
|
||||
create table t1 (a int primary key) engine=ndb;
|
||||
insert into t1 values (1);
|
||||
--sync_slave_with_master
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/slave_BANK.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > $MYSQLTEST_VARDIR/tmp/slave_BANK.sql
|
||||
--connection master
|
||||
drop table t1;
|
||||
|
||||
--exec diff ./var/tmp/master_BANK.sql ./var/tmp/slave_BANK.sql
|
||||
--exec diff $MYSQLTEST_VARDIR/tmp/master_BANK.sql $MYSQLTEST_VARDIR/tmp/slave_BANK.sql
|
||||
|
||||
--dec $2
|
||||
}
|
||||
|
|
|
@ -26,10 +26,10 @@ SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;

# take a backup on master
--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > var/tmp.dat
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
CREATE TEMPORARY TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT)ENGINE=HEAP;
DELETE FROM cluster_replication.backup_info;
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
let the_backup_id=`select @the_backup_id` ;
@ -50,8 +50,8 @@ sync_slave_with_master;

connection master;

--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > ./var/tmp/NOW_master.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > ./var/tmp/NOW_slave.sql
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/NOW_master.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/NOW_slave.sql

# lets cleanup
DROP TABLE IF EXISTS mysqltest1.t1;

@ -61,13 +61,13 @@ DROP FUNCTION mysqltest1.f1;
# the test will show that the diff statement failed and no reject file
# will be created. You will need to go to the mysql-test dir and diff
# the files yourself to see what is not matching :-) The failed dump
# files will be located in mysql-test/var/tmp
# files will be located in $MYSQLTEST_VARDIR/tmp

exec diff ./var/tmp/NOW_master.sql ./var/tmp/NOW_slave.sql;
exec diff $MYSQLTEST_VARDIR/tmp/NOW_master.sql $MYSQLTEST_VARDIR/tmp/NOW_slave.sql;

# If all is good, we can clean up our dump files.
system rm ./var/tmp/NOW_master.sql;
system rm ./var/tmp/NOW_slave.sql;
system rm $MYSQLTEST_VARDIR/tmp/NOW_master.sql;
system rm $MYSQLTEST_VARDIR/tmp/NOW_slave.sql;

sync_slave_with_master;
# End of 5.1 test case
@ -31,8 +31,8 @@ connection master;
# Should drop the non-temporary table t1 and the temporary table t2
DROP TABLE t1,t2;
let $VERSION=`select version()`;
--replace_result $VERSION VERSION
SHOW BINLOG EVENTS;
#--replace_result $VERSION VERSION
#SHOW BINLOG EVENTS;
SHOW TABLES;
sync_slave_with_master;
--echo **** On Slave ****
@ -84,8 +84,8 @@ connection master;

# time to dump the databases and so we can see if they match

--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/func002_master.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/func002_slave.sql
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/func002_master.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/func002_slave.sql

# Cleanup
DROP FUNCTION test.f1;

@ -97,7 +97,7 @@ sync_slave_with_master;
# the files yourself to see what is not matching :-). The files are located
# in mysql-test/var/tmp

exec diff ./var/tmp/func002_master.sql ./var/tmp/func002_slave.sql;
exec diff $MYSQLTEST_VARDIR/tmp/func002_master.sql $MYSQLTEST_VARDIR/tmp/func002_slave.sql;

# End of 5.0 test case
1 mysql-test/t/rpl_row_mysqlbinlog-master.opt Normal file
@ -0,0 +1 @@
--max-binlog-size=1040384

319 mysql-test/t/rpl_row_mysqlbinlog.test Normal file
@ -0,0 +1,319 @@
|
|||
##################################################################
|
||||
# Author: JBM #
|
||||
# Date: 2006-02-22 #
|
||||
# Purpose: To test changes to mysqlbinlog for row based bin logs #
|
||||
# We are using .opt file since we need small binlog size #
|
||||
##################################################################
|
||||
# Include Section
|
||||
# Make sure that we have row based bin log
|
||||
-- source include/have_binlog_format_row.inc
|
||||
# Embedded server doesn't support binlogging
|
||||
-- source include/not_embedded.inc
|
||||
-- source include/master-slave.inc
|
||||
|
||||
# Setup Section
|
||||
# we need this for getting fixed timestamps inside of this test
|
||||
|
||||
--disable_query_log
|
||||
select "---Setup Section --" as "";
|
||||
--enable_query_log
|
||||
|
||||
set timestamp=1000000000;
|
||||
|
||||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1,t2,t3;
|
||||
--enable_warnings
|
||||
|
||||
connection master;
|
||||
CREATE TABLE t1(word VARCHAR(20));
|
||||
CREATE TABLE t2(id INT AUTO_INCREMENT NOT NULL PRIMARY KEY);
|
||||
CREATE TABLE t3(c1 INT NOT NULL PRIMARY KEY, c2 LONGBLOB, c3 TIMESTAMP, c4 TEXT, c5 FLOAT);
|
||||
|
||||
|
||||
# Test Section
|
||||
# Lets start by putting some data into the tables.
|
||||
|
||||
--disable_query_log
|
||||
INSERT INTO t1 VALUES ("abirvalg");
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE t1;
|
||||
|
||||
# d1 length 3000
|
||||
set @d1 = 'dd1';
|
||||
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
|
||||
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
|
||||
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
|
||||
|
||||
let $count=500;
|
||||
while ($count)
|
||||
{
|
||||
INSERT INTO t2 VALUES (NULL);
|
||||
eval INSERT INTO t3 VALUES ($count,@d1,'20060222000000','Tested in Texas',$count*2.2);
|
||||
dec $count;
|
||||
}
|
||||
--enable_query_log
|
||||
|
||||
|
||||
--disable_query_log
|
||||
select "---Test1 check table load --" as "";
|
||||
--enable_query_log
|
||||
|
||||
# Lets Check the tables on the Master
|
||||
SELECT COUNT(*) from t1;
|
||||
SELECT COUNT(*) from t2;
|
||||
SELECT COUNT(*) from t3;
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
|
||||
# Should have the same on the slave;
|
||||
|
||||
sync_slave_with_master;
|
||||
SELECT COUNT(*) from t1;
|
||||
SELECT COUNT(*) from t2;
|
||||
SELECT COUNT(*) from t3;
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
|
||||
# Okay time to get busy, back to master
|
||||
|
||||
connection master;
|
||||
|
||||
# simple query to show more in second binlog
|
||||
insert into t1 values ("Alas");
|
||||
flush logs;
|
||||
|
||||
# delimiters are for easier debugging in future
|
||||
--disable_query_log
|
||||
select "--- Test 1 Dump binlog to file --" as "";
|
||||
--enable_query_log
|
||||
|
||||
#
|
||||
# Prepare local temporary file to recreate what we have currently.
|
||||
|
||||
--exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/master.sql
|
||||
|
||||
--exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/log/master-bin.000002 >> $MYSQLTEST_VARDIR/tmp/master.sql
|
||||
|
||||
# Now that we have our file, lets get rid of the current database.
|
||||
# Cleanup the master and the slave and try to recreate.
|
||||
--disable_query_log
|
||||
select "--- Test 1 delete tables, clean master and slave --" as "";
|
||||
--enable_query_log
|
||||
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t2;
|
||||
DROP TABLE t3;
|
||||
|
||||
sync_slave_with_master;
|
||||
#we expect STOP SLAVE to produce a warning as the slave is stopped
|
||||
#(the server was started with skip-slave-start)
|
||||
--disable_warnings
|
||||
stop slave;
|
||||
--enable_warnings
|
||||
--require r/slave-stopped.result
|
||||
show status like 'Slave_running';
|
||||
connection master;
|
||||
reset master;
|
||||
connection slave;
|
||||
reset slave;
|
||||
start slave;
|
||||
--require r/slave-running.result
|
||||
show status like 'Slave_running';
|
||||
connection master;
|
||||
|
||||
# We should be clean at this point, now we will run in the file from above.
|
||||
--disable_query_log
|
||||
select "--- Test 1 Load from Dump binlog file --" as "";
|
||||
--enable_query_log
|
||||
|
||||
--exec $MYSQL -e "source $MYSQLTEST_VARDIR/tmp/master.sql"
|
||||
|
||||
--disable_query_log
|
||||
select "--- Test 1 Check Load Results --" as "";
|
||||
--enable_query_log
|
||||
|
||||
# Lets Check the tables on the Master
|
||||
SELECT COUNT(*) from t1;
|
||||
SELECT COUNT(*) from t2;
|
||||
SELECT COUNT(*) from t3;
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
|
||||
# Should have the same on the slave;
|
||||
|
||||
sync_slave_with_master;
|
||||
SELECT COUNT(*) from t1;
|
||||
SELECT COUNT(*) from t2;
|
||||
SELECT COUNT(*) from t3;
|
||||
SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
connection master;
|
||||
|
||||
# We should be gold by this time, so I will get rid of our file.
|
||||
|
||||
--exec rm $MYSQLTEST_VARDIR/tmp/master.sql
|
||||
|
||||
|
||||
# this test for position option
|
||||
# By setting this position to 412, we should only get the create of t3
|
||||
--disable_query_log
|
||||
select "--- Test 2 position test --" as "";
|
||||
--enable_query_log
|
||||
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||
--exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --position=412 $MYSQLTEST_VARDIR/log/master-bin.000001
|
||||
|
||||
# These are tests for remote binlog.
|
||||
# They should return the same as previous test.
|
||||
|
||||
--disable_query_log
|
||||
select "--- Test 3 First Remote test --" as "";
|
||||
--enable_query_log
|
||||
|
||||
# This is broken now
|
||||
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||
--exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
|
||||
|
||||
# This part is disabled due to bug #17654
|
||||
################### Start Bug 17654 ######################
|
||||
#--disable_query_log
|
||||
#select "--- Test 4 Second Remote test --" as "";
|
||||
#--enable_query_log
|
||||
#--exec $MYSQL_BINLOG --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 > $MYSQLTEST_VARDIR/tmp/remote.sql
|
||||
|
||||
#--exec $MYSQL_BINLOG --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000002 >> $MYSQLTEST_VARDIR/tmp/remote.sql
|
||||
|
||||
# Now that we have our file, lets get rid of the current database.
|
||||
# Cleanup the master and the slave and try to recreate.
|
||||
|
||||
#DROP TABLE t1;
|
||||
#DROP TABLE t2;
|
||||
#DROP TABLE t3;
|
||||
|
||||
#sync_slave_with_master;
|
||||
|
||||
#we expect STOP SLAVE to produce a warning as the slave is stopped
|
||||
#(the server was started with skip-slave-start)
|
||||
|
||||
#--disable_warnings
|
||||
#stop slave;
|
||||
#--enable_warnings
|
||||
#--require r/slave-stopped.result
|
||||
#show status like 'Slave_running';
|
||||
#connection master;
|
||||
#reset master;
|
||||
#connection slave;
|
||||
#reset slave;
|
||||
#start slave;
|
||||
#--require r/slave-running.result
|
||||
#show status like 'Slave_running';
|
||||
#connection master;
|
||||
|
||||
# We should be clean at this point, now we will run in the file from above.
|
||||
|
||||
#--exec $MYSQL -e "source $MYSQLTEST_VARDIR/tmp/remote.sql"
|
||||
|
||||
# Lets Check the tables on the Master
|
||||
|
||||
#SELECT COUNT(*) from t1;
|
||||
#SELECT COUNT(*) from t2;
|
||||
#SELECT COUNT(*) from t3;
|
||||
#SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
#SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
#SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
|
||||
# Should have the same on the slave;
|
||||
|
||||
#sync_slave_with_master;
|
||||
#SELECT COUNT(*) from t1;
|
||||
#SELECT COUNT(*) from t2;
|
||||
#SELECT COUNT(*) from t3;
|
||||
#SELECT * FROM t1 ORDER BY word LIMIT 5;
|
||||
#SELECT * FROM t2 ORDER BY id LIMIT 5;
|
||||
#SELECT c1, c3, c4, c5 FROM t3 ORDER BY c1 LIMIT 5;
|
||||
#connection master;
|
||||
|
||||
# We should be gold by the time, so I will get rid of our file.
|
||||
|
||||
#--exec rm $MYSQLTEST_VARDIR/tmp/remote.sql
|
||||
################### End Bug 17654 ######################
|
||||
|
||||
# LOAD DATA
|
||||
--disable_query_log
|
||||
select "--- Test 5 LOAD DATA --" as "";
|
||||
--enable_query_log
|
||||
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
|
||||
--exec $MYSQL_BINLOG --short-form --local-load=$MYSQLTEST_VARDIR/tmp/ --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000002
|
||||
|
||||
# Bug#7853 (mysqlbinlog does not accept input from stdin)
|
||||
|
||||
--disable_query_log
|
||||
select "--- Test 6 reading stdin --" as "";
|
||||
--enable_query_log
|
||||
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
|
||||
--exec $MYSQL_BINLOG --short-form - < $MYSQLTEST_VARDIR/log/master-bin.000001
|
||||
|
||||
--disable_query_log
|
||||
select "--- Test 7 reading stdin w/position --" as "";
|
||||
--enable_query_log
|
||||
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
|
||||
--exec $MYSQL_BINLOG --short-form --position=412 - < $MYSQLTEST_VARDIR/log/master-bin.000001
|
||||
|
||||
# Bug#16217 (mysql client did not know how not switch its internal charset)
|
||||
--disable_query_log
|
||||
select "--- Test 8 switch internal charset --" as "";
|
||||
--enable_query_log
|
||||
sync_slave_with_master;
|
||||
|
||||
#we expect STOP SLAVE to produce a warning as the slave is stopped
|
||||
#(the server was started with skip-slave-start)
|
||||
|
||||
--disable_warnings
|
||||
stop slave;
|
||||
--enable_warnings
|
||||
--require r/slave-stopped.result
|
||||
show status like 'Slave_running';
|
||||
connection master;
|
||||
reset master;
|
||||
connection slave;
|
||||
reset slave;
|
||||
start slave;
|
||||
--require r/slave-running.result
|
||||
show status like 'Slave_running';
|
||||
connection master;
|
||||
|
||||
create table t4 (f text character set utf8);
|
||||
create table t5 (f text character set cp932);
|
||||
--exec $MYSQL --default-character-set=utf8 test -e "insert into t4 values(_utf8'ソ')"
|
||||
--exec $MYSQL --default-character-set=cp932 test -e "insert into t5 values(_cp932'ƒ\');"
|
||||
flush logs;
|
||||
rename table t4 to t04, t5 to t05;
|
||||
--exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/log/master-bin.000001 | $MYSQL --default-character-set=utf8
|
||||
# original and recovered data must be equal
|
||||
select HEX(f) from t04;
|
||||
select HEX(f) from t4;
|
||||
select HEX(f) from t05;
|
||||
select HEX(f) from t5;
|
||||
|
||||
# slave should have same
|
||||
sync_slave_with_master;
|
||||
select HEX(f) from t04;
|
||||
select HEX(f) from t4;
|
||||
select HEX(f) from t05;
|
||||
select HEX(f) from t5;
|
||||
|
||||
--disable_query_log
|
||||
select "--- Test cleanup --" as "";
|
||||
--enable_query_log
|
||||
# clean up
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS t1, t2, t3, t04, t05, t4, t5;
|
||||
sync_slave_with_master;
|
||||
|
||||
# End of 4.1 tests
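Condensed, the round trip that the new test above keeps repeating is: flush the binary log, dump it with mysqlbinlog, wipe the tables, then replay the dump through the mysql client (variables and paths as in the test):

flush logs;
--exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/master.sql
--exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/log/master-bin.000002 >> $MYSQLTEST_VARDIR/tmp/master.sql
--exec $MYSQL -e "source $MYSQLTEST_VARDIR/tmp/master.sql"
--exec rm $MYSQLTEST_VARDIR/tmp/master.sql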
@ -119,8 +119,8 @@ SELECT * FROM t2 ORDER BY a;
|
|||
|
||||
# time to dump the databases and so we can see if they match
|
||||
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/sp001_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/sp001_slave.sql
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp001_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp001_slave.sql
|
||||
|
||||
# First lets cleanup
|
||||
|
||||
|
@ -135,12 +135,12 @@ sync_slave_with_master;
|
|||
# the test will show that the diff statement failed and no reject file
# will be created. You will need to go to the mysql-test dir and diff
# the files yourself to see what is not matching :-) Failed dump files
# will be located in mysql-test/var/tmp
# will be located in $MYSQLTEST_VARDIR/tmp
|
||||
--exec diff ./var/tmp/sp001_master.sql ./var/tmp/sp001_slave.sql;
|
||||
--exec diff $MYSQLTEST_VARDIR/tmp/sp001_master.sql $MYSQLTEST_VARDIR/tmp/sp001_slave.sql;
|
||||
|
||||
# If all is good, we can clean up our dump files.
|
||||
system rm ./var/tmp/sp001_master.sql;
|
||||
system rm ./var/tmp/sp001_slave.sql;
|
||||
system rm $MYSQLTEST_VARDIR/tmp/sp001_master.sql;
|
||||
system rm $MYSQLTEST_VARDIR/tmp/sp001_slave.sql;
|
||||
|
||||
# End of 5.0 test case
|
||||
|
|
|
@ -37,7 +37,7 @@ BEGIN
|
|||
END|
|
||||
delimiter ;|
|
||||
CALL test.p2();
|
||||
SELECT * FROM test.t1;
|
||||
SELECT * FROM test.t1 ORDER BY a;
|
||||
|
||||
save_master_pos;
|
||||
connection slave;
|
||||
|
@ -59,12 +59,12 @@ BEGIN
|
|||
END|
|
||||
delimiter ;|
|
||||
CALL test.p4();
|
||||
SELECT * FROM test.t2;
|
||||
SELECT * FROM test.t2 ORDER BY a;
|
||||
|
||||
save_master_pos;
|
||||
connection slave;
|
||||
sync_with_master;
|
||||
SELECT * FROM test.t2;
|
||||
SELECT * FROM test.t2 ORDER BY a;
|
||||
|
||||
# Cleanup
|
||||
connection master;
|
||||
|
|
|
@ -79,8 +79,8 @@ CALL test.p1();
|
|||
sync_slave_with_master;
|
||||
#SELECT * FROM test.t2;
|
||||
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/sp011_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/sp011_slave.sql
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp011_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp011_slave.sql
|
||||
|
||||
# Cleanup
|
||||
connection master;
|
||||
|
@ -100,12 +100,12 @@ sync_slave_with_master;
|
|||
# the test will show that the diff statement failed and no reject file
# will be created. You will need to go to the mysql-test dir and diff
# the files yourself to see what is not matching :-) Failed test
# Will leave dump files in mysql-test/var/tmp
# Will leave dump files in $MYSQLTEST_VARDIR/tmp
|
||||
exec diff ./var/tmp/sp011_master.sql ./var/tmp/sp011_slave.sql;
|
||||
exec diff $MYSQLTEST_VARDIR/tmp/sp011_master.sql $MYSQLTEST_VARDIR/tmp/sp011_slave.sql;
|
||||
|
||||
# If all is good, we can clean up our dump files.
|
||||
system rm ./var/tmp/sp011_master.sql;
|
||||
system rm ./var/tmp/sp011_slave.sql;
|
||||
system rm $MYSQLTEST_VARDIR/tmp/sp011_master.sql;
|
||||
system rm $MYSQLTEST_VARDIR/tmp/sp011_slave.sql;
|
||||
|
||||
# End of 5.0 test case
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
connection master;
|
||||
create table t1 (words varchar(20)) engine=myisam;
|
||||
|
||||
load data infile '../../std_data/words.dat' into table t1 (words);
|
||||
load data infile '../std_data_ln/words.dat' into table t1 (words);
|
||||
select count(*) from t1;
|
||||
save_master_pos;
|
||||
|
||||
|
|
|
@ -75,8 +75,8 @@ let $message=<End test section 2 (Tiggers & SP)>;
|
|||
|
||||
# time to dump the databases and so we can see if they match
|
||||
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/trig001_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/trig001_slave.sql
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trig001_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trig001_slave.sql
|
||||
|
||||
# Cleanup
|
||||
connection master;
|
||||
|
@ -93,8 +93,8 @@ sync_slave_with_master;
|
|||
# the test will show that the diff statement failed and no reject file
# will be created. You will need to go to the mysql-test dir and diff
# the files yourself to see what is not matching :-) Failed tests
# will leave dump files in mysql-test/var/tmp
# will leave dump files in $MYSQLTEST_VARDIR/tmp
|
||||
exec diff ./var/tmp/trig001_master.sql ./var/tmp/trig001_slave.sql;
|
||||
exec diff $MYSQLTEST_VARDIR/tmp/trig001_master.sql $MYSQLTEST_VARDIR/tmp/trig001_slave.sql;
|
||||
|
||||
# End of 5.0 test case
|
||||
|
|
|
@ -125,8 +125,8 @@ connection master;
|
|||
|
||||
# time to dump the databases and so we can see if they match
|
||||
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/trg003_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/trg003_slave.sql
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trg003_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trg003_slave.sql
|
||||
|
||||
# cleanup
|
||||
--disable_warnings
|
||||
|
@ -147,6 +147,6 @@ DROP TABLE IF EXISTS test.t2;
|
|||
DROP TABLE IF EXISTS test.t3;
|
||||
--enable_warnings
|
||||
|
||||
exec diff ./var/tmp/trg003_master.sql ./var/tmp/trg003_slave.sql;
|
||||
exec diff $MYSQLTEST_VARDIR/tmp/trg003_master.sql $MYSQLTEST_VARDIR/tmp/trg003_slave.sql;
|
||||
|
||||
# End of 5.0 test case
|
||||
|
|
|
@ -76,8 +76,8 @@ SELECT * FROM test.t1;
|
|||
SELECT * FROM test.t2;
|
||||
#SELECT * FROM test.t3;
|
||||
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/sp004_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > ./var/tmp/sp004_slave.sql
|
||||
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp004_master.sql
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp004_slave.sql
|
||||
|
||||
# Cleanup
|
||||
connection master;
|
||||
|
@ -91,7 +91,7 @@ sync_slave_with_master;
|
|||
|
||||
# If the test fails, you will need to diff the dumps to see why.
|
||||
|
||||
-- exec diff ./var/tmp/sp004_master.sql ./var/tmp/sp004_slave.sql
|
||||
-- exec diff $MYSQLTEST_VARDIR/tmp/sp004_master.sql $MYSQLTEST_VARDIR/tmp/sp004_slave.sql
|
||||
|
||||
|
||||
# End of 5.0 test case
|
||||
|
|
|
@ -393,7 +393,7 @@ SHOW TABLE STATUS like 't1';
|
|||
--error 1033
|
||||
show create table t1;
|
||||
drop table if exists t1;
|
||||
system rm -f var/master-data/test/t1.frm ;
|
||||
system rm -f $MYSQLTEST_VARDIR/master-data/test/t1.frm ;
|
||||
|
||||
|
||||
# End of 4.1 tests
|
||||
|
|
|
@ -1681,6 +1681,17 @@ call bug15091();
|
|||
drop procedure bug15091;
|
||||
|
||||
|
||||
#
|
||||
# BUG#16896: Stored function: unused AGGREGATE-clause in CREATE FUNCTION
|
||||
#
|
||||
--disable_warnings
|
||||
drop function if exists bug16896;
|
||||
--enable_warnings
|
||||
|
||||
--error ER_SP_NO_AGGREGATE
|
||||
create aggregate function bug16896() returns int return 1;
|
||||
|
||||
|
||||
#
|
||||
# BUG#NNNN: New bug synopsis
|
||||
#
|
||||
|
@ -1688,3 +1699,4 @@ drop procedure bug15091;
|
|||
#drop procedure if exists bugNNNN|
|
||||
#--enable_warnings
|
||||
#create procedure bugNNNN...
|
||||
|
||||
|
|
|
@ -525,4 +525,26 @@ disconnect user_bug14533;
|
|||
drop user user_bug14533@localhost;
|
||||
drop database db_bug14533;
|
||||
|
||||
|
||||
#
|
||||
# BUG#7787: Stored procedures: improper warning for "grant execute" statement
|
||||
#
|
||||
|
||||
# Prepare.
|
||||
|
||||
CREATE DATABASE db_bug7787;
|
||||
use db_bug7787;
|
||||
|
||||
# Test.
|
||||
|
||||
CREATE PROCEDURE p1()
|
||||
SHOW INNODB STATUS;
|
||||
|
||||
GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost;
|
||||
|
||||
# Cleanup.
|
||||
|
||||
DROP DATABASE db_bug7787;
|
||||
use test;
|
||||
|
||||
# End of 5.0 bugs.
|
||||
|
|
|
@ -1660,7 +1660,7 @@ drop function `foo`|
|
|||
# Implicit LOCK/UNLOCK TABLES for table access in functions
|
||||
#
|
||||
|
||||
--disable_warning
|
||||
--disable_warnings
|
||||
drop function if exists t1max|
|
||||
--enable_warnings
|
||||
create function t1max() returns int
|
||||
|
@ -1704,6 +1704,397 @@ drop table t3|
|
|||
drop function getcount|
|
||||
|
||||
|
||||
# Test cases for different combinations of condition handlers in nested
|
||||
# begin-end blocks in stored procedures.
|
||||
#
|
||||
# Note that the standard specifies that the most specific handler should
|
||||
# be triggered even if it's an outer handler masked by a less specific
|
||||
# handler in an inner block.
|
||||
# Note also that '02000' is more specific than NOT FOUND; there might be
|
||||
# other '02xxx' states, even if we currently do not issue them in any
|
||||
# situation (e.g. '02001').
|
||||
#
|
||||
# The combinations we test are these:
|
||||
#
|
||||
# Inner
|
||||
# errcode sqlstate not found sqlwarning sqlexception
|
||||
# Outer +------------+------------+------------+------------+------------+
|
||||
#errcode | h_ee (i) | h_es (o) | h_en (o) | h_ew (o) | h_ex (o) |
|
||||
#sqlstate | h_se (i) | h_ss (i) | h_sn (o) | h_sw (o) | h_sx (o) |
|
||||
#not found | h_ne (i) | h_ns (i) | h_nn (i) | | |
|
||||
#sqlwarning | h_we (i) | h_ws (i) | | h_ww (i) | |
|
||||
#sqlexception | h_xe (i) | h_xs (i) | | | h_xx (i) |
|
||||
# +------------+---------------------------------------------------+
|
||||
#
|
||||
# (i) means that the inner handler is the one that should be invoked,
|
||||
# (o) means that the outer handler should be invoked.
|
||||
#
|
||||
# ('not found', 'sqlwarning' and 'sqlexception' are mutually exclusive, hence
|
||||
# no tests for those combinations.)
|
||||
#
|
||||
|
||||
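To make the (i)/(o) markers above concrete before the nineteen procedures that follow: for a duplicate-key failure, the matrix expects a handler declared on the exact error code 1062 to beat one declared on SQLSTATE '23000', which in turn beats a bare SQLEXCEPTION handler. A hypothetical stand-alone illustration of what the h_es case below expects (it assumes a table t3 whose primary key already contains the value 1, as set up below):

create procedure specificity_demo()
deterministic
begin
  declare continue handler for 1062 -- exact error code: most specific
    select 'outer errcode handler fired' as result;
  begin
    declare continue handler for sqlstate '23000' -- class handler: less specific
      select 'inner sqlstate handler fired' as result;
    insert into t3 values (1); -- duplicate key; the matrix expects the outer handler to run
  end;
end|
call specificity_demo()| -- expected, per the h_es case: 'outer errcode handler fired'
drop procedure specificity_demo|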
--disable_warnings
|
||||
drop table if exists t3|
|
||||
drop procedure if exists h_ee|
|
||||
drop procedure if exists h_es|
|
||||
drop procedure if exists h_en|
|
||||
drop procedure if exists h_ew|
|
||||
drop procedure if exists h_ex|
|
||||
drop procedure if exists h_se|
|
||||
drop procedure if exists h_ss|
|
||||
drop procedure if exists h_sn|
|
||||
drop procedure if exists h_sw|
|
||||
drop procedure if exists h_sx|
|
||||
drop procedure if exists h_ne|
|
||||
drop procedure if exists h_ns|
|
||||
drop procedure if exists h_nn|
|
||||
drop procedure if exists h_we|
|
||||
drop procedure if exists h_ws|
|
||||
drop procedure if exists h_ww|
|
||||
drop procedure if exists h_xe|
|
||||
drop procedure if exists h_xs|
|
||||
drop procedure if exists h_xx|
|
||||
--enable_warnings
|
||||
|
||||
# smallint - to get out of range warnings
|
||||
# primary key - to get constraint errors
|
||||
create table t3 (a smallint primary key)|
|
||||
|
||||
insert into t3 (a) values (1)|
|
||||
|
||||
create procedure h_ee()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Outer (bad)' as 'h_ee';
|
||||
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Inner (good)' as 'h_ee';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_es()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Outer (good)' as 'h_es';
|
||||
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Inner (bad)' as 'h_es';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_en()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1329 -- ER_SP_FETCH_NO_DATA
|
||||
select 'Outer (good)' as 'h_en';
|
||||
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for sqlstate '02000' -- no data
|
||||
select 'Inner (bad)' as 'h_en';
|
||||
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_ew()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1264 -- ER_WARN_DATA_OUT_OF_RANGE
|
||||
select 'Outer (good)' as 'h_ew';
|
||||
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Inner (bad)' as 'h_ew';
|
||||
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
|
||||
create procedure h_ex()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Outer (good)' as 'h_ex';
|
||||
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Inner (bad)' as 'h_ex';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_se()
|
||||
deterministic
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Outer (bad)' as 'h_se';
|
||||
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Inner (good)' as 'h_se';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_ss()
|
||||
deterministic
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Outer (bad)' as 'h_ss';
|
||||
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Inner (good)' as 'h_ss';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_sn()
|
||||
deterministic
|
||||
begin
|
||||
-- Note: '02000' is more specific than NOT FOUND ;
|
||||
-- there might be other not found states
|
||||
declare continue handler for sqlstate '02000' -- no data
|
||||
select 'Outer (good)' as 'h_sn';
|
||||
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for not found
|
||||
select 'Inner (bad)' as 'h_sn';
|
||||
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_sw()
|
||||
deterministic
|
||||
begin
|
||||
-- data exception - numeric value out of range
|
||||
declare continue handler for sqlstate '22003'
|
||||
select 'Outer (good)' as 'h_sw';
|
||||
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Inner (bad)' as 'h_sw';
|
||||
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
|
||||
create procedure h_sx()
|
||||
deterministic
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Outer (good)' as 'h_sx';
|
||||
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Inner (bad)' as 'h_sx';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_ne()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for not found
|
||||
select 'Outer (bad)' as 'h_ne';
|
||||
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for 1329 -- ER_SP_FETCH_NO_DATA
|
||||
select 'Inner (good)' as 'h_ne';
|
||||
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_ns()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for not found
|
||||
select 'Outer (bad)' as 'h_ns';
|
||||
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for sqlstate '02000' -- no data
|
||||
select 'Inner (good)' as 'h_ns';
|
||||
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_nn()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for not found
|
||||
select 'Outer (bad)' as 'h_nn';
|
||||
|
||||
begin
|
||||
declare x int;
|
||||
declare continue handler for not found
|
||||
select 'Inner (good)' as 'h_nn';
|
||||
|
||||
select a into x from t3 where a = 42;
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_we()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Outer (bad)' as 'h_we';
|
||||
|
||||
begin
|
||||
declare continue handler for 1264 -- ER_WARN_DATA_OUT_OF_RANGE
|
||||
select 'Inner (good)' as 'h_we';
|
||||
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
|
||||
create procedure h_ws()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Outer (bad)' as 'h_ws';
|
||||
|
||||
begin
|
||||
-- data exception - numeric value out of range
|
||||
declare continue handler for sqlstate '22003'
|
||||
select 'Inner (good)' as 'h_ws';
|
||||
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
|
||||
create procedure h_ww()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Outer (bad)' as 'h_ww';
|
||||
|
||||
begin
|
||||
declare continue handler for sqlwarning
|
||||
select 'Inner (good)' as 'h_ww';
|
||||
|
||||
insert into t3 values (123456789012);
|
||||
end;
|
||||
delete from t3;
|
||||
insert into t3 values (1);
|
||||
end|
|
||||
|
||||
create procedure h_xe()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Outer (bad)' as 'h_xe';
|
||||
|
||||
begin
|
||||
declare continue handler for 1062 -- ER_DUP_ENTRY
|
||||
select 'Inner (good)' as 'h_xe';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_xs()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Outer (bad)' as 'h_xs';
|
||||
|
||||
begin
|
||||
-- integrity constraint violation
|
||||
declare continue handler for sqlstate '23000'
|
||||
select 'Inner (good)' as 'h_xs';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
create procedure h_xx()
|
||||
deterministic
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Outer (bad)' as 'h_xx';
|
||||
|
||||
begin
|
||||
declare continue handler for sqlexception
|
||||
select 'Inner (good)' as 'h_xx';
|
||||
|
||||
insert into t3 values (1);
|
||||
end;
|
||||
end|
|
||||
|
||||
call h_ee()|
|
||||
call h_es()|
|
||||
call h_en()|
|
||||
call h_ew()|
|
||||
call h_ex()|
|
||||
call h_se()|
|
||||
call h_ss()|
|
||||
call h_sn()|
|
||||
call h_sw()|
|
||||
call h_sx()|
|
||||
call h_ne()|
|
||||
call h_ns()|
|
||||
call h_nn()|
|
||||
call h_we()|
|
||||
call h_ws()|
|
||||
call h_ww()|
|
||||
call h_xe()|
|
||||
call h_xs()|
|
||||
call h_xx()|
|
||||
|
||||
drop table t3|
|
||||
drop procedure h_ee|
|
||||
drop procedure h_es|
|
||||
drop procedure h_en|
|
||||
drop procedure h_ew|
|
||||
drop procedure h_ex|
|
||||
drop procedure h_se|
|
||||
drop procedure h_ss|
|
||||
drop procedure h_sn|
|
||||
drop procedure h_sw|
|
||||
drop procedure h_sx|
|
||||
drop procedure h_ne|
|
||||
drop procedure h_ns|
|
||||
drop procedure h_nn|
|
||||
drop procedure h_we|
|
||||
drop procedure h_ws|
|
||||
drop procedure h_ww|
|
||||
drop procedure h_xe|
|
||||
drop procedure h_xs|
|
||||
drop procedure h_xx|
|
||||
|
||||
|
||||
#
|
||||
# Test cases for old bugs
|
||||
#
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
INCLUDES= -I$(top_builddir)/include
|
||||
INCLUDES= -I$(top_builddir)/include -I$(top_srcdir)/include
|
||||
noinst_LTLIBRARIES= mypluglib.la
|
||||
mypluglib_la_SOURCES= plugin_example.c
|
||||
mypluglib_la_LDFLAGS= -module -rpath $(pkglibdir)
|
||||
|
|
|
@ -519,6 +519,7 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
|
|||
{
|
||||
NDBINDEX *index = (NDBINDEX *) m_index[i].index;
|
||||
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
|
||||
if (!index && !unique_index) continue;
|
||||
NDB_INDEX_TYPE idx_type= m_index[i].type;
|
||||
|
||||
switch (idx_type) {
|
||||
|
@ -991,8 +992,8 @@ bool ha_ndbcluster::uses_blob_value()
|
|||
-2 Meta data has changed; Re-read data and try again
|
||||
*/
|
||||
|
||||
static int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
|
||||
uint pack_length)
|
||||
int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
|
||||
uint pack_length)
|
||||
{
|
||||
DBUG_ENTER("cmp_frm");
|
||||
/*
|
||||
|
@ -1076,7 +1077,7 @@ int ha_ndbcluster::get_metadata(const char *path)
|
|||
m_table= (void *)tab;
|
||||
m_table_info= NULL; // Set in external lock
|
||||
|
||||
DBUG_RETURN(open_indexes(ndb, table));
|
||||
DBUG_RETURN(open_indexes(ndb, table, FALSE));
|
||||
}
|
||||
|
||||
static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
|
||||
|
@ -1249,7 +1250,7 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
|
|||
/*
|
||||
Associate index handles for each index of a table
|
||||
*/
|
||||
int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab)
|
||||
int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
|
||||
{
|
||||
uint i;
|
||||
int error= 0;
|
||||
|
@ -1263,7 +1264,10 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab)
|
|||
for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
|
||||
{
|
||||
if ((error= add_index_handle(thd, dict, key_info, *key_name, i)))
|
||||
break;
|
||||
if (ignore_error)
|
||||
m_index[i].index= m_index[i].unique_index= NULL;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
DBUG_RETURN(error);
|
||||
|
@ -3739,7 +3743,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
|
|||
{
|
||||
m_table= (void *)tab;
|
||||
m_table_version = tab->getObjectVersion();
|
||||
if (!(my_errno= open_indexes(ndb, table)))
|
||||
if (!(my_errno= open_indexes(ndb, table, FALSE)))
|
||||
DBUG_RETURN(my_errno);
|
||||
}
|
||||
m_table_info= tab_info;
|
||||
|
|
|
@ -691,6 +691,10 @@ static void set_tabname(const char *pathname, char *tabname);
|
|||
|
||||
private:
|
||||
friend int ndbcluster_drop_database_impl(const char *path);
|
||||
friend int ndb_handle_schema_change(THD *thd,
|
||||
Ndb *ndb, NdbEventOperation *pOp,
|
||||
NDB_SHARE *share);
|
||||
|
||||
int alter_table_name(const char *to);
|
||||
static int delete_table(ha_ndbcluster *h, Ndb *ndb,
|
||||
const char *path,
|
||||
|
@ -708,7 +712,7 @@ private:
|
|||
int create_indexes(Ndb *ndb, TABLE *tab);
|
||||
void clear_index(int i);
|
||||
void clear_indexes();
|
||||
int open_indexes(Ndb *ndb, TABLE *tab);
|
||||
int open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error);
|
||||
void renumber_indexes(Ndb *ndb, TABLE *tab);
|
||||
int drop_indexes(Ndb *ndb, TABLE *tab);
|
||||
int add_index_handle(THD *thd, NdbDictionary::Dictionary *dict,
|
||||
|
|
|
@ -230,6 +230,72 @@ static void run_query(THD *thd, char *buf, char *end,
|
|||
}
|
||||
}
|
||||
|
||||
int
|
||||
ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
|
||||
TABLE_SHARE *table_share, TABLE *table)
|
||||
{
|
||||
int error;
|
||||
MEM_ROOT *mem_root= &share->mem_root;
|
||||
DBUG_ENTER("ndbcluster_binlog_open_table");
|
||||
|
||||
init_tmp_table_share(table_share, share->db, 0, share->table_name,
|
||||
share->key);
|
||||
if ((error= open_table_def(thd, table_share, 0)))
|
||||
{
|
||||
sql_print_error("Unable to get table share for %s, error=%d",
|
||||
share->key, error);
|
||||
DBUG_PRINT("error", ("open_table_def failed %d", error));
|
||||
my_free((gptr) table_share, MYF(0));
|
||||
table_share= 0;
|
||||
my_free((gptr) table, MYF(0));
|
||||
table= 0;
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
if ((error= open_table_from_share(thd, table_share, "", 0,
|
||||
(uint) READ_ALL, 0, table, FALSE)))
|
||||
{
|
||||
sql_print_error("Unable to open table for %s, error=%d(%d)",
|
||||
share->key, error, my_errno);
|
||||
DBUG_PRINT("error", ("open_table_from_share failed %d", error));
|
||||
my_free((gptr) table_share, MYF(0));
|
||||
table_share= 0;
|
||||
my_free((gptr) table, MYF(0));
|
||||
table= 0;
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
assign_new_table_id(table);
|
||||
if (!table->record[1] || table->record[1] == table->record[0])
|
||||
{
|
||||
table->record[1]= alloc_root(&table->mem_root,
|
||||
table->s->rec_buff_length);
|
||||
}
|
||||
table->in_use= injector_thd;
|
||||
|
||||
table->s->db.str= share->db;
|
||||
table->s->db.length= strlen(share->db);
|
||||
table->s->table_name.str= share->table_name;
|
||||
table->s->table_name.length= strlen(share->table_name);
|
||||
|
||||
share->table_share= table_share;
|
||||
share->table= table;
|
||||
#ifndef DBUG_OFF
|
||||
dbug_print_table("table", table);
|
||||
#endif
|
||||
/*
|
||||
! do not touch the contents of the table
|
||||
it may be in use by the injector thread
|
||||
*/
|
||||
share->ndb_value[0]= (NdbValue*)
|
||||
alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
|
||||
+ 1 /*extra for hidden key*/);
|
||||
share->ndb_value[1]= (NdbValue*)
|
||||
alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
|
||||
+1 /*extra for hidden key*/);
|
||||
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Initialize the binlog part of the NDB_SHARE
|
||||
*/
|
||||
|
@ -280,64 +346,12 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
|
|||
}
|
||||
while (1)
|
||||
{
|
||||
int error;
|
||||
TABLE_SHARE *table_share=
|
||||
(TABLE_SHARE *) my_malloc(sizeof(*table_share), MYF(MY_WME));
|
||||
TABLE *table= (TABLE*) my_malloc(sizeof(*table), MYF(MY_WME));
|
||||
int error;
|
||||
|
||||
init_tmp_table_share(table_share, share->db, 0, share->table_name,
|
||||
share->key);
|
||||
if ((error= open_table_def(thd, table_share, 0)))
|
||||
{
|
||||
sql_print_error("Unable to get table share for %s, error=%d",
|
||||
share->key, error);
|
||||
DBUG_PRINT("error", ("open_table_def failed %d", error));
|
||||
my_free((gptr) table_share, MYF(0));
|
||||
table_share= 0;
|
||||
my_free((gptr) table, MYF(0));
|
||||
table= 0;
|
||||
if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table)))
|
||||
break;
|
||||
}
|
||||
if ((error= open_table_from_share(thd, table_share, "", 0,
|
||||
(uint) READ_ALL, 0, table, FALSE)))
|
||||
{
|
||||
sql_print_error("Unable to open table for %s, error=%d(%d)",
|
||||
share->key, error, my_errno);
|
||||
DBUG_PRINT("error", ("open_table_from_share failed %d", error));
|
||||
my_free((gptr) table_share, MYF(0));
|
||||
table_share= 0;
|
||||
my_free((gptr) table, MYF(0));
|
||||
table= 0;
|
||||
break;
|
||||
}
|
||||
assign_new_table_id(table);
|
||||
if (!table->record[1] || table->record[1] == table->record[0])
|
||||
{
|
||||
table->record[1]= alloc_root(&table->mem_root,
|
||||
table->s->rec_buff_length);
|
||||
}
|
||||
table->in_use= injector_thd;
|
||||
|
||||
table->s->db.str= share->db;
|
||||
table->s->db.length= strlen(share->db);
|
||||
table->s->table_name.str= share->table_name;
|
||||
table->s->table_name.length= strlen(share->table_name);
|
||||
|
||||
share->table_share= table_share;
|
||||
share->table= table;
|
||||
#ifndef DBUG_OFF
|
||||
dbug_print_table("table", table);
|
||||
#endif
|
||||
/*
|
||||
! do not touch the contents of the table
|
||||
it may be in use by the injector thread
|
||||
*/
|
||||
share->ndb_value[0]= (NdbValue*)
|
||||
alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
|
||||
+ 1 /*extra for hidden key*/);
|
||||
share->ndb_value[1]= (NdbValue*)
|
||||
alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
|
||||
+1 /*extra for hidden key*/);
|
||||
if (table->s->primary_key == MAX_KEY)
|
||||
share->flags|= NSF_HIDDEN_PK;
|
||||
if (table->s->blob_fields != 0)
|
||||
|
@ -1285,24 +1299,91 @@ end:
|
|||
/*
|
||||
Handle _non_ data events from the storage nodes
|
||||
*/
|
||||
static int
|
||||
int
|
||||
ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
|
||||
NDB_SHARE *share)
|
||||
{
|
||||
DBUG_ENTER("ndb_handle_schema_change");
|
||||
int remote_drop_table= 0, do_close_cached_tables= 0;
|
||||
const char *dbname= share->table->s->db.str;
|
||||
const char *tabname= share->table->s->table_name.str;
|
||||
bool online_alter_table= (pOp->getEventType() == NDBEVENT::TE_ALTER &&
|
||||
pOp->tableFrmChanged());
|
||||
|
||||
if (pOp->getEventType() != NDBEVENT::TE_CLUSTER_FAILURE &&
|
||||
pOp->getReqNodeId() != g_ndb_cluster_connection->node_id())
|
||||
(uint) pOp->getReqNodeId() != g_ndb_cluster_connection->node_id())
|
||||
{
|
||||
TABLE_SHARE *table_share= share->table->s;
|
||||
TABLE* table= share->table;
|
||||
|
||||
/*
|
||||
Invalidate table and all its indexes
|
||||
*/
|
||||
ndb->setDatabaseName(share->table->s->db.str);
|
||||
ha_ndbcluster::invalidate_dictionary_cache(share->table->s,
|
||||
ndb,
|
||||
share->table->s->db.str,
|
||||
share->table->s->table_name.str,
|
||||
TRUE);
|
||||
Thd_ndb *thd_ndb= get_thd_ndb(thd);
|
||||
DBUG_ASSERT(thd_ndb != NULL);
|
||||
Ndb* old_ndb= thd_ndb->ndb;
|
||||
thd_ndb->ndb= ndb;
|
||||
ha_ndbcluster table_handler(table_share);
|
||||
table_handler.set_dbname(share->key);
|
||||
table_handler.set_tabname(share->key);
|
||||
table_handler.open_indexes(ndb, table, TRUE);
|
||||
table_handler.invalidate_dictionary_cache(TRUE);
|
||||
thd_ndb->ndb= old_ndb;
|
||||
|
||||
if (online_alter_table)
|
||||
{
|
||||
char key[FN_REFLEN];
|
||||
const void *data= 0, *pack_data= 0;
|
||||
uint length, pack_length;
|
||||
int error;
|
||||
NDBDICT *dict= ndb->getDictionary();
|
||||
const NDBTAB *altered_table= pOp->getTable();
|
||||
|
||||
DBUG_PRINT("info", ("Detected frm change of table %s.%s",
|
||||
dbname, tabname));
|
||||
build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
|
||||
/*
|
||||
If the frm of the altered table is different than the one on
|
||||
disk then overwrite it with the new table definition
|
||||
*/
|
||||
if (readfrm(key, &data, &length) == 0 &&
|
||||
packfrm(data, length, &pack_data, &pack_length) == 0 &&
|
||||
cmp_frm(altered_table, pack_data, pack_length))
|
||||
{
|
||||
DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
|
||||
altered_table->getFrmLength());
|
||||
pthread_mutex_lock(&LOCK_open);
|
||||
const NDBTAB *old= dict->getTable(tabname);
|
||||
if (!old &&
|
||||
old->getObjectVersion() != altered_table->getObjectVersion())
|
||||
dict->putTable(altered_table);
|
||||
|
||||
if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
|
||||
(error= writefrm(key, data, length)))
|
||||
{
|
||||
sql_print_information("NDB: Failed write frm for %s.%s, error %d",
|
||||
dbname, tabname, error);
|
||||
}
|
||||
close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
|
||||
if ((error= ndbcluster_binlog_open_table(thd, share,
|
||||
table_share, table)))
|
||||
sql_print_information("NDB: Failed to re-open table %s.%s",
|
||||
dbname, tabname);
|
||||
pthread_mutex_unlock(&LOCK_open);
|
||||
}
|
||||
}
|
||||
remote_drop_table= 1;
|
||||
}
|
||||
|
||||
// If only frm was changed continue replicating
|
||||
if (online_alter_table)
|
||||
{
|
||||
/* Signal ha_ndbcluster::alter_table that drop is done */
|
||||
(void) pthread_cond_signal(&injector_cond);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
(void) pthread_mutex_lock(&share->mutex);
|
||||
DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
|
||||
if (share->op_old == pOp)
|
||||
|
@ -1481,11 +1562,16 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
|
|||
// skip
|
||||
break;
|
||||
case NDBEVENT::TE_ALTER:
|
||||
/* do the rename of the table in the share */
|
||||
share->table->s->db.str= share->db;
|
||||
share->table->s->db.length= strlen(share->db);
|
||||
share->table->s->table_name.str= share->table_name;
|
||||
share->table->s->table_name.length= strlen(share->table_name);
|
||||
if (pOp->tableNameChanged())
|
||||
{
|
||||
DBUG_PRINT("info", ("Detected name change of table %s.%s",
|
||||
share->db, share->table_name));
|
||||
/* do the rename of the table in the share */
|
||||
share->table->s->db.str= share->db;
|
||||
share->table->s->db.length= strlen(share->db);
|
||||
share->table->s->table_name.str= share->table_name;
|
||||
share->table->s->table_name.length= strlen(share->table_name);
|
||||
}
|
||||
ndb_handle_schema_change(thd, ndb, pOp, share);
|
||||
break;
|
||||
case NDBEVENT::TE_CLUSTER_FAILURE:
|
||||
|
@ -1641,7 +1727,8 @@ int ndb_add_binlog_index(THD *thd, void *_row)
|
|||
{
|
||||
if (need_reopen)
|
||||
{
|
||||
close_tables_for_reopen(thd, &binlog_tables);
|
||||
TABLE_LIST *p_binlog_tables= &binlog_tables;
|
||||
close_tables_for_reopen(thd, &p_binlog_tables);
|
||||
binlog_index= 0;
|
||||
continue;
|
||||
}
|
||||
|
@ -1766,6 +1853,7 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
|
|||
/* Handle any trailing share */
|
||||
NDB_SHARE *share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
|
||||
(byte*) key, key_len);
|
||||
|
||||
if (share && share_may_exist)
|
||||
{
|
||||
if (share->flags & NSF_NO_BINLOG ||
|
||||
|
@ -2091,7 +2179,6 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
|
|||
op= ndb->createEventOperation(event_name);
|
||||
// reset to catch errors
|
||||
ndb->setDatabaseName("");
|
||||
ndb->setDatabaseSchemaName("");
|
||||
}
|
||||
if (!op)
|
||||
{
|
||||
|
@ -2372,17 +2459,22 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
|
|||
share->key, share, pOp, share->op, share->op_old));
|
||||
break;
|
||||
case NDBEVENT::TE_ALTER:
|
||||
/* ToDo: remove printout */
|
||||
if (ndb_extra_logging)
|
||||
sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.",
|
||||
share_prefix, share->table->s->db.str,
|
||||
share->table->s->table_name.str,
|
||||
share->key);
|
||||
/* do the rename of the table in the share */
|
||||
share->table->s->db.str= share->db;
|
||||
share->table->s->db.length= strlen(share->db);
|
||||
share->table->s->table_name.str= share->table_name;
|
||||
share->table->s->table_name.length= strlen(share->table_name);
|
||||
if (pOp->tableNameChanged())
|
||||
{
|
||||
DBUG_PRINT("info", ("Detected name change of table %s.%s",
|
||||
share->db, share->table_name));
|
||||
/* ToDo: remove printout */
|
||||
if (ndb_extra_logging)
|
||||
sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.",
|
||||
share_prefix, share->table->s->db.str,
|
||||
share->table->s->table_name.str,
|
||||
share->key);
|
||||
/* do the rename of the table in the share */
|
||||
share->table->s->db.str= share->db;
|
||||
share->table->s->db.length= strlen(share->db);
|
||||
share->table->s->table_name.str= share->table_name;
|
||||
share->table->s->table_name.length= strlen(share->table_name);
|
||||
}
|
||||
goto drop_alter_common;
|
||||
case NDBEVENT::TE_DROP:
|
||||
if (apply_status_share == share)
|
||||
|
@ -2659,8 +2751,8 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
|
|||
goto err;
|
||||
}
|
||||
|
||||
// empty database and schema
|
||||
if (!(ndb= new Ndb(g_ndb_cluster_connection, "", "")) ||
|
||||
// empty database
|
||||
if (!(ndb= new Ndb(g_ndb_cluster_connection, "")) ||
|
||||
ndb->init())
|
||||
{
|
||||
sql_print_error("NDB Binlog: Getting Ndb object failed");
|
||||
|
@ -2924,7 +3016,6 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
|
|||
ndb_binlog_thread_handle_non_data_event(ndb, pOp, row);
|
||||
// reset to catch errors
|
||||
ndb->setDatabaseName("");
|
||||
ndb->setDatabaseSchemaName("");
|
||||
}
|
||||
|
||||
pOp= ndb->nextEvent();
|
||||
|
|
|
@ -123,6 +123,8 @@ ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
|
|||
prototypes for ndb handler utility function also needed by
|
||||
the ndb binlog code
|
||||
*/
|
||||
int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
|
||||
uint pack_length);
|
||||
int ndbcluster_find_all_files(THD *thd);
|
||||
#endif /* HAVE_NDB_BINLOG */
|
||||
|
||||
|
|
|
@ -5281,6 +5281,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
|
|||
tested replicate-* rules).
|
||||
*/
|
||||
TABLE_LIST table_list;
|
||||
TABLE_LIST *tables= &table_list;
|
||||
bool need_reopen;
|
||||
uint count= 1;
|
||||
bzero(&table_list, sizeof(table_list));
|
||||
|
@ -5330,13 +5331,12 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
|
|||
*/
|
||||
thd->binlog_flush_pending_rows_event(false);
|
||||
|
||||
close_tables_for_reopen(thd, &table_list);
|
||||
close_tables_for_reopen(thd, &tables);
|
||||
|
||||
/* open the table again, same as in Table_map_event::exec_event */
|
||||
table_list.db= const_cast<char*>(db);
|
||||
table_list.alias= table_list.table_name= const_cast<char*>(table_name);
|
||||
table_list.updating= 1;
|
||||
TABLE_LIST *tables= &table_list;
|
||||
if ((error= open_tables(thd, &tables, &count, 0)) == 0)
|
||||
{
|
||||
/* reset some variables for the table list*/
|
||||
|
|
|
@ -1038,7 +1038,7 @@ void free_io_cache(TABLE *entry);
|
|||
void intern_close_table(TABLE *entry);
|
||||
bool close_thread_table(THD *thd, TABLE **table_ptr);
|
||||
void close_temporary_tables(THD *thd);
|
||||
void close_tables_for_reopen(THD *thd, TABLE_LIST *tables);
|
||||
void close_tables_for_reopen(THD *thd, TABLE_LIST **tables);
|
||||
TABLE_LIST *find_table_in_list(TABLE_LIST *table,
|
||||
uint offset_to_list,
|
||||
const char *db_name,
|
||||
|
|
|
@ -5660,8 +5660,8 @@ ER_PARTITION_NOT_DEFINED_ERROR
|
|||
eng "For the partitioned engine it is necessary to define all %-.64s"
|
||||
swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %-.64s"
|
||||
ER_TOO_MANY_PARTITIONS_ERROR
|
||||
eng "Too many partitions were defined"
|
||||
swe "För många partitioner definierades"
|
||||
eng "Too many partitions (including subpartitions) were defined"
|
||||
swe "För många partitioner (inkluderande subpartitioner) definierades"
|
||||
ER_SUBPARTITION_ERROR
|
||||
eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning"
|
||||
swe "Det är endast möjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering för subpartitionering"
|
||||
|
@ -5813,3 +5813,5 @@ ER_CANT_CHANGE_TX_ISOLATION 25001
|
|||
ER_WARN_DEPRECATED_STATEMENT
|
||||
eng "The '%s' statement is deprecated and will be removed in MySQL %s. Please use client programs (e.g. %s) instead."
|
||||
|
||||
ER_SP_NO_AGGREGATE 42000
|
||||
eng "AGGREGATE is not supported for stored functions"
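The new message pairs with the sp.test addition earlier in this changeset; the statement below is the one it now rejects:

--error ER_SP_NO_AGGREGATE
create aggregate function bug16896() returns int return 1;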
11 sql/sp.cc
|
@ -1013,6 +1013,7 @@ sp_exist_routines(THD *thd, TABLE_LIST *routines, bool any, bool no_error)
|
|||
{
|
||||
TABLE_LIST *routine;
|
||||
bool result= 0;
|
||||
bool sp_object_found;
|
||||
DBUG_ENTER("sp_exists_routine");
|
||||
for (routine= routines; routine; routine= routine->next_global)
|
||||
{
|
||||
|
@ -1025,10 +1026,12 @@ sp_exist_routines(THD *thd, TABLE_LIST *routines, bool any, bool no_error)
|
|||
lex_name.str= thd->strmake(routine->table_name, lex_name.length);
|
||||
name= new sp_name(lex_db, lex_name);
|
||||
name->init_qname(thd);
|
||||
if (sp_find_routine(thd, TYPE_ENUM_PROCEDURE, name,
|
||||
&thd->sp_proc_cache, FALSE) != NULL ||
|
||||
sp_find_routine(thd, TYPE_ENUM_FUNCTION, name,
|
||||
&thd->sp_func_cache, FALSE) != NULL)
|
||||
sp_object_found= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, name,
|
||||
&thd->sp_proc_cache, FALSE) != NULL ||
|
||||
sp_find_routine(thd, TYPE_ENUM_FUNCTION, name,
|
||||
&thd->sp_func_cache, FALSE) != NULL;
|
||||
mysql_reset_errors(thd, TRUE);
|
||||
if (sp_object_found)
|
||||
{
|
||||
if (any)
|
||||
DBUG_RETURN(1);
|
||||
|
|
|
@@ -2692,22 +2692,11 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
    statement for which table list for prelocking is already built, let
    us cache routines and try to build such table list.

    NOTE: We can't delay prelocking until we will met some sub-statement
    which really uses tables, since this will imply that we have to restore
    its table list to be able execute it in some other context.
    And current views implementation assumes that view tables are added to
    global table list only once during PS preparing/first SP execution.
    Also locking at earlier stage is probably faster altough may decrease
    concurrency a bit.

    NOTE: We will mark statement as requiring prelocking only if we will
    have non empty table list. But this does not guarantee that in prelocked
    mode we will have some locked tables, because queries which use only
    derived/information schema tables and views possible. Thus "counter"
    may be still zero for prelocked statement...

    NOTE: The above notes may be out of date. Please wait for psergey to
    document new prelocked behavior.
  */

  if (!thd->prelocked_mode && !thd->lex->requires_prelocking() &&

@@ -2793,48 +2782,23 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)

      if (refresh)                              // Refresh in progress
      {
        /* close all 'old' tables used by this thread */
        pthread_mutex_lock(&LOCK_open);
        // if query_id is not reset, we will get an error
        // re-opening a temp table
        thd->version=refresh_version;
        TABLE **prev_table= &thd->open_tables;
        bool found=0;
        for (TABLE_LIST *tmp= *start; tmp; tmp= tmp->next_global)
        {
          /* Close normal (not temporary) changed tables */
          if (tmp->table && ! tmp->table->s->tmp_table != NO_TMP_TABLE)
          {
            if (tmp->table->s->version != refresh_version ||
                ! tmp->table->db_stat)
            {
              VOID(hash_delete(&open_cache,(byte*) tmp->table));
              tmp->table=0;
              found=1;
            }
            else
            {
              *prev_table= tmp->table;          // Relink open list
              prev_table= &tmp->table->next;
            }
          }
        }
        *prev_table=0;
        pthread_mutex_unlock(&LOCK_open);
        if (found)
          VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
        /*
          Let us prepare for recalculation of set of prelocked tables.
          First we pretend that we have finished calculation which we
          were doing currently. Then we restore list of tables to be
          opened and set of used routines to the state in which they were
          before first open_tables() call for this statement (i.e. before
          we have calculated current set of tables for prelocking).
          We have met name-locked or old version of table. Now we have
          to close all tables which are not up to date. We also have to
          throw away set of prelocked tables (and thus close tables from
          this set that were open by now) since it possible that one of
          tables which determined its content was changed.

          Instead of implementing complex/non-robust logic mentioned
          above we simply close and then reopen all tables.

          In order to prepare for recalculation of set of prelocked tables
          we pretend that we have finished calculation which we were doing
          currently.
        */
        if (query_tables_last_own)
          thd->lex->mark_as_requiring_prelocking(query_tables_last_own);
        thd->lex->chop_off_not_own_tables();
        sp_remove_not_own_routines(thd->lex);
        close_tables_for_reopen(thd, start);
        goto restart;
      }
      result= -1;                               // Fatal error
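The hunk above drops the hand-written relinking of thd->open_tables in favour of the blunt but robust approach described in the new comment: when a stale or name-locked table is met, throw away everything that was opened (including the prelocking set) and redo the whole statement. A compressed sketch of that control flow, with stub types and a placeholder open_one_table() standing in for the real open_table() machinery:

  struct THD;
  struct TABLE_LIST { TABLE_LIST *next_global; };

  bool open_one_table(THD *thd, TABLE_LIST *tbl, bool *refresh);  // placeholder helper
  void close_tables_for_reopen(THD *thd, TABLE_LIST **tables);    // the function changed by this patch

  int open_tables_sketch(THD *thd, TABLE_LIST **start)
  {
    int result= 0;
  restart:
    for (TABLE_LIST *tbl= *start; tbl; tbl= tbl->next_global)
    {
      bool refresh= false;
      if (!open_one_table(thd, tbl, &refresh))
      {
        if (refresh)                            // stale or name-locked table met
        {
          close_tables_for_reopen(thd, start);  // drop everything opened so far
          goto restart;                         // and reopen from scratch
        }
        result= -1;                             // genuine error
        break;
      }
    }
    return result;
  }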
@@ -3045,7 +3009,7 @@ int simple_open_n_lock_tables(THD *thd, TABLE_LIST *tables)
      break;
    if (!need_reopen)
      DBUG_RETURN(-1);
    close_tables_for_reopen(thd, tables);
    close_tables_for_reopen(thd, &tables);
  }
  DBUG_RETURN(0);
}

@@ -3082,7 +3046,7 @@ bool open_and_lock_tables(THD *thd, TABLE_LIST *tables)
      break;
    if (!need_reopen)
      DBUG_RETURN(-1);
    close_tables_for_reopen(thd, tables);
    close_tables_for_reopen(thd, &tables);
  }
  if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) ||
      (thd->fill_derived_tables() &&

@@ -3310,18 +3274,24 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen)

  SYNOPSIS
    close_tables_for_reopen()
      thd         Thread context
      tables      List of tables which we were trying to open and lock
      thd         in     Thread context
      tables      in/out List of tables which we were trying to open and lock

*/

void close_tables_for_reopen(THD *thd, TABLE_LIST *tables)
void close_tables_for_reopen(THD *thd, TABLE_LIST **tables)
{
  /*
    If table list consists only from tables from prelocking set, table list
    for new attempt should be empty, so we have to update list's root pointer.
  */
  if (thd->lex->first_not_own_table() == *tables)
    *tables= 0;
  thd->lex->chop_off_not_own_tables();
  sp_remove_not_own_routines(thd->lex);
  for (TABLE_LIST *tmp= tables; tmp; tmp= tmp->next_global)
    if (tmp->table && !tmp->table->s->tmp_table)
      tmp->table= 0;
  for (TABLE_LIST *tmp= *tables; tmp; tmp= tmp->next_global)
    tmp->table= 0;
  mark_used_tables_as_free_for_reuse(thd, thd->temporary_tables);
  close_thread_tables(thd);
}
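The reason the prototype gains a second level of indirection is spelled out in the new comment: when every table that was opened came from the prelocking set, the retry has to start from an empty statement table list, and only a TABLE_LIST** lets the callee overwrite the caller's root pointer. A minimal, self-contained illustration of that idea (reset_list_for_retry() is an invented name, not the server function):

  struct TABLE;
  struct TABLE_LIST { TABLE_LIST *next_global; TABLE *table; };

  void reset_list_for_retry(TABLE_LIST **tables, TABLE_LIST *first_not_own_table)
  {
    if (*tables == first_not_own_table)  // the whole list came from the prelocking set
      *tables= 0;                        // so the caller restarts with an empty list
    for (TABLE_LIST *tmp= *tables; tmp; tmp= tmp->next_global)
      tmp->table= 0;                     // forget the TABLE objects that were open
  }

This is also why every call site in the hunks that follow now passes &table_list instead of table_list.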
@@ -1142,7 +1142,7 @@ static int mysql_test_update(Prepared_statement *stmt,
      break;
    if (!need_reopen)
      goto error;
    close_tables_for_reopen(thd, table_list);
    close_tables_for_reopen(thd, &table_list);
  }

  /*

@@ -158,7 +158,7 @@ int mysql_update(THD *thd,
      break;
    if (!need_reopen)
      DBUG_RETURN(1);
    close_tables_for_reopen(thd, table_list);
    close_tables_for_reopen(thd, &table_list);
  }

  if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) ||

@@ -958,7 +958,7 @@ reopen_tables:
    for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global)
      tbl->cleanup_items();

    close_tables_for_reopen(thd, table_list);
    close_tables_for_reopen(thd, &table_list);
    goto reopen_tables;
  }

@@ -1679,6 +1679,16 @@ create_function_tail:
            LEX *lex= Lex;
            sp_head *sp;

            /*
              First check if AGGREGATE was used, in that case it's a
              syntax error.
            */
            if (lex->udf.type == UDFTYPE_AGGREGATE)
            {
              my_error(ER_SP_NO_AGGREGATE, MYF(0));
              YYABORT;
            }

            if (lex->sphead)
            {
              my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "FUNCTION");

@@ -144,6 +144,7 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result,
my_bool myfunc_double_init(UDF_INIT *, UDF_ARGS *args, char *message);
double myfunc_double(UDF_INIT *initid, UDF_ARGS *args, char *is_null,
                     char *error);
my_bool myfunc_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message);
longlong myfunc_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null,
                    char *error);
my_bool sequence_init(UDF_INIT *initid, UDF_ARGS *args, char *message);

@@ -597,6 +598,14 @@ longlong myfunc_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null,
  return val;
}

/*
  At least one of _init/_deinit is needed unless the server is started
  with --allow_suspicious_udfs.
*/
my_bool myfunc_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
  return 0;
}

/*
  Simple example of how to get a sequences starting from the first argument
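The udf_example change adds an (empty) myfunc_int_init() so the server's stricter UDF sanity check accepts the example library without --allow_suspicious_udfs. Below is a complete hypothetical UDF skeleton with both an _init and a _deinit, written against the standard 5.1 UDF calling convention; always_42 is an invented name and is not part of udf_example.c:

  #include <mysql.h>      /* UDF_INIT, UDF_ARGS, my_bool */
  #include <string.h>

  extern "C" {

  my_bool always_42_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
  {
    if (args->arg_count != 0)
    {
      strcpy(message, "always_42() takes no arguments");
      return 1;                          /* refuse CREATE FUNCTION / the call */
    }
    initid->maybe_null= 0;
    return 0;
  }

  long long always_42(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error)
  {
    return 42;
  }

  void always_42_deinit(UDF_INIT *initid)
  {
    /* nothing was allocated in _init, so nothing to free */
  }

  } /* extern "C" */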
@@ -17,9 +17,10 @@
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \
           @ZLIB_INCLUDES@

LDADD = libarchive.a $(top_srcdir)/mysys/libmysys.a \
        $(top_srcdir)/dbug/libdbug.a \
        $(top_srcdir)/strings/libmystrings.a \
LDADD = libarchive.a \
        $(top_builddir)/mysys/libmysys.a \
        $(top_builddir)/dbug/libdbug.a \
        $(top_builddir)/strings/libmystrings.a \
        @ZLIB_LIBS@
pkglib_LIBRARIES = libarchive.a
noinst_PROGRAMS = archive_test

@@ -20,7 +20,8 @@ MYSQLDATAdir = $(localstatedir)
MYSQLSHAREdir = $(pkgdatadir)
MYSQLBASEdir= $(prefix)
MYSQLLIBdir= $(pkglibdir)
INCLUDES = -I$(top_srcdir)/include \
INCLUDES = -I$(top_builddir)/include \
           -I$(top_srcdir)/include \
           -I$(top_srcdir)/regex \
           -I$(top_srcdir)/sql \
           -I$(srcdir)

@@ -15,9 +15,10 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include
LDADD = libheap.a $(top_srcdir)/mysys/libmysys.a \
        $(top_srcdir)/dbug/libdbug.a \
        $(top_srcdir)/strings/libmystrings.a
LDADD = libheap.a \
        $(top_builddir)/mysys/libmysys.a \
        $(top_builddir)/dbug/libdbug.a \
        $(top_builddir)/strings/libmystrings.a
pkglib_LIBRARIES = libheap.a
noinst_PROGRAMS = hp_test1 hp_test2
hp_test1_LDFLAGS = @NOINST_LDFLAGS@

@@ -66,6 +66,7 @@
#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
#define MAX_NDB_PARTITIONS 1024
#define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data
#define MAX_WORDS_META_FILE 16382

#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*

@@ -39,6 +39,7 @@ class AlterTableReq {
  friend class NdbEventOperationImpl;
  friend class NdbDictInterface;
  friend class Dbdict;
  friend class Suma;

  /**
   * For printing

@@ -1598,6 +1598,14 @@ public:
     */
    const Table * getTable(const char * name) const;

#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
    /*
     * Save a table definition in dictionary cache
     * @param table Object to put into cache
     */
    void putTable(const Table * table);
#endif

    /**
     * Get index with given name, NULL if undefined
     * @param indexName Name of index to get.

@@ -220,6 +220,7 @@ public:

#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
  /** these are subject to change at any time */
  const NdbDictionary::Table* getTable() const;
  const NdbDictionary::Event *getEvent() const;
  const NdbRecAttr *getFirstPkAttr() const;
  const NdbRecAttr *getFirstPkPreAttr() const;

@@ -530,7 +530,7 @@ public:
  Config c_defaults;
  Uint32 m_diskless;

  STATIC_CONST(NO_OF_PAGES_META_FILE = 2);
  STATIC_CONST(NO_OF_PAGES_META_FILE = MAX_WORDS_META_FILE/BACKUP_WORDS_PER_PAGE);

  /**
   * Pools

@@ -42,6 +42,7 @@
#include <signaldata/GCPSave.hpp>
#include <signaldata/CreateTab.hpp>
#include <signaldata/DropTab.hpp>
#include <signaldata/AlterTable.hpp>
#include <signaldata/AlterTab.hpp>
#include <signaldata/DihFragCount.hpp>
#include <signaldata/SystemError.hpp>

@@ -3440,7 +3441,7 @@ Suma::execDROP_TAB_CONF(Signal *signal)
  DBUG_VOID_RETURN;
}

static Uint32 b_dti_buf[10000];
static Uint32 b_dti_buf[MAX_WORDS_META_FILE];

void
Suma::execALTER_TAB_REQ(Signal *signal)
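The sizing changes above belong together: MAX_WORDS_META_FILE (16382, defined in the ndb limits header) now drives both the backup meta-file page count and the static buffer SUMA uses to re-parse a table definition, replacing the hard-coded 10000. A quick back-of-the-envelope check of what that means in bytes, as a standalone illustration only (it assumes the usual 32-bit Uint32 and is not part of the patch):

  typedef unsigned int Uint32;                  /* 32-bit word, as in NDB        */

  const Uint32 OLD_DTI_BUF_WORDS   = 10000;     /* 10000 * 4 = 40000 bytes       */
  const Uint32 MAX_WORDS_META_FILE = 16382;     /* 16382 * 4 = 65528 bytes       */

  /* The buffer is now sized to hold the largest packed table definition a
     meta file can carry, instead of an arbitrary smaller constant. */
  static Uint32 b_dti_buf_sketch[MAX_WORDS_META_FILE];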
@@ -3462,7 +3463,7 @@ Suma::execALTER_TAB_REQ(Signal *signal)
  }

  DBUG_PRINT("info",("alter table id: %d[i=%u]", tableId, tabPtr.i));

  Table::State old_state = tabPtr.p->m_state;
  tabPtr.p->m_state = Table::ALTERED;
  // triggers must be removed, waiting for sub stop req for that

@@ -3520,6 +3521,11 @@ Suma::execALTER_TAB_REQ(Signal *signal)
      DBUG_PRINT("info",("sent to subscriber %d", subbPtr.i));
    }
  }
  if (AlterTableReq::getFrmFlag(changeMask))
  {
    // Frm changes only are handled on-line
    tabPtr.p->m_state = old_state;
  }
  DBUG_VOID_RETURN;
}

@@ -1349,6 +1349,13 @@ NdbDictionary::Dictionary::getTable(const char * name, void **data) const
  return 0;
}

void NdbDictionary::Dictionary::putTable(const NdbDictionary::Table * table)
{
  NdbDictionary::Table *copy_table = new NdbDictionary::Table;
  *copy_table = *table;
  m_impl.putTable(&NdbTableImpl::getImpl(*copy_table));
}

void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz)
{
  m_impl.m_local_table_data_size= sz;

@@ -1305,7 +1305,16 @@ NdbDictionaryImpl::fetchGlobalTableImpl(const BaseString& internalTableName)
void
NdbDictionaryImpl::putTable(NdbTableImpl *impl)
{
  NdbTableImpl *old;

  m_globalHash->lock();
  if ((old= m_globalHash->get(impl->m_internalName.c_str())))
  {
    m_globalHash->alter_table_rep(old->m_internalName.c_str(),
                                  impl->m_id,
                                  impl->m_version,
                                  FALSE);
  }
  m_globalHash->put(impl->m_internalName.c_str(), impl);
  m_globalHash->unlock();
  Ndb_local_table_info *info=

@@ -1313,6 +1322,8 @@ NdbDictionaryImpl::putTable(NdbTableImpl *impl)

  m_localHash.put(impl->m_internalName.c_str(), info);

  addBlobTables(*impl);

  m_ndb.theFirstTupleId[impl->getTableId()] = ~0;
  m_ndb.theLastTupleId[impl->getTableId()] = ~0;
}
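Taken together, the NdbDictionary hunks expose a write path into the dictionary cache: Dictionary::putTable() copies the caller's table object, and NdbDictionaryImpl::putTable() registers the copy in both the global and the local hash, along with its blob tables. A hypothetical caller could look like the sketch below; parse_saved_frm() is a placeholder, since the hunks do not show how the server actually produces the Table object:

  #include <NdbApi.hpp>

  const NdbDictionary::Table *parse_saved_frm();   /* placeholder producer */

  void seed_dictionary_cache(Ndb *ndb)
  {
    NdbDictionary::Dictionary *dict= ndb->getDictionary();
    const NdbDictionary::Table *parsed= parse_saved_frm();
    dict->putTable(parsed);                        /* cache keeps its own copy */
    /* later lookups by name are served from the cache */
    const NdbDictionary::Table *cached= dict->getTable(parsed->getName());
    (void) cached;
  }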
@@ -144,6 +144,10 @@ NdbEventOperation::print()
/*
 * Internal for the mysql server
 */
const NdbDictionary::Table *NdbEventOperation::getTable() const
{
  return m_impl.m_eventImpl->m_tableImpl->m_facade;
}
const NdbDictionary::Event *NdbEventOperation::getEvent() const
{
  return m_impl.m_eventImpl->m_facade;

@@ -643,6 +643,14 @@ NdbEventOperationImpl::receive_event()
                       m_buffer.length() / 4,
                       true);
    m_buffer.clear();
    if (at)
      at->buildColumnHash();
    else
    {
      DBUG_PRINT_EVENT("info", ("Failed to parse DictTabInfo error %u",
                                error.code));
      DBUG_RETURN_EVENT(1);
    }
    if ( m_eventImpl->m_tableImpl)
      delete m_eventImpl->m_tableImpl;
    m_eventImpl->m_tableImpl = at;

@@ -709,7 +709,7 @@ fi
# itself - note that they must be ordered by date (important when
# merging BK trees)
%changelog
* Mon Feb 20 03:04:32 CET 2006
* Mon Feb 20 2006 Kent Boortz <kent@mysql.com>

- Reintroduced a max build
- Limited testing of 'debug' and 'max' servers

@@ -1,11 +1,16 @@
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")

ADD_DEFINITIONS(-DUSE_TLS -DMYSQL_CLIENT)
# The old Windows build method used renamed (.cc -> .cpp) source files, fails
# in #include in mysqlbinlog.cc. So disable that using the USING_CMAKE define.
ADD_DEFINITIONS(-DUSE_TLS -DUSING_CMAKE)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
                    ${CMAKE_SOURCE_DIR}/zlib
                    ${CMAKE_SOURCE_DIR}/extra/yassl/include
                    ${CMAKE_SOURCE_DIR}/libmysql)
                    ${CMAKE_SOURCE_DIR}/libmysql
                    ${CMAKE_SOURCE_DIR}/regex
                    ${CMAKE_SOURCE_DIR}/sql
                    ${CMAKE_SOURCE_DIR}/strings)

ADD_LIBRARY(mysqlclient ../mysys/array.c ../strings/bchange.c ../strings/bmove.c
            ../strings/bmove_upp.c ../mysys/charset-def.c ../mysys/charset.c

@@ -47,3 +52,26 @@ ADD_DEPENDENCIES(mysqlclient GenError)
ADD_EXECUTABLE(mysql completion_hash.cc mysql.cc readline.cc sql_string.cc)
LINK_DIRECTORIES(${MYSQL_BINARY_DIR}/mysys ${MYSQL_BINARY_DIR}/zlib)
TARGET_LINK_LIBRARIES(mysql mysqlclient mysys yassl zlib dbug yassl taocrypt wsock32)

ADD_EXECUTABLE(mysqltest mysqltest.c)
TARGET_LINK_LIBRARIES(mysqltest mysqlclient mysys yassl zlib dbug regex wsock32)

ADD_EXECUTABLE(mysqlcheck mysqlcheck.c)
TARGET_LINK_LIBRARIES(mysqlcheck mysqlclient dbug yassl zlib wsock32)

ADD_EXECUTABLE(mysqldump mysqldump.c ../sql-common/my_user.c)
TARGET_LINK_LIBRARIES(mysqldump mysqlclient mysys dbug yassl zlib wsock32)

ADD_EXECUTABLE(mysqlimport mysqlimport.c)
TARGET_LINK_LIBRARIES(mysqlimport mysqlclient mysys dbug yassl zlib wsock32)

ADD_EXECUTABLE(mysqlshow mysqlshow.c)
TARGET_LINK_LIBRARIES(mysqlshow mysqlclient mysys dbug yassl zlib wsock32)

ADD_EXECUTABLE(mysqlbinlog mysqlbinlog.cc ../mysys/mf_tempdir.c ../mysys/my_new.cc
               ../mysys/my_bit.c ../mysys/my_bitmap.c ../mysys/my_vle.c
               ../mysys/base64.c)
TARGET_LINK_LIBRARIES(mysqlbinlog mysqlclient dbug yassl zlib wsock32)

ADD_EXECUTABLE(mysqladmin mysqladmin.cc)
TARGET_LINK_LIBRARIES(mysqladmin mysqlclient mysys dbug yassl zlib wsock32)

@@ -19,3 +19,5 @@ ADD_CUSTOM_TARGET(GenError



ADD_EXECUTABLE(my_print_defaults my_print_defaults.c)
TARGET_LINK_LIBRARIES(my_print_defaults strings mysys dbug taocrypt odbc32 odbccp32 wsock32)

@@ -76,14 +76,14 @@ ADD_CUSTOM_COMMAND(
#ADD_CUSTOM_COMMAND(
# SOURCE ${PROJECT_SOURCE_DIR}/include/mysql_version.h.in
# OUTPUT ${PROJECT_SOURCE_DIR}/include/mysql_version.h
# COMMAND ${PROJECT_SOURCE_DIR}/win/config-version.js
# COMMAND cscript.exe ${PROJECT_SOURCE_DIR}/win/config-version.js
# DEPENDS ${PROJECT_SOURCE_DIR}/include/mysql_version.h.in)

# Handlerton file
ADD_CUSTOM_COMMAND(
  SOURCE ${PROJECT_SOURCE_DIR}/sql/handlerton.cc.in
  OUTPUT ${PROJECT_SOURCE_DIR}/sql/handlerton.cc
  COMMAND ${PROJECT_SOURCE_DIR}/win/config-handlerton.js ARGS ${PROJECT_SOURCE_DIR}/win/configure.data
  COMMAND cscript.exe ${PROJECT_SOURCE_DIR}/win/config-handlerton.js ARGS ${PROJECT_SOURCE_DIR}/win/configure.data
  DEPENDS ${PROJECT_SOURCE_DIR}/sql/handlerton.cc.in)

# Error file