Merge trift2.:/MySQL/M50/mysql-5.0
into trift2.:/MySQL/M50/push-5.0
commit d8c385c397
40 changed files with 316 additions and 65 deletions
@@ -104,6 +104,12 @@ check_cpu () {
    *Athlon*64*)
      cpu_arg="athlon64";
      ;;
    *Turion*)
      cpu_arg="athlon64";
      ;;
    *Opteron*)
      cpu_arg="athlon64";
      ;;
    *Athlon*)
      cpu_arg="athlon";
      ;;
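For context, these patterns are matched against the host's CPU model string in the build script's check_cpu routine. A minimal standalone sketch of the same matching logic, assuming a Linux host where the model name can be read from /proc/cpuinfo (the cpu variable and the echo are illustrative, not part of this commit):

# Illustrative only: mimic the check_cpu case logic against /proc/cpuinfo.
cpu=$(grep -m1 'model name' /proc/cpuinfo | cut -d: -f2-)
case "$cpu" in
  *Athlon*64*) cpu_arg="athlon64" ;;
  *Turion*)    cpu_arg="athlon64" ;;   # newly recognized by this change
  *Opteron*)   cpu_arg="athlon64" ;;   # newly recognized by this change
  *Athlon*)    cpu_arg="athlon" ;;
  *)           cpu_arg="" ;;
esac
echo "cpu_arg=$cpu_arg"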
@@ -1736,6 +1736,12 @@ AC_ARG_WITH(client-ldflags,
  [CLIENT_EXTRA_LDFLAGS=])
AC_SUBST(CLIENT_EXTRA_LDFLAGS)

AC_ARG_WITH(mysqld-libs,
  [  --with-mysqld-libs    Extra libraries to link with for mysqld],
  [MYSQLD_EXTRA_LIBS=$withval],
  [MYSQLD_EXTRA_LIBS=])
AC_SUBST(MYSQLD_EXTRA_LIBS)

AC_ARG_WITH(lib-ccflags,
  [  --with-lib-ccflags    Extra CC options for libraries],
  [LIB_EXTRA_CCFLAGS=$withval],
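The new switches populate MYSQLD_EXTRA_LIBS and LIB_EXTRA_CCFLAGS at configure time. A hypothetical invocation, with option values chosen purely for illustration and not taken from this commit:

# Illustrative: pass extra mysqld link libraries and extra library CC flags.
./configure --with-mysqld-libs="-lrt" --with-lib-ccflags="-DDBUG_OFF"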
@@ -1,3 +1,4 @@
--source include/have_log_bin.inc
--source include/not_embedded.inc
--source ./include/have_federated_db.inc
@@ -3776,8 +3776,7 @@ sub mysqld_arguments ($$$$) {
              "%s--log-slow-queries=%s-slow.log", $prefix, $log_base_path);

  # Check if "extra_opt" contains --skip-log-bin
  my $skip_binlog= grep(/^--skip-log-bin/, @$extra_opt);
  my $skip_binlog= grep(/^--skip-log-bin/, @$extra_opt, @opt_extra_mysqld_opt);
  if ( $mysqld->{'type'} eq 'master' )
  {
    if (! ($opt_skip_master_binlog || $skip_binlog) )
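With the widened grep, a --skip-log-bin passed as an extra mysqld option on the mysql-test-run.pl command line is now detected as well, not only one coming from the test's own option list. An illustrative invocation (the test name is a placeholder):

# Illustrative: extra mysqld options land in @opt_extra_mysqld_opt,
# which the widened grep above now also inspects.
perl mysql-test-run.pl --mysqld=--skip-log-bin some_test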
@@ -17,7 +17,7 @@ master-bin.000001 # Query 1 # use `test`; insert t1 values (5)
master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Query 1 # use `test`; insert t2 values (5)
master-bin.000001 # Xid 1 # COMMIT /* xid=12 */
master-bin.000001 # Xid 1 # COMMIT /* xid=13 */
drop table t1,t2;
reset master;
create table t1 (n int) engine=innodb;

@@ -128,7 +128,7 @@ master-bin.000001 # Query 1 # use `test`; insert into t1 values(4 + 4)
master-bin.000001 # Query 1 # use `test`; insert into t1 values(3 + 4)
master-bin.000001 # Query 1 # use `test`; insert into t1 values(2 + 4)
master-bin.000001 # Query 1 # use `test`; insert into t1 values(1 + 4)
master-bin.000001 # Xid 1 # COMMIT /* xid=19 */
master-bin.000001 # Xid 1 # COMMIT /* xid=20 */
master-bin.000001 # Rotate 1 # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from 98;
Log_name Pos Event_type Server_id End_log_pos Info

@@ -1,2 +1,2 @@
Variable_name Value
have_log_bin ON
log_bin ON
@@ -11,7 +11,7 @@ Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; BEGIN
master-bin.000001 166 Query 1 # use `test`; insert into t1 values(1)
master-bin.000001 253 Query 1 # use `test`; insert into t2 select * from t1
master-bin.000001 347 Xid 1 # COMMIT /* xid=8 */
master-bin.000001 347 Xid 1 # COMMIT /* xid=9 */
delete from t1;
delete from t2;
reset master;

@@ -47,7 +47,7 @@ master-bin.000001 253 Query 1 # use `test`; savepoint my_savepoint
master-bin.000001 338 Query 1 # use `test`; insert into t1 values(4)
master-bin.000001 425 Query 1 # use `test`; insert into t2 select * from t1
master-bin.000001 519 Query 1 # use `test`; rollback to savepoint my_savepoint
master-bin.000001 616 Xid 1 # COMMIT /* xid=25 */
master-bin.000001 616 Xid 1 # COMMIT /* xid=26 */
delete from t1;
delete from t2;
reset master;

@@ -74,7 +74,7 @@ master-bin.000001 338 Query 1 # use `test`; insert into t1 values(6)
master-bin.000001 425 Query 1 # use `test`; insert into t2 select * from t1
master-bin.000001 519 Query 1 # use `test`; rollback to savepoint my_savepoint
master-bin.000001 616 Query 1 # use `test`; insert into t1 values(7)
master-bin.000001 703 Xid 1 # COMMIT /* xid=37 */
master-bin.000001 703 Xid 1 # COMMIT /* xid=38 */
delete from t1;
delete from t2;
reset master;

@@ -101,7 +101,7 @@ insert into t2 select * from t1;
show binlog events from 98;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; insert into t1 values(9)
master-bin.000001 185 Xid 1 # COMMIT /* xid=60 */
master-bin.000001 185 Xid 1 # COMMIT /* xid=61 */
master-bin.000001 212 Query 1 # use `test`; insert into t2 select * from t1
delete from t1;
delete from t2;

@@ -112,18 +112,18 @@ insert into t2 select * from t1;
show binlog events from 98;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; insert into t1 values(10)
master-bin.000001 186 Xid 1 # COMMIT /* xid=66 */
master-bin.000001 186 Xid 1 # COMMIT /* xid=67 */
master-bin.000001 213 Query 1 # use `test`; insert into t2 select * from t1
insert into t1 values(11);
commit;
show binlog events from 98;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; insert into t1 values(10)
master-bin.000001 186 Xid 1 # COMMIT /* xid=66 */
master-bin.000001 186 Xid 1 # COMMIT /* xid=67 */
master-bin.000001 213 Query 1 # use `test`; insert into t2 select * from t1
master-bin.000001 307 Query 1 # use `test`; BEGIN
master-bin.000001 375 Query 1 # use `test`; insert into t1 values(11)
master-bin.000001 463 Xid 1 # COMMIT /* xid=68 */
master-bin.000001 463 Xid 1 # COMMIT /* xid=69 */
alter table t2 engine=INNODB;
delete from t1;
delete from t2;

@@ -137,7 +137,7 @@ Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; BEGIN
master-bin.000001 166 Query 1 # use `test`; insert into t1 values(12)
master-bin.000001 254 Query 1 # use `test`; insert into t2 select * from t1
master-bin.000001 348 Xid 1 # COMMIT /* xid=78 */
master-bin.000001 348 Xid 1 # COMMIT /* xid=79 */
delete from t1;
delete from t2;
reset master;

@@ -161,7 +161,7 @@ show binlog events from 98;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; BEGIN
master-bin.000001 166 Query 1 # use `test`; insert into t1 values(14)
master-bin.000001 254 Xid 1 # COMMIT /* xid=94 */
master-bin.000001 254 Xid 1 # COMMIT /* xid=95 */
delete from t1;
delete from t2;
reset master;

@@ -182,7 +182,7 @@ Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; BEGIN
master-bin.000001 166 Query 1 # use `test`; insert into t1 values(16)
master-bin.000001 254 Query 1 # use `test`; insert into t1 values(18)
master-bin.000001 342 Xid 1 # COMMIT /* xid=105 */
master-bin.000001 342 Xid 1 # COMMIT /* xid=106 */
delete from t1;
delete from t2;
alter table t2 type=MyISAM;

@@ -234,19 +234,19 @@ Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 98 Query 1 # use `test`; BEGIN
master-bin.000001 166 Query 1 # use `test`; insert into t1 values(16)
master-bin.000001 254 Query 1 # use `test`; insert into t1 values(18)
master-bin.000001 342 Xid 1 # COMMIT /* xid=105 */
master-bin.000001 342 Xid 1 # COMMIT /* xid=106 */
master-bin.000001 369 Query 1 # use `test`; delete from t1
master-bin.000001 446 Xid 1 # COMMIT /* xid=114 */
master-bin.000001 446 Xid 1 # COMMIT /* xid=115 */
master-bin.000001 473 Query 1 # use `test`; delete from t2
master-bin.000001 550 Xid 1 # COMMIT /* xid=115 */
master-bin.000001 550 Xid 1 # COMMIT /* xid=116 */
master-bin.000001 577 Query 1 # use `test`; alter table t2 type=MyISAM
master-bin.000001 666 Query 1 # use `test`; insert into t1 values (1)
master-bin.000001 754 Xid 1 # COMMIT /* xid=117 */
master-bin.000001 754 Xid 1 # COMMIT /* xid=118 */
master-bin.000001 781 Query 1 # use `test`; insert into t2 values (20)
master-bin.000001 870 Query 1 # use `test`; drop table t1,t2
master-bin.000001 949 Query 1 # use `test`; create temporary table ti (a int) engine=innodb
master-bin.000001 1059 Query 1 # use `test`; insert into ti values(1)
master-bin.000001 1146 Xid 1 # COMMIT /* xid=132 */
master-bin.000001 1146 Xid 1 # COMMIT /* xid=133 */
master-bin.000001 1173 Query 1 # use `test`; create temporary table t1 (a int) engine=myisam
master-bin.000001 1283 Query 1 # use `test`; insert t1 values (1)
master-bin.000001 1366 Query 1 # use `test`; create table t0 (n int)
@@ -770,4 +770,75 @@ c abc ab
d ab ab
e abc abc
DROP TABLE t1;
create table t1 (a int not null primary key, b int not null) engine=ndb;
create table t2 (a int not null primary key, b int not null) engine=ndb;
insert into t1 values (1,10), (2,20), (3,30);
insert into t2 values (1,10), (2,20), (3,30);
select * from t1 order by a;
a b
1 10
2 20
3 30
delete from t1 where a > 0 order by a desc limit 1;
select * from t1 order by a;
a b
1 10
2 20
delete from t1,t2 using t1,t2 where t1.a = t2.a;
select * from t2 order by a;
a b
3 30
drop table t1,t2;
create table t1 (a int not null primary key, b int not null) engine=ndb;
insert into t1 values (1,10), (2,20), (3,30);
insert into t1 set a=1, b=100;
ERROR 23000: Duplicate entry '1' for key 1
insert ignore into t1 set a=1, b=100;
select * from t1 order by a;
a b
1 10
2 20
3 30
insert into t1 set a=1, b=1000 on duplicate key update b=b+1;
select * from t1 order by a;
a b
1 11
2 20
3 30
drop table t1;
create table t1 (a int not null primary key, b int not null) engine=ndb;
create table t2 (c int not null primary key, d int not null) engine=ndb;
insert into t1 values (1,10), (2,10), (3,30), (4, 30);
insert into t2 values (1,10), (2,10), (3,30), (4, 30);
update t1 set a = 1 where a = 3;
ERROR 23000: Duplicate entry '1' for key 1
select * from t1 order by a;
a b
1 10
2 10
3 30
4 30
update t1 set b = 1 where a > 1 order by a desc limit 1;
select * from t1 order by a;
a b
1 10
2 10
3 30
4 1
update t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
ERROR 23000: Duplicate entry '1' for key 1
select * from t1 order by a;
a b
1 10
2 10
3 30
4 1
update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
select * from t1 order by a;
a b
1 10
2 10
3 30
4 1
drop table t1,t2;
End of 5.0 tests
@@ -1,6 +1,7 @@
#
# misc binlogging tests that do not require a slave running
#
-- source include/have_log_bin.inc
-- source include/not_embedded.inc
-- source include/have_bdb.inc
-- source include/have_innodb.inc

@@ -19,7 +20,7 @@ begin;
insert t2 values (5);
commit;
# first COMMIT must be Query_log_event, second - Xid_log_event
--replace_result "xid=21" "xid=12"
--replace_result "xid=22" "xid=13"
--replace_column 2 # 5 #
show binlog events from 98;
drop table t1,t2;

@@ -41,7 +42,7 @@ while ($1)
--enable_query_log
commit;
drop table t1;
--replace_result "xid=32" "xid=19"
--replace_result "xid=33" "xid=20"
--replace_column 2 # 5 #
show binlog events in 'master-bin.000001' from 98;
--replace_column 2 # 5 #
@@ -4,6 +4,7 @@
#
-- source include/not_embedded.inc
-- source include/have_blackhole.inc
-- source include/have_log_bin.inc

--disable_warnings
drop table if exists t1,t2;

@@ -1,5 +1,6 @@
-- source include/not_embedded.inc
-- source include/have_cp932.inc
-- source include/have_log_bin.inc

--character_set cp932
--disable_warnings

@@ -1,5 +1,6 @@
-- source include/not_embedded.inc
-- source include/have_cp932.inc
-- source include/have_log_bin.inc

--character_set cp932
--disable_warnings

@@ -1,5 +1,6 @@
--source include/not_embedded.inc
--source include/have_ucs2.inc
--source include/have_log_bin.inc

#
# Check correct binlogging of UCS2 user variables (BUG#3875)

@@ -1,5 +1,6 @@
# Embedded server doesn't support binlog
-- source include/not_embedded.inc
-- source include/have_log_bin.inc

--disable_warnings
drop database if exists `drop-temp+table-test`;

@@ -3,6 +3,8 @@
# We verify that we did not introduce a deadlock.
# This is intended to mimick how mysqldump and innobackup work.

-- source include/have_log_bin.inc

# And it requires InnoDB
-- source include/not_embedded.inc
-- source include/have_innodb.inc

@@ -1,5 +1,6 @@
# Embedded server doesn't support binlog
-- source include/not_embedded.inc
-- source include/have_log_bin.inc

# Check if a partly-completed INSERT SELECT in a MyISAM table goes into the
# binlog
@@ -4,7 +4,7 @@
# slave is always with --skip-innodb in the testsuite. I (Guilhem) however
# did some tests manually on a slave; tables are replicated fine and
# Exec_Master_Log_Pos advances as expected.

-- source include/have_log_bin.inc
# Embedded server doesn't support binlogging
-- source include/not_embedded.inc

@@ -29,7 +29,7 @@ insert into t2 select * from t1;
commit;

--replace_column 5 #
--replace_result "xid=14" "xid=8"
--replace_result "xid=15" "xid=9"
show binlog events from 98;

delete from t1;

@@ -58,7 +58,7 @@ rollback to savepoint my_savepoint;
commit;

--replace_column 5 #
--replace_result "xid=47" "xid=25"
--replace_result "xid=48" "xid=26"
show binlog events from 98;

delete from t1;

@@ -76,7 +76,7 @@ commit;
select a from t1 order by a; # check that savepoints work :)

--replace_column 5 #
--replace_result "xid=69" "xid=37"
--replace_result "xid=70" "xid=38"
show binlog events from 98;

# and when ROLLBACK is not explicit?

@@ -109,7 +109,7 @@ insert into t1 values(9);
insert into t2 select * from t1;

--replace_column 5 #
--replace_result "xid=117" "xid=60"
--replace_result "xid=118" "xid=61"
show binlog events from 98;

# Check that when the query updat1ng the MyISAM table is the first in the

@@ -122,13 +122,13 @@ insert into t1 values(10); # first make t1 non-empty
begin;
insert into t2 select * from t1;
--replace_column 5 #
--replace_result "xid=131" "xid=66"
--replace_result "xid=132" "xid=67"
show binlog events from 98;
insert into t1 values(11);
commit;

--replace_column 5 #
--replace_result "xid=131" "xid=66" "xid=134" "xid=68"
--replace_result "xid=132" "xid=67" "xid=135" "xid=69"
show binlog events from 98;

@@ -147,7 +147,7 @@ insert into t2 select * from t1;
commit;

--replace_column 5 #
--replace_result "xid=153" "xid=78"
--replace_result "xid=154" "xid=79"
show binlog events from 98;

delete from t1;

@@ -175,7 +175,7 @@ rollback to savepoint my_savepoint;
commit;

--replace_column 5 #
--replace_result "xid=185" "xid=94"
--replace_result "xid=186" "xid=95"
show binlog events from 98;

delete from t1;

@@ -193,7 +193,7 @@ commit;
select a from t1 order by a; # check that savepoints work :)

--replace_column 5 #
--replace_result "xid=206" "xid=105"
--replace_result "xid=207" "xid=106"
show binlog events from 98;

# Test for BUG#5714, where a MyISAM update in the transaction used to

@@ -254,7 +254,7 @@ disconnect con2;
connection con3;
select get_lock("lock1",60);
--replace_column 5 #
--replace_result "xid=206" "xid=105" "xid=224" "xid=114" "xid=227" "xid=115" "xid=231" "xid=117" "xid=258" "xid=132"
--replace_result "xid=207" "xid=106" "xid=225" "xid=115" "xid=228" "xid=116" "xid=232" "xid=118" "xid=259" "xid=133"
show binlog events from 98;
do release_lock("lock1");
drop table t0,t2;
@@ -1,6 +1,7 @@
# disabled in embedded until tools running is fixed with embedded
--source include/not_embedded.inc
-- source include/not_embedded.inc
-- source include/have_cp932.inc
-- source include/have_log_bin.inc

# Bug#16217 (mysql client did not know how not switch its internal charset)
flush logs;

@@ -1,5 +1,7 @@
# We are using .opt file since we need small binlog size

-- source include/have_log_bin.inc

# Embedded server doesn't support binlogging
-- source include/not_embedded.inc

@@ -1,6 +1,8 @@
# Test for the new options --start-datetime, stop-datetime,
# and a few others.

-- source include/have_log_bin.inc

# Embedded server doesn't support binlogging
-- source include/not_embedded.inc

@@ -1,6 +1,9 @@
# Embedded server doesn't support external clients
--source include/not_embedded.inc

# Binlog is required
--source include/have_log_bin.inc

--disable_warnings
DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3;
drop database if exists mysqldump_test_db;

@@ -1,3 +1,5 @@
-- source include/have_log_bin.inc

# This test should work in embedded server after mysqltest is fixed
-- source include/not_embedded.inc
@@ -740,6 +740,46 @@ INSERT INTO t1 VALUES
SELECT * FROM t1 ORDER BY a;
DROP TABLE t1;

# delete
create table t1 (a int not null primary key, b int not null) engine=ndb;
create table t2 (a int not null primary key, b int not null) engine=ndb;
insert into t1 values (1,10), (2,20), (3,30);
insert into t2 values (1,10), (2,20), (3,30);
select * from t1 order by a;
delete from t1 where a > 0 order by a desc limit 1;
select * from t1 order by a;
delete from t1,t2 using t1,t2 where t1.a = t2.a;
select * from t2 order by a;
drop table t1,t2;

# insert ignore
create table t1 (a int not null primary key, b int not null) engine=ndb;
insert into t1 values (1,10), (2,20), (3,30);
--error ER_DUP_ENTRY
insert into t1 set a=1, b=100;
insert ignore into t1 set a=1, b=100;
select * from t1 order by a;
insert into t1 set a=1, b=1000 on duplicate key update b=b+1;
select * from t1 order by a;
drop table t1;

# update
create table t1 (a int not null primary key, b int not null) engine=ndb;
create table t2 (c int not null primary key, d int not null) engine=ndb;
insert into t1 values (1,10), (2,10), (3,30), (4, 30);
insert into t2 values (1,10), (2,10), (3,30), (4, 30);
--error ER_DUP_ENTRY
update t1 set a = 1 where a = 3;
select * from t1 order by a;
update t1 set b = 1 where a > 1 order by a desc limit 1;
select * from t1 order by a;
--error ER_DUP_ENTRY
update t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
select * from t1 order by a;
update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
select * from t1 order by a;
drop table t1,t2;

# End of 5.0 tests
--echo End of 5.0 tests
@@ -2,6 +2,7 @@
# tests that require InnoDB...
#

-- source include/have_log_bin.inc
-- source include/have_innodb.inc

--disable_warnings

@@ -1,5 +1,6 @@
# Embedded server does not support binlogging
--source include/not_embedded.inc
--source include/have_log_bin.inc

# Check that user variables are binlogged correctly (BUG#3875)
create table t1 (a varchar(50));

@@ -959,6 +959,7 @@ TransporterRegistry::performReceive()
    {
      Uint32 * ptr;
      Uint32 sz = t->getReceiveData(&ptr);
      transporter_recv_from(callbackObj, nodeId);
      Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]);
      t->updateReceiveDataPtr(szUsed);
    }
@@ -1,5 +1,5 @@
Next QMGR 1
Next NDBCNTR 1000
Next NDBCNTR 1002
Next NDBFS 2000
Next DBACC 3002
Next DBTUP 4014

@@ -501,3 +501,4 @@ TUP:
NDBCNTR:

1000: Crash insertion on SystemError::CopyFragRef
1001: Delay sending NODE_FAILREP (to own node), until error is cleared
@@ -404,6 +404,9 @@ void Dbdict::execFSCLOSECONF(Signal* signal)
  case FsConnectRecord::OPEN_READ_SCHEMA2:
    openSchemaFile(signal, 1, fsPtr.i, false, false);
    break;
  case FsConnectRecord::OPEN_READ_TAB_FILE2:
    openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
    break;
  default:
    jamLine((fsPtr.p->fsState & 0xFFF));
    ndbrequire(false);

@@ -783,8 +786,11 @@ void Dbdict::readTableConf(Signal* signal,
void Dbdict::readTableRef(Signal* signal,
                          FsConnectRecordPtr fsPtr)
{
  /**
   * First close corrupt file
   */
  fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
  openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
  closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
  return;
}//Dbdict::readTableRef()
@@ -4459,12 +4459,18 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
  jam();
  const Uint32 nodeId = failedNodePtr.i;

  if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){
  if (isMaster() && c_lcpState.m_participatingLQH.get(failedNodePtr.i))
  {
    /*----------------------------------------------------*/
    /* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE     */
    /* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT      */
    /* THE NODE HAVE MISSED A LOCAL CHECKPOINT.            */
    /*----------------------------------------------------*/

    /**
     * Bug#28717, Only master should do this, as this status is copied
     *   to other nodes
     */
    switch (failedNodePtr.p->activeStatus) {
    case Sysfile::NS_Active:
      jam();
@@ -2668,7 +2668,8 @@ private:
  UintR cfirstfreeLogFile;
  UintR clogFileFileSize;

#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */
#define ZLFO_MIN_FILE_SIZE 256
// RedoBuffer/32K minimum ZLFO_MIN_FILE_SIZE
  LogFileOperationRecord *logFileOperationRecord;
  LogFileOperationRecordPtr lfoPtr;
  UintR cfirstfreeLfo;

@@ -2685,7 +2686,7 @@ private:
  UintR cfirstfreePageRef;
  UintR cpageRefFileSize;

#define ZSCANREC_FILE_SIZE 100
// Configurable
  ArrayPool<ScanRecord> c_scanRecordPool;
  ScanRecordPtr scanptr;
  UintR cscanNoFreeRec;

@@ -32,11 +32,11 @@ void Dblqh::initData()
  chostFileSize = MAX_NDB_NODES;
  clcpFileSize = ZNO_CONCURRENT_LCP;
  clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
  clfoFileSize = ZLFO_FILE_SIZE;
  clfoFileSize = 0;
  clogFileFileSize = 0;
  clogPartFileSize = ZLOG_PART_FILE_SIZE;
  cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
  cscanrecFileSize = ZSCANREC_FILE_SIZE;
  cscanrecFileSize = 0;
  ctabrecFileSize = 0;
  ctcConnectrecFileSize = 0;
  ctcNodeFailrecFileSize = MAX_NDB_NODES;

@@ -338,6 +338,11 @@ Dblqh::Dblqh(const class Configuration & conf):

  initData();

  /* maximum number of log file operations */
  clfoFileSize = clogPageFileSize;
  if (clfoFileSize < ZLFO_MIN_FILE_SIZE)
    clfoFileSize = ZLFO_MIN_FILE_SIZE;

#ifdef VM_TRACE
  {
    void* tmp[] = {
@@ -1375,6 +1375,13 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal)
{
  jamEntry();

  if (ERROR_INSERTED(1001))
  {
    sendSignalWithDelay(reference(), GSN_NODE_FAILREP, signal, 100,
                        signal->getLength());
    return;
  }

  const NodeFailRep * nodeFail = (NodeFailRep *)&signal->theData[0];
  NdbNodeBitmask allFailed;
  allFailed.assign(NdbNodeBitmask::Size, nodeFail->theNodes);

@@ -569,7 +569,7 @@ AsyncFile*
Ndbfs::createAsyncFile(){

  // Check limit of open files
  if (theFiles.size()+1 == m_maxFiles) {
  if (theFiles.size() == m_maxFiles) {
    // Print info about all open files
    for (unsigned i = 0; i < theFiles.size(); i++){
      AsyncFile* file = theFiles[i];
@@ -16,6 +16,7 @@

#include <ndb_global.h>
#include <my_pthread.h>
#include <sys/times.h>

#include "WatchDog.hpp"
#include "GlobalData.hpp"

@@ -129,6 +130,13 @@ WatchDog::run(){
      break;
    }//switch
    g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
    {
      struct tms my_tms;
      times(&my_tms);
      g_eventLogger.info("User time: %llu System time: %llu",
                         (Uint64)my_tms.tms_utime,
                         (Uint64)my_tms.tms_stime);
    }
    if(alerts == 3){
      shutdownSystem(last_stuck_action);
    }
@@ -67,6 +67,8 @@ public:
  int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber);
  int getRandomNotMasterNodeId(int randomNumber);

  NdbMgmHandle handle;

protected:

  int waitClusterState(ndb_mgm_node_status _status,

@@ -89,7 +91,6 @@ protected:

  bool connected;
  BaseString addr;
  NdbMgmHandle handle;
  ndb_mgm_configuration * m_config;
protected:
  ndb_mgm_configuration * getConfig();

@@ -1422,6 +1422,10 @@ TESTCASE("Scan_4006",
  INITIALIZER(runScan_4006);
  FINALIZER(runClearTable);
}
TESTCASE("Bug28443",
         ""){
  INITIALIZER(runBug28443);
}
TESTCASE("ExecuteAsynch",
         "Check that executeAsync() works (BUG#27495)\n"){
  INITIALIZER(runTestExecuteAsynch);
@@ -1215,6 +1215,84 @@ runBug27283(NDBT_Context* ctx, NDBT_Step* step)
    }
    pos = 0;
  }

  return NDBT_OK;
}

int
runBug28717(NDBT_Context* ctx, NDBT_Step* step)
{
  int result = NDBT_OK;
  int loops = ctx->getNumLoops();
  int records = ctx->getNumRecords();
  Ndb* pNdb = GETNDB(step);
  NdbRestarter res;

  if (res.getNumDbNodes() < 4)
  {
    return NDBT_OK;
  }

  int master = res.getMasterNodeId();
  int node0 = res.getRandomNodeOtherNodeGroup(master, rand());
  int node1 = res.getRandomNodeSameNodeGroup(node0, rand());

  ndbout_c("master: %d node0: %d node1: %d", master, node0, node1);

  if (res.restartOneDbNode(node0, false, true, true))
  {
    return NDBT_FAILED;
  }

  {
    int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 };
    NdbLogEventHandle handle =
      ndb_mgm_create_logevent_handle(res.handle, filter);

    int dump[] = { DumpStateOrd::DihStartLcpImmediately };
    struct ndb_logevent event;

    for (Uint32 i = 0; i<3; i++)
    {
      res.dumpStateOneNode(master, dump, 1);
      while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
            event.type != NDB_LE_LocalCheckpointStarted);
      while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
            event.type != NDB_LE_LocalCheckpointCompleted);
    }
  }

  if (res.waitNodesNoStart(&node0, 1))
    return NDBT_FAILED;

  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };

  if (res.dumpStateOneNode(node0, val2, 2))
    return NDBT_FAILED;

  if (res.insertErrorInNode(node0, 5010))
    return NDBT_FAILED;

  if (res.insertErrorInNode(node1, 1001))
    return NDBT_FAILED;

  if (res.startNodes(&node0, 1))
    return NDBT_FAILED;

  NdbSleep_SecSleep(3);

  if (res.insertErrorInNode(node1, 0))
    return NDBT_FAILED;

  if (res.waitNodesNoStart(&node0, 1))
    return NDBT_FAILED;

  if (res.startNodes(&node0, 1))
    return NDBT_FAILED;

  if (res.waitClusterStarted())
    return NDBT_FAILED;

  return NDBT_OK;
}

@@ -1552,6 +1630,9 @@ TESTCASE("Bug27003", ""){
TESTCASE("Bug27283", ""){
  INITIALIZER(runBug27283);
}
TESTCASE("Bug28717", ""){
  INITIALIZER(runBug28717);
}
NDBT_TESTSUITE_END(testNodeRestart);

int main(int argc, const char** argv){
@@ -508,6 +508,10 @@ max-time: 1500
cmd: testDict
args: -n CreateAndDrop

max-time: 1000
cmd: testNodeRestart
args: -n Bug28717 T1

max-time: 1500
cmd: testDict
args: -n CreateAndDropAtRandom -l 200 T1
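Assuming the standard NDBT test binaries are built, the new case can also be run by hand against a running cluster, in the same way the autotest entry above invokes it (illustrative command line):

# Illustrative: run only the new node-restart test case against table T1.
./testNodeRestart -n Bug28717 T1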
@@ -43,7 +43,8 @@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
        @innodb_system_libs@ \
        @ndbcluster_libs@ @ndbcluster_system_libs@ \
        $(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \
        $(yassl_libs) $(openssl_libs)
        $(yassl_libs) $(openssl_libs) \
        @MYSQLD_EXTRA_LIBS@

noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
        item_strfunc.h item_timefunc.h item_uniq.h \
@@ -234,11 +234,6 @@ inline
int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
                      bool force_release)
{
#ifdef NOT_USED
  int m_batch_execute= 0;
  if (m_batch_execute)
    return 0;
#endif
  h->release_completed_operations(trans, force_release);
  return trans->execute(NdbTransaction::NoCommit,
                        NdbTransaction::AbortOnError,

@@ -248,11 +243,6 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
inline
int execute_commit(ha_ndbcluster *h, NdbTransaction *trans)
{
#ifdef NOT_USED
  int m_batch_execute= 0;
  if (m_batch_execute)
    return 0;
#endif
  return trans->execute(NdbTransaction::Commit,
                        NdbTransaction::AbortOnError,
                        h->m_force_send);

@@ -261,11 +251,6 @@ int execute_commit(ha_ndbcluster *h, NdbTransaction *trans)
inline
int execute_commit(THD *thd, NdbTransaction *trans)
{
#ifdef NOT_USED
  int m_batch_execute= 0;
  if (m_batch_execute)
    return 0;
#endif
  return trans->execute(NdbTransaction::Commit,
                        NdbTransaction::AbortOnError,
                        thd->variables.ndb_force_send);

@@ -275,11 +260,6 @@ inline
int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans,
                         bool force_release)
{
#ifdef NOT_USED
  int m_batch_execute= 0;
  if (m_batch_execute)
    return 0;
#endif
  h->release_completed_operations(trans, force_release);
  return trans->execute(NdbTransaction::NoCommit,
                        NdbTransaction::AO_IgnoreError,

@@ -2469,7 +2449,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
   * If IGNORE the ignore constraint violations on primary and unique keys,
   * but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE
   */
  if (m_ignore_dup_key && thd->lex->sql_command == SQLCOM_UPDATE)
  if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
                           thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
  {
    int peek_res= peek_indexed_rows(new_data, pk_update);
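The added SQLCOM_UPDATE_MULTI check extends the duplicate-key peek to multi-table UPDATE, so UPDATE IGNORE across several NDB tables now behaves like the single-table case; this is exactly what the new ndb_basic test above exercises. A hedged command-line illustration, reusing the table and column names from that test:

# Illustrative: the statement class now covered by the SQLCOM_UPDATE_MULTI check.
mysql test -e "update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;"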