Merge bk-internal.mysql.com:/data0/bk/mysql-5.1-new
into  bk-internal.mysql.com:/data0/bk/mysql-5.1-kt
rburnett@bk-internal.mysql.com 2006-05-22 17:25:46 +02:00
commit 179d483355
62 changed files with 1470 additions and 681 deletions

View file

@ -2346,7 +2346,6 @@ print_table_data(MYSQL_RES *result)
uint visible_length;
uint extra_padding;
/* If this column may have a null value, use "NULL" for empty. */
if (! not_null_flag[off] && (cur[off] == NULL))
{
buffer= "NULL";

View file

@ -1087,7 +1087,7 @@ dnl Is this the right match for DEC OSF on alpha?
# Edit Makefile.in files.
#
echo -n "configuring Makefile.in files for NetWare... "
for file in sql/Makefile.in libmysql/Makefile.in libmysql_r/Makefile.in sql/share/Makefile.in strings/Makefile.in client/Makefile.in
for file in sql/Makefile.in extra/Makefile.in client/Makefile.in
do
# echo "#### $file ####"
filedir="`dirname $file`"
@ -1108,32 +1108,13 @@ dnl Is this the right match for DEC OSF on alpha?
# Add library dependencies to mysqld_DEPENDENCIES
lib_DEPENDENCIES="\$(pstack_libs) \$(openssl_libs) \$(yassl_libs)"
cat > $filesed << EOF
s,\(^.*\$(MAKE) gen_lex_hash\)\$(EXEEXT),#\1,
s,\(\./gen_lex_hash\)\$(EXEEXT),\1.linux,
s%\(mysqld_DEPENDENCIES = \) %\1$lib_DEPENDENCIES %
s%\(mysqld_DEPENDENCIES = \)%\1$lib_DEPENDENCIES %
EOF
;;
sql/share/Makefile.in)
extra/Makefile.in)
cat > $filesed << EOF
s,\(extra/comp_err\),\1.linux,
EOF
;;
libmysql/Makefile.in)
cat > $filesed << EOF
s,\(\./conf_to_src\)\( \$(top_srcdir)\),\1.linux\2,
s,\(: conf_to_src\),\1.linux,
EOF
;;
libmysql_r/Makefile.in)
cat > $filesed << EOF
s,\(\./conf_to_src\)\( \$(top_srcdir)\),\1.linux\2,
s,\(: conf_to_src\),\1.linux,
EOF
;;
strings/Makefile.in)
cat > $filesed << EOF
s,\(\./conf_to_src\)\( \$(top_srcdir)\),\1.linux\2,
s,\(: conf_to_src\),\1.linux,
s,\(extra/comp_err\)\$(EXEEXT),\1.linux,
EOF
;;
client/Makefile.in)

View file

@ -226,12 +226,14 @@ int sigwait(sigset_t *setp, int *sigp); /* Use our implemention */
we want to make sure that no such flags are set.
*/
#if defined(HAVE_SIGACTION) && !defined(my_sigset)
#define my_sigset(A,B) do { struct sigaction s; sigset_t set; \
#define my_sigset(A,B) do { struct sigaction s; sigset_t set; int rc; \
DBUG_ASSERT((A) != 0); \
sigemptyset(&set); \
s.sa_handler = (B); \
s.sa_mask = set; \
s.sa_flags = 0; \
sigaction((A), &s, (struct sigaction *) NULL); \
rc= sigaction((A), &s, (struct sigaction *) NULL);\
DBUG_ASSERT(rc == 0); \
} while (0)
#elif defined(HAVE_SIGSET) && !defined(my_sigset)
#define my_sigset(A,B) sigset((A),(B))
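As context for the hunk above: the change makes the macro store sigaction()'s return value in rc and assert that it is zero instead of silently ignoring failures. Below is a minimal standalone sketch of the same pattern using only the POSIX signal API; the handler and the choice of SIGUSR1 are arbitrary placeholders, not MySQL code.

#include <assert.h>
#include <signal.h>
#include <stddef.h>

static void example_handler(int sig) { (void) sig; }

/* Install a handler with an empty signal mask and no sa_flags set,
   checking sigaction()'s return value as the revised macro now does. */
static void install_handler(int signo)
{
  struct sigaction s;
  sigset_t set;
  int rc;
  sigemptyset(&set);
  s.sa_handler = example_handler;
  s.sa_mask = set;
  s.sa_flags = 0;
  rc = sigaction(signo, &s, (struct sigaction *) NULL);
  assert(rc == 0);   /* mirrors DBUG_ASSERT(rc == 0) in the macro */
}

int main(void)
{
  install_handler(SIGUSR1);
  return 0;
}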

View file

@ -20,12 +20,6 @@
"Enable SSL for connection (automatically enabled with other flags). Disable with --skip-ssl.",
(gptr*) &opt_use_ssl, (gptr*) &opt_use_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
{"ssl-key", OPT_SSL_KEY, "X509 key in PEM format (implies --ssl).",
(gptr*) &opt_ssl_key, (gptr*) &opt_ssl_key, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"ssl-cert", OPT_SSL_CERT, "X509 cert in PEM format (implies --ssl).",
(gptr*) &opt_ssl_cert, (gptr*) &opt_ssl_cert, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"ssl-ca", OPT_SSL_CA,
"CA file in PEM format (check OpenSSL docs, implies --ssl).",
(gptr*) &opt_ssl_ca, (gptr*) &opt_ssl_ca, 0, GET_STR, REQUIRED_ARG,
@ -34,9 +28,15 @@
"CA directory (check OpenSSL docs, implies --ssl).",
(gptr*) &opt_ssl_capath, (gptr*) &opt_ssl_capath, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"ssl-cert", OPT_SSL_CERT, "X509 cert in PEM format (implies --ssl).",
(gptr*) &opt_ssl_cert, (gptr*) &opt_ssl_cert, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"ssl-cipher", OPT_SSL_CIPHER, "SSL cipher to use (implies --ssl).",
(gptr*) &opt_ssl_cipher, (gptr*) &opt_ssl_cipher, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"ssl-key", OPT_SSL_KEY, "X509 key in PEM format (implies --ssl).",
(gptr*) &opt_ssl_key, (gptr*) &opt_ssl_key, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
#ifdef MYSQL_CLIENT
{"ssl-verify-server-cert", OPT_SSL_VERIFY_SERVER_CERT,
"Verify server's \"Common Name\" in its cert against hostname used when connecting. This option is disabled by default.",

View file

@ -15,13 +15,18 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef HAVE_OPENSSL
static my_bool opt_use_ssl = 0;
static char *opt_ssl_key = 0;
static char *opt_ssl_cert = 0;
static char *opt_ssl_ca = 0;
static char *opt_ssl_capath = 0;
static char *opt_ssl_cipher = 0;
#ifdef SSL_VARS_NOT_STATIC
#define SSL_STATIC
#else
#define SSL_STATIC static
#endif
SSL_STATIC my_bool opt_use_ssl = 0;
SSL_STATIC char *opt_ssl_ca = 0;
SSL_STATIC char *opt_ssl_capath = 0;
SSL_STATIC char *opt_ssl_cert = 0;
SSL_STATIC char *opt_ssl_cipher = 0;
SSL_STATIC char *opt_ssl_key = 0;
#ifdef MYSQL_CLIENT
static my_bool opt_ssl_verify_server_cert= 0;
SSL_STATIC my_bool opt_ssl_verify_server_cert= 0;
#endif
#endif

View file

@ -88,11 +88,11 @@ INC_LIB= $(top_builddir)/regex/libregex.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/vio/libvio.a \
@mysql_plugin_libs@ \
$(yassl_las)
$(yassl_inc_libs)
if HAVE_YASSL
yassl_las = $(top_srcdir)/extra/yassl/src/libyassl.la \
$(top_srcdir)/extra/yassl/taocrypt/src/libtaocrypt.la
yassl_inc_libs= $(top_srcdir)/extra/yassl/src/.libs/libyassl.a \
$(top_srcdir)/extra/yassl/taocrypt/src/.libs/libtaocrypt.a
endif
# Storage engine specific compilation options
@ -135,12 +135,12 @@ else
(for arc in ./libmysqld_int.a $(INC_LIB); do \
arpath=`echo $$arc|sed 's|[^/]*$$||'|sed 's|\.libs/$$||'`; \
artmp=`echo $$arc|sed 's|^.*/|tmp/lib-|'`; \
for F in `$(AR) t $$arc`; do \
for F in `$(AR) t $$arc | grep -v SYMDEF`; do \
if test -e "$$arpath/$$F" ; then echo "$$arpath/$$F"; else \
mkdir $$artmp; cd $$artmp > /dev/null; \
$(AR) x ../../$$arc; \
cd $$current_dir > /dev/null; \
ls $$artmp/*; \
ls $$artmp/* | grep -v SYMDEF; \
continue 2; fi; done; \
done; echo $(libmysqld_a_DEPENDENCIES) ) | sort -u | xargs $(AR) cq libmysqld.a ; \
$(RANLIB) libmysqld.a ; \

View file

@ -821,144 +821,6 @@ SELECT MAX(id) FROM t1 WHERE id < 3 AND a=2 AND b=6;
MAX(id)
NULL
DROP TABLE t1;
create table t1m (a int) engine=myisam;
create table t1i (a int) engine=innodb;
create table t2m (a int) engine=myisam;
create table t2i (a int) engine=innodb;
insert into t2m values (5);
insert into t2i values (5);
select min(a) from t1m;
min(a)
NULL
select min(7) from t1m;
min(7)
NULL
select min(7) from DUAL;
min(7)
NULL
explain select min(7) from t2m join t1m;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
select min(7) from t2m join t1m;
min(7)
NULL
select max(a) from t1m;
max(a)
NULL
select max(7) from t1m;
max(7)
NULL
select max(7) from DUAL;
max(7)
NULL
explain select max(7) from t2m join t1m;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
select max(7) from t2m join t1m;
max(7)
NULL
select 1, min(a) from t1m where a=99;
1 min(a)
1 NULL
select 1, min(a) from t1m where 1=99;
1 min(a)
1 NULL
select 1, min(1) from t1m where a=99;
1 min(1)
1 NULL
select 1, min(1) from t1m where 1=99;
1 min(1)
1 NULL
select 1, max(a) from t1m where a=99;
1 max(a)
1 NULL
select 1, max(a) from t1m where 1=99;
1 max(a)
1 NULL
select 1, max(1) from t1m where a=99;
1 max(1)
1 NULL
select 1, max(1) from t1m where 1=99;
1 max(1)
1 NULL
select min(a) from t1i;
min(a)
NULL
select min(7) from t1i;
min(7)
NULL
select min(7) from DUAL;
min(7)
NULL
explain select min(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2i ALL NULL NULL NULL NULL 1
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select min(7) from t2i join t1i;
min(7)
NULL
select max(a) from t1i;
max(a)
NULL
select max(7) from t1i;
max(7)
NULL
select max(7) from DUAL;
max(7)
NULL
explain select max(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2i ALL NULL NULL NULL NULL 1
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select max(7) from t2i join t1i;
max(7)
NULL
select 1, min(a) from t1i where a=99;
1 min(a)
1 NULL
select 1, min(a) from t1i where 1=99;
1 min(a)
1 NULL
select 1, min(1) from t1i where a=99;
1 min(1)
1 NULL
select 1, min(1) from t1i where 1=99;
1 min(1)
1 NULL
select 1, max(a) from t1i where a=99;
1 max(a)
1 NULL
select 1, max(a) from t1i where 1=99;
1 max(a)
1 NULL
select 1, max(1) from t1i where a=99;
1 max(1)
1 NULL
select 1, max(1) from t1i where 1=99;
1 max(1)
1 NULL
explain select count(*), min(7), max(7) from t1m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1m system NULL NULL NULL NULL 0 const row not found
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t1m, t1i;
count(*) min(7) max(7)
0 NULL NULL
explain select count(*), min(7), max(7) from t1m, t2i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1m system NULL NULL NULL NULL 0 const row not found
1 SIMPLE t2i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t1m, t2i;
count(*) min(7) max(7)
0 NULL NULL
explain select count(*), min(7), max(7) from t2m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2m system NULL NULL NULL NULL 1
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t2m, t1i;
count(*) min(7) max(7)
0 NULL NULL
drop table t1m, t1i, t2m, t2i;
create table t2 (ff double);
insert into t2 values (2.2);
select cast(sum(distinct ff) as decimal(5,2)) from t2;

View file

@ -1046,4 +1046,10 @@ cast(ltrim(' 20.06 ') as decimal(19,2))
select cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2));
cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2))
20.06
select conv("18383815659218730760",10,10) + 0;
conv("18383815659218730760",10,10) + 0
1.8383815659219e+19
select "18383815659218730760" + 0;
"18383815659218730760" + 0
1.8383815659219e+19
End of 5.0 tests

View file

@ -12,7 +12,7 @@ explain extended select count(a) as b from t1 where a=0 having b >=0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
Note 1003 select count(`test`.`t1`.`a`) AS `b` from `test`.`t1` where 0 having (count(`test`.`t1`.`a`) >= 0)
Note 1003 select count(`test`.`t1`.`a`) AS `b` from `test`.`t1` where 0 having (`b` >= 0)
drop table t1;
CREATE TABLE t1 (
raw_id int(10) NOT NULL default '0',

View file

@ -54,3 +54,141 @@ c.c_id = 218 and expiredate is null;
slai_id
12
drop table t1, t2;
create table t1m (a int) engine=myisam;
create table t1i (a int) engine=innodb;
create table t2m (a int) engine=myisam;
create table t2i (a int) engine=innodb;
insert into t2m values (5);
insert into t2i values (5);
select min(a) from t1m;
min(a)
NULL
select min(7) from t1m;
min(7)
NULL
select min(7) from DUAL;
min(7)
NULL
explain select min(7) from t2m join t1m;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
select min(7) from t2m join t1m;
min(7)
NULL
select max(a) from t1m;
max(a)
NULL
select max(7) from t1m;
max(7)
NULL
select max(7) from DUAL;
max(7)
NULL
explain select max(7) from t2m join t1m;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
select max(7) from t2m join t1m;
max(7)
NULL
select 1, min(a) from t1m where a=99;
1 min(a)
1 NULL
select 1, min(a) from t1m where 1=99;
1 min(a)
1 NULL
select 1, min(1) from t1m where a=99;
1 min(1)
1 NULL
select 1, min(1) from t1m where 1=99;
1 min(1)
1 NULL
select 1, max(a) from t1m where a=99;
1 max(a)
1 NULL
select 1, max(a) from t1m where 1=99;
1 max(a)
1 NULL
select 1, max(1) from t1m where a=99;
1 max(1)
1 NULL
select 1, max(1) from t1m where 1=99;
1 max(1)
1 NULL
select min(a) from t1i;
min(a)
NULL
select min(7) from t1i;
min(7)
NULL
select min(7) from DUAL;
min(7)
NULL
explain select min(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2i ALL NULL NULL NULL NULL 1
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select min(7) from t2i join t1i;
min(7)
NULL
select max(a) from t1i;
max(a)
NULL
select max(7) from t1i;
max(7)
NULL
select max(7) from DUAL;
max(7)
NULL
explain select max(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2i ALL NULL NULL NULL NULL 1
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select max(7) from t2i join t1i;
max(7)
NULL
select 1, min(a) from t1i where a=99;
1 min(a)
1 NULL
select 1, min(a) from t1i where 1=99;
1 min(a)
1 NULL
select 1, min(1) from t1i where a=99;
1 min(1)
1 NULL
select 1, min(1) from t1i where 1=99;
1 min(1)
1 NULL
select 1, max(a) from t1i where a=99;
1 max(a)
1 NULL
select 1, max(a) from t1i where 1=99;
1 max(a)
1 NULL
select 1, max(1) from t1i where a=99;
1 max(1)
1 NULL
select 1, max(1) from t1i where 1=99;
1 max(1)
1 NULL
explain select count(*), min(7), max(7) from t1m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1m system NULL NULL NULL NULL 0 const row not found
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t1m, t1i;
count(*) min(7) max(7)
0 NULL NULL
explain select count(*), min(7), max(7) from t1m, t2i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1m system NULL NULL NULL NULL 0 const row not found
1 SIMPLE t2i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t1m, t2i;
count(*) min(7) max(7)
0 NULL NULL
explain select count(*), min(7), max(7) from t2m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2m system NULL NULL NULL NULL 1
1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t2m, t1i;
count(*) min(7) max(7)
0 NULL NULL
drop table t1m, t1i, t2m, t2i;

View file

@ -1151,8 +1151,8 @@ EXPLAIN
SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
WHERE t1.name LIKE 'A%' OR FALSE;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index PRIMARY,name name 23 NULL 3 Using where; Using index
1 SIMPLE t2 ref fkey fkey 5 test.t1.id 1 Using where; Using index
1 SIMPLE t2 index NULL fkey 5 NULL 5 Using index
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.fkey 1 Using where
DROP TABLE t1,t2;
DROP VIEW IF EXISTS v1,v2;
DROP TABLE IF EXISTS t1,t2;
@ -1176,3 +1176,38 @@ a b
3 3
DROP VIEW v1,v2;
DROP TABLE t1,t2;
CREATE TABLE t1 (a int);
CREATE TABLE t2 (b int);
INSERT INTO t1 VALUES (1), (2), (3), (4);
INSERT INTO t2 VALUES (2), (3);
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1=1);
a b
1 NULL
2 2
3 3
4 NULL
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1 OR 1);
a b
1 NULL
2 2
3 3
4 NULL
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (0 OR 1);
a b
1 NULL
2 2
3 3
4 NULL
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1=1 OR 2=2);
a b
1 NULL
2 2
3 3
4 NULL
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1=1 OR 1=0);
a b
1 NULL
2 2
3 3
4 NULL
DROP TABLE t1,t2;

View file

@ -18,6 +18,7 @@ select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
show tables like 't2';
Tables_in_test (t2)
reset master;
create table t2 (a int key) engine=ndbcluster;
insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
select * from t2 order by a limit 3;
@ -30,10 +31,12 @@ a
1
2
3
reset master;
select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
show tables like 't2';
Tables_in_test (t2)
reset master;
create table t2 (a int key) engine=ndbcluster;
insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
select * from t2 order by a limit 3;
@ -46,4 +49,5 @@ a
1
2
3
reset master;
drop table t2;

View file

@ -500,3 +500,69 @@ select count(*) from t1;
count(*)
0
drop table t1;
create table t1 (
a varchar(40) not null,
b mediumint not null,
t text,
c varchar(2) not null,
d bigint not null,
primary key (a,b,c),
key (c,a),
unique key (d)
) engine=ndb;
insert into t1 (a,b,c,d,t) values ('a',1110,'a',1,@v1);
insert into t1 (a,b,c,d,t) values ('b',1110,'a',2,@v2);
insert into t1 (a,b,c,d,t) values ('a',1110,'b',3,@v3);
insert into t1 (a,b,c,d,t) values ('b',1110,'b',4,@v4);
select a,b,c,d,sha1(t) from t1 order by c,a;
a b c d sha1(t)
a 1110 a 1 558a30713786aa72f66abc1e6a521d55aacdeeb5
b 1110 a 2 b238654911689bfb626a3ef9dba4a1ca074e6a5e
a 1110 b 3 2b6515f29c20b8e9e17cc597527e516c0de8d612
b 1110 b 4 NULL
select a,b,c,d,sha1(t) from t1 where a='a' and b=1110 and c='a';
a b c d sha1(t)
a 1110 a 1 558a30713786aa72f66abc1e6a521d55aacdeeb5
select a,b,c,d,sha1(t) from t1 where a='a' and b=1110 and c='b';
a b c d sha1(t)
a 1110 b 3 2b6515f29c20b8e9e17cc597527e516c0de8d612
update t1 set t=@v4 where a='b' and b=1110 and c='a';
update t1 set t=@v2 where a='b' and b=1110 and c='b';
select a,b,c,d,sha1(t) from t1 order by c,a;
a b c d sha1(t)
a 1110 a 1 558a30713786aa72f66abc1e6a521d55aacdeeb5
b 1110 a 2 NULL
a 1110 b 3 2b6515f29c20b8e9e17cc597527e516c0de8d612
b 1110 b 4 b238654911689bfb626a3ef9dba4a1ca074e6a5e
update t1 set t=@v2 where d=2;
update t1 set t=@v4 where d=4;
select a,b,c,d,sha1(t) from t1 order by c,a;
a b c d sha1(t)
a 1110 a 1 558a30713786aa72f66abc1e6a521d55aacdeeb5
b 1110 a 2 b238654911689bfb626a3ef9dba4a1ca074e6a5e
a 1110 b 3 2b6515f29c20b8e9e17cc597527e516c0de8d612
b 1110 b 4 NULL
update t1 set t=@v4 where a='b' and c='a';
update t1 set t=@v2 where a='b' and c='b';
select a,b,c,d,sha1(t) from t1 order by c,a;
a b c d sha1(t)
a 1110 a 1 558a30713786aa72f66abc1e6a521d55aacdeeb5
b 1110 a 2 NULL
a 1110 b 3 2b6515f29c20b8e9e17cc597527e516c0de8d612
b 1110 b 4 b238654911689bfb626a3ef9dba4a1ca074e6a5e
update t1 set t=@v2 where b+d=1112;
update t1 set t=@v4 where b+d=1114;
select a,b,c,d,sha1(t) from t1 order by c,a;
a b c d sha1(t)
a 1110 a 1 558a30713786aa72f66abc1e6a521d55aacdeeb5
b 1110 a 2 b238654911689bfb626a3ef9dba4a1ca074e6a5e
a 1110 b 3 2b6515f29c20b8e9e17cc597527e516c0de8d612
b 1110 b 4 NULL
delete from t1 where a='a' and b=1110 and c='a';
delete from t1 where a='b' and c='a';
delete from t1 where d=3;
delete from t1 where b+d=1114;
select count(*) from t1;
count(*)
0
drop table t1;

View file

@ -3177,3 +3177,9 @@ ERROR 42S22: Unknown column 'no_such_column' in 'where clause'
SELECT * FROM t1 WHERE no_such_column = ANY (SELECT 1);
ERROR 42S22: Unknown column 'no_such_column' in 'IN/ALL/ANY subquery'
DROP TABLE t1;
create table t1 (i int, j bigint);
insert into t1 values (1, 2), (2, 2), (3, 2);
select * from (select min(i) from t1 where j=(select * from (select min(j) from t1) t2)) t3;
min(i)
1
drop table t1;

View file

@ -639,6 +639,35 @@ select @@version, @@version_comment, @@version_compile_machine,
@@version_compile_os;
@@version @@version_comment @@version_compile_machine @@version_compile_os
# # # #
select @@basedir, @@datadir, @@tmpdir;
@@basedir @@datadir @@tmpdir
# # #
show variables like 'basedir';
Variable_name Value
basedir #
show variables like 'datadir';
Variable_name Value
datadir #
show variables like 'tmpdir';
Variable_name Value
tmpdir #
select @@ssl_ca, @@ssl_capath, @@ssl_cert, @@ssl_cipher, @@ssl_key;
@@ssl_ca @@ssl_capath @@ssl_cert @@ssl_cipher @@ssl_key
# # # # #
show variables like 'ssl%';
Variable_name Value
ssl_ca #
ssl_capath #
ssl_cert #
ssl_cipher #
ssl_key #
select @@log_queries_not_using_indexes;
@@log_queries_not_using_indexes
0
show variables like 'log_queries_not_using_indexes';
Variable_name Value
log_queries_not_using_indexes OFF
End of 5.0 tests
set global binlog_cache_size =@my_binlog_cache_size;
set global connect_timeout =@my_connect_timeout;
set global delayed_insert_timeout =@my_delayed_insert_timeout;
@ -666,4 +695,3 @@ set global server_id =@my_server_id;
set global slow_launch_time =@my_slow_launch_time;
set global storage_engine =@my_storage_engine;
set global thread_cache_size =@my_thread_cache_size;
End of 5.0 tests

View file

@ -2660,3 +2660,37 @@ SELECT * FROM v1;
id t COUNT(*)
DROP VIEW v1;
DROP TABLE t1;
CREATE TABLE t1 (i INT, j BIGINT);
INSERT INTO t1 VALUES (1, 2), (2, 2), (3, 2);
CREATE VIEW v1 AS SELECT MIN(j) AS j FROM t1;
CREATE VIEW v2 AS SELECT MIN(i) FROM t1 WHERE j = ( SELECT * FROM v1 );
SELECT * FROM v2;
MIN(i)
1
DROP VIEW v2, v1;
DROP TABLE t1;
CREATE TABLE t1(
fName varchar(25) NOT NULL,
lName varchar(25) NOT NULL,
DOB date NOT NULL,
uID int unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY);
INSERT INTO t1(fName, lName, DOB) VALUES
('Hank', 'Hill', '1964-09-29'),
('Tom', 'Adams', '1908-02-14'),
('Homer', 'Simpson', '1968-03-05');
CREATE VIEW v1 AS
SELECT (year(now())-year(DOB)) AS Age
FROM t1 HAVING Age < 75;
SHOW CREATE VIEW v1;
View Create View
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75)
SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75;
Age
42
38
SELECT * FROM v1;
Age
42
38
DROP VIEW v1;
DROP TABLE t1;

View file

@ -16,7 +16,7 @@ events_scheduling : BUG#19170 2006-04-26 andrey Test case of 19170 fails
events_logs_tests : BUG#17619 2006-05-16 andrey Test case problems
ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_binlog_discover : BUG#19395 2006-04-28 tomas/knielsen mysqld does not always detect cluster shutdown
#ndb_binlog_discover : BUG#19395 2006-04-28 tomas/knielsen mysqld does not always detect cluster shutdown
#ndb_cache2 : BUG#18597 2006-03-28 brian simultaneous drop table and ndb statistics update triggers node failure
#ndb_cache_multi2 : BUG#18597 2006-04-10 kent simultaneous drop table and ndb statistics update triggers node failure
ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed

View file

@ -539,77 +539,6 @@ INSERT INTO t1 VALUES
SELECT MAX(id) FROM t1 WHERE id < 3 AND a=2 AND b=6;
DROP TABLE t1;
#
# Bug #12882 min/max inconsistent on empty table
#
--disable_warnings
create table t1m (a int) engine=myisam;
create table t1i (a int) engine=innodb;
create table t2m (a int) engine=myisam;
create table t2i (a int) engine=innodb;
--enable_warnings
insert into t2m values (5);
insert into t2i values (5);
# test with MyISAM
select min(a) from t1m;
select min(7) from t1m;
select min(7) from DUAL;
explain select min(7) from t2m join t1m;
select min(7) from t2m join t1m;
select max(a) from t1m;
select max(7) from t1m;
select max(7) from DUAL;
explain select max(7) from t2m join t1m;
select max(7) from t2m join t1m;
select 1, min(a) from t1m where a=99;
select 1, min(a) from t1m where 1=99;
select 1, min(1) from t1m where a=99;
select 1, min(1) from t1m where 1=99;
select 1, max(a) from t1m where a=99;
select 1, max(a) from t1m where 1=99;
select 1, max(1) from t1m where a=99;
select 1, max(1) from t1m where 1=99;
# test with InnoDB
select min(a) from t1i;
select min(7) from t1i;
select min(7) from DUAL;
explain select min(7) from t2i join t1i;
select min(7) from t2i join t1i;
select max(a) from t1i;
select max(7) from t1i;
select max(7) from DUAL;
explain select max(7) from t2i join t1i;
select max(7) from t2i join t1i;
select 1, min(a) from t1i where a=99;
select 1, min(a) from t1i where 1=99;
select 1, min(1) from t1i where a=99;
select 1, min(1) from t1i where 1=99;
select 1, max(a) from t1i where a=99;
select 1, max(a) from t1i where 1=99;
select 1, max(1) from t1i where a=99;
select 1, max(1) from t1i where 1=99;
# mixed MyISAM/InnoDB test
explain select count(*), min(7), max(7) from t1m, t1i;
select count(*), min(7), max(7) from t1m, t1i;
explain select count(*), min(7), max(7) from t1m, t2i;
select count(*), min(7), max(7) from t1m, t2i;
explain select count(*), min(7), max(7) from t2m, t1i;
select count(*), min(7), max(7) from t2m, t1i;
drop table t1m, t1i, t2m, t2i;
# End of 4.1 tests
#

View file

@ -698,4 +698,10 @@ select cast(rtrim(' 20.06 ') as decimal(19,2));
select cast(ltrim(' 20.06 ') as decimal(19,2));
select cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2));
#
# Bug #13975: "same string" + 0 has 2 different results
#
select conv("18383815659218730760",10,10) + 0;
select "18383815659218730760" + 0;
--echo End of 5.0 tests

View file

@ -57,3 +57,75 @@ where
c.c_id = 218 and expiredate is null;
drop table t1, t2;
#
# Bug #12882 min/max inconsistent on empty table
#
--disable_warnings
create table t1m (a int) engine=myisam;
create table t1i (a int) engine=innodb;
create table t2m (a int) engine=myisam;
create table t2i (a int) engine=innodb;
--enable_warnings
insert into t2m values (5);
insert into t2i values (5);
# test with MyISAM
select min(a) from t1m;
select min(7) from t1m;
select min(7) from DUAL;
explain select min(7) from t2m join t1m;
select min(7) from t2m join t1m;
select max(a) from t1m;
select max(7) from t1m;
select max(7) from DUAL;
explain select max(7) from t2m join t1m;
select max(7) from t2m join t1m;
select 1, min(a) from t1m where a=99;
select 1, min(a) from t1m where 1=99;
select 1, min(1) from t1m where a=99;
select 1, min(1) from t1m where 1=99;
select 1, max(a) from t1m where a=99;
select 1, max(a) from t1m where 1=99;
select 1, max(1) from t1m where a=99;
select 1, max(1) from t1m where 1=99;
# test with InnoDB
select min(a) from t1i;
select min(7) from t1i;
select min(7) from DUAL;
explain select min(7) from t2i join t1i;
select min(7) from t2i join t1i;
select max(a) from t1i;
select max(7) from t1i;
select max(7) from DUAL;
explain select max(7) from t2i join t1i;
select max(7) from t2i join t1i;
select 1, min(a) from t1i where a=99;
select 1, min(a) from t1i where 1=99;
select 1, min(1) from t1i where a=99;
select 1, min(1) from t1i where 1=99;
select 1, max(a) from t1i where a=99;
select 1, max(a) from t1i where 1=99;
select 1, max(1) from t1i where a=99;
select 1, max(1) from t1i where 1=99;
# mixed MyISAM/InnoDB test
explain select count(*), min(7), max(7) from t1m, t1i;
select count(*), min(7), max(7) from t1m, t1i;
explain select count(*), min(7), max(7) from t1m, t2i;
select count(*), min(7), max(7) from t1m, t2i;
explain select count(*), min(7), max(7) from t2m, t1i;
select count(*), min(7), max(7) from t2m, t1i;
drop table t1m, t1i, t2m, t2i;

View file

@ -805,3 +805,21 @@ SELECT v1.a, v2. b
DROP VIEW v1,v2;
DROP TABLE t1,t2;
#
# Bug 19816: LEFT OUTER JOIN with constant ORed predicates in WHERE clause
#
CREATE TABLE t1 (a int);
CREATE TABLE t2 (b int);
INSERT INTO t1 VALUES (1), (2), (3), (4);
INSERT INTO t2 VALUES (2), (3);
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1=1);
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1 OR 1);
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (0 OR 1);
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1=1 OR 2=2);
SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.b WHERE (1=1 OR 1=0);
DROP TABLE t1,t2;

View file

@ -43,6 +43,7 @@ select * from t2 order by a limit 3;
--error ER_NO_SUCH_TABLE
select * from t2;
show tables like 't2';
reset master;
create table t2 (a int key) engine=ndbcluster;
insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
select * from t2 order by a limit 3;
@ -50,6 +51,7 @@ select * from t2 order by a limit 3;
# server 1 should have a stale cache, and in this case wrong frm, transaction must be retried
--connection server1
select * from t2 order by a limit 3;
reset master;
--exec $NDB_MGM --no-defaults -e "all restart -i" >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults >> $NDB_TOOLS_OUTPUT
@ -60,6 +62,7 @@ select * from t2 order by a limit 3;
--error ER_NO_SUCH_TABLE
select * from t2;
show tables like 't2';
reset master;
create table t2 (a int key) engine=ndbcluster;
insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
select * from t2 order by a limit 3;
@ -67,6 +70,7 @@ select * from t2 order by a limit 3;
# server 2 should have a stale cache, but with right frm, transaction need not be retried
--connection server2
select * from t2 order by a limit 3;
reset master;
drop table t2;
# End of 4.1 tests

View file

@ -428,4 +428,60 @@ truncate t1;
select count(*) from t1;
drop table t1;
# -- bug#19956 - var* key, complex key
create table t1 (
a varchar(40) not null,
b mediumint not null,
t text,
c varchar(2) not null,
d bigint not null,
primary key (a,b,c),
key (c,a),
unique key (d)
) engine=ndb;
--disable_query_log
set @s1 = 'rggurloniukyehuxdbfkkyzlceixzrehqhvxvxbpwizzvjzpucqmzrhzxzfau';
set @s2 = 'ykyymbzqgqlcjhlhmyqelfoaaohvtbekvifukdtnvcrrjveevfakxarxexomz';
set @s3 = 'dbnfqyzgtqxalcrwtfsqabknvtfcbpoonxsjiqvmhnfikxxhcgoexlkoezvah';
set @v1 = repeat(@s1,123);
set @v2 = repeat(@s2,234);
set @v3 = repeat(@s3,345);
set @v4 = NULL;
--enable_query_log
insert into t1 (a,b,c,d,t) values ('a',1110,'a',1,@v1);
insert into t1 (a,b,c,d,t) values ('b',1110,'a',2,@v2);
insert into t1 (a,b,c,d,t) values ('a',1110,'b',3,@v3);
insert into t1 (a,b,c,d,t) values ('b',1110,'b',4,@v4);
select a,b,c,d,sha1(t) from t1 order by c,a;
select a,b,c,d,sha1(t) from t1 where a='a' and b=1110 and c='a';
select a,b,c,d,sha1(t) from t1 where a='a' and b=1110 and c='b';
update t1 set t=@v4 where a='b' and b=1110 and c='a';
update t1 set t=@v2 where a='b' and b=1110 and c='b';
select a,b,c,d,sha1(t) from t1 order by c,a;
update t1 set t=@v2 where d=2;
update t1 set t=@v4 where d=4;
select a,b,c,d,sha1(t) from t1 order by c,a;
update t1 set t=@v4 where a='b' and c='a';
update t1 set t=@v2 where a='b' and c='b';
select a,b,c,d,sha1(t) from t1 order by c,a;
update t1 set t=@v2 where b+d=1112;
update t1 set t=@v4 where b+d=1114;
select a,b,c,d,sha1(t) from t1 order by c,a;
delete from t1 where a='a' and b=1110 and c='a';
delete from t1 where a='b' and c='a';
delete from t1 where d=3;
delete from t1 where b+d=1114;
select count(*) from t1;
drop table t1;
# End of 4.1 tests

View file

@ -194,7 +194,7 @@ drop table t1;
#
#14157: utf8 encoding in binlog without set character_set_client
#
--exec $MYSQL --character-sets-dir=../sql/share/charsets/ --default-character-set=koi8r test -e 'create table t1 (a int); set names koi8r; create temporary table `ÑÝÉË` (a int); insert into `ÑÝÉË` values (1); insert into t1 select * from `ÑÝÉË`'
--exec $MYSQL --character-sets-dir=../sql/share/charsets/ --default-character-set=latin1 test -e 'create table t1 (a int); set names latin1; create temporary table `äöüÄÖÜ` (a int); insert into `äöüÄÖÜ` values (1); insert into t1 select * from `äöüÄÖÜ`'
sync_slave_with_master;
#connection slave;

View file

@ -2100,3 +2100,12 @@ CREATE VIEW v2 AS SELECT * FROM t1 WHERE no_such_column = (SELECT 1);
SELECT * FROM t1 WHERE no_such_column = ANY (SELECT 1);
DROP TABLE t1;
#
# Bug#19077: A nested materialized derived table is used before being populated.
#
create table t1 (i int, j bigint);
insert into t1 values (1, 2), (2, 2), (3, 2);
select * from (select min(i) from t1 where j=(select * from (select min(j) from t1) t2)) t3;
drop table t1;

View file

@ -540,7 +540,42 @@ select @@version, @@version_comment, @@version_compile_machine,
@@version_compile_os;
#
# Bug #19263: variables.test doesn't clean up after itself (II/II -- restore)
# Bug #1039: make tmpdir and datadir available as @@variables (also included
# basedir)
#
# Don't actually output, since it depends on the system
--replace_column 1 # 2 # 3 #
select @@basedir, @@datadir, @@tmpdir;
--replace_column 2 #
show variables like 'basedir';
--replace_column 2 #
show variables like 'datadir';
--replace_column 2 #
show variables like 'tmpdir';
#
# Bug #19606: make ssl settings available via SHOW VARIABLES and @@variables
#
# Don't actually output, since it depends on the system
--replace_column 1 # 2 # 3 # 4 # 5 #
select @@ssl_ca, @@ssl_capath, @@ssl_cert, @@ssl_cipher, @@ssl_key;
--replace_column 2 #
show variables like 'ssl%';
#
# Bug #19616: make log_queries_not_using_indexes available in SHOW VARIABLES
# and as @@log_queries_not_using_indexes
#
select @@log_queries_not_using_indexes;
show variables like 'log_queries_not_using_indexes';
--echo End of 5.0 tests
# This is at the very end, after the versioned tests, since it involves doing
# cleanup
#
# Bug #19263: variables.test doesn't clean up after itself (II/II --
# restore)
#
set global binlog_cache_size =@my_binlog_cache_size;
set global connect_timeout =@my_connect_timeout;
@ -569,5 +604,3 @@ set global server_id =@my_server_id;
set global slow_launch_time =@my_slow_launch_time;
set global storage_engine =@my_storage_engine;
set global thread_cache_size =@my_thread_cache_size;
--echo End of 5.0 tests

View file

@ -2533,3 +2533,42 @@ SELECT * FROM v1;
DROP VIEW v1;
DROP TABLE t1;
#
# Bug#19077: A nested materialized view is used before being populated.
#
CREATE TABLE t1 (i INT, j BIGINT);
INSERT INTO t1 VALUES (1, 2), (2, 2), (3, 2);
CREATE VIEW v1 AS SELECT MIN(j) AS j FROM t1;
CREATE VIEW v2 AS SELECT MIN(i) FROM t1 WHERE j = ( SELECT * FROM v1 );
SELECT * FROM v2;
DROP VIEW v2, v1;
DROP TABLE t1;
#
# Bug #19573: VIEW with HAVING that refers an alias name
#
CREATE TABLE t1(
fName varchar(25) NOT NULL,
lName varchar(25) NOT NULL,
DOB date NOT NULL,
uID int unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY);
INSERT INTO t1(fName, lName, DOB) VALUES
('Hank', 'Hill', '1964-09-29'),
('Tom', 'Adams', '1908-02-14'),
('Homer', 'Simpson', '1968-03-05');
CREATE VIEW v1 AS
SELECT (year(now())-year(DOB)) AS Age
FROM t1 HAVING Age < 75;
SHOW CREATE VIEW v1;
SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75;
SELECT * FROM v1;
DROP VIEW v1;
DROP TABLE t1;

View file

@ -111,6 +111,26 @@
obj:/lib/ld-*.so
}
{
pthread strstr uninit
Memcheck:Cond
fun:strstr
obj:/lib/tls/libpthread.so.*
obj:/lib/tls/libpthread.so.*
fun:call_init
fun:_dl_init
obj:/lib/ld-*.so
}
{
strlen/_dl_init_paths/dl_main/_dl_sysdep_start(Cond)
Memcheck:Cond
fun:strlen
fun:_dl_init_paths
fun:dl_main
fun:_dl_sysdep_start
}
{
pthread errno
Memcheck:Leak

View file

@ -182,6 +182,8 @@ static const char * ndb_connected_host= 0;
static long ndb_connected_port= 0;
static long ndb_number_of_replicas= 0;
long ndb_number_of_storage_nodes= 0;
long ndb_number_of_ready_storage_nodes= 0;
long ndb_connect_count= 0;
static int update_status_variables(Ndb_cluster_connection *c)
{
@ -190,6 +192,8 @@ static int update_status_variables(Ndb_cluster_connection *c)
ndb_connected_host= c->get_connected_host();
ndb_number_of_replicas= 0;
ndb_number_of_storage_nodes= c->no_db_nodes();
ndb_number_of_ready_storage_nodes= c->get_no_ready();
ndb_connect_count= c->get_connect_count();
return 0;
}
@ -7128,10 +7132,6 @@ void ndbcluster_real_free_share(NDB_SHARE **share)
#ifndef DBUG_OFF
bzero((gptr)(*share)->table_share, sizeof(*(*share)->table_share));
bzero((gptr)(*share)->table, sizeof(*(*share)->table));
#endif
my_free((gptr) (*share)->table_share, MYF(0));
my_free((gptr) (*share)->table, MYF(0));
#ifndef DBUG_OFF
(*share)->table_share= 0;
(*share)->table= 0;
#endif
@ -9361,11 +9361,15 @@ ndbcluster_show_status(THD* thd, stat_print_fn *stat_print,
"cluster_node_id=%u, "
"connected_host=%s, "
"connected_port=%u, "
"number_of_storage_nodes=%u",
"number_of_storage_nodes=%u, "
"number_of_ready_storage_nodes=%u, "
"connect_count=%u",
ndb_cluster_node_id,
ndb_connected_host,
ndb_connected_port,
ndb_number_of_storage_nodes);
ndb_number_of_storage_nodes,
ndb_number_of_ready_storage_nodes,
ndb_connect_count);
if (stat_print(thd, ndbcluster_hton.name, strlen(ndbcluster_hton.name),
"connection", strlen("connection"),
buf, buflen))

View file

@ -113,6 +113,7 @@ typedef struct st_ndbcluster_share {
char *old_names; // for rename table
TABLE_SHARE *table_share;
TABLE *table;
byte *record[2]; // pointer to allocated records for receiving data
NdbValue *ndb_value[2];
MY_BITMAP *subscriber_bitmap;
#endif

View file

@ -25,6 +25,7 @@
#include "slave.h"
#include "ha_ndbcluster_binlog.h"
#include "NdbDictionary.hpp"
#include <util/NdbAutoPtr.hpp>
#ifdef ndb_dynamite
#undef assert
@ -265,7 +266,8 @@ ndbcluster_binlog_close_table(THD *thd, NDB_SHARE *share)
static int
ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
TABLE_SHARE *table_share, TABLE *table)
TABLE_SHARE *table_share, TABLE *table,
int reopen)
{
int error;
DBUG_ENTER("ndbcluster_binlog_open_table");
@ -278,27 +280,34 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
share->key, error);
DBUG_PRINT("error", ("open_table_def failed %d", error));
free_table_share(table_share);
my_free((gptr) table_share, MYF(0));
my_free((gptr) table, MYF(0));
DBUG_RETURN(error);
}
if ((error= open_table_from_share(thd, table_share, "", 0,
if ((error= open_table_from_share(thd, table_share, "", 0 /* don't allocate buffers */,
(uint) READ_ALL, 0, table, FALSE)))
{
sql_print_error("Unable to open table for %s, error=%d(%d)",
share->key, error, my_errno);
DBUG_PRINT("error", ("open_table_from_share failed %d", error));
free_table_share(table_share);
my_free((gptr) table_share, MYF(0));
my_free((gptr) table, MYF(0));
DBUG_RETURN(error);
}
assign_new_table_id(table_share);
if (!table->record[1] || table->record[1] == table->record[0])
if (!reopen)
{
table->record[1]= alloc_root(&table->mem_root,
table->s->rec_buff_length);
// allocate memory on ndb share so it can be reused after online alter table
share->record[0]= (byte*) alloc_root(&share->mem_root, table->s->rec_buff_length);
share->record[1]= (byte*) alloc_root(&share->mem_root, table->s->rec_buff_length);
}
{
my_ptrdiff_t row_offset= share->record[0] - table->record[0];
Field **p_field;
for (p_field= table->field; *p_field; p_field++)
(*p_field)->move_field_offset(row_offset);
table->record[0]= share->record[0];
table->record[1]= share->record[1];
}
table->in_use= injector_thd;
table->s->db.str= share->db;
@ -366,10 +375,9 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
while (1)
{
int error;
TABLE_SHARE *table_share=
(TABLE_SHARE *) my_malloc(sizeof(*table_share), MYF(MY_WME));
TABLE *table= (TABLE*) my_malloc(sizeof(*table), MYF(MY_WME));
if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table)))
TABLE_SHARE *table_share= (TABLE_SHARE *) alloc_root(mem_root, sizeof(*table_share));
TABLE *table= (TABLE*) alloc_root(mem_root, sizeof(*table));
if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table, 0)))
break;
/*
! do not touch the contents of the table
@ -1535,6 +1543,10 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
sql_print_information("NDB: Failed write frm for %s.%s, error %d",
dbname, tabname, error);
}
// copy names as memory will be freed
NdbAutoPtr<char> a1((char *)(dbname= strdup(dbname)));
NdbAutoPtr<char> a2((char *)(tabname= strdup(tabname)));
ndbcluster_binlog_close_table(thd, share);
TABLE_LIST table_list;
@ -1543,10 +1555,16 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
table_list.alias= table_list.table_name= (char *)tabname;
close_cached_tables(thd, 0, &table_list, TRUE);
if ((error= ndbcluster_binlog_open_table(thd, share,
table_share, table)))
if ((error= ndbcluster_binlog_open_table(thd, share,
table_share, table, 1)))
sql_print_information("NDB: Failed to re-open table %s.%s",
dbname, tabname);
table= share->table;
table_share= share->table_share;
dbname= table_share->db.str;
tabname= table_share->table_name.str;
pthread_mutex_unlock(&LOCK_open);
}
my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
@ -1776,7 +1794,8 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
break;
case NDBEVENT::TE_CLUSTER_FAILURE:
if (ndb_extra_logging)
sql_print_information("NDB Binlog: cluster failure for %s.", schema_share->key);
sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.",
schema_share->key, (unsigned) pOp->getGCI());
// fall through
case NDBEVENT::TE_DROP:
if (ndb_extra_logging &&
@ -1785,7 +1804,6 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
"read only on reconnect.");
free_share(&schema_share);
schema_share= 0;
ndb_binlog_tables_inited= FALSE;
close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, FALSE);
// fall through
case NDBEVENT::TE_ALTER:
@ -2829,7 +2847,8 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
{
case NDBEVENT::TE_CLUSTER_FAILURE:
if (ndb_extra_logging)
sql_print_information("NDB Binlog: cluster failure for %s.", share->key);
sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.",
share->key, (unsigned) pOp->getGCI());
if (apply_status_share == share)
{
if (ndb_extra_logging &&
@ -2838,7 +2857,6 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
"read only on reconnect.");
free_share(&apply_status_share);
apply_status_share= 0;
ndb_binlog_tables_inited= FALSE;
}
DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: "
"%s received share: 0x%lx op: %lx share op: %lx "
@ -2854,7 +2872,6 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
"read only on reconnect.");
free_share(&apply_status_share);
apply_status_share= 0;
ndb_binlog_tables_inited= FALSE;
}
/* ToDo: remove printout */
if (ndb_extra_logging)
@ -3267,46 +3284,43 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
pthread_mutex_unlock(&injector_mutex);
pthread_cond_signal(&injector_cond);
thd->proc_info= "Waiting for ndbcluster to start";
pthread_mutex_lock(&injector_mutex);
while (!schema_share ||
(ndb_binlog_running && !apply_status_share))
{
/* ndb not connected yet */
struct timespec abstime;
set_timespec(abstime, 1);
pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
if (abort_loop)
{
pthread_mutex_unlock(&injector_mutex);
goto err;
}
}
pthread_mutex_unlock(&injector_mutex);
restart:
/*
Main NDB Injector loop
*/
{
thd->proc_info= "Waiting for ndbcluster to start";
DBUG_ASSERT(ndbcluster_hton.slot != ~(uint)0);
if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
{
sql_print_error("Could not allocate Thd_ndb object");
goto err;
}
set_thd_ndb(thd, thd_ndb);
thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
thd->query_id= 0; // to keep valgrind quiet
{
static char db[]= "";
thd->db= db;
if (ndb_binlog_running)
open_binlog_index(thd, &binlog_tables, &binlog_index);
thd->db= db;
pthread_mutex_lock(&injector_mutex);
while (!schema_share ||
(ndb_binlog_running && !apply_status_share))
{
/* ndb not connected yet */
struct timespec abstime;
set_timespec(abstime, 1);
pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
if (abort_loop)
{
pthread_mutex_unlock(&injector_mutex);
goto err;
}
}
pthread_mutex_unlock(&injector_mutex);
if (thd_ndb == NULL)
{
DBUG_ASSERT(ndbcluster_hton.slot != ~(uint)0);
if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
{
sql_print_error("Could not allocate Thd_ndb object");
goto err;
}
set_thd_ndb(thd, thd_ndb);
thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
thd->query_id= 0; // to keep valgrind quiet
}
}
restart:
{
// wait for the first event
thd->proc_info= "Waiting for first event from ndbcluster";
@ -3321,6 +3335,9 @@ restart:
DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci));
if (schema_res > 0)
{
i_ndb->pollEvents(0);
i_ndb->flushIncompleteEvents(schema_gci);
s_ndb->flushIncompleteEvents(schema_gci);
if (schema_gci < ndb_latest_handled_binlog_epoch)
{
sql_print_error("NDB Binlog: cluster has been restarted --initial or with older filesystem. "
@ -3334,7 +3351,13 @@ restart:
}
}
}
{
static char db[]= "";
thd->db= db;
if (ndb_binlog_running)
open_binlog_index(thd, &binlog_tables, &binlog_index);
thd->db= db;
}
do_ndbcluster_binlog_close_connection= BCCC_running;
for ( ; !((abort_loop || do_ndbcluster_binlog_close_connection) &&
ndb_latest_handled_binlog_epoch >= g_latest_trans_gci) &&
@ -3683,7 +3706,12 @@ restart:
ndb_latest_handled_binlog_epoch= ndb_latest_received_binlog_epoch;
}
if (do_ndbcluster_binlog_close_connection == BCCC_restart)
{
ndb_binlog_tables_inited= FALSE;
close_thread_tables(thd);
binlog_index= 0;
goto restart;
}
err:
DBUG_PRINT("info",("Shutting down cluster binlog thread"));
thd->proc_info= "Shutting down";

View file

@ -304,6 +304,7 @@ Item::Item():
marker= 0;
maybe_null=null_value=with_sum_func=unsigned_flag=0;
decimals= 0; max_length= 0;
with_subselect= 0;
/* Put item in free list so that we can free all items at end */
THD *thd= current_thd;
@ -4881,7 +4882,16 @@ void Item_ref::cleanup()
void Item_ref::print(String *str)
{
if (ref)
(*ref)->print(str);
{
if ((*ref)->type() != Item::CACHE_ITEM && ref_type() != VIEW_REF &&
name && alias_name_used)
{
THD *thd= current_thd;
append_identifier(thd, str, name, (uint) strlen(name));
}
else
(*ref)->print(str);
}
else
Item_ident::print(str);
}
@ -5413,7 +5423,7 @@ bool Item_trigger_field::eq(const Item *item, bool binary_cmp) const
}
void Item_trigger_field::set_required_privilege(const bool rw)
void Item_trigger_field::set_required_privilege(bool rw)
{
/*
Require SELECT and UPDATE privilege if this field will be read and

View file

@ -490,6 +490,9 @@ public:
my_bool is_autogenerated_name; /* indicate was name of this Item
autogenerated or set by user */
DTCollation collation;
my_bool with_subselect; /* If this item is a subselect or some
of its arguments is or contains a
subselect */
// alloc & destruct is done as start of select using sql_alloc
Item();
@ -2233,7 +2236,7 @@ public:
void cleanup();
private:
void set_required_privilege(const bool rw);
void set_required_privilege(bool rw);
bool set_value(THD *thd, sp_rcontext *ctx, Item **it);
public:

View file

@ -204,10 +204,28 @@ longlong Item_func_nop_all::val_int()
/*
Convert a constant expression or string to an integer.
This is done when comparing DATE's of different formats and
also when comparing bigint to strings (in which case the string
is converted once to a bigint).
Convert a constant item to an int and replace the original item
SYNOPSIS
convert_constant_item()
thd thread handle
field item will be converted using the type of this field
item [in/out] reference to the item to convert
DESCRIPTION
The function converts a constant expression or string to an integer.
On successful conversion the original item is substituted for the
result of the item evaluation.
This is done when comparing DATE/TIME of different formats and
also when comparing bigint to strings (in which case strings
are converted to bigints).
NOTES
This function is called only at prepare stage.
As all derived tables are filled only after all derived tables
are prepared we do not evaluate items with subselects here because
they can contain derived tables and thus we may attempt to use a
table that has not been populated yet.
RESULT VALUES
0 Can't convert item
@ -216,7 +234,7 @@ longlong Item_func_nop_all::val_int()
static bool convert_constant_item(THD *thd, Field *field, Item **item)
{
if ((*item)->const_item())
if (!(*item)->with_subselect && (*item)->const_item())
{
/* For comparison purposes allow invalid dates like 2000-01-32 */
ulong orig_sql_mode= field->table->in_use->variables.sql_mode;
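The documentation block in the hunk above states the intent: fold a constant comparison argument to an integer at prepare time, but skip any item whose with_subselect flag is set, because a subselect may read a derived table that has not been populated yet. A minimal sketch of that propagate-and-guard pattern follows; Expr, fix() and convert_constant() are hypothetical stand-ins, not the real Item hierarchy.

#include <memory>
#include <vector>

// Hypothetical expression node; with_subselect mirrors the new Item flag.
struct Expr {
  bool with_subselect = false;   // set on subquery nodes, OR-ed upward
  bool is_const = true;
  long long value = 0;
  std::vector<std::unique_ptr<Expr>> args;

  // Parents aggregate the flags of their arguments, as fix_fields() does.
  void fix() {
    for (auto &a : args) {
      a->fix();
      with_subselect |= a->with_subselect;
      is_const = is_const && a->is_const;
    }
  }
};

// Fold only items that are constant *and* free of subselects, since a
// subselect may reference a derived table not yet filled at prepare time.
static bool convert_constant(std::unique_ptr<Expr> &item) {
  if (!item->with_subselect && item->is_const) {
    auto folded = std::make_unique<Expr>();
    folded->value = item->value;   // placeholder for the real evaluation
    item = std::move(folded);
    return true;
  }
  return false;
}

int main() {
  auto e = std::make_unique<Expr>();
  e->fix();
  return convert_constant(e) ? 0 : 1;
}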
@ -2570,7 +2588,9 @@ Item_cond::fix_fields(THD *thd, Item **ref)
(item= *li.ref())->check_cols(1))
return TRUE; /* purecov: inspected */
used_tables_cache|= item->used_tables();
if (!item->const_item())
if (item->const_item())
and_tables_cache= (table_map) 0;
else
{
tmp_table_map= item->not_null_tables();
not_null_tables_cache|= tmp_table_map;
@ -2578,6 +2598,7 @@ Item_cond::fix_fields(THD *thd, Item **ref)
const_item_cache= FALSE;
}
with_sum_func= with_sum_func || item->with_sum_func;
with_subselect|= item->with_subselect;
if (item->maybe_null)
maybe_null=1;
}

View file

@ -184,6 +184,7 @@ Item_func::fix_fields(THD *thd, Item **ref)
used_tables_cache|= item->used_tables();
not_null_tables_cache|= item->not_null_tables();
const_item_cache&= item->const_item();
with_subselect|= item->with_subselect;
}
}
fix_length_and_dec();

View file

@ -542,7 +542,7 @@ public:
void fix_length_and_dec()
{
collation.set(default_charset());
decimals=0; max_length=64;
max_length= 64;
}
};

View file

@ -39,6 +39,7 @@ Item_subselect::Item_subselect():
engine(0), old_engine(0), used_tables_cache(0), have_to_be_excluded(0),
const_item_cache(1), engine_changed(0), changed(0)
{
with_subselect= 1;
reset();
/*
item value is NULL if select_subselect not changed this value

View file

@ -1498,6 +1498,7 @@ extern my_bool locked_in_memory;
extern bool opt_using_transactions, mysqld_embedded;
extern bool using_update_log, opt_large_files, server_id_supplied;
extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log;
extern my_bool opt_log_queries_not_using_indexes;
extern bool opt_disable_networking, opt_skip_show_db;
extern my_bool opt_character_set_client_handshake;
extern bool volatile abort_loop, shutdown_in_progress, grant_option;

View file

@ -318,7 +318,6 @@ static bool volatile ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_bdb, opt_isam, opt_ndbcluster;
static my_bool opt_short_log_format= 0;
static my_bool opt_log_queries_not_using_indexes= 0;
static uint kill_cached_threads, wake_thread;
static ulong killed_threads, thread_created;
static ulong max_used_connections;
@ -344,6 +343,7 @@ static my_bool opt_sync_bdb_logs;
/* Global variables */
bool opt_log, opt_update_log, opt_bin_log, opt_slow_log;
my_bool opt_log_queries_not_using_indexes= 0;
bool opt_error_log= IF_WIN(1,0);
bool opt_disable_networking=0, opt_skip_show_db=0;
my_bool opt_character_set_client_handshake= 1;
@ -693,6 +693,7 @@ my_bool opt_enable_shared_memory;
HANDLE smem_event_connect_request= 0;
#endif
#define SSL_VARS_NOT_STATIC
#include "sslopt-vars.h"
#ifdef HAVE_OPENSSL
#include <openssl/crypto.h>
@ -1043,7 +1044,8 @@ static void __cdecl kill_server(int sig_ptr)
RETURN_FROM_KILL_SERVER;
kill_in_progress=TRUE;
abort_loop=1; // This should be set
my_sigset(sig,SIG_IGN);
if (sig != 0) // 0 is not a valid signal number
my_sigset(sig,SIG_IGN);
if (sig == MYSQL_KILL_SIGNAL || sig == 0)
sql_print_information(ER(ER_NORMAL_SHUTDOWN),my_progname);
else

View file

@ -52,6 +52,10 @@
#include <signal.h>
#include <errno.h>
#ifdef __NETWARE__
#include <sys/select.h>
#endif
#ifdef EMBEDDED_LIBRARY
#undef MYSQL_SERVER
#undef MYSQL_CLIENT

View file

@ -162,6 +162,7 @@ void fix_sql_mode_var(THD *thd, enum_var_type type);
static byte *get_error_count(THD *thd);
static byte *get_warning_count(THD *thd);
static byte *get_prepared_stmt_count(THD *thd);
static byte *get_tmpdir(THD *thd);
/*
Variable definition list
@ -184,6 +185,7 @@ sys_var_thd_ulong sys_auto_increment_offset("auto_increment_offset",
sys_var_bool_ptr sys_automatic_sp_privileges("automatic_sp_privileges",
&sp_automatic_privileges);
sys_var_const_str sys_basedir("basedir", mysql_home);
sys_var_long_ptr sys_binlog_cache_size("binlog_cache_size",
&binlog_cache_size);
sys_var_thd_binlog_format sys_binlog_format("binlog_format",
@ -209,6 +211,7 @@ sys_var_long_ptr sys_concurrent_insert("concurrent_insert",
&myisam_concurrent_insert);
sys_var_long_ptr sys_connect_timeout("connect_timeout",
&connect_timeout);
sys_var_const_str sys_datadir("datadir", mysql_real_data_home);
#ifndef DBUG_OFF
sys_var_thd_dbug sys_dbug("debug");
#endif
@ -262,6 +265,9 @@ sys_trust_routine_creators("log_bin_trust_routine_creators",
sys_var_bool_ptr
sys_trust_function_creators("log_bin_trust_function_creators",
&trust_function_creators);
sys_var_bool_ptr
sys_log_queries_not_using_indexes("log_queries_not_using_indexes",
&opt_log_queries_not_using_indexes);
sys_var_thd_ulong sys_log_warnings("log_warnings", &SV::log_warnings);
sys_var_thd_ulong sys_long_query_time("long_query_time",
&SV::long_query_time);
@ -389,6 +395,7 @@ sys_var_thd_ulong sys_query_alloc_block_size("query_alloc_block_size",
sys_var_thd_ulong sys_query_prealloc_size("query_prealloc_size",
&SV::query_prealloc_size,
0, fix_thd_mem_root);
sys_var_readonly sys_tmpdir("tmpdir", OPT_GLOBAL, SHOW_CHAR, get_tmpdir);
sys_var_thd_ulong sys_trans_alloc_block_size("transaction_alloc_block_size",
&SV::trans_alloc_block_size,
0, fix_trans_mem_root);
@ -425,6 +432,21 @@ sys_var_thd_ulong sys_sort_buffer("sort_buffer_size",
&SV::sortbuff_size);
sys_var_thd_sql_mode sys_sql_mode("sql_mode",
&SV::sql_mode);
#ifdef HAVE_OPENSSL
extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher,
*opt_ssl_key;
sys_var_const_str_ptr sys_ssl_ca("ssl_ca", &opt_ssl_ca);
sys_var_const_str_ptr sys_ssl_capath("ssl_capath", &opt_ssl_capath);
sys_var_const_str_ptr sys_ssl_cert("ssl_cert", &opt_ssl_cert);
sys_var_const_str_ptr sys_ssl_cipher("ssl_cipher", &opt_ssl_cipher);
sys_var_const_str_ptr sys_ssl_key("ssl_key", &opt_ssl_key);
#else
sys_var_const_str sys_ssl_ca("ssl_ca", NULL);
sys_var_const_str sys_ssl_capath("ssl_capath", NULL);
sys_var_const_str sys_ssl_cert("ssl_cert", NULL);
sys_var_const_str sys_ssl_cipher("ssl_cipher", NULL);
sys_var_const_str sys_ssl_key("ssl_key", NULL);
#endif
sys_var_thd_enum
sys_updatable_views_with_limit("updatable_views_with_limit",
&SV::updatable_views_with_limit,
@ -696,7 +718,6 @@ static int show_slave_skip_errors(THD *thd, SHOW_VAR *var, char *buff)
}
#endif /* HAVE_REPLICATION */
/*
Variables shown by SHOW variables in alphabetical order
*/
@ -706,7 +727,7 @@ SHOW_VAR init_vars[]= {
{"auto_increment_offset", (char*) &sys_auto_increment_offset, SHOW_SYS},
{sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS},
{"back_log", (char*) &back_log, SHOW_LONG},
{"basedir", mysql_home, SHOW_CHAR},
{sys_basedir.name, (char*) &sys_basedir, SHOW_SYS},
{"bdb_cache_parts", (char*) &berkeley_cache_parts, SHOW_LONG},
{"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONGLONG},
{"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
@ -733,7 +754,7 @@ SHOW_VAR init_vars[]= {
{sys_completion_type.name, (char*) &sys_completion_type, SHOW_SYS},
{sys_concurrent_insert.name,(char*) &sys_concurrent_insert, SHOW_SYS},
{sys_connect_timeout.name, (char*) &sys_connect_timeout, SHOW_SYS},
{"datadir", mysql_real_data_home, SHOW_CHAR},
{sys_datadir.name, (char*) &sys_datadir, SHOW_SYS},
{sys_date_format.name, (char*) &sys_date_format, SHOW_SYS},
{sys_datetime_format.name, (char*) &sys_datetime_format, SHOW_SYS},
#ifndef DBUG_OFF
@ -833,6 +854,8 @@ SHOW_VAR init_vars[]= {
{"log_bin", (char*) &opt_bin_log, SHOW_BOOL},
{sys_trust_function_creators.name,(char*) &sys_trust_function_creators, SHOW_SYS},
{"log_error", (char*) log_error_file, SHOW_CHAR},
{sys_log_queries_not_using_indexes.name,
(char*) &sys_log_queries_not_using_indexes, SHOW_SYS},
#ifdef HAVE_REPLICATION
{"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL},
#endif
@ -962,6 +985,11 @@ SHOW_VAR init_vars[]= {
{sys_sql_mode.name, (char*) &sys_sql_mode, SHOW_SYS},
{"sql_notes", (char*) &sys_sql_notes, SHOW_SYS},
{"sql_warnings", (char*) &sys_sql_warnings, SHOW_SYS},
{sys_ssl_ca.name, (char*) &sys_ssl_ca, SHOW_SYS},
{sys_ssl_capath.name, (char*) &sys_ssl_capath, SHOW_SYS},
{sys_ssl_cert.name, (char*) &sys_ssl_cert, SHOW_SYS},
{sys_ssl_cipher.name, (char*) &sys_ssl_cipher, SHOW_SYS},
{sys_ssl_key.name, (char*) &sys_ssl_key, SHOW_SYS},
{sys_storage_engine.name, (char*) &sys_storage_engine, SHOW_SYS},
#ifdef HAVE_REPLICATION
{sys_sync_binlog_period.name,(char*) &sys_sync_binlog_period, SHOW_SYS},
@ -983,7 +1011,7 @@ SHOW_VAR init_vars[]= {
{"time_zone", (char*) &sys_time_zone, SHOW_SYS},
{sys_timed_mutexes.name, (char*) &sys_timed_mutexes, SHOW_SYS},
{sys_tmp_table_size.name, (char*) &sys_tmp_table_size, SHOW_SYS},
{"tmpdir", (char*) &opt_mysql_tmpdir, SHOW_CHAR_PTR},
{sys_tmpdir.name, (char*) &sys_tmpdir, SHOW_SYS},
{sys_trans_alloc_block_size.name, (char*) &sys_trans_alloc_block_size,
SHOW_SYS},
{sys_trans_prealloc_size.name, (char*) &sys_trans_prealloc_size, SHOW_SYS},
@ -2855,6 +2883,31 @@ static byte *get_prepared_stmt_count(THD *thd)
return (byte*) &thd->sys_var_tmp.ulong_value;
}
/*
Get the tmpdir that was specified or chosen by default
SYNOPSIS
get_tmpdir()
thd thread handle
DESCRIPTION
This is necessary because if the user does not specify a temporary
directory via the command line, one is chosen based on the environment
or system defaults. But we can't just always use mysql_tmpdir, because
that is actually a call to my_tmpdir() which cycles among possible
temporary directories.
RETURN VALUES
ptr pointer to NUL-terminated string
*/
static byte *get_tmpdir(THD *thd)
{
if (opt_mysql_tmpdir)
return (byte *)opt_mysql_tmpdir;
return (byte*)mysql_tmpdir;
}
/****************************************************************************
Main handling of variables:
- Initialisation

View file

@ -231,6 +231,35 @@ public:
};
class sys_var_const_str_ptr :public sys_var
{
public:
char **value; // Pointer to const value
sys_var_const_str_ptr(const char *name_arg, char **value_arg)
:sys_var(name_arg),value(value_arg)
{}
bool check(THD *thd, set_var *var)
{
return 1;
}
bool update(THD *thd, set_var *var)
{
return 1;
}
SHOW_TYPE type() { return SHOW_CHAR; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{
return (byte*) *value;
}
bool check_update_type(Item_result type)
{
return 1;
}
bool check_default(enum_var_type type) { return 1; }
bool is_readonly() const { return 1; }
};
class sys_var_enum :public sys_var
{
uint *value;
View file
@ -6154,20 +6154,21 @@ void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant,
}
/* table privileges */
rw_rdlock(&LOCK_grant);
if (grant->version != grant_version)
{
rw_rdlock(&LOCK_grant);
grant->grant_table=
table_hash_search(sctx->host, sctx->ip, db,
sctx->priv_user,
table, 0); /* purecov: inspected */
grant->version= grant_version; /* purecov: inspected */
rw_unlock(&LOCK_grant);
}
if (grant->grant_table != 0)
{
grant->privilege|= grant->grant_table->privs;
}
rw_unlock(&LOCK_grant);
DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege));
DBUG_VOID_RETURN;
}
View file
@ -146,6 +146,11 @@ static byte* tina_get_key(TINA_SHARE *share,uint *length,
int get_mmap(TINA_SHARE *share, int write)
{
DBUG_ENTER("ha_tina::get_mmap");
#ifdef __NETWARE__
my_message(errno, "Sorry, no mmap() on Netware", 0);
DBUG_ASSERT(0);
DBUG_RETURN(1);
#else
if (share->mapped_file && my_munmap(share->mapped_file,
share->file_stat.st_size))
DBUG_RETURN(1);
@ -180,6 +185,7 @@ int get_mmap(TINA_SHARE *share, int write)
share->mapped_file= NULL;
DBUG_RETURN(0);
#endif /* __NETWARE__ */
}
View file
@ -1262,6 +1262,7 @@ public:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
int flushIncompleteEvents(Uint64 gci);
NdbEventOperation *getEventOperation(NdbEventOperation* eventOp= 0);
Uint64 getLatestGCI();
void forceGCP();
View file
@ -1132,7 +1132,8 @@ public:
_TE_NODE_FAILURE=10,
_TE_SUBSCRIBE=11,
_TE_UNSUBSCRIBE=12,
_TE_NUL=13 // internal (e.g. INS o DEL within same GCI)
_TE_NUL=13, // internal (e.g. INS o DEL within same GCI)
_TE_ACTIVE=14 // internal (node becomes active)
};
#endif
/**
View file
@ -106,7 +106,8 @@ public:
void stopSessions(bool wait = false);
void foreachSession(void (*f)(Session*, void*), void *data);
void checkSessions();
private:
struct SessionInstance {
Service * m_service;
@ -117,12 +118,13 @@ private:
Service * m_service;
NDB_SOCKET_TYPE m_socket;
};
MutexVector<SessionInstance> m_sessions;
NdbLockable m_session_mutex;
Vector<SessionInstance> m_sessions;
MutexVector<ServiceInstance> m_services;
unsigned m_maxSessions;
void doAccept();
void checkSessions();
void checkSessionsImpl();
void startSession(SessionInstance &);
/**
View file
@ -184,9 +184,12 @@ SocketServer::doAccept(){
SessionInstance s;
s.m_service = si.m_service;
s.m_session = si.m_service->newSession(childSock);
if(s.m_session != 0){
if(s.m_session != 0)
{
m_session_mutex.lock();
m_sessions.push_back(s);
startSession(m_sessions.back());
m_session_mutex.unlock();
}
continue;
@ -240,10 +243,13 @@ void
SocketServer::doRun(){
while(!m_stopThread){
checkSessions();
m_session_mutex.lock();
checkSessionsImpl();
if(m_sessions.size() < m_maxSessions){
m_session_mutex.unlock();
doAccept();
} else {
m_session_mutex.unlock();
NdbSleep_MilliSleep(200);
}
}
@ -276,17 +282,30 @@ transfer(NDB_SOCKET_TYPE sock){
void
SocketServer::foreachSession(void (*func)(SocketServer::Session*, void *), void *data)
{
m_session_mutex.lock();
for(int i = m_sessions.size() - 1; i >= 0; i--){
(*func)(m_sessions[i].m_session, data);
}
checkSessions();
m_session_mutex.unlock();
}
void
SocketServer::checkSessions(){
for(int i = m_sessions.size() - 1; i >= 0; i--){
if(m_sessions[i].m_session->m_stopped){
if(m_sessions[i].m_thread != 0){
SocketServer::checkSessions()
{
m_session_mutex.lock();
checkSessionsImpl();
m_session_mutex.unlock();
}
void
SocketServer::checkSessionsImpl()
{
for(int i = m_sessions.size() - 1; i >= 0; i--)
{
if(m_sessions[i].m_session->m_stopped)
{
if(m_sessions[i].m_thread != 0)
{
void* ret;
NdbThread_WaitFor(m_sessions[i].m_thread, &ret);
NdbThread_Destroy(&m_sessions[i].m_thread);
@ -301,19 +320,26 @@ SocketServer::checkSessions(){
void
SocketServer::stopSessions(bool wait){
int i;
m_session_mutex.lock();
for(i = m_sessions.size() - 1; i>=0; i--)
{
m_sessions[i].m_session->stopSession();
m_sessions[i].m_session->m_stop = true; // to make sure
}
m_session_mutex.unlock();
for(i = m_services.size() - 1; i>=0; i--)
m_services[i].m_service->stopSessions();
if(wait){
m_session_mutex.lock();
while(m_sessions.size() > 0){
checkSessions();
checkSessionsImpl();
m_session_mutex.unlock();
NdbSleep_MilliSleep(100);
m_session_mutex.lock();
}
m_session_mutex.unlock();
}
}
@ -348,4 +374,4 @@ sessionThread_C(void* _sc){
}
template class MutexVector<SocketServer::ServiceInstance>;
template class MutexVector<SocketServer::SessionInstance>;
template class Vector<SocketServer::SessionInstance>;
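As an aside, the pattern above — a public checkSessions() that takes m_session_mutex and a checkSessionsImpl() that assumes the caller already holds it, as doRun() and stopSessions() do — can be sketched in isolation. A minimal standalone illustration, with std::mutex and std::vector standing in for NdbLockable and MutexVector:

// Standalone sketch of the lock-wrapper / unlocked-impl split used above;
// std::mutex and std::vector stand in for NdbLockable and MutexVector.
#include <cstdio>
#include <mutex>
#include <vector>

class SessionTable {
  std::mutex m_session_mutex;
  std::vector<int> m_sessions;          // stand-in for SessionInstance records

  // Body assumes the caller already holds m_session_mutex.
  void checkSessionsImpl() {
    for (int i = (int)m_sessions.size() - 1; i >= 0; i--)
      if (m_sessions[i] < 0)            // "stopped" marker in this sketch
        m_sessions.erase(m_sessions.begin() + i);
  }

public:
  void add(int s) {
    std::lock_guard<std::mutex> g(m_session_mutex);
    m_sessions.push_back(s);
  }

  // Public entry point: take the lock, then run the shared body.
  void checkSessions() {
    std::lock_guard<std::mutex> g(m_session_mutex);
    checkSessionsImpl();
  }

  // A caller that needs the lock for its own test reuses the impl directly,
  // mirroring SocketServer::doRun() above.
  size_t liveCount() {
    std::lock_guard<std::mutex> g(m_session_mutex);
    checkSessionsImpl();
    return m_sessions.size();
  }
};

int main() {
  SessionTable t;
  t.add(1); t.add(-1); t.add(2);
  t.checkSessions();
  std::printf("live sessions: %zu\n", t.liveCount());   // prints 2
  return 0;
}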
View file
@ -48,58 +48,66 @@ read_socket(NDB_SOCKET_TYPE socket, int timeout_millis,
extern "C"
int
readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis,
readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis,
char * buf, int buflen){
if(buflen <= 1)
return 0;
int sock_flags= fcntl(socket, F_GETFL);
if(fcntl(socket, F_SETFL, sock_flags | O_NONBLOCK) == -1)
return -1;
fd_set readset;
FD_ZERO(&readset);
FD_SET(socket, &readset);
struct timeval timeout;
timeout.tv_sec = (timeout_millis / 1000);
timeout.tv_usec = (timeout_millis % 1000) * 1000;
const int selectRes = select(socket + 1, &readset, 0, 0, &timeout);
if(selectRes == 0)
if(selectRes == 0){
return 0;
}
if(selectRes == -1){
fcntl(socket, F_SETFL, sock_flags);
return -1;
}
int pos = 0; buf[pos] = 0;
while(true){
const int t = recv(socket, &buf[pos], 1, 0);
if(t != 1){
return -1;
}
if(buf[pos] == '\n'){
buf[pos] = 0;
if(pos > 0 && buf[pos-1] == '\r'){
pos--;
buf[pos] = 0;
buf[0] = 0;
const int t = recv(socket, buf, buflen, MSG_PEEK);
if(t < 1)
{
fcntl(socket, F_SETFL, sock_flags);
return -1;
}
for(int i=0; i< t;i++)
{
if(buf[i] == '\n'){
recv(socket, buf, i+1, 0);
buf[i] = 0;
if(i > 0 && buf[i-1] == '\r'){
i--;
buf[i] = 0;
}
return pos;
}
pos++;
if(pos == (buflen - 1)){
buf[pos] = 0;
return buflen;
}
FD_ZERO(&readset);
FD_SET(socket, &readset);
timeout.tv_sec = (timeout_millis / 1000);
timeout.tv_usec = (timeout_millis % 1000) * 1000;
const int selectRes = select(socket + 1, &readset, 0, 0, &timeout);
if(selectRes != 1){
return -1;
fcntl(socket, F_SETFL, sock_flags);
return t;
}
}
if(t == (buflen - 1)){
recv(socket, buf, t, 0);
buf[t] = 0;
fcntl(socket, F_SETFL, sock_flags);
return buflen;
}
return 0;
}
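The rewritten readln_socket() above replaces the byte-at-a-time loop with a peek of the pending data (MSG_PEEK), a scan for '\n', and a second recv() that consumes only up to and including that newline. A minimal standalone sketch of the same peek-then-consume pattern on a socketpair; it deliberately omits the timeout and non-blocking handling of the real function, and readln_peek is a hypothetical name:

// Standalone sketch of the MSG_PEEK line-read pattern used above (POSIX only).
// Peek the pending bytes, find '\n', then consume exactly that many bytes,
// leaving the rest of the stream for the next call. No timeouts here.
#include <cstdio>
#include <cstring>
#include <sys/socket.h>
#include <unistd.h>

static int readln_peek(int fd, char *buf, int buflen) {
  int t = (int)recv(fd, buf, buflen - 1, MSG_PEEK);     // look, don't consume
  if (t < 1)
    return -1;
  for (int i = 0; i < t; i++) {
    if (buf[i] == '\n') {
      recv(fd, buf, i + 1, 0);                          // consume through '\n'
      buf[i] = 0;
      if (i > 0 && buf[i - 1] == '\r')                  // strip CR of CRLF
        buf[i - 1] = 0;
      return i;
    }
  }
  recv(fd, buf, t, 0);                                  // no newline: take it all
  buf[t] = 0;
  return t;
}

int main() {
  int sv[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
    return 1;
  const char *msg = "get nodeid\r\nversion\n";
  send(sv[0], msg, strlen(msg), 0);
  close(sv[0]);                                         // EOF after the two lines
  char line[64];
  while (readln_peek(sv[1], line, sizeof(line)) > 0)
    std::printf("line: '%s'\n", line);                  // prints the two lines
  close(sv[1]);
  return 0;
}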
extern "C"
View file
@ -1926,6 +1926,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
// word read. Thus we set the register to be a 32 bit register.
/* ------------------------------------------------------------- */
TregMemBuffer[theRegister]= 0x50;
// arithmetic conversion if big-endian
* (Int64*)(TregMemBuffer+theRegister+2)=
TregMemBuffer[theRegister+1];
} else if (TnoDataRW == 3) {
@ -1985,6 +1986,11 @@ int Dbtup::interpreterNextLab(Signal* signal,
Tlen= TattrNoOfWords + 1;
if (Toptype == ZUPDATE) {
if (TattrNoOfWords <= 2) {
if (TattrNoOfWords == 1) {
// arithmetic conversion if big-endian
TdataForUpdate[1] = *(Int64*)&TregMemBuffer[theRegister + 2];
TdataForUpdate[2] = 0;
}
if (TregType == 0) {
/* --------------------------------------------------------- */
// Write a NULL value into the attribute
View file
@ -2649,6 +2649,22 @@ Suma::reportAllSubscribers(Signal *signal,
SubscriptionPtr subPtr,
SubscriberPtr subbPtr)
{
SubTableData * data = (SubTableData*)signal->getDataPtrSend();
if (table_event == NdbDictionary::Event::_TE_SUBSCRIBE)
{
data->gci = m_last_complete_gci + 1;
data->tableId = subPtr.p->m_tableId;
data->operation = NdbDictionary::Event::_TE_ACTIVE;
data->ndbd_nodeid = refToNode(reference());
data->changeMask = 0;
data->totalLen = 0;
data->req_nodeid = refToNode(subbPtr.p->m_senderRef);
data->senderData = subbPtr.p->m_senderData;
sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
SubTableData::SignalLength, JBB);
}
if (!(subPtr.p->m_options & Subscription::REPORT_SUBSCRIBE))
{
return;
@ -2663,7 +2679,6 @@ Suma::reportAllSubscribers(Signal *signal,
ndbout_c("reportAllSubscribers subPtr.i: %d subPtr.p->n_subscribers: %d",
subPtr.i, subPtr.p->n_subscribers);
//#endif
SubTableData * data = (SubTableData*)signal->getDataPtrSend();
data->gci = m_last_complete_gci + 1;
data->tableId = subPtr.p->m_tableId;
data->operation = table_event;
View file
@ -502,6 +502,7 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
ps.tick= tick;
m_mgmsrv.get_socket_server()->
foreachSession(stop_session_if_timed_out,&ps);
m_mgmsrv.get_socket_server()->checkSessions();
error_string = "";
continue;
}
@ -1559,6 +1560,7 @@ MgmApiSession::purge_stale_sessions(Parser_t::Context &ctx,
ps.free_nodes.bitXORC(NodeBitmask()); // invert connected_nodes to get free nodes
m_mgmsrv.get_socket_server()->foreachSession(stop_session_if_not_connected,&ps);
m_mgmsrv.get_socket_server()->checkSessions();
m_output->println("purge stale sessions reply");
if (str.length() > 0)
View file
@ -1324,6 +1324,12 @@ Ndb::pollEvents(int aMillisecondNumber, Uint64 *latestGCI)
return theEventBuffer->pollEvents(aMillisecondNumber, latestGCI);
}
int
Ndb::flushIncompleteEvents(Uint64 gci)
{
return theEventBuffer->flushIncompleteEvents(gci);
}
NdbEventOperation *Ndb::nextEvent()
{
return theEventBuffer->nextEvent();
View file
@ -3002,63 +3002,6 @@ NdbDictionaryImpl::removeCachedObject(NdbTableImpl & impl)
DBUG_RETURN(0);
}
/*****************************************************************
* Get index info
*/
NdbIndexImpl*
NdbDictionaryImpl::getIndexImpl(const char * externalName,
const BaseString& internalName)
{
ASSERT_NOT_MYSQLD;
Ndb_local_table_info * info = get_local_table_info(internalName);
if(info == 0){
m_error.code = 4243;
return 0;
}
NdbTableImpl * tab = info->m_table_impl;
if(tab->m_indexType == NdbDictionary::Object::TypeUndefined)
{
// Not an index
m_error.code = 4243;
return 0;
}
NdbTableImpl* prim = getTable(tab->m_primaryTable.c_str());
if(prim == 0){
m_error.code = 4243;
return 0;
}
return getIndexImpl(externalName, internalName, *tab, *prim);
}
NdbIndexImpl*
NdbDictionaryImpl::getIndexImpl(const char * externalName,
const BaseString& internalName,
NdbTableImpl &tab,
NdbTableImpl &prim)
{
DBUG_ENTER("NdbDictionaryImpl::getIndexImpl");
DBUG_ASSERT(tab.m_indexType != NdbDictionary::Object::TypeUndefined);
/**
* Create index impl
*/
NdbIndexImpl* idx;
if(NdbDictInterface::create_index_obj_from_table(&idx, &tab, &prim) == 0){
idx->m_table = &tab;
idx->m_externalName.assign(externalName);
idx->m_internalName.assign(internalName);
idx->m_table_id = prim.getObjectId();
idx->m_table_version = prim.getObjectVersion();
// TODO Assign idx to tab->m_index
// Don't do it right now since assign can't asign a table with index
// tab->m_index = idx;
DBUG_RETURN(idx);
}
DBUG_RETURN(0);
}
int
NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
NdbTableImpl* tab,
@ -3116,6 +3059,9 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
tab->m_columns[i]->m_distributionKey = 0;
}
idx->m_table_id = prim->getObjectId();
idx->m_table_version = prim->getObjectVersion();
* dst = idx;
DBUG_PRINT("exit", ("m_id: %d m_version: %d", idx->m_id, idx->m_version));
DBUG_RETURN(0);
View file
@ -617,6 +617,7 @@ public:
get_local_table_info(const BaseString& internalTableName);
NdbIndexImpl * getIndex(const char * indexName,
const char * tableName);
NdbIndexImpl * getIndex(const char * indexName, const NdbTableImpl& prim);
NdbEventImpl * getEvent(const char * eventName, NdbTableImpl* = NULL);
NdbEventImpl * getBlobEvent(const NdbEventImpl& ev, uint col_no);
NdbEventImpl * getEventImpl(const char * internalName);
@ -958,51 +959,36 @@ NdbDictionaryImpl::get_local_table_info(const BaseString& internalTableName)
DBUG_RETURN(info); // autoincrement already initialized
}
class InitIndexGlobal : public GlobalCacheInitObject
{
public:
const char *m_index_name;
NdbTableImpl &m_prim;
InitIndexGlobal(NdbDictionaryImpl *dict,
const BaseString &internal_indexname,
const char *index_name,
NdbTableImpl &prim) :
GlobalCacheInitObject(dict, internal_indexname),
m_index_name(index_name),
m_prim(prim)
{}
int init(NdbTableImpl &tab) const
{
tab.m_index= m_dict->getIndexImpl(m_index_name, m_name, tab, m_prim);
if (tab.m_index == 0)
return 1;
tab.m_index->m_table= &tab;
return 0;
}
};
class InitIndex : public GlobalCacheInitObject
{
public:
const char *m_index_name;
const NdbTableImpl &m_prim;
InitIndex(NdbDictionaryImpl *dict,
const BaseString &internal_indexname,
const char *index_name) :
GlobalCacheInitObject(dict, internal_indexname),
m_index_name(index_name)
{}
int init(NdbTableImpl &tab) const
{
DBUG_ASSERT(tab.m_index == 0);
tab.m_index= m_dict->getIndexImpl(m_index_name, m_name);
if (tab.m_index)
InitIndex(const BaseString &internal_indexname,
const char *index_name,
const NdbTableImpl &prim) :
GlobalCacheInitObject(0, internal_indexname),
m_index_name(index_name),
m_prim(prim)
{}
int init(NdbTableImpl &tab) const {
DBUG_ENTER("InitIndex::init");
DBUG_ASSERT(tab.m_indexType != NdbDictionary::Object::TypeUndefined);
/**
* Create index impl
*/
NdbIndexImpl* idx;
if(NdbDictInterface::create_index_obj_from_table(&idx, &tab, &m_prim) == 0)
{
tab.m_index->m_table= &tab;
return 0;
idx->m_table = &tab;
idx->m_externalName.assign(m_index_name);
idx->m_internalName.assign(m_name);
tab.m_index = idx;
DBUG_RETURN(0);
}
return 1;
DBUG_RETURN(1);
}
};
@ -1019,14 +1005,14 @@ NdbDictionaryImpl::getIndexGlobal(const char * index_name,
while (retry)
{
NdbTableImpl *tab=
fetchGlobalTableImplRef(InitIndexGlobal(this, internal_indexname,
index_name, ndbtab));
fetchGlobalTableImplRef(InitIndex(internal_indexname,
index_name, ndbtab));
if (tab)
{
// tab->m_index should be set. otherwise tab == 0
NdbIndexImpl *idx= tab->m_index;
if (idx->m_table_id != ndbtab.getObjectId() ||
idx->m_table_version != ndbtab.getObjectVersion())
if (idx->m_table_id != (unsigned)ndbtab.getObjectId() ||
idx->m_table_version != (unsigned)ndbtab.getObjectVersion())
{
releaseIndexGlobal(*idx, 1);
retry--;
@ -1067,41 +1053,54 @@ NdbIndexImpl *
NdbDictionaryImpl::getIndex(const char * index_name,
const char * table_name)
{
while (table_name || m_ndb.usingFullyQualifiedNames())
if (table_name == 0)
{
const BaseString internal_indexname(
(table_name)
?
m_ndb.internalize_index_name(getTable(table_name), index_name)
:
m_ndb.internalize_table_name(index_name)); // Index is also a table
if (internal_indexname.length())
{
Ndb_local_table_info *info= m_localHash.get(internal_indexname.c_str());
NdbTableImpl *tab;
if (info == 0)
{
tab= fetchGlobalTableImplRef(InitIndex(this, internal_indexname,
index_name));
if (tab)
{
info= Ndb_local_table_info::create(tab, 0);
if (info)
m_localHash.put(internal_indexname.c_str(), info);
else
break;
}
else
break;
}
else
tab= info->m_table_impl;
return tab->m_index;
}
break;
assert(0);
m_error.code= 4243;
return 0;
}
NdbTableImpl* prim = getTable(table_name);
if (prim == 0)
{
m_error.code= 4243;
return 0;
}
return getIndex(index_name, *prim);
}
inline
NdbIndexImpl *
NdbDictionaryImpl::getIndex(const char* index_name,
const NdbTableImpl& prim)
{
const BaseString
internal_indexname(m_ndb.internalize_index_name(&prim, index_name));
Ndb_local_table_info *info= m_localHash.get(internal_indexname.c_str());
NdbTableImpl *tab;
if (info == 0)
{
tab= fetchGlobalTableImplRef(InitIndex(internal_indexname,
index_name,
prim));
if (!tab)
goto err;
info= Ndb_local_table_info::create(tab, 0);
if (!info)
goto err;
m_localHash.put(internal_indexname.c_str(), info);
}
else
tab= info->m_table_impl;
return tab->m_index;
err:
m_error.code= 4243;
return 0;
}
View file
@ -153,11 +153,14 @@ NdbEventOperationImpl::init(NdbEventImpl& evnt)
m_state= EO_CREATED;
m_node_bit_mask.clear();
#ifdef ndb_event_stores_merge_events_flag
m_mergeEvents = m_eventImpl->m_mergeEvents;
#else
m_mergeEvents = false;
m_mergeEvents = false;
#endif
m_ref_count = 0;
DBUG_PRINT("info", ("m_ref_count = 0 for op: %p", this));
m_has_error= 0;
@ -530,7 +533,11 @@ NdbEventOperationImpl::execute_nolock()
}
}
if (r == 0)
{
m_ref_count++;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this));
DBUG_RETURN(0);
}
}
//Error
m_state= EO_ERROR;
@ -657,80 +664,79 @@ NdbEventOperationImpl::execSUB_TABLE_DATA(NdbApiSignal * signal,
int
NdbEventOperationImpl::receive_event()
{
DBUG_ENTER_EVENT("NdbEventOperationImpl::receive_event");
Uint32 operation= (Uint32)m_data_item->sdata->operation;
DBUG_PRINT_EVENT("info",("sdata->operation %u",operation));
if (operation == NdbDictionary::Event::_TE_ALTER)
{
// Parse the new table definition and
// create a table object
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
NdbDictionaryImpl *dict = & NdbDictionaryImpl::getImpl(*myDict);
NdbError error;
NdbDictInterface dif(error);
NdbTableImpl *at;
m_change_mask = m_data_item->sdata->changeMask;
error.code = dif.parseTableInfo(&at,
(Uint32*)m_buffer.get_data(),
m_buffer.length() / 4,
true);
m_buffer.clear();
if (at)
at->buildColumnHash();
else
{
DBUG_PRINT_EVENT("info", ("Failed to parse DictTabInfo error %u",
error.code));
DBUG_RETURN_EVENT(1);
}
NdbTableImpl *tmp_table_impl= m_eventImpl->m_tableImpl;
m_eventImpl->m_tableImpl = at;
DBUG_PRINT("info", ("switching table impl 0x%x -> 0x%x",
tmp_table_impl, at));
// change the rec attrs to refer to the new table object
int i;
for (i = 0; i < 2; i++)
{
NdbRecAttr *p = theFirstPkAttrs[i];
while (p)
{
int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x "
"switching column impl 0x%x -> 0x%x",
p, p->m_column, tAttrInfo));
p->m_column = tAttrInfo;
p = p->next();
}
}
for (i = 0; i < 2; i++)
{
NdbRecAttr *p = theFirstDataAttrs[i];
while (p)
{
int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x "
"switching column impl 0x%x -> 0x%x",
p, p->m_column, tAttrInfo));
p->m_column = tAttrInfo;
p = p->next();
}
}
if (tmp_table_impl)
delete tmp_table_impl;
}
if (unlikely(operation >= NdbDictionary::Event::_TE_FIRST_NON_DATA_EVENT))
{
DBUG_RETURN_EVENT(1);
DBUG_ENTER("NdbEventOperationImpl::receive_event");
DBUG_PRINT("info",("sdata->operation %u this: %p", operation, this));
if (operation == NdbDictionary::Event::_TE_ALTER)
{
// Parse the new table definition and
// create a table object
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
NdbDictionaryImpl *dict = & NdbDictionaryImpl::getImpl(*myDict);
NdbError error;
NdbDictInterface dif(error);
NdbTableImpl *at;
m_change_mask = m_data_item->sdata->changeMask;
error.code = dif.parseTableInfo(&at,
(Uint32*)m_buffer.get_data(),
m_buffer.length() / 4,
true);
m_buffer.clear();
if (unlikely(!at))
{
DBUG_PRINT("info", ("Failed to parse DictTabInfo error %u",
error.code));
ndbout_c("Failed to parse DictTabInfo error %u", error.code);
DBUG_RETURN(1);
}
at->buildColumnHash();
NdbTableImpl *tmp_table_impl= m_eventImpl->m_tableImpl;
m_eventImpl->m_tableImpl = at;
DBUG_PRINT("info", ("switching table impl 0x%x -> 0x%x",
tmp_table_impl, at));
// change the rec attrs to refer to the new table object
int i;
for (i = 0; i < 2; i++)
{
NdbRecAttr *p = theFirstPkAttrs[i];
while (p)
{
int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x "
"switching column impl 0x%x -> 0x%x",
p, p->m_column, tAttrInfo));
p->m_column = tAttrInfo;
p = p->next();
}
}
for (i = 0; i < 2; i++)
{
NdbRecAttr *p = theFirstDataAttrs[i];
while (p)
{
int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x "
"switching column impl 0x%x -> 0x%x",
p, p->m_column, tAttrInfo));
p->m_column = tAttrInfo;
p = p->next();
}
}
if (tmp_table_impl)
delete tmp_table_impl;
}
DBUG_RETURN(1);
}
DBUG_ENTER_EVENT("NdbEventOperationImpl::receive_event");
DBUG_PRINT_EVENT("info",("sdata->operation %u this: %p", operation, this));
// now move the data into the RecAttrs
int is_update= operation == NdbDictionary::Event::_TE_UPDATE;
@ -1089,6 +1095,33 @@ NdbEventBuffer::pollEvents(int aMillisecondNumber, Uint64 *latestGCI)
return ret;
}
int
NdbEventBuffer::flushIncompleteEvents(Uint64 gci)
{
/**
* Find min complete gci
*/
Uint32 i;
Uint32 sz= m_active_gci.size();
Gci_container* array = (Gci_container*)m_active_gci.getBase();
for(i = 0; i < sz; i++)
{
Gci_container* tmp = array + i;
if (tmp->m_gci && tmp->m_gci < gci)
{
// we have found an old not-completed gci, remove it
ndbout_c("ndb: flushing incomplete epoch %lld (<%lld)", tmp->m_gci, gci);
if(!tmp->m_data.is_empty())
{
free_list(tmp->m_data);
}
tmp->~Gci_container();
bzero(tmp, sizeof(Gci_container));
}
}
return 0;
}
NdbEventOperation *
NdbEventBuffer::nextEvent()
{
@ -1157,7 +1190,10 @@ NdbEventBuffer::nextEvent()
}
EventBufData_list::Gci_ops *gci_ops = m_available_data.first_gci_ops();
while (gci_ops && op->getGCI() > gci_ops->m_gci)
{
deleteUsedEventOperations();
gci_ops = m_available_data.next_gci_ops();
}
assert(gci_ops && (op->getGCI() == gci_ops->m_gci));
DBUG_RETURN_EVENT(op->m_facade);
}
@ -1177,7 +1213,10 @@ NdbEventBuffer::nextEvent()
// free all "per gci unique" collected operations
EventBufData_list::Gci_ops *gci_ops = m_available_data.first_gci_ops();
while (gci_ops)
{
deleteUsedEventOperations();
gci_ops = m_available_data.next_gci_ops();
}
DBUG_RETURN_EVENT(0);
}
@ -1191,31 +1230,37 @@ NdbEventBuffer::getGCIEventOperations(Uint32* iter, Uint32* event_types)
EventBufData_list::Gci_op g = gci_ops->m_gci_op_list[(*iter)++];
if (event_types != NULL)
*event_types = g.event_types;
DBUG_PRINT("info", ("gci: %d", (unsigned)gci_ops->m_gci));
DBUG_PRINT("info", ("gci: %d g.op: %x g.event_types: %x",
(unsigned)gci_ops->m_gci, g.op, g.event_types));
DBUG_RETURN(g.op);
}
DBUG_RETURN(NULL);
}
void
NdbEventBuffer::lock()
NdbEventBuffer::deleteUsedEventOperations()
{
NdbMutex_Lock(m_mutex);
}
void
NdbEventBuffer::unlock()
{
NdbMutex_Unlock(m_mutex);
}
void
NdbEventBuffer::add_drop_lock()
{
NdbMutex_Lock(p_add_drop_mutex);
}
void
NdbEventBuffer::add_drop_unlock()
{
NdbMutex_Unlock(p_add_drop_mutex);
Uint32 iter= 0;
const NdbEventOperation *op_f;
while ((op_f= getGCIEventOperations(&iter, NULL)) != NULL)
{
NdbEventOperationImpl *op = &op_f->m_impl;
DBUG_ASSERT(op->m_ref_count > 0);
op->m_ref_count--;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
if (op->m_ref_count == 0)
{
DBUG_PRINT("info", ("deleting op: %p", op));
DBUG_ASSERT(op->m_node_bit_mask.isclear());
if (op->m_next)
op->m_next->m_prev = op->m_prev;
if (op->m_prev)
op->m_prev->m_next = op->m_next;
else
m_dropped_ev_op = op->m_next;
delete op->m_facade;
}
}
}
static
@ -1469,6 +1514,10 @@ NdbEventBuffer::complete_outof_order_gcis()
void
NdbEventBuffer::report_node_failure(Uint32 node_id)
{
NdbEventOperation* op= m_ndb->getEventOperation(0);
if (op == 0)
return;
DBUG_ENTER("NdbEventBuffer::report_node_failure");
SubTableData data;
LinearSectionPtr ptr[3];
@ -1484,12 +1533,20 @@ NdbEventBuffer::report_node_failure(Uint32 node_id)
/**
* Insert this event for each operation
*/
NdbEventOperation* op= 0;
while((op = m_ndb->getEventOperation(op)))
{
NdbEventOperationImpl* impl= &op->m_impl;
data.senderData = impl->m_oid;
insertDataL(impl, &data, ptr);
// no need to lock()/unlock(), receive thread calls this
NdbEventOperationImpl* impl = &op->m_impl;
do if (!impl->m_node_bit_mask.isclear())
{
data.senderData = impl->m_oid;
insertDataL(impl, &data, ptr);
} while((impl = impl->m_next));
for (impl = m_dropped_ev_op; impl; impl = impl->m_next)
if (!impl->m_node_bit_mask.isclear())
{
data.senderData = impl->m_oid;
insertDataL(impl, &data, ptr);
}
}
DBUG_VOID_RETURN;
}
@ -1515,12 +1572,21 @@ NdbEventBuffer::completeClusterFailed()
/**
* Insert this event for each operation
*/
do
{
NdbEventOperationImpl* impl= &op->m_impl;
data.senderData = impl->m_oid;
insertDataL(impl, &data, ptr);
} while((op = m_ndb->getEventOperation(op)));
// no need to lock()/unlock(), receive thread calls this
NdbEventOperationImpl* impl = &op->m_impl;
do if (!impl->m_node_bit_mask.isclear())
{
data.senderData = impl->m_oid;
insertDataL(impl, &data, ptr);
} while((impl = impl->m_next));
for (impl = m_dropped_ev_op; impl; impl = impl->m_next)
if (!impl->m_node_bit_mask.isclear())
{
data.senderData = impl->m_oid;
insertDataL(impl, &data, ptr);
}
}
/**
* Release all GCI's with m_gci > gci
@ -1565,7 +1631,11 @@ NdbEventBuffer::completeClusterFailed()
}
}
assert(bucket != 0);
if (bucket == 0)
{
// no bucket to complete
DBUG_VOID_RETURN;
}
const Uint32 cnt= bucket->m_gcp_complete_rep_count = 1;
bucket->m_gci = gci;
@ -1595,6 +1665,40 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
{
DBUG_ENTER_EVENT("NdbEventBuffer::insertDataL");
Uint64 gci= sdata->gci;
const bool is_data_event =
sdata->operation < NdbDictionary::Event::_TE_FIRST_NON_DATA_EVENT;
if (!is_data_event)
{
switch (sdata->operation)
{
case NdbDictionary::Event::_TE_NODE_FAILURE:
op->m_node_bit_mask.clear(sdata->ndbd_nodeid);
break;
case NdbDictionary::Event::_TE_ACTIVE:
op->m_node_bit_mask.set(sdata->ndbd_nodeid);
// internal event, do not relay to user
DBUG_RETURN_EVENT(0);
break;
case NdbDictionary::Event::_TE_CLUSTER_FAILURE:
op->m_node_bit_mask.clear();
DBUG_ASSERT(op->m_ref_count > 0);
op->m_ref_count--;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
break;
case NdbDictionary::Event::_TE_STOP:
op->m_node_bit_mask.clear(sdata->ndbd_nodeid);
if (op->m_node_bit_mask.isclear())
{
DBUG_ASSERT(op->m_ref_count > 0);
op->m_ref_count--;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
}
break;
default:
break;
}
}
if ( likely((Uint32)op->mi_type & (1 << (Uint32)sdata->operation)) )
{
@ -1615,8 +1719,6 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
}
const bool is_blob_event = (op->theMainOp != NULL);
const bool is_data_event =
sdata->operation < NdbDictionary::Event::_TE_FIRST_NON_DATA_EVENT;
const bool use_hash = op->m_mergeEvents && is_data_event;
if (! is_data_event && is_blob_event)
@ -2244,6 +2346,8 @@ void EventBufData_list::append_list(EventBufData_list *list, Uint64 gci)
void
EventBufData_list::add_gci_op(Gci_op g, bool del)
{
DBUG_ENTER_EVENT("EventBufData_list::add_gci_op");
DBUG_PRINT_EVENT("info", ("p.op: %p g.event_types: %x", g.op, g.event_types));
assert(g.op != NULL);
Uint32 i;
for (i = 0; i < m_gci_op_count; i++) {
@ -2273,8 +2377,15 @@ EventBufData_list::add_gci_op(Gci_op g, bool del)
}
assert(m_gci_op_count < m_gci_op_alloc);
assert(! del);
#ifndef DBUG_OFF
i = m_gci_op_count;
#endif
g.op->m_ref_count++;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", g.op->m_ref_count, g.op));
m_gci_op_list[m_gci_op_count++] = g;
}
DBUG_PRINT_EVENT("exit", ("m_gci_op_list[%u].event_types: %x", i, m_gci_op_list[i].event_types));
DBUG_VOID_RETURN_EVENT;
}
void
@ -2337,6 +2448,9 @@ NdbEventBuffer::createEventOperation(const char* eventName,
delete tOp;
DBUG_RETURN(NULL);
}
getEventOperationImpl(tOp)->m_ref_count = 1;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p",
getEventOperationImpl(tOp)->m_ref_count, getEventOperationImpl(tOp)));
DBUG_RETURN(tOp);
}
@ -2362,16 +2476,10 @@ NdbEventBuffer::createEventOperation(NdbEventImpl& evnt,
void
NdbEventBuffer::dropEventOperation(NdbEventOperation* tOp)
{
DBUG_ENTER("NdbEventBuffer::dropEventOperation");
NdbEventOperationImpl* op= getEventOperationImpl(tOp);
op->stop();
op->m_next= m_dropped_ev_op;
op->m_prev= 0;
if (m_dropped_ev_op)
m_dropped_ev_op->m_prev= op;
m_dropped_ev_op= op;
// stop blob event ops
if (op->theMainOp == NULL)
{
@ -2391,11 +2499,24 @@ NdbEventBuffer::dropEventOperation(NdbEventOperation* tOp)
}
}
// ToDo, take care of these to be deleted at the
// appropriate time, after we are sure that there
// are _no_ more events coming
// delete tOp;
DBUG_ASSERT(op->m_ref_count > 0);
op->m_ref_count--;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op));
if (op->m_ref_count == 0)
{
DBUG_PRINT("info", ("deleting op: %p", op));
DBUG_ASSERT(op->m_node_bit_mask.isclear());
delete op->m_facade;
}
else
{
op->m_next= m_dropped_ev_op;
op->m_prev= 0;
if (m_dropped_ev_op)
m_dropped_ev_op->m_prev= op;
m_dropped_ev_op= op;
}
DBUG_VOID_RETURN;
}
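The dropEventOperation() change above replaces the removed "delete later" ToDo with explicit reference counting: dropping releases one reference, the operation is deleted only when the count reaches zero, and otherwise it is parked on m_dropped_ev_op until deleteUsedEventOperations() releases the remaining references. A minimal standalone sketch of that decrement / delete-or-park pattern, with simplified stand-in types and a singly-linked dropped list:

// Standalone sketch of the drop-time reference-count pattern above, with
// simplified stand-in types; field and function names are illustrative.
#include <cstdio>

struct EventOp {
  int ref_count;
  EventOp *next;
};

static EventOp *dropped_list = nullptr;   // stand-in for m_dropped_ev_op

// Release one reference; delete when unused, otherwise park on the dropped
// list so pending buffered events can still resolve the operation.
void drop_event_op(EventOp *op) {
  if (--op->ref_count == 0) {
    std::printf("deleting op %p\n", (void *)op);
    delete op;
    return;
  }
  op->next = dropped_list;
  dropped_list = op;
  std::printf("parked op %p, refs left: %d\n", (void *)op, op->ref_count);
}

// Called once the buffered events referencing a parked op are consumed.
void release_parked_refs() {
  EventOp **pp = &dropped_list;
  while (*pp) {
    EventOp *op = *pp;
    if (--op->ref_count == 0) {
      *pp = op->next;               // unlink before deleting
      std::printf("deleting parked op %p\n", (void *)op);
      delete op;
    } else {
      pp = &op->next;
    }
  }
}

int main() {
  drop_event_op(new EventOp{1, nullptr});   // deleted immediately
  drop_event_op(new EventOp{2, nullptr});   // parked, one event still buffered
  release_parked_refs();                    // buffered event consumed -> deleted
  return 0;
}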
void
View file
@ -367,6 +367,8 @@ public:
Uint32 m_eventId;
Uint32 m_oid;
Bitmask<(unsigned int)_NDB_NODE_BITMASK_SIZE> m_node_bit_mask;
int m_ref_count;
bool m_mergeEvents;
EventBufData *m_data_item;
@ -406,10 +408,10 @@ public:
void dropEventOperation(NdbEventOperation *);
static NdbEventOperationImpl* getEventOperationImpl(NdbEventOperation* tOp);
void add_drop_lock();
void add_drop_unlock();
void lock();
void unlock();
void add_drop_lock() { NdbMutex_Lock(p_add_drop_mutex); }
void add_drop_unlock() { NdbMutex_Unlock(p_add_drop_mutex); }
void lock() { NdbMutex_Lock(m_mutex); }
void unlock() { NdbMutex_Unlock(m_mutex); }
void add_op();
void remove_op();
@ -430,9 +432,11 @@ public:
Uint32 getEventId(int bufferId);
int pollEvents(int aMillisecondNumber, Uint64 *latestGCI= 0);
int flushIncompleteEvents(Uint64 gci);
NdbEventOperation *nextEvent();
NdbEventOperationImpl* getGCIEventOperations(Uint32* iter,
Uint32* event_types);
void deleteUsedEventOperations();
NdbEventOperationImpl *move_data();
View file
@ -24,6 +24,7 @@ testOIBasic \
testOperations \
testRestartGci \
testScan \
testInterpreter \
testScanInterpreter \
testScanPerf \
testSystemRestart \
@ -64,6 +65,7 @@ testOIBasic_SOURCES = testOIBasic.cpp
testOperations_SOURCES = testOperations.cpp
testRestartGci_SOURCES = testRestartGci.cpp
testScan_SOURCES = testScan.cpp ScanFunctions.hpp
testInterpreter_SOURCES = testInterpreter.cpp
testScanInterpreter_SOURCES = testScanInterpreter.cpp ScanFilter.hpp ScanInterpretTest.hpp
testScanPerf_SOURCES = testScanPerf.cpp
testSystemRestart_SOURCES = testSystemRestart.cpp
View file
@ -79,46 +79,46 @@ int runTestIncValue32(NDBT_Context* ctx, NDBT_Step* step){
Ndb* pNdb = GETNDB(step);
NdbConnection* pTrans = pNdb->startTransaction();
if (pTrans == NULL){
ERR(pNdb->getNdbError());
return NDBT_FAILED;
}
NdbOperation* pOp = pTrans->getNdbOperation(pTab->getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
int check = pOp->interpretedUpdateTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Primary keys
Uint32 pkVal = 1;
check = pOp->equal("KOL1", pkVal );
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Attributes
// Update column
Uint32 valToIncWith = 1;
check = pOp->incValue("KOL2", valToIncWith);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
NdbConnection* pTrans = pNdb->startTransaction();
if (pTrans == NULL){
ERR(pNdb->getNdbError());
return NDBT_FAILED;
}
NdbOperation* pOp = pTrans->getNdbOperation(pTab->getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
int check = pOp->interpretedUpdateTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Primary keys
Uint32 pkVal = 1;
check = pOp->equal("KOL1", pkVal );
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Attributes
// Update column
Uint32 valToIncWith = 1;
check = pOp->incValue("KOL2", valToIncWith);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
NdbRecAttr* valueRec = pOp->getValue("KOL2");
if( valueRec == NULL ) {
@ -142,6 +142,122 @@ int runTestIncValue32(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
int runTestBug19537(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
const NdbDictionary::Table * pTab = ctx->getTab();
Ndb* pNdb = GETNDB(step);
if (strcmp(pTab->getName(), "T1") != 0) {
g_err << "runTestBug19537: skip, table != T1" << endl;
return NDBT_OK;
}
NdbConnection* pTrans = pNdb->startTransaction();
if (pTrans == NULL){
ERR(pNdb->getNdbError());
return NDBT_FAILED;
}
NdbOperation* pOp = pTrans->getNdbOperation(pTab->getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
if (pOp->interpretedUpdateTuple() == -1) {
ERR(pOp->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Primary keys
const Uint32 pkVal = 1;
if (pOp->equal("KOL1", pkVal) == -1) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Load 64-bit constant into register 1 and
// write from register 1 to 32-bit column KOL2
const Uint64 reg_val = 0x0102030405060708ULL;
const Uint32* reg_ptr32 = (const Uint32*)&reg_val;
if (reg_ptr32[0] == 0x05060708 && reg_ptr32[1] == 0x01020304) {
g_err << "runTestBug19537: platform is LITTLE endian" << endl;
} else if (reg_ptr32[0] == 0x01020304 && reg_ptr32[1] == 0x05060708) {
g_err << "runTestBug19537: platform is BIG endian" << endl;
} else {
g_err << "runTestBug19537: impossible platform"
<< hex << " [0]=" << reg_ptr32[0] << " [1]=" <<reg_ptr32[1] << endl;
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
if (pOp->load_const_u64(1, reg_val) == -1 ||
pOp->write_attr("KOL2", 1) == -1) {
ERR(pOp->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
if (pTrans->execute(Commit) == -1) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Read value via a new transaction
pTrans = pNdb->startTransaction();
if (pTrans == NULL){
ERR(pNdb->getNdbError());
return NDBT_FAILED;
}
pOp = pTrans->getNdbOperation(pTab->getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
Uint32 kol2 = 0x09090909;
if (pOp->readTuple() == -1 ||
pOp->equal("KOL1", pkVal) == -1 ||
pOp->getValue("KOL2", (char*)&kol2) == 0) {
ERR(pOp->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
if (pTrans->execute(Commit) == -1) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
// Expected conversion as in C - truncate to lower (logical) word
if (kol2 == 0x01020304) {
g_err << "runTestBug19537: the bug manifests itself !" << endl;
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
if (kol2 != 0x05060708) {
g_err << "runTestBug19537: impossible KOL2 " << hex << kol2 << endl;
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
pNdb->closeTransaction(pTrans);
return NDBT_OK;
}
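The test above hinges on a small piece of arithmetic: converting the 64-bit register value 0x0102030405060708 to a 32-bit column must keep the low word 0x05060708, whereas copying the register's first word in memory keeps 0x01020304 on a big-endian host. A standalone demonstration of the two behaviours in plain C++, independent of the NDB interpreter:

// Standalone demonstration of the truncation discussed above: an arithmetic
// conversion always keeps the low 32 bits, while a raw word copy keeps
// whichever half happens to sit first in memory (the bug on big-endian).
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uint64_t reg_val = 0x0102030405060708ULL;

  uint32_t by_conversion = (uint32_t)reg_val;                   // well-defined: low word
  uint32_t by_word_copy;
  std::memcpy(&by_word_copy, &reg_val, sizeof(by_word_copy));   // first word in memory

  std::printf("arithmetic conversion: 0x%08x\n", by_conversion); // 0x05060708 everywhere
  std::printf("raw first-word copy  : 0x%08x\n", by_word_copy);  // 0x05060708 little-endian,
                                                                 // 0x01020304 big-endian
  return 0;
}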
NDBT_TESTSUITE(testInterpreter);
TESTCASE("IncValue32",
@ -156,6 +272,12 @@ TESTCASE("IncValue64",
INITIALIZER(runTestIncValue64);
FINALIZER(runClearTable);
}
TESTCASE("Bug19537",
"Test big-endian write_attr of 32 bit integer\n"){
INITIALIZER(runLoadTable);
INITIALIZER(runTestBug19537);
FINALIZER(runClearTable);
}
#if 0
TESTCASE("MaxTransactions",
"Start transactions until no more can be created\n"){
View file
@ -217,6 +217,7 @@ sh -c "PATH=\"${MYSQL_BUILD_PATH:-$PATH}\" \
--with-fast-mutexes \
--with-mysqld-user=%{mysqld_user} \
--with-unix-socket-path=/var/lib/mysql/mysql.sock \
--with-pic \
--prefix=/ \
--with-extra-charsets=all \
%if %{YASSL_BUILD}
@ -319,7 +320,7 @@ BuildMySQL "--enable-shared \
--with-example-storage-engine \
--with-blackhole-storage-engine \
--with-federated-storage-engine \
--with-big-tables \
--with-big-tables \
--with-comment=\"MySQL Community Server - Max (GPL)\"")
# We might want to save the config log file
@ -683,12 +684,15 @@ fi
# itself - note that they must be ordered by date (important when
# merging BK trees)
%changelog
%changelog
* Sat May 20 2006 Kent Boortz <kent@mysql.com>
- Always compile for PIC, position independent code.
* Wed May 10 2006 Kent Boortz <kent@mysql.com>
- Use character set "all" for the "max", to make Cluster nodes
independent on the character set directory, and the problem that
two RPM sub packages both wants to install this directory.
- Use character set "all" when compiling with Cluster, to make Cluster
nodes independent on the character set directory, and the problem
that two RPM sub packages both wants to install this directory.
* Mon May 01 2006 Kent Boortz <kent@mysql.com>
View file
@ -22,6 +22,13 @@ if HAVE_YASSL
else
yassl_dummy_link_fix=
endif
if THREAD_SAFE_CLIENT
LIBMYSQLCLIENT_LA = $(top_builddir)/libmysql_r/libmysqlclient_r.la
else
LIBMYSQLCLIENT_LA = $(top_builddir)/libmysql/libmysqlclient.la
endif
EXTRA_DIST = auto_increment.res auto_increment.tst \
function.res function.tst lock_test.pl lock_test.res \
export.pl big_record.pl \
@ -42,7 +49,7 @@ INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \
$(openssl_includes)
LIBS = @CLIENT_LIBS@
LDADD = @CLIENT_EXTRA_LDFLAGS@ \
$(top_builddir)/libmysql/libmysqlclient.la
$(LIBMYSQLCLIENT_LA)
mysql_client_test_LDADD= $(LDADD) $(CXXLDFLAGS) -L../mysys -lmysys
mysql_client_test_SOURCES= mysql_client_test.c $(yassl_dummy_link_fix)
insert_test_SOURCES= insert_test.c $(yassl_dummy_link_fix)
View file
@ -2,7 +2,7 @@
#include "my_config.h"
#include <stdlib.h>
#include <tap.h>
#include "../tap.h"
int main() {
plan(5);