Mirror of https://github.com/MariaDB/server.git
Merge mskold@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into mysql.com:/usr/local/home/marty/MySQL/mysql-5.1-new
Commit a3102373d1
41 changed files with 1500 additions and 2343 deletions
@@ -48,6 +48,7 @@ mysqlbinlog_SOURCES = mysqlbinlog.cc $(top_srcdir)/mysys/mf_tempdir.c \
 $(top_srcdir)/mysys/my_vle.c \
 $(top_srcdir)/mysys/base64.c
 mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS)
+mysqlslap_LDADD = $(LDADD) $(CXXLDFLAGS) -lpthread
 mysqltestmanager_pwgen_SOURCES = mysqlmanager-pwgen.c
 mysqltestmanagerc_SOURCES= mysqlmanagerc.c $(yassl_dummy_link_fix)
 mysqlcheck_SOURCES= mysqlcheck.c $(yassl_dummy_link_fix)

@@ -53,6 +53,7 @@ enum options_client
 OPT_MYSQL_ONLY_PRINT,
 OPT_MYSQL_LOCK_DIRECTORY,
 OPT_MYSQL_SLAP_SLAVE,
+OPT_USE_THREADS,
 OPT_MYSQL_NUMBER_OF_QUERY,
 OPT_MYSQL_PRESERVE_SCHEMA,
 OPT_IGNORE_TABLE,OPT_INSERT_IGNORE,OPT_SHOW_WARNINGS,OPT_DROP_DATABASE,

@@ -87,6 +87,7 @@ TODO:
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <ctype.h>
+#include <my_pthread.h>

 #define MYSLAPLOCK "/myslaplock.lck"
 #define MYSLAPLOCK_DIR "/tmp"

@@ -132,6 +133,7 @@ static uint opt_protocol= 0;

 static int get_options(int *argc,char ***argv);
 static uint opt_mysql_port= 0;
+static uint opt_use_threads;

 static const char *load_default_groups[]= { "mysqlslap","client",0 };

@@ -151,6 +153,13 @@ struct stats {
 unsigned long long rows;
 };

+typedef struct thread_context thread_context;
+
+struct thread_context {
+statement *stmt;
+ulonglong limit;
+};
+
 typedef struct conclusions conclusions;

 struct conclusions {

@@ -184,7 +193,7 @@ static int create_schema(MYSQL *mysql, const char *db, statement *stmt,
 statement *engine_stmt);
 static int run_scheduler(stats *sptr, statement *stmts, uint concur,
 ulonglong limit);
-int run_task(statement *stmt, ulonglong limit);
+int run_task(thread_context *con);
 void statement_cleanup(statement *stmt);

 static const char ALPHANUMERICS[]=

@@ -440,6 +449,10 @@ static struct my_option my_long_options[] =
 {"socket", 'S', "Socket file to use for connection.",
 (gptr*) &opt_mysql_unix_port, (gptr*) &opt_mysql_unix_port, 0, GET_STR,
 REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+{"use-threads", OPT_USE_THREADS,
+"Use pthread calls instead of fork() calls (default on Windows)",
+(gptr*) &opt_use_threads, (gptr*) &opt_use_threads, 0,
+GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
 #include <sslopt-longopts.h>
 #ifndef DONT_ALLOW_USER_CHANGE
 {"user", 'u', "User for login if not current user.", (gptr*) &user,

@@ -930,8 +943,11 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
 uint x;
 File lock_file;
 struct timeval start_time, end_time;
+thread_context con;
 DBUG_ENTER("run_scheduler");

+con.stmt= stmts;
+con.limit= limit;

 lock_file= my_open(lock_file_str, O_CREAT|O_WRONLY|O_TRUNC, MYF(0));

@@ -943,6 +959,30 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
 exit(0);
 }

+if (opt_use_threads)
+{
+pthread_t mainthread; /* Thread descriptor */
+pthread_attr_t attr; /* Thread attributes */
+
+for (x= 0; x < concur; x++)
+{
+pthread_attr_init(&attr);
+pthread_attr_setdetachstate(&attr,
+PTHREAD_CREATE_DETACHED);
+
+/* now create the thread */
+if (pthread_create(&mainthread, &attr, (void *)run_task,
+(void *)&con) != 0)
+{
+fprintf(stderr,"%s: Could not create thread\n",
+my_progname);
+exit(0);
+}
+}
+}
+else
+{
+fflush(NULL);
 for (x= 0; x < concur; x++)
 {
 int pid;

@@ -958,7 +998,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
 fprintf(stderr,
 "%s: fork returned 0, calling task pid %d gid %d\n",
 my_progname, pid, getgid());
-run_task(stmts, limit);
+run_task(&con);
 exit(0);
 break;
 case -1:

@@ -979,14 +1019,30 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
 break;
 }
 }
+}

 /* Lets release use some clients! */
 if (!opt_slave)
 my_lock(lock_file, F_UNLCK, 0, F_TO_EOF, MYF(0));

 gettimeofday(&start_time, NULL);

-my_close(lock_file, MYF(0));
+/*
+We look to grab a write lock at this point. Once we get it we know that
+all clients have completed their work.
+*/
+if (opt_use_threads)
+{
+if (my_lock(lock_file, F_WRLCK, 0, F_TO_EOF, MYF(0)))
+{
+fprintf(stderr,"%s: Could not get lockfile\n",
+my_progname);
+exit(0);
+}
+my_lock(lock_file, F_UNLCK, 0, F_TO_EOF, MYF(0));
+}
+else
+{
 WAIT:
 while (x--)
 {

@@ -994,8 +1050,11 @@ WAIT:
 pid= wait(&status);
 DBUG_PRINT("info", ("Parent: child %d status %d", pid, status));
 }
+}
 gettimeofday(&end_time, NULL);

+my_close(lock_file, MYF(0));
+
 sptr->timing= timedif(end_time, start_time);
 sptr->users= concur;
 sptr->rows= limit;

@@ -1004,7 +1063,7 @@ WAIT:
 }

 int
-run_task(statement *qstmt, ulonglong limit)
+run_task(thread_context *con)
 {
 ulonglong counter= 0, queries;
 File lock_file;

@@ -1014,7 +1073,7 @@ run_task(statement *qstmt, ulonglong limit)
 statement *ptr;

 DBUG_ENTER("run_task");
-DBUG_PRINT("info", ("task script \"%s\"", qstmt->string));
+DBUG_PRINT("info", ("task script \"%s\"", con->stmt->string));

 mysql_init(&mysql);

@@ -1036,7 +1095,7 @@ run_task(statement *qstmt, ulonglong limit)
 queries= 0;

 limit_not_met:
-for (ptr= qstmt; ptr && ptr->length; ptr= ptr->next)
+for (ptr= con->stmt; ptr && ptr->length; ptr= ptr->next)
 {
 if (opt_only_print)
 {

@@ -1060,11 +1119,11 @@ limit_not_met:
 }
 queries++;

-if (limit && queries == limit)
+if (con->limit && queries == con->limit)
 DBUG_RETURN(0);
 }

-if (limit && queries < limit)
+if (con->limit && queries < con->limit)
 goto limit_not_met;

 my_lock(lock_file, F_UNLCK, 0, F_TO_EOF, MYF(0));
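For readers skimming the hunks above: the new scheduler dispatches each simulated client either as a detached pthread (with --use-threads) or as a forked child, and passes the shared statement list and query limit through the new thread_context struct. Below is a minimal, self-contained sketch of that dispatch shape with a made-up worker body; it is an illustration, not the mysqlslap code itself.

// Sketch only: the pthread-vs-fork dispatch pattern used by run_scheduler().
// The worker body, counts and the use_threads flag value are illustrative.
#include <pthread.h>
#include <cstdio>
#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>

struct thread_context { const char *stmt; unsigned long long limit; };

static void *run_task(void *arg)
{
  thread_context *con= static_cast<thread_context*>(arg);
  std::printf("running \"%s\" at most %llu times\n", con->stmt, con->limit);
  return NULL;
}

int main()
{
  thread_context con= { "select 1", 10 };
  const int concur= 4;
  const bool use_threads= true;              // stand-in for opt_use_threads

  if (use_threads)
  {
    pthread_attr_t attr;
    pthread_t tid;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    for (int x= 0; x < concur; x++)
      if (pthread_create(&tid, &attr, run_task, &con) != 0)
        std::exit(1);
    sleep(1);                                // crude stand-in for the lock-file wait
  }
  else
  {
    for (int x= 0; x < concur; x++)
      if (fork() == 0) { run_task(&con); _exit(0); }
    while (wait(NULL) > 0) {}                // reap the forked children
  }
  return 0;
}

In the real patch the parent does not sleep; it waits on the shared lock file instead, which is also why mysqlslap_LDADD gains -lpthread in the Makefile.am hunk.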
@@ -206,6 +206,11 @@ enum ha_base_keytype {
 #define HA_NULL_ARE_EQUAL 2048 /* NULL in key are cmp as equal */
 #define HA_GENERATED_KEY 8192 /* Automaticly generated key */

+/* The combination of the above can be used for key type comparison. */
+#define HA_KEYFLAG_MASK (HA_NOSAME | HA_PACK_KEY | HA_AUTO_KEY | \
+HA_BINARY_PACK_KEY | HA_FULLTEXT | HA_UNIQUE_CHECK | \
+HA_SPATIAL | HA_NULL_ARE_EQUAL | HA_GENERATED_KEY)
+
 /* Automatic bits in key-flag */

 #define HA_SPACE_PACK_USED 4 /* Test for if SPACE_PACK used */

@@ -349,8 +354,9 @@ enum ha_base_keytype {
 #define HA_ERR_NO_PARTITION_FOUND 160 /* There's no partition in table for
 given value */
 #define HA_ERR_RBR_LOGGING_FAILED 161 /* Row-based binlogging of row failed */
+#define HA_ERR_DROP_INDEX_FK 162 /* Index needed in foreign key constr. */

-#define HA_ERR_LAST 161 /* Copy last error no */
+#define HA_ERR_LAST 162 /* Copy last error no */

 /* Add error numbers before HA_ERR_LAST and change it accordingly. */
 #define HA_ERR_ERRORS (HA_ERR_LAST - HA_ERR_FIRST + 1)
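A note on why the same hunk bumps HA_ERR_LAST to 162: HA_ERR_ERRORS is derived from it and is typically used to size per-error lookup tables, so a new top error code added without the bump would index past the end of any such table. A hedged illustration follows; the array and function are hypothetical, only the macro arithmetic is taken from the hunk.

// Hypothetical lookup table sized by HA_ERR_ERRORS; shows why adding
// HA_ERR_DROP_INDEX_FK (162) without raising HA_ERR_LAST would overflow it.
#define HA_ERR_FIRST  120                    // illustrative lower bound
#define HA_ERR_LAST   162
#define HA_ERR_ERRORS (HA_ERR_LAST - HA_ERR_FIRST + 1)

static const char *handler_error_text[HA_ERR_ERRORS]= { /* one entry per code */ };

static const char *handler_error_message(int code)
{
  if (code < HA_ERR_FIRST || code > HA_ERR_LAST)
    return "unknown handler error";
  return handler_error_text[code - HA_ERR_FIRST];  // in range only while HA_ERR_LAST is current
}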
@@ -396,3 +396,71 @@ a int(11) NO PRI
 b varchar(20) NO MUL
 c varchar(20) NO
 drop table t1;
+create table t1 (
+c1 int,
+c2 char(12),
+c3 varchar(123),
+c4 timestamp,
+index (c1),
+index i1 (c1),
+index i2 (c2),
+index i3 (c3),
+unique i4 (c4),
+index i5 (c1, c2, c3, c4),
+primary key (c2, c3),
+index (c2, c4));
+show create table t1;
+Table	Create Table
+t1 CREATE TABLE `t1` (
+`c1` int(11) default NULL,
+`c2` char(12) NOT NULL default '',
+`c3` varchar(123) NOT NULL default '',
+`c4` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
+PRIMARY KEY (`c2`,`c3`),
+UNIQUE KEY `i4` (`c4`),
+KEY `c1` (`c1`),
+KEY `i1` (`c1`),
+KEY `i2` (`c2`),
+KEY `i3` (`c3`),
+KEY `i5` (`c1`,`c2`,`c3`,`c4`),
+KEY `c2` (`c2`,`c4`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t1 drop index c1;
+alter table t1 add index (c1);
+alter table t1 add index (c1);
+alter table t1 drop index i3;
+alter table t1 add index i3 (c3);
+alter table t1 drop index i2, drop index i4;
+alter table t1 add index i2 (c2), add index i4 (c4);
+alter table t1 drop index i2, drop index i4, add index i6 (c2, c4);
+alter table t1 add index i2 (c2), add index i4 (c4), drop index i6;
+alter table t1 drop index i2, drop index i4, add unique i4 (c4);
+alter table t1 add index i2 (c2), drop index i4, add index i4 (c4);
+alter table t1 drop index c2, add index (c2(4),c3(7));
+alter table t1 drop index c2, add index (c2(4),c3(7));
+alter table t1 add primary key (c1, c2), drop primary key;
+alter table t1 drop primary key;
+alter table t1 add primary key (c1, c2), drop primary key;
+ERROR 42000: Can't DROP 'PRIMARY'; check that column/key exists
+show create table t1;
+Table	Create Table
+t1 CREATE TABLE `t1` (
+`c1` int(11) NOT NULL default '0',
+`c2` char(12) NOT NULL default '',
+`c3` varchar(123) NOT NULL default '',
+`c4` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
+KEY `i1` (`c1`),
+KEY `i5` (`c1`,`c2`,`c3`,`c4`),
+KEY `c1` (`c1`),
+KEY `c1_2` (`c1`),
+KEY `i3` (`c3`),
+KEY `i2` (`c2`),
+KEY `i4` (`c4`),
+KEY `c2` (`c2`(4),`c3`(7))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(1, 'a', 'a', NULL);
+insert into t1 values(1, 'b', 'b', NULL);
+alter table t1 drop index i3, drop index i2, drop index i1;
+alter table t1 add index i3 (c3), add index i2 (c2), add unique index i1 (c1);
+ERROR 23000: Duplicate entry '1' for key 1
+drop table t1;

File diff suppressed because it is too large
@@ -1,365 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-drop database if exists mysqltest1;
-create database mysqltest1;
-use mysqltest1;
-create table t1 (a varchar(100));
-use mysqltest1;
-create procedure foo()
-begin
-declare b int;
-set b = 8;
-insert into t1 values (b);
-insert into t1 values (unix_timestamp());
-end|
-select * from mysql.proc where name='foo' and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment
-mysqltest1 foo PROCEDURE foo SQL CONTAINS_SQL NO DEFINER begin
-declare b int;
-set b = 8;
-insert into t1 values (b);
-insert into t1 values (unix_timestamp());
-end root@localhost # #
-select * from mysql.proc where name='foo' and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment
-mysqltest1 foo PROCEDURE foo SQL CONTAINS_SQL NO DEFINER begin
-declare b int;
-set b = 8;
-insert into t1 values (b);
-insert into t1 values (unix_timestamp());
-end root@localhost # #
-set timestamp=1000000000;
-call foo();
-select * from t1;
-a
-8
-1000000000
-select * from t1;
-a
-8
-1000000000
-delete from t1;
-create procedure foo2()
-not deterministic
-select * from mysqltest1.t1;
-call foo2();
-a
-alter procedure foo2 contains sql;
-drop table t1;
-create table t1 (a int);
-create table t2 (a int);
-create procedure foo3()
-deterministic
-insert into t1 values (15);
-grant CREATE ROUTINE, EXECUTE on mysqltest1.* to "zedjzlcsjhd"@127.0.0.1;
-grant SELECT on mysqltest1.t1 to "zedjzlcsjhd"@127.0.0.1;
-grant SELECT, INSERT on mysqltest1.t2 to "zedjzlcsjhd"@127.0.0.1;
-SELECT 1;
-1
-1
-create procedure foo4()
-deterministic
-begin
-insert into t2 values(3);
-insert into t1 values (5);
-end|
-call foo4();
-Got one of the listed errors
-call foo3();
-show warnings;
-Level Code Message
-call foo4();
-Got one of the listed errors
-alter procedure foo4 sql security invoker;
-call foo4();
-show warnings;
-Level Code Message
-select * from t1;
-a
-15
-5
-select * from t2;
-a
-3
-3
-3
-select * from t1;
-a
-15
-5
-select * from t2;
-a
-3
-3
-3
-delete from t2;
-alter table t2 add unique (a);
-drop procedure foo4;
-create procedure foo4()
-deterministic
-begin
-insert into t2 values(20),(20);
-end|
-call foo4();
-ERROR 23000: Duplicate entry '20' for key 1
-show warnings;
-Level Code Message
-Error 1062 Duplicate entry '20' for key 1
-select * from t2;
-a
-20
-select * from t2;
-a
-20
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment
-mysqltest1 foo4 PROCEDURE foo4 SQL CONTAINS_SQL YES DEFINER begin
-insert into t2 values(20),(20);
-end root@localhost # #
-drop procedure foo4;
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment
-drop procedure foo;
-drop procedure foo2;
-drop procedure foo3;
-create function fn1(x int)
-returns int
-deterministic
-begin
-insert into t1 values (x);
-return x+2;
-end|
-delete t1,t2 from t1,t2;
-select fn1(20);
-fn1(20)
-22
-insert into t2 values(fn1(21));
-select * from t1;
-a
-21
-20
-select * from t2;
-a
-23
-select * from t1;
-a
-21
-20
-select * from t2;
-a
-23
-drop function fn1;
-create function fn1()
-returns int
-begin
-return unix_timestamp();
-end|
-alter function fn1 no sql;
-delete from t1;
-set timestamp=1000000000;
-insert into t1 values(fn1());
-create function fn2()
-returns int
-no sql
-begin
-return unix_timestamp();
-end|
-create function fn3()
-returns int
-not deterministic
-reads sql data
-begin
-return 0;
-end|
-select fn3();
-fn3()
-0
-select * from mysql.proc where db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment
-mysqltest1 fn1 FUNCTION fn1 SQL NO_SQL NO DEFINER int(11) begin
-return unix_timestamp();
-end root@localhost # #
-mysqltest1 fn2 FUNCTION fn2 SQL NO_SQL NO DEFINER int(11) begin
-return unix_timestamp();
-end zedjzlcsjhd@localhost # #
-mysqltest1 fn3 FUNCTION fn3 SQL READS_SQL_DATA NO DEFINER int(11) begin
-return 0;
-end root@localhost # #
-select * from t1;
-a
-1000000000
-use mysqltest1;
-select * from t1;
-a
-1000000000
-select * from mysql.proc where db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment
-mysqltest1 fn1 FUNCTION fn1 SQL NO_SQL NO DEFINER int(11) begin
-return unix_timestamp();
-end root@localhost # #
-mysqltest1 fn2 FUNCTION fn2 SQL NO_SQL NO DEFINER int(11) begin
-return unix_timestamp();
-end zedjzlcsjhd@localhost # #
-mysqltest1 fn3 FUNCTION fn3 SQL READS_SQL_DATA NO DEFINER int(11) begin
-return 0;
-end root@localhost # #
-delete from t2;
-alter table t2 add unique (a);
-drop function fn1;
-create function fn1()
-returns int
-begin
-insert into t2 values(20),(20);
-return 10;
-end|
-select fn1();
-ERROR 23000: Duplicate entry '20' for key 1
-select * from t2;
-a
-20
-select * from t2;
-a
-20
-create trigger trg before insert on t1 for each row set new.a= 10;
-ERROR 42000: Access denied; you need the SUPER privilege for this operation
-delete from t1;
-create trigger trg before insert on t1 for each row set new.a= 10;
-insert into t1 values (1);
-select * from t1;
-a
-10
-select * from t1;
-a
-10
-delete from t1;
-drop trigger trg;
-insert into t1 values (1);
-select * from t1;
-a
-1
-show binlog events in 'master-bin.000001' from 102;
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query 1 # drop database if exists mysqltest1
-master-bin.000001 # Query 1 # create database mysqltest1
-master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a varchar(100))
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Query 1 # use `mysqltest1`; delete from t1
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Update_rows 1 #
-master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1
-master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int)
-master-bin.000001 # Query 1 # use `mysqltest1`; create table t2 (a int)
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.user
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.db
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.tables_priv
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.tables_priv
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.procs_priv
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t2
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t2
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Update_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t2
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Query 1 # use `mysqltest1`; delete from t2
-master-bin.000001 # Query 1 # use `mysqltest1`; alter table t2 add unique (a)
-master-bin.000001 # Table_map 1 # mysql.procs_priv
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t2
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t2
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t2
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Update_rows 1 #
-master-bin.000001 # Query 1 # use `mysqltest1`; delete from t1
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.procs_priv
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Query 1 # use `mysqltest1`; delete from t2
-master-bin.000001 # Query 1 # use `mysqltest1`; alter table t2 add unique (a)
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Delete_rows 1 #
-master-bin.000001 # Table_map 1 # mysql.proc
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Table_map 1 # mysqltest1.t2
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Query 1 # use `mysqltest1`; delete from t1
-master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger trg before insert on t1 for each row set new.a= 10
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-master-bin.000001 # Query 1 # use `mysqltest1`; delete from t1
-master-bin.000001 # Query 1 # use `mysqltest1`; drop trigger trg
-master-bin.000001 # Table_map 1 # mysqltest1.t1
-master-bin.000001 # Write_rows 1 #
-select * from t1;
-a
-1
-create procedure foo()
-not deterministic
-reads sql data
-select * from t1;
-call foo();
-a
-1
-drop procedure foo;
-drop function fn1;
-drop database mysqltest1;
-drop user "zedjzlcsjhd"@127.0.0.1;
@@ -18,8 +18,6 @@ group_min_max : Bug #15448
 innodb_concurrent : Results are not deterministic, Elliot will fix (BUG#3300)
 subselect : Bug#15706
 type_time : Bug#15805
-#rpl000002 : Bug#15920 Temporary tables are not binlogged in SBR
-#ps_7ndb : Bug#15923 Core dump in RBR mode when executing test suite
 rpl_ddl : Bug#15963 SBR does not show "Definer" correctly
-mysqlslap : Bug#16167
 events : Affects flush test case. A table lock not released somewhere
+rpl_sp : Bug #16456
@@ -372,3 +372,61 @@ desc t1;
 drop table t1;

 # End of 4.1 tests
+
+#
+# WL#1563 - Modify MySQL to support on-line CREATE/DROP INDEX
+# To test if this really works, you need to run with --debug
+# and check the trace file.
+#
+# Create a table with named and unnamed indexes.
+create table t1 (
+c1 int,
+c2 char(12),
+c3 varchar(123),
+c4 timestamp,
+index (c1),
+index i1 (c1),
+index i2 (c2),
+index i3 (c3),
+unique i4 (c4),
+index i5 (c1, c2, c3, c4),
+primary key (c2, c3),
+index (c2, c4));
+show create table t1;
+# Some simple tests.
+alter table t1 drop index c1;
+alter table t1 add index (c1);
+# This creates index 'c1_2'.
+alter table t1 add index (c1);
+alter table t1 drop index i3;
+alter table t1 add index i3 (c3);
+# Two indexes at the same time.
+alter table t1 drop index i2, drop index i4;
+alter table t1 add index i2 (c2), add index i4 (c4);
+# Three indexes, one of them reversely.
+alter table t1 drop index i2, drop index i4, add index i6 (c2, c4);
+alter table t1 add index i2 (c2), add index i4 (c4), drop index i6;
+# include an unique index.
+alter table t1 drop index i2, drop index i4, add unique i4 (c4);
+alter table t1 add index i2 (c2), drop index i4, add index i4 (c4);
+# Modify an index by changing its definition.
+alter table t1 drop index c2, add index (c2(4),c3(7));
+# Change nothing. The new key definition is the same as the old one.
+alter table t1 drop index c2, add index (c2(4),c3(7));
+# Test primary key handling.
+alter table t1 add primary key (c1, c2), drop primary key;
+alter table t1 drop primary key;
+# Drop is checked first. Primary key must exist.
+--error 1091
+alter table t1 add primary key (c1, c2), drop primary key;
+show create table t1;
+# Insert non-unique values.
+insert into t1 values(1, 'a', 'a', NULL);
+insert into t1 values(1, 'b', 'b', NULL);
+# Drop some indexes for new adds.
+alter table t1 drop index i3, drop index i2, drop index i1;
+# Add indexes, one is unique on non-unique values.
+--error 1062
+alter table t1 add index i3 (c3), add index i2 (c2), add unique index i1 (c1);
+drop table t1;
+
@@ -4,6 +4,8 @@

 --exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql

+--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --use-threads
+
 --exec $MYSQL_SLAP --only-print --iterations=20 --query="select * from t1" --create="CREATE TABLE t1 (id int, name varchar(64)); INSERT INTO t1 VALUES (1, 'This is a test')" --delimiter=";"
 --exec $MYSQL_SLAP --silent --concurrency=5 --iterations=20 --query="select * from t1" --create="CREATE TABLE t1 (id int, name varchar(64)); INSERT INTO t1 VALUES (1, 'This is a test')" --delimiter=";"

@@ -1037,7 +1037,7 @@ drop table t1;
 --error 1
 --exec $MYSQL_TEST --record -x $MYSQL_TEST_DIR/var/tmp/bug11731.sql -R $MYSQL_TEST_DIR/var/tmp/bug11731.out 2>&1
 # The .out file should be non existent
---exec test ! -e $MYSQL_TEST_DIR/var/tmp/bug11731.out
+--exec test ! -s $MYSQL_TEST_DIR/var/tmp/bug11731.out
 drop table t1;

@@ -1 +0,0 @@
---log_bin_trust_routine_creators=0
@@ -1,331 +0,0 @@
-# row-based and statement have expected binlog difference in result files
--- source include/have_binlog_format_row.inc
-
-# Test of replication of stored procedures in row-based replication.
-# Initially copied from the statement-based version rpl_stm_sp.test.
-
-# Note that in the .opt files we still use the old variable name
-# log-bin-trust-routine-creators so that this test checks that it's
-# still accepted (this test also checks that the new name is
-# accepted). The old name could be removed in 5.1 or 6.0.
-
-source include/master-slave.inc;
-
-# we need a db != test, where we don't have automatic grants
---disable_warnings
-drop database if exists mysqltest1;
---enable_warnings
-create database mysqltest1;
-use mysqltest1;
-create table t1 (a varchar(100));
-sync_slave_with_master;
-use mysqltest1;
-
-# ********************** PART 1 : STORED PROCEDURES ***************
-
-# Does the same proc as on master get inserted into mysql.proc ?
-# (all same properties)
-
-connection master;
-
-delimiter |;
-create procedure foo()
-begin
-declare b int;
-set b = 8;
-insert into t1 values (b);
-insert into t1 values (unix_timestamp());
-end|
-delimiter ;|
-
-# we replace columns having times
-# (even with fixed timestamp displayed time may changed based on TZ)
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where name='foo' and db='mysqltest1';
-sync_slave_with_master;
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where name='foo' and db='mysqltest1';
-
-connection master;
-# see if timestamp used in SP on slave is same as on master
-set timestamp=1000000000;
-call foo();
-select * from t1;
-sync_slave_with_master;
-select * from t1;
-
-# Now a SP which is not updating tables
-
-connection master;
-delete from t1;
-create procedure foo2()
-not deterministic
-select * from mysqltest1.t1;
-call foo2();
-
-alter procedure foo2 contains sql;
-
-# SP with definer's right
-
-drop table t1;
-create table t1 (a int);
-create table t2 (a int);
-
-create procedure foo3()
-deterministic
-insert into t1 values (15);
-
-# let's create a non-privileged user
-grant CREATE ROUTINE, EXECUTE on mysqltest1.* to "zedjzlcsjhd"@127.0.0.1;
-grant SELECT on mysqltest1.t1 to "zedjzlcsjhd"@127.0.0.1;
-grant SELECT, INSERT on mysqltest1.t2 to "zedjzlcsjhd"@127.0.0.1;
-
-# ToDo: BUG#14931: There is a race between the last grant binlogging, and
-# the binlogging in the new connection made below, causing sporadic test
-# failures due to switched statement order in binlog. To fix this we do
-# SELECT 1 in the first connection before starting the second, ensuring
-# that binlogging is done in the expected order.
-# Please remove this SELECT 1 when BUG#14931 is fixed.
-SELECT 1;
-
-connect (con1,127.0.0.1,zedjzlcsjhd,,mysqltest1,$MASTER_MYPORT,);
-connection con1;
-
-# this routine will fail in the second INSERT because of privileges
-delimiter |;
-create procedure foo4()
-deterministic
-begin
-insert into t2 values(3);
-insert into t1 values (5);
-end|
-
-delimiter ;|
-
-# I add ,0 so that it does not print the error in the test output,
-# because this error is hostname-dependent
---error 1142,0
-call foo4(); # invoker has no INSERT grant on table t1 => failure
-
-connection master;
-call foo3(); # success (definer == root)
-show warnings;
-
---error 1142,0
-call foo4(); # definer's rights => failure
-
-# we test replication of ALTER PROCEDURE
-alter procedure foo4 sql security invoker;
-call foo4(); # invoker's rights => success
-show warnings;
-
-# Note that half-failed procedure calls are ok with binlogging;
-# if we compare t2 on master and slave we see they are identical:
-
-select * from t1;
-select * from t2;
-sync_slave_with_master;
-select * from t1;
-select * from t2;
-
-# Let's check another failing-in-the-middle procedure
-connection master;
-delete from t2;
-alter table t2 add unique (a);
-
-drop procedure foo4;
-delimiter |;
-create procedure foo4()
-deterministic
-begin
-insert into t2 values(20),(20);
-end|
-
-delimiter ;|
-
---error 1062
-call foo4();
-show warnings;
-
-select * from t2;
-sync_slave_with_master;
-# check that this failed-in-the-middle replicated right:
-select * from t2;
-
-# Test of DROP PROCEDURE
-
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-connection master;
-drop procedure foo4;
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-sync_slave_with_master;
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-
-# Test of a procedure and function containing UUID() is done in
-# rpl_row_UUID.
-
-
-# ********************** PART 2 : FUNCTIONS ***************
-
-connection master;
-drop procedure foo;
-drop procedure foo2;
-drop procedure foo3;
-
-delimiter |;
-create function fn1(x int)
-returns int
-deterministic
-begin
-insert into t1 values (x);
-return x+2;
-end|
-
-delimiter ;|
-delete t1,t2 from t1,t2;
-select fn1(20);
-insert into t2 values(fn1(21));
-select * from t1;
-select * from t2;
-sync_slave_with_master;
-select * from t1;
-select * from t2;
-
-connection master;
-delimiter |;
-
-drop function fn1;
-
-create function fn1()
-returns int
-begin
-return unix_timestamp();
-end|
-
-delimiter ;|
-# Just to test ALTER FUNCTION
-alter function fn1 no sql;
-delete from t1;
-set timestamp=1000000000;
-insert into t1 values(fn1());
-
-connection con1;
-
-delimiter |;
-create function fn2()
-returns int
-no sql
-begin
-return unix_timestamp();
-end|
-delimiter ;|
-
-connection master;
-
-delimiter |;
-create function fn3()
-returns int
-not deterministic
-reads sql data
-begin
-return 0;
-end|
-delimiter ;|
-
-select fn3();
-
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where db='mysqltest1';
-select * from t1;
-
-sync_slave_with_master;
-use mysqltest1;
-select * from t1;
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where db='mysqltest1';
-
-# Let's check a failing-in-the-middle function
-connection master;
-delete from t2;
-alter table t2 add unique (a);
-
-drop function fn1;
-
-delimiter |;
-create function fn1()
-returns int
-begin
-insert into t2 values(20),(20);
-return 10;
-end|
-
-delimiter ;|
-
---error 1062
-select fn1();
-
-select * from t2;
-sync_slave_with_master;
-
-# check that this failed-in-the-middle replicated right:
-select * from t2;
-
-# ********************** PART 3 : TRIGGERS ***************
-
-connection con1;
---error 1227
-create trigger trg before insert on t1 for each row set new.a= 10;
-
-connection master;
-delete from t1;
-# TODO: when triggers can contain an update, test that this update
-# does not go into binlog.
-# I'm not setting user vars in the trigger, because replication of user vars
-# would take care of propagating the user var's value to slave, so even if
-# the trigger was not executed on slave it would not be discovered.
-create trigger trg before insert on t1 for each row set new.a= 10;
-insert into t1 values (1);
-select * from t1;
-sync_slave_with_master;
-select * from t1;
-
-connection master;
-delete from t1;
-drop trigger trg;
-insert into t1 values (1);
-select * from t1;
---replace_column 2 # 5 #
-show binlog events in 'master-bin.000001' from 102;
-sync_slave_with_master;
-select * from t1;
-
-
-#
-# Test for bug #13969 "Routines which are replicated from master can't be
-# executed on slave".
-#
-connection master;
-create procedure foo()
-not deterministic
-reads sql data
-select * from t1;
-sync_slave_with_master;
-# This should not fail
-call foo();
-connection master;
-drop procedure foo;
-sync_slave_with_master;
-
-
-# Clean up
-connection master;
-drop function fn1;
-drop database mysqltest1;
-drop user "zedjzlcsjhd"@127.0.0.1;
-sync_slave_with_master;
@@ -1,5 +1,4 @@
 # row-based and statement have expected binlog difference in result files
--- source include/have_binlog_format_statement.inc

 # Test of replication of stored procedures (WL#2146 for MySQL 5.0)
 # Modified by WL#2971.
@@ -1,7 +1,21 @@
--- source include/have_binlog_format_statement.inc
+################################
+# Change Author: JBM
+# Change Date: 2006-01-12
+# Change: Added back have stm binlog
+# and added requirments comments
+################################
 # test case to make slave thread get ahead by 22 bytes
+################################
+#REQUIREMENT: If there is a faked slave duplicate key insert
+#error and the slave is restarted, the replication should
+#proceed in a correct way.
+################################
+#REQUIREMENT: If there is a faked slave non-existing record
+#delete error and the slave is restarted, then the replication
+#should proceed in a correct way.
+#################################
+
+-- source include/have_binlog_format_statement.inc
 -- source include/master-slave.inc

 # first, cause a duplicate key problem on the slave

@@ -1 +0,0 @@
---log_bin_trust_routine_creators=0
@@ -136,7 +136,7 @@ public:
 init_ends(THD *thd, Item *ends);

 void
-event_timed::init_body(THD *thd);
+init_body(THD *thd);

 void
 init_comment(THD *thd, LEX_STRING *set_comment);

@@ -184,6 +184,13 @@ event_executor_main(void *arg)
 // needs to call my_thread_init(), otherwise we get a coredump in DBUG_ stuff
 my_thread_init();

+if (sizeof(my_time_t) != sizeof(time_t))
+{
+sql_print_error("sizeof(my_time_t) != sizeof(time_t) ."
+"The scheduler will not work correctly. Stopping.");
+goto err_no_thd;
+}
+
 //TODO Andrey: Check for NULL
 if (!(thd = new THD)) // note that contructor of THD uses DBUG_ !
 {

@@ -275,7 +282,7 @@ event_executor_main(void *arg)
 }

 DBUG_PRINT("evex main thread",("computing time to sleep till next exec"));
-time(&now);
+time((time_t *)&now);
 my_tz_UTC->gmt_sec_to_TIME(&time_now, now);
 t2sleep= evex_time_diff(&et->execute_at, &time_now);
 VOID(pthread_mutex_unlock(&LOCK_event_arrays));

@@ -537,7 +537,7 @@ event_timed::compute_next_execution_time()
 }
 goto ret;
 }
-time(&now);
+time((time_t *)&now);
 my_tz_UTC->gmt_sec_to_TIME(&time_now, now);
 /*
 sql_print_information("[%s.%s]", dbname.str, name.str);

@@ -703,7 +703,7 @@ event_timed::mark_last_executed()
 TIME time_now;
 my_time_t now;

-time(&now);
+time((time_t *)&now);
 my_tz_UTC->gmt_sec_to_TIME(&time_now, now);

 last_executed= time_now; // was execute_at
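The time((time_t *)&now) casts above only work because my_time_t and time_t are layout-compatible, which is exactly what the new startup check guards against. A small stand-alone illustration follows; the my_time_t typedef here is a simplified stand-in, not the server's definition.

// Illustration: time() writes a time_t through the pointer it receives, so
// casting a my_time_t* to time_t* is only safe when the sizes match -- the
// condition the added sql_print_error() check enforces at scheduler startup.
#include <ctime>
#include <cstdio>

typedef long my_time_t;                      // simplified stand-in typedef

int main()
{
  if (sizeof(my_time_t) != sizeof(time_t))
  {
    std::fprintf(stderr, "my_time_t and time_t differ in size; refusing to run\n");
    return 1;
  }
  my_time_t now;
  time((time_t *) &now);                     // same pattern as the hunks above
  std::printf("now = %ld\n", (long) now);
  return 0;
}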
@@ -1851,7 +1851,12 @@ void handler::print_error(int error, myf errflag)
 str.length(max_length-4);
 str.append(STRING_WITH_LEN("..."));
 }
+#ifdef XXX_TO_BE_DONE_BY_A_FOLLOWUP_OF_WL1563
+my_printf_error(ER_DUP_ENTRY, "Duplicate entry '%s' for key '%s'",
+MYF(0), str.c_ptr(), table->key_info[key_nr].name);
+#else
 my_error(ER_DUP_ENTRY, MYF(0), str.c_ptr(), key_nr+1);
+#endif
 DBUG_VOID_RETURN;
 }
 textno=ER_DUP_KEY;

@@ -1936,6 +1941,15 @@ void handler::print_error(int error, myf errflag)
 case HA_ERR_RBR_LOGGING_FAILED:
 textno= ER_BINLOG_ROW_LOGGING_FAILED;
 break;
+case HA_ERR_DROP_INDEX_FK:
+{
+const char *ptr= "???";
+uint key_nr= get_dup_key(error);
+if ((int) key_nr >= 0)
+ptr= table->key_info[key_nr].name;
+my_error(ER_DROP_INDEX_FK, MYF(0), ptr);
+DBUG_VOID_RETURN;
+}
 default:
 {
 /* The error was "unknown" to this function.

@@ -1984,7 +1998,7 @@ uint handler::get_dup_key(int error)
 DBUG_ENTER("handler::get_dup_key");
 table->file->errkey = (uint) -1;
 if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE ||
-error == HA_ERR_NULL_IN_SPATIAL)
+error == HA_ERR_NULL_IN_SPATIAL || error == HA_ERR_DROP_INDEX_FK)
 info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
 DBUG_RETURN(table->file->errkey);
 }
@ -110,8 +110,29 @@
|
||||||
#define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */
|
#define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */
|
||||||
|
|
||||||
/* bits in alter_table_flags */
|
/* bits in alter_table_flags */
|
||||||
#define HA_ONLINE_ADD_EMPTY_PARTITION 1
|
#define HA_ONLINE_ADD_EMPTY_PARTITION 0x00000001
|
||||||
#define HA_ONLINE_DROP_PARTITION 2
|
#define HA_ONLINE_DROP_PARTITION 0x00000002
|
||||||
|
/*
|
||||||
|
These bits are set if different kinds of indexes can be created
|
||||||
|
off-line without re-create of the table (but with a table lock).
|
||||||
|
*/
|
||||||
|
#define HA_ONLINE_ADD_INDEX_NO_WRITES 0x00000004 /*add index w/lock*/
|
||||||
|
#define HA_ONLINE_DROP_INDEX_NO_WRITES 0x00000008 /*drop index w/lock*/
|
||||||
|
#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES 0x00000010 /*add unique w/lock*/
|
||||||
|
#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES 0x00000020 /*drop uniq. w/lock*/
|
||||||
|
#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES 0x00000040 /*add prim. w/lock*/
|
||||||
|
#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES 0x00000080 /*drop prim. w/lock*/
|
||||||
|
/*
|
||||||
|
These are set if different kinds of indexes can be created on-line
|
||||||
|
(without a table lock). If a handler is capable of one or more of
|
||||||
|
these, it should also set the corresponding *_NO_WRITES bit(s).
|
||||||
|
*/
|
||||||
|
#define HA_ONLINE_ADD_INDEX 0x00000100 /*add index online*/
|
||||||
|
#define HA_ONLINE_DROP_INDEX 0x00000200 /*drop index online*/
|
||||||
|
#define HA_ONLINE_ADD_UNIQUE_INDEX 0x00000400 /*add unique online*/
|
||||||
|
#define HA_ONLINE_DROP_UNIQUE_INDEX 0x00000800 /*drop uniq. online*/
|
||||||
|
#define HA_ONLINE_ADD_PK_INDEX 0x00001000 /*add prim. online*/
|
||||||
|
#define HA_ONLINE_DROP_PK_INDEX 0x00002000 /*drop prim. online*/
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Index scan will not return records in rowid order. Not guaranteed to be
|
Index scan will not return records in rowid order. Not guaranteed to be
|
||||||
|
@ -134,16 +155,6 @@
|
||||||
*/
|
*/
|
||||||
#define MAX_HA 15
|
#define MAX_HA 15
|
||||||
|
|
||||||
/*
|
|
||||||
Bits in index_ddl_flags(KEY *wanted_index)
|
|
||||||
for what ddl you can do with index
|
|
||||||
If none is set, the wanted type of index is not supported
|
|
||||||
by the handler at all. See WorkLog 1563.
|
|
||||||
*/
|
|
||||||
#define HA_DDL_SUPPORT 1 /* Supported by handler */
|
|
||||||
#define HA_DDL_WITH_LOCK 2 /* Can create/drop with locked table */
|
|
||||||
#define HA_DDL_ONLINE 4 /* Can create/drop without lock */
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Parameters for open() (in register form->filestat)
|
Parameters for open() (in register form->filestat)
|
||||||
HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
|
HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
|
||||||
|
@@ -1442,11 +1453,13 @@ public:
   virtual void set_part_info(partition_info *part_info) { return; }
 #endif
   virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
-  virtual ulong index_ddl_flags(KEY *wanted_index) const
-  { return (HA_DDL_SUPPORT); }
   virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
   { return (HA_ERR_WRONG_COMMAND); }
-  virtual int drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys)
+  virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
+                                 uint num_of_keys)
+  { return (HA_ERR_WRONG_COMMAND); }
+  virtual int final_drop_index(TABLE *table_arg)
   { return (HA_ERR_WRONG_COMMAND); }

   uint max_record_length() const
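The hunk above replaces the single drop_index() entry point with a two-step protocol: prepare_drop_index() tells the handler which keys are going away, and final_drop_index() completes the removal. A standalone sketch of that call order follows; only the method names and the two-phase idea come from the hunk, the StubHandler class and everything else is a hypothetical illustration.

// Hypothetical stub modelling the two-phase index drop introduced above.
#include <cstdio>
#include <vector>

struct StubHandler {
  std::vector<unsigned> pending;   // keys scheduled for removal
  int prepare_drop_index(const unsigned* key_num, unsigned num_of_keys) {
    for (unsigned i = 0; i < num_of_keys; i++)
      pending.push_back(key_num[i]);          // phase 1: record what to drop
    return 0;
  }
  int final_drop_index() {
    for (unsigned k : pending)
      std::printf("dropping index %u\n", k);  // phase 2: actually drop
    pending.clear();
    return 0;
  }
};

int main() {
  StubHandler h;
  unsigned keys[] = {1, 3};
  h.prepare_drop_index(keys, 2);  // the server announces the keys first
  h.final_drop_index();           // then asks for the physical drop
  return 0;
}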
@@ -5779,3 +5779,6 @@ ER_EVENT_SAME_NAME
   eng "Same old and new event name"
 ER_EVENT_DATA_TOO_LONG
   eng "Data for column '%s' too long"
+ER_DROP_INDEX_FK
+  eng "Cannot drop index '%-.64s': needed in a foreign key constraint"
+  ger "Kann Index '%-.64s' nicht löschen: wird für einen einen Fremdschlüssel benötigt"

sql/sql_table.cc (805 changed lines): file diff suppressed because it is too large.
@@ -1051,7 +1051,8 @@ public:
     _TE_CREATE=6,
     _TE_GCP_COMPLETE=7,
     _TE_CLUSTER_FAILURE=8,
-    _TE_STOP=9
+    _TE_STOP=9,
+    _TE_NUL=10 // internal (INS o DEL within same GCI)
   };
 #endif
   /**
@@ -93,6 +93,12 @@ public:
    * Retrieve current state of the NdbEventOperation object
    */
   State getState();
+
+  /**
+   * By default events on same NdbEventOperation within same GCI
+   * are merged into a single event. This can be changed with
+   * separateEvents(true).
+   */
+  void separateEvents(bool flag);

   /**
    * Activates the NdbEventOperation to start receiving events. The
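The new separateEvents() switch controls whether several changes to the same row within one GCI are reported as one merged event or as individual events. A standalone toy model of that difference follows; the Change struct and the deliver() helper are illustrative, and only the merged-versus-separate behaviour reflects the API comment above (the real merge keeps the combined operation type, not simply the last change).

// Toy model: merged delivery keeps one entry per primary key per GCI,
// separate delivery keeps every change. Purely illustrative.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Change { int pk; std::string op; };

static std::vector<Change> deliver(const std::vector<Change>& gci, bool separate) {
  if (separate)
    return gci;                       // separateEvents(true): report each change
  std::map<int, Change> merged;       // default: fold changes per PK
  for (const Change& c : gci)
    merged[c.pk] = c;
  std::vector<Change> out;
  for (const auto& kv : merged)
    out.push_back(kv.second);
  return out;
}

int main() {
  std::vector<Change> gci = {{1, "INSERT"}, {1, "UPDATE"}, {2, "INSERT"}};
  std::printf("merged:   %zu events\n", deliver(gci, false).size()); // 2
  std::printf("separate: %zu events\n", deliver(gci, true).size());  // 3
  return 0;
}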
@@ -1,7 +1,7 @@
 TARGET = ndbapi_event
 SRCS = ndbapi_event.cpp
 OBJS = ndbapi_event.o
-CXX = g++
+CXX = g++ -g
 CFLAGS = -c -Wall -fno-rtti -fno-exceptions
 CXXFLAGS =
 DEBUG =
@@ -17,7 +17,7 @@ $(TARGET): $(OBJS)
 	$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)

 $(TARGET).o: $(SRCS)
-	$(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+	$(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi -I$(TOP_SRCDIR)/include $(SRCS)

 clean:
 	rm -f *.o $(TARGET)
@@ -58,24 +58,29 @@
 /**
  *
- * Assume that there is a table TAB0 which is being updated by
+ * Assume that there is a table t0 which is being updated by
  * another process (e.g. flexBench -l 0 -stdtables).
- * We want to monitor what happens with columns COL0, COL2, COL11
+ * We want to monitor what happens with columns c0,c1,c2,c3.
  *
  * or together with the mysql client;
  *
  * shell> mysql -u root
  * mysql> create database TEST_DB;
  * mysql> use TEST_DB;
- * mysql> create table TAB0 (COL0 int primary key, COL1 int, COL11 int) engine=ndb;
+ * mysql> create table t0 (c0 int, c1 int, c2 char(4), c3 char(4),
+ *        primary key(c0, c2)) engine ndb charset latin1;
  *
  * In another window start ndbapi_event, wait until properly started
  *
-   insert into TAB0 values (1,2,3);
-   insert into TAB0 values (2,2,3);
-   insert into TAB0 values (3,2,9);
-   update TAB0 set COL1=10 where COL0=1;
-   delete from TAB0 where COL0=1;
+   insert into t0 values (1, 2, 'a', 'b');
+   insert into t0 values (3, 4, 'c', 'd');
+   update t0 set c3 = 'e' where c0 = 1 and c2 = 'a'; -- use pk
+   update t0 set c3 = 'f'; -- use scan
+   update t0 set c3 = 'F'; -- use scan update to 'same'
+   update t0 set c2 = 'g' where c0 = 1; -- update pk part
+   update t0 set c2 = 'G' where c0 = 1; -- update pk part to 'same'
+   update t0 set c0 = 5, c2 = 'H' where c0 = 3; -- update full PK
+   delete from t0;
  *
  * you should see the data popping up in the example window
  *
@@ -92,9 +97,10 @@ int myCreateEvent(Ndb* myNdb,
                   const char **eventColumnName,
                   const int noEventColumnName);

-int main()
+int main(int argc, char** argv)
 {
   ndb_init();
+  bool sep = argc > 1 && strcmp(argv[1], "-s") == 0;

   Ndb_cluster_connection *cluster_connection=
       new Ndb_cluster_connection(); // Object representing the cluster
@@ -126,13 +132,15 @@ int main()

   if (myNdb->init() == -1) APIERROR(myNdb->getNdbError());

-  const char *eventName= "CHNG_IN_TAB0";
-  const char *eventTableName= "TAB0";
-  const int noEventColumnName= 3;
+  const char *eventName= "CHNG_IN_t0";
+  const char *eventTableName= "t0";
+  const int noEventColumnName= 4;
   const char *eventColumnName[noEventColumnName]=
-    {"COL0",
-     "COL1",
-     "COL11"};
+    {"c0",
+     "c1",
+     "c2",
+     "c3"
+    };

   // Create events
   myCreateEvent(myNdb,
@@ -142,13 +150,14 @@ int main()
                 noEventColumnName);

   int j= 0;
-  while (j < 5) {
+  while (j < 99) {

     // Start "transaction" for handling events
     NdbEventOperation* op;
     printf("create EventOperation\n");
     if ((op = myNdb->createEventOperation(eventName)) == NULL)
       APIERROR(myNdb->getNdbError());
+    op->separateEvents(sep);

     printf("get values\n");
     NdbRecAttr* recAttr[noEventColumnName];
@@ -175,32 +184,43 @@ int main()
         i++;
         switch (op->getEventType()) {
         case NdbDictionary::Event::TE_INSERT:
-          printf("%u INSERT: ", i);
+          printf("%u INSERT", i);
           break;
         case NdbDictionary::Event::TE_DELETE:
-          printf("%u DELETE: ", i);
+          printf("%u DELETE", i);
           break;
         case NdbDictionary::Event::TE_UPDATE:
-          printf("%u UPDATE: ", i);
+          printf("%u UPDATE", i);
           break;
         default:
           abort(); // should not happen
         }
-        for (int i = 1; i < noEventColumnName; i++) {
-          if (recAttr[i]->isNULL() >= 0) { // we have a value
-            printf(" post[%u]=", i);
-            if (recAttr[i]->isNULL() == 0) // we have a non-null value
-              printf("%u", recAttr[i]->u_32_value());
-            else // we have a null value
-              printf("NULL");
-          }
-          if (recAttrPre[i]->isNULL() >= 0) { // we have a value
-            printf(" pre[%u]=", i);
-            if (recAttrPre[i]->isNULL() == 0) // we have a non-null value
-              printf("%u", recAttrPre[i]->u_32_value());
-            else // we have a null value
-              printf("NULL");
-          }
+        printf(" gci=%d\n", op->getGCI());
+        printf("post: ");
+        for (int i = 0; i < noEventColumnName; i++) {
+          if (recAttr[i]->isNULL() >= 0) { // we have a value
+            if (recAttr[i]->isNULL() == 0) { // we have a non-null value
+              if (i < 2)
+                printf("%-5u", recAttr[i]->u_32_value());
+              else
+                printf("%-5.4s", recAttr[i]->aRef());
+            } else // we have a null value
+              printf("%-5s", "NULL");
+          } else
+            printf("%-5s", "-");
+        }
+        printf("\npre : ");
+        for (int i = 0; i < noEventColumnName; i++) {
+          if (recAttrPre[i]->isNULL() >= 0) { // we have a value
+            if (recAttrPre[i]->isNULL() == 0) { // we have a non-null value
+              if (i < 2)
+                printf("%-5u", recAttrPre[i]->u_32_value());
+              else
+                printf("%-5.4s", recAttrPre[i]->aRef());
+            } else // we have a null value
+              printf("%-5s", "NULL");
+          } else
+            printf("%-5s", "-");
         }
         printf("\n");
       }
@@ -132,7 +132,7 @@ void Dbtup::updatePackedList(Signal* signal, Uint16 hostId)
 void Dbtup::sendReadAttrinfo(Signal* signal,
                              KeyReqStruct *req_struct,
                              Uint32 ToutBufIndex,
-                             const Operationrec * const regOperPtr)
+                             const Operationrec *regOperPtr)
 {
   if(ToutBufIndex == 0)
     return;
@@ -242,8 +242,8 @@ Dbtup::alloc_page(Tablerec* tabPtrP, Fragrecord* fragPtrP,
 }

 Uint32*
-Dbtup::alloc_fix_rowid(Fragrecord* const regFragPtr,
-                       Tablerec* const regTabPtr,
+Dbtup::alloc_fix_rowid(Fragrecord* regFragPtr,
+                       Tablerec* regTabPtr,
                        Local_key* key,
                        Uint32 * out_frag_page_id)
 {
@@ -89,7 +89,7 @@
 //
 // The full page range struct

-Uint32 Dbtup::getEmptyPage(Fragrecord* const regFragPtr)
+Uint32 Dbtup::getEmptyPage(Fragrecord* regFragPtr)
 {
   Uint32 pageId = regFragPtr->emptyPrimPage.firstItem;
   if (pageId == RNIL) {
@@ -108,7 +108,7 @@ Uint32 Dbtup::getEmptyPage(Fragrecord* const regFragPtr)
   return pageId;
 }//Dbtup::getEmptyPage()

-Uint32 Dbtup::getRealpid(Fragrecord* const regFragPtr, Uint32 logicalPageId)
+Uint32 Dbtup::getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId)
 {
   PageRangePtr grpPageRangePtr;
   Uint32 loopLimit;
@@ -241,7 +241,7 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
 }//Dbtup::insertPageRangeTab()


-void Dbtup::releaseFragPages(Fragrecord* const regFragPtr)
+void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
 {
   if (regFragPtr->rootPageRange == RNIL) {
     ljam();
@@ -349,7 +349,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr)
   regFragPtr->nextStartRange = 0;
 }//initFragRange()

-Uint32 Dbtup::allocFragPages(Fragrecord* const regFragPtr, Uint32 tafpNoAllocRequested)
+Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested)
 {
   Uint32 tafpPagesAllocated = 0;
   while (true) {
@@ -28,7 +28,7 @@
 #define ljamEntry() { jamEntryLine(3000 + __LINE__); }

 void
-Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
+Dbtup::setUpQueryRoutines(Tablerec *regTabPtr)
 {
   Uint32 startDescriptor= regTabPtr->tabDescriptor;
   ndbrequire((startDescriptor + (regTabPtr->m_no_of_attributes << ZAD_LOG_SIZE))
@@ -356,8 +356,8 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
 /* ---------------------------------------------------------------- */
 void
 Dbtup::checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct,
-                                         Operationrec* const regOperPtr,
-                                         Tablerec* const regTablePtr)
+                                         Operationrec *regOperPtr,
+                                         Tablerec *regTablePtr)
 {
   if(refToBlock(req_struct->TC_ref) != DBTC) {
     return;
@@ -374,8 +374,8 @@ Dbtup::checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct,

 void
 Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct,
-                                         Operationrec* const regOperPtr,
-                                         Tablerec* const regTablePtr)
+                                         Operationrec* regOperPtr,
+                                         Tablerec* regTablePtr)
 {
   if(refToBlock(req_struct->TC_ref) != DBTC) {
     return;
@@ -399,8 +399,8 @@ Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct,

 void
 Dbtup::checkImmediateTriggersAfterDelete(KeyReqStruct *req_struct,
-                                         Operationrec* const regOperPtr,
-                                         Tablerec* const regTablePtr)
+                                         Operationrec* regOperPtr,
+                                         Tablerec* regTablePtr)
 {
   if(refToBlock(req_struct->TC_ref) != DBTC) {
     return;
@@ -444,8 +444,8 @@ void Dbtup::checkDeferredTriggers(Signal* signal,
 /*                                                                  */
 /* ---------------------------------------------------------------- */
 void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct,
-                                  Operationrec* const regOperPtr,
-                                  Tablerec* const regTablePtr)
+                                  Operationrec* regOperPtr,
+                                  Tablerec* regTablePtr)
 {
   Uint32 save_type = regOperPtr->op_struct.op_type;
   Tuple_header *save_ptr = req_struct->m_tuple_ptr;
@@ -1049,9 +1049,9 @@ void Dbtup::sendFireTrigOrd(Signal* signal,

 int
 Dbtup::executeTuxInsertTriggers(Signal* signal,
-                                Operationrec* const regOperPtr,
-                                Fragrecord* const regFragPtr,
-                                Tablerec* const regTabPtr)
+                                Operationrec* regOperPtr,
+                                Fragrecord* regFragPtr,
+                                Tablerec* regTabPtr)
 {
   TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   // fill in constant part
@@ -1066,9 +1066,9 @@ Dbtup::executeTuxInsertTriggers(Signal* signal,

 int
 Dbtup::executeTuxUpdateTriggers(Signal* signal,
-                                Operationrec* const regOperPtr,
-                                Fragrecord* const regFragPtr,
-                                Tablerec* const regTabPtr)
+                                Operationrec* regOperPtr,
+                                Fragrecord* regFragPtr,
+                                Tablerec* regTabPtr)
 {
   TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   // fill in constant part
@@ -1139,8 +1139,8 @@ Dbtup::executeTuxDeleteTriggers(Signal* signal,
 void
 Dbtup::executeTuxCommitTriggers(Signal* signal,
                                 Operationrec* regOperPtr,
-                                Fragrecord* const regFragPtr,
-                                Tablerec* const regTabPtr)
+                                Fragrecord* regFragPtr,
+                                Tablerec* regTabPtr)
 {
   TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   Uint32 tupVersion;
@@ -1174,8 +1174,8 @@ Dbtup::executeTuxCommitTriggers(Signal* signal,
 void
 Dbtup::executeTuxAbortTriggers(Signal* signal,
                                Operationrec* regOperPtr,
-                               Fragrecord* const regFragPtr,
-                               Tablerec* const regTabPtr)
+                               Fragrecord* regFragPtr,
+                               Tablerec* regTabPtr)
 {
   TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   // get version
@@ -83,6 +83,8 @@ Ndbd_mem_manager::init(Uint32 pages)

     release(start+1, end - 1 - start);
   }
+
+  return 0;
 }

 void
@@ -38,6 +38,11 @@ NdbEventOperation::State NdbEventOperation::getState()
   return m_impl.getState();
 }

+void NdbEventOperation::separateEvents(bool flag)
+{
+  m_impl.m_separateEvents = flag;
+}
+
 NdbRecAttr *
 NdbEventOperation::getValue(const char *colName, char *aValue)
 {
@@ -104,6 +104,8 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,

   m_state= EO_CREATED;

+  m_separateEvents = false;
+
   m_has_error= 0;

   DBUG_PRINT("exit",("this: 0x%x oid: %u", this, m_oid));
@@ -693,6 +695,21 @@ NdbEventBuffer::pollEvents(int aMillisecondNumber, Uint64 *latestGCI)
   return ret;
 }

+#ifdef VM_TRACE
+static void
+print_std(const char* tag, const SubTableData * sdata, LinearSectionPtr ptr[3])
+{
+  printf("%s\n", tag);
+  printf("addr=%p gci=%d op=%d\n", (void*)sdata, sdata->gci, sdata->operation);
+  for (int i = 0; i <= 2; i++) {
+    printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz);
+    for (int j = 0; j < ptr[i].sz; j++)
+      printf("%08x ", ptr[i].p[j]);
+    printf("\n");
+  }
+}
+#endif
+
 NdbEventOperation *
 NdbEventBuffer::nextEvent()
 {
@@ -734,6 +751,10 @@ NdbEventBuffer::nextEvent()
     op->m_data_done_count++;
 #endif

+    // NUL event is not returned
+    if (data->sdata->operation == NdbDictionary::Event::_TE_NUL)
+      continue;
+
     int r= op->receive_event();
     if (r > 0)
     {
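With merging enabled, an insert followed by a delete of the same row inside one GCI collapses into the internal _TE_NUL type, and nextEvent() above now skips such entries instead of handing them to the application. A small standalone sketch of that filtering loop follows; the Ev struct and the queue are illustrative, only the skip-NUL rule comes from the hunk.

// Illustrative consumer loop: NUL entries stay in the buffer's
// bookkeeping but are never returned to the application.
#include <cstdio>
#include <queue>

enum OpType { TE_INSERT, TE_DELETE, TE_UPDATE, TE_NUL };
struct Ev { OpType op; int pk; };

static bool next_event(std::queue<Ev>& q, Ev& out) {
  while (!q.empty()) {
    Ev e = q.front();
    q.pop();
    if (e.op == TE_NUL)        // NUL event is not returned
      continue;
    out = e;
    return true;
  }
  return false;                // buffer drained
}

int main() {
  std::queue<Ev> q;
  q.push({TE_INSERT, 1});
  q.push({TE_NUL, 2});         // e.g. an insert+delete merged away
  q.push({TE_UPDATE, 3});
  Ev e;
  while (next_event(q, e))
    std::printf("pk=%d op=%d\n", e.pk, e.op);
  return 0;
}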
@@ -1099,13 +1120,15 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
   DBUG_ENTER("NdbEventBuffer::insertDataL");

   Uint64 gci= sdata->gci;
-  EventBufData *data= m_free_data;

   if ( likely((Uint32)op->mi_type & 1 << (Uint32)sdata->operation) )
   {
     Gci_container* bucket= find_bucket(&m_active_gci, gci);

-    DBUG_PRINT("info", ("data insertion in eventId %d", op->m_eventId));
+    DBUG_PRINT("info", ("gci=%d tab=%d op=%d node=%d",
+                        sdata->gci, sdata->tableId, sdata->operation,
+                        sdata->req_nodeid));

     if (unlikely(bucket == 0))
     {
@@ -1116,6 +1139,84 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
       DBUG_RETURN(0);
     }

+    bool use_hash =
+      ! op->m_separateEvents &&
+      sdata->operation < NdbDictionary::Event::_TE_FIRST_NON_DATA_EVENT;
+
+    // find position in bucket hash table
+    EventBufData* data = 0;
+    EventBufData_hash::Pos hpos;
+    if (use_hash)
+    {
+      bucket->m_data_hash.search(hpos, op, ptr);
+      data = hpos.data;
+    }
+
+    if (data == 0)
+    {
+      // allocate new result buffer
+      data = alloc_data();
+      if (unlikely(data == 0))
+      {
+        op->m_has_error = 2;
+        DBUG_RETURN(-1);
+      }
+
+      if (unlikely(copy_data(sdata, ptr, data)))
+      {
+        op->m_has_error = 3;
+        DBUG_RETURN(-1);
+      }
+      // add it to list and hash table
+      bucket->m_data.append(data);
+      if (use_hash)
+      {
+        bucket->m_data_hash.append(hpos, data);
+      }
+#ifdef VM_TRACE
+      op->m_data_count++;
+#endif
+    }
+    else
+    {
+      // event with same op, PK found, merge into old buffer
+      if (unlikely(merge_data(sdata, ptr, data)))
+      {
+        op->m_has_error = 3;
+        DBUG_RETURN(-1);
+      }
+    }
+    data->m_event_op = op;
+    if (use_hash)
+    {
+      data->m_pkhash = hpos.pkhash;
+    }
+    DBUG_RETURN(0);
+  }
+
+#ifdef VM_TRACE
+  if ((Uint32)op->m_eventImpl->mi_type & 1 << (Uint32)sdata->operation)
+  {
+    // XXX never reached
+    DBUG_PRINT("info",("Data arrived before ready eventId", op->m_eventId));
+    DBUG_RETURN(0);
+  }
+  else {
+    DBUG_PRINT("info",("skipped"));
+    DBUG_RETURN(0);
+  }
+#else
+  return 0;
+#endif
+}
+
+// allocate EventBufData
+EventBufData*
+NdbEventBuffer::alloc_data()
+{
+  DBUG_ENTER("alloc_data");
+  EventBufData* data = m_free_data;
+
   if (unlikely(data == 0))
   {
 #ifdef VM_TRACE
@@ -1125,7 +1226,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
     expand(4000);
     reportStatus();

-    data= m_free_data;
+    data = m_free_data;
     if (unlikely(data == 0))
     {
 #ifdef VM_TRACE
|
||||||
m_available_data.m_tail ? m_available_data.m_tail->sdata->gci : 0);
|
m_available_data.m_tail ? m_available_data.m_tail->sdata->gci : 0);
|
||||||
printf("m_used_data_count %d\n", m_used_data.m_count);
|
printf("m_used_data_count %d\n", m_used_data.m_count);
|
||||||
#endif
|
#endif
|
||||||
op->m_has_error= 2;
|
DBUG_RETURN(0); // TODO handle this, overrun, or, skip?
|
||||||
DBUG_RETURN(-1); // TODO handle this, overrun, or, skip?
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// remove data from free list
|
// remove data from free list
|
||||||
m_free_data= data->m_next;
|
m_free_data = data->m_next;
|
||||||
|
data->m_next = 0;
|
||||||
#ifdef VM_TRACE
|
#ifdef VM_TRACE
|
||||||
m_free_data_count--;
|
m_free_data_count--;
|
||||||
assert(m_free_data_sz >= data->sz);
|
assert(m_free_data_sz >= data->sz);
|
||||||
#endif
|
#endif
|
||||||
m_free_data_sz-= data->sz;
|
m_free_data_sz -= data->sz;
|
||||||
|
DBUG_RETURN(data);
|
||||||
|
}
|
||||||
|
|
||||||
if (unlikely(copy_data_alloc(sdata, ptr, data)))
|
// allocate initial or bigger memory area in EventBufData
|
||||||
|
// takes sizes from given ptr and sets up data->ptr
|
||||||
|
int
|
||||||
|
NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3])
|
||||||
|
{
|
||||||
|
const Uint32 min_alloc_size = 128;
|
||||||
|
|
||||||
|
Uint32 sz4 = (sizeof(SubTableData) + 3) >> 2;
|
||||||
|
Uint32 alloc_size = (sz4 + ptr[0].sz + ptr[1].sz + ptr[2].sz) << 2;
|
||||||
|
if (alloc_size < min_alloc_size)
|
||||||
|
alloc_size = min_alloc_size;
|
||||||
|
|
||||||
|
if (data->sz < alloc_size)
|
||||||
{
|
{
|
||||||
op->m_has_error= 3;
|
NdbMem_Free((char*)data->memory);
|
||||||
DBUG_RETURN(-1);
|
assert(m_total_alloc >= data->sz);
|
||||||
|
m_total_alloc -= data->sz;
|
||||||
|
data->memory = 0;
|
||||||
|
data->sz = 0;
|
||||||
|
|
||||||
|
data->memory = (Uint32*)NdbMem_Allocate(alloc_size);
|
||||||
|
if (data->memory == 0)
|
||||||
|
return -1;
|
||||||
|
data->sz = alloc_size;
|
||||||
|
m_total_alloc += data->sz;
|
||||||
}
|
}
|
||||||
|
|
||||||
// add it to received data
|
Uint32* memptr = data->memory;
|
||||||
bucket->m_data.append(data);
|
memptr += sz4;
|
||||||
|
int i;
|
||||||
data->m_event_op= op;
|
for (i = 0; i <= 2; i++)
|
||||||
#ifdef VM_TRACE
|
|
||||||
op->m_data_count++;
|
|
||||||
#endif
|
|
||||||
DBUG_RETURN(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef VM_TRACE
|
|
||||||
if ((Uint32)op->m_eventImpl->mi_type & 1 << (Uint32)sdata->operation)
|
|
||||||
{
|
{
|
||||||
DBUG_PRINT("info",("Data arrived before ready eventId", op->m_eventId));
|
data->ptr[i].p = memptr;
|
||||||
DBUG_RETURN(0);
|
data->ptr[i].sz = ptr[i].sz;
|
||||||
|
memptr += ptr[i].sz;
|
||||||
}
|
}
|
||||||
else {
|
|
||||||
DBUG_PRINT("info",("skipped"));
|
|
||||||
DBUG_RETURN(0);
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
return 0;
|
return 0;
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
NdbEventBuffer::copy_data_alloc(const SubTableData * const f_sdata,
|
NdbEventBuffer::copy_data(const SubTableData * const sdata,
|
||||||
LinearSectionPtr f_ptr[3],
|
LinearSectionPtr ptr[3],
|
||||||
EventBufData *ev_buf)
|
EventBufData* data)
|
||||||
{
|
{
|
||||||
DBUG_ENTER("NdbEventBuffer::copy_data_alloc");
|
DBUG_ENTER("NdbEventBuffer::copy_data");
|
||||||
const unsigned min_alloc_size= 128;
|
|
||||||
const unsigned sz4= (sizeof(SubTableData)+3)>>2;
|
if (alloc_mem(data, ptr) != 0)
|
||||||
Uint32 f_ptr_sz_0= f_ptr[0].sz;
|
DBUG_RETURN(-1);
|
||||||
Uint32 f_ptr_sz_1= f_ptr[1].sz;
|
memcpy(data->sdata, sdata, sizeof(SubTableData));
|
||||||
Uint32 f_ptr_sz_2= f_ptr[2].sz;
|
int i;
|
||||||
LinearSectionPtr *t_ptr= ev_buf->ptr;
|
for (i = 0; i <= 2; i++)
|
||||||
SubTableData *sdata= ev_buf->sdata;
|
memcpy(data->ptr[i].p, ptr[i].p, ptr[i].sz << 2);
|
||||||
const unsigned alloc_size= (sz4 +
|
DBUG_RETURN(0);
|
||||||
f_ptr_sz_0 +
|
}
|
||||||
f_ptr_sz_1 +
|
|
||||||
f_ptr_sz_2) * sizeof(Uint32);
|
static struct Ev_t {
|
||||||
Uint32 *ptr;
|
enum {
|
||||||
if (alloc_size > min_alloc_size)
|
INS = NdbDictionary::Event::_TE_INSERT,
|
||||||
|
DEL = NdbDictionary::Event::_TE_DELETE,
|
||||||
|
UPD = NdbDictionary::Event::_TE_UPDATE,
|
||||||
|
NUL = NdbDictionary::Event::_TE_NUL,
|
||||||
|
ERR = 255
|
||||||
|
};
|
||||||
|
int t1, t2, t3;
|
||||||
|
} ev_t[] = {
|
||||||
|
{ Ev_t::INS, Ev_t::INS, Ev_t::ERR },
|
||||||
|
{ Ev_t::INS, Ev_t::DEL, Ev_t::NUL }, //ok
|
||||||
|
{ Ev_t::INS, Ev_t::UPD, Ev_t::INS }, //ok
|
||||||
|
{ Ev_t::DEL, Ev_t::INS, Ev_t::UPD }, //ok
|
||||||
|
{ Ev_t::DEL, Ev_t::DEL, Ev_t::ERR },
|
||||||
|
{ Ev_t::DEL, Ev_t::UPD, Ev_t::ERR },
|
||||||
|
{ Ev_t::UPD, Ev_t::INS, Ev_t::ERR },
|
||||||
|
{ Ev_t::UPD, Ev_t::DEL, Ev_t::DEL }, //ok
|
||||||
|
{ Ev_t::UPD, Ev_t::UPD, Ev_t::UPD } //ok
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* | INS | DEL | UPD
|
||||||
|
* 0 | pk ah + all ah | pk ah | pk ah + new ah
|
||||||
|
* 1 | pk ad + all ad | old pk ad | new pk ad + new ad
|
||||||
|
* 2 | empty | old non-pk ah+ad | old ah+ad
|
||||||
|
*/
|
||||||
|
|
||||||
|
static AttributeHeader
|
||||||
|
copy_head(Uint32& i1, Uint32* p1, Uint32& i2, const Uint32* p2,
|
||||||
|
Uint32 flags)
|
||||||
|
{
|
||||||
|
AttributeHeader ah(p2[i2]);
|
||||||
|
bool do_copy = (flags & 1);
|
||||||
|
if (do_copy)
|
||||||
|
p1[i1] = p2[i2];
|
||||||
|
i1++;
|
||||||
|
i2++;
|
||||||
|
return ah;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
copy_attr(AttributeHeader ah,
|
||||||
|
Uint32& j1, Uint32* p1, Uint32& j2, const Uint32* p2,
|
||||||
|
Uint32 flags)
|
||||||
|
{
|
||||||
|
bool do_copy = (flags & 1);
|
||||||
|
bool with_head = (flags & 2);
|
||||||
|
Uint32 n = with_head + ah.getDataSize();
|
||||||
|
if (do_copy)
|
||||||
{
|
{
|
||||||
if (sdata)
|
Uint32 k;
|
||||||
{
|
for (k = 0; k < n; k++)
|
||||||
NdbMem_Free((char*)sdata);
|
p1[j1++] = p2[j2++];
|
||||||
#ifdef VM_TRACE
|
|
||||||
assert(m_total_alloc >= ev_buf->sz);
|
|
||||||
#endif
|
|
||||||
m_total_alloc-= ev_buf->sz;
|
|
||||||
}
|
|
||||||
ptr= (Uint32*)NdbMem_Allocate(alloc_size);
|
|
||||||
ev_buf->sdata= (SubTableData *)ptr;
|
|
||||||
ev_buf->sz= alloc_size;
|
|
||||||
m_total_alloc+= alloc_size;
|
|
||||||
}
|
|
||||||
else /* alloc_size <= min_alloc_size */
|
|
||||||
{
|
|
||||||
if (sdata)
|
|
||||||
ptr= (Uint32*)sdata;
|
|
||||||
else
|
|
||||||
{
|
|
||||||
ptr= (Uint32*)NdbMem_Allocate(min_alloc_size);
|
|
||||||
ev_buf->sdata= (SubTableData *)ptr;
|
|
||||||
ev_buf->sz= min_alloc_size;
|
|
||||||
m_total_alloc+= min_alloc_size;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
memcpy(ptr,f_sdata,sizeof(SubTableData));
|
|
||||||
ptr+= sz4;
|
|
||||||
|
|
||||||
t_ptr->p= ptr;
|
|
||||||
t_ptr->sz= f_ptr_sz_0;
|
|
||||||
|
|
||||||
memcpy(ptr, f_ptr[0].p, sizeof(Uint32)*f_ptr_sz_0);
|
|
||||||
ptr+= f_ptr_sz_0;
|
|
||||||
t_ptr++;
|
|
||||||
|
|
||||||
t_ptr->p= ptr;
|
|
||||||
t_ptr->sz= f_ptr_sz_1;
|
|
||||||
|
|
||||||
memcpy(ptr, f_ptr[1].p, sizeof(Uint32)*f_ptr_sz_1);
|
|
||||||
ptr+= f_ptr_sz_1;
|
|
||||||
t_ptr++;
|
|
||||||
|
|
||||||
if (f_ptr_sz_2)
|
|
||||||
{
|
|
||||||
t_ptr->p= ptr;
|
|
||||||
t_ptr->sz= f_ptr_sz_2;
|
|
||||||
memcpy(ptr, f_ptr[2].p, sizeof(Uint32)*f_ptr_sz_2);
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
t_ptr->p= 0;
|
j1 += n;
|
||||||
t_ptr->sz= 0;
|
j2 += n;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int
|
||||||
|
NdbEventBuffer::merge_data(const SubTableData * const sdata,
|
||||||
|
LinearSectionPtr ptr2[3],
|
||||||
|
EventBufData* data)
|
||||||
|
{
|
||||||
|
DBUG_ENTER("NdbEventBuffer::merge_data");
|
||||||
|
|
||||||
|
Uint32 nkey = data->m_event_op->m_eventImpl->m_tableImpl->m_noOfKeys;
|
||||||
|
|
||||||
|
int t1 = data->sdata->operation;
|
||||||
|
int t2 = sdata->operation;
|
||||||
|
if (t1 == Ev_t::NUL)
|
||||||
|
DBUG_RETURN(copy_data(sdata, ptr2, data));
|
||||||
|
|
||||||
|
Ev_t* tp = 0;
|
||||||
|
int i;
|
||||||
|
for (i = 0; i < sizeof(ev_t)/sizeof(ev_t[0]); i++) {
|
||||||
|
if (ev_t[i].t1 == t1 && ev_t[i].t2 == t2) {
|
||||||
|
tp = &ev_t[i];
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert(tp != 0 && tp->t3 != Ev_t::ERR);
|
||||||
|
|
||||||
|
// save old data
|
||||||
|
EventBufData olddata = *data;
|
||||||
|
data->memory = 0;
|
||||||
|
data->sz = 0;
|
||||||
|
|
||||||
|
// compose ptr1 o ptr2 = ptr
|
||||||
|
LinearSectionPtr (&ptr1) [3] = olddata.ptr;
|
||||||
|
LinearSectionPtr (&ptr) [3] = data->ptr;
|
||||||
|
|
||||||
|
// loop twice where first loop only sets sizes
|
||||||
|
int loop;
|
||||||
|
for (loop = 0; loop <= 1; loop++)
|
||||||
|
{
|
||||||
|
if (loop == 1)
|
||||||
|
{
|
||||||
|
if (alloc_mem(data, ptr) != 0)
|
||||||
|
DBUG_RETURN(-1);
|
||||||
|
*data->sdata = *sdata;
|
||||||
|
data->sdata->operation = tp->t3;
|
||||||
|
}
|
||||||
|
|
||||||
|
ptr[0].sz = ptr[1].sz = ptr[3].sz = 0;
|
||||||
|
|
||||||
|
// copy pk from new version
|
||||||
|
{
|
||||||
|
AttributeHeader ah;
|
||||||
|
Uint32 i = 0;
|
||||||
|
Uint32 j = 0;
|
||||||
|
Uint32 i2 = 0;
|
||||||
|
Uint32 j2 = 0;
|
||||||
|
while (i < nkey)
|
||||||
|
{
|
||||||
|
ah = copy_head(i, ptr[0].p, i2, ptr2[0].p, loop);
|
||||||
|
copy_attr(ah, j, ptr[1].p, j2, ptr2[1].p, loop);
|
||||||
|
}
|
||||||
|
ptr[0].sz = i;
|
||||||
|
ptr[1].sz = j;
|
||||||
|
}
|
||||||
|
|
||||||
|
// merge after values, new version overrides
|
||||||
|
if (tp->t3 != Ev_t::DEL)
|
||||||
|
{
|
||||||
|
AttributeHeader ah;
|
||||||
|
Uint32 i = ptr[0].sz;
|
||||||
|
Uint32 j = ptr[1].sz;
|
||||||
|
Uint32 i1 = 0;
|
||||||
|
Uint32 j1 = 0;
|
||||||
|
Uint32 i2 = nkey;
|
||||||
|
Uint32 j2 = ptr[1].sz;
|
||||||
|
while (i1 < nkey)
|
||||||
|
{
|
||||||
|
j1 += AttributeHeader(ptr1[0].p[i1++]).getDataSize();
|
||||||
|
}
|
||||||
|
while (1)
|
||||||
|
{
|
||||||
|
bool b1 = (i1 < ptr1[0].sz);
|
||||||
|
bool b2 = (i2 < ptr2[0].sz);
|
||||||
|
if (b1 && b2)
|
||||||
|
{
|
||||||
|
Uint32 id1 = AttributeHeader(ptr1[0].p[i1]).getAttributeId();
|
||||||
|
Uint32 id2 = AttributeHeader(ptr2[0].p[i2]).getAttributeId();
|
||||||
|
if (id1 < id2)
|
||||||
|
b2 = false;
|
||||||
|
else if (id1 > id2)
|
||||||
|
b1 = false;
|
||||||
|
else
|
||||||
|
{
|
||||||
|
j1 += AttributeHeader(ptr1[0].p[i1++]).getDataSize();
|
||||||
|
b1 = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (b1)
|
||||||
|
{
|
||||||
|
ah = copy_head(i, ptr[0].p, i1, ptr1[0].p, loop);
|
||||||
|
copy_attr(ah, j, ptr[1].p, j1, ptr1[1].p, loop);
|
||||||
|
}
|
||||||
|
else if (b2)
|
||||||
|
{
|
||||||
|
ah = copy_head(i, ptr[0].p, i2, ptr2[0].p, loop);
|
||||||
|
copy_attr(ah, j, ptr[1].p, j2, ptr2[1].p, loop);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ptr[0].sz = i;
|
||||||
|
ptr[1].sz = j;
|
||||||
|
}
|
||||||
|
|
||||||
|
// merge before values, old version overrides
|
||||||
|
if (tp->t3 != Ev_t::INS)
|
||||||
|
{
|
||||||
|
AttributeHeader ah;
|
||||||
|
Uint32 k = 0;
|
||||||
|
Uint32 k1 = 0;
|
||||||
|
Uint32 k2 = 0;
|
||||||
|
while (1)
|
||||||
|
{
|
||||||
|
bool b1 = (k1 < ptr1[2].sz);
|
||||||
|
bool b2 = (k2 < ptr2[2].sz);
|
||||||
|
if (b1 && b2)
|
||||||
|
{
|
||||||
|
Uint32 id1 = AttributeHeader(ptr1[2].p[k1]).getAttributeId();
|
||||||
|
Uint32 id2 = AttributeHeader(ptr2[2].p[k2]).getAttributeId();
|
||||||
|
if (id1 < id2)
|
||||||
|
b2 = false;
|
||||||
|
else if (id1 > id2)
|
||||||
|
b1 = false;
|
||||||
|
else
|
||||||
|
{
|
||||||
|
k2 += 1 + AttributeHeader(ptr2[2].p[k2]).getDataSize();
|
||||||
|
b2 = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (b1)
|
||||||
|
{
|
||||||
|
ah = AttributeHeader(ptr1[2].p[k1]);
|
||||||
|
copy_attr(ah, k, ptr[2].p, k1, ptr1[2].p, loop | 2);
|
||||||
|
}
|
||||||
|
else if (b2)
|
||||||
|
{
|
||||||
|
ah = AttributeHeader(ptr2[2].p[k2]);
|
||||||
|
copy_attr(ah, k, ptr[2].p, k2, ptr2[2].p, loop | 2);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ptr[2].sz = k;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// free old data
|
||||||
|
NdbMem_Free((char*)olddata.memory);
|
||||||
|
|
||||||
DBUG_RETURN(0);
|
DBUG_RETURN(0);
|
||||||
}
|
}
|
||||||
|
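The Ev_t table in the hunk above encodes how two operations on the same primary key within a GCI combine: INSERT followed by DELETE becomes the internal NUL event, UPDATE followed by DELETE becomes a plain DELETE, and so on. A standalone sketch of that table-driven lookup follows; the enum values and the lookup loop mirror the hunk, while the program around them is illustrative.

// Standalone version of the merge-transition lookup shown in the hunk.
#include <cassert>
#include <cstdio>

enum { INS, DEL, UPD, NUL, ERR = 255 };

struct Ev_t { int t1, t2, t3; };    // existing op, incoming op, merged result
static const Ev_t ev_t[] = {
  { INS, INS, ERR }, { INS, DEL, NUL }, { INS, UPD, INS },
  { DEL, INS, UPD }, { DEL, DEL, ERR }, { DEL, UPD, ERR },
  { UPD, INS, ERR }, { UPD, DEL, DEL }, { UPD, UPD, UPD },
};

static int merge_type(int t1, int t2) {
  for (const Ev_t& e : ev_t)
    if (e.t1 == t1 && e.t2 == t2)
      return e.t3;                  // table hit: the merged operation type
  return ERR;
}

int main() {
  assert(merge_type(INS, DEL) == NUL);  // insert+delete cancels out
  assert(merge_type(UPD, DEL) == DEL);  // update then delete is a delete
  assert(merge_type(DEL, INS) == UPD);  // delete then re-insert looks like update
  std::printf("transition table behaves as documented in the hunk\n");
  return 0;
}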
@@ -1399,5 +1671,107 @@ send_report:
 #endif
 }

+// hash table routines
+
+// could optimize the all-fixed case
+Uint32
+EventBufData_hash::getpkhash(NdbEventOperationImpl* op, LinearSectionPtr ptr[3])
+{
+  const NdbTableImpl* tab = op->m_eventImpl->m_tableImpl;
+
+  // in all cases ptr[0] = pk ah.. ptr[1] = pk ad..
+  // for pk update (to equivalent pk) post/pre values give same hash
+  Uint32 nkey = tab->m_noOfKeys;
+  assert(nkey != 0 && nkey <= ptr[0].sz);
+  const Uint32* hptr = ptr[0].p;
+  const uchar* dptr = (uchar*)ptr[1].p;
+
+  // hash registers
+  ulong nr1 = 0;
+  ulong nr2 = 0;
+  while (nkey-- != 0)
+  {
+    AttributeHeader ah(*hptr++);
+    Uint32 bytesize = ah.getByteSize();
+    assert(dptr + bytesize <= (uchar*)(ptr[1].p + ptr[1].sz));
+
+    Uint32 i = ah.getAttributeId();
+    const NdbColumnImpl* col = tab->getColumn(i);
+    assert(col != 0);
+
+    Uint32 lb, len;
+    bool ok = NdbSqlUtil::get_var_length(col->m_type, dptr, bytesize, lb, len);
+    assert(ok);
+
+    CHARSET_INFO* cs = col->m_cs ? col->m_cs : &my_charset_bin;
+    (*cs->coll->hash_sort)(cs, dptr + lb, len, &nr1, &nr2);
+    dptr += bytesize;
+  }
+  return nr1;
+}
+
+// this is seldom invoked
+bool
+EventBufData_hash::getpkequal(NdbEventOperationImpl* op, LinearSectionPtr ptr1[3], LinearSectionPtr ptr2[3])
+{
+  const NdbTableImpl* tab = op->m_eventImpl->m_tableImpl;
+
+  Uint32 nkey = tab->m_noOfKeys;
+  assert(nkey != 0 && nkey <= ptr1[0].sz && nkey <= ptr2[0].sz);
+  const Uint32* hptr1 = ptr1[0].p;
+  const Uint32* hptr2 = ptr2[0].p;
+  const uchar* dptr1 = (uchar*)ptr1[1].p;
+  const uchar* dptr2 = (uchar*)ptr2[1].p;
+
+  while (nkey-- != 0)
+  {
+    AttributeHeader ah1(*hptr1++);
+    AttributeHeader ah2(*hptr2++);
+    // sizes can differ on update of varchar endspace
+    Uint32 bytesize1 = ah1.getByteSize();
+    Uint32 bytesize2 = ah1.getByteSize();
+    assert(dptr1 + bytesize1 <= (uchar*)(ptr1[1].p + ptr1[1].sz));
+    assert(dptr2 + bytesize2 <= (uchar*)(ptr2[1].p + ptr2[1].sz));
+
+    assert(ah1.getAttributeId() == ah2.getAttributeId());
+    Uint32 i = ah1.getAttributeId();
+    const NdbColumnImpl* col = tab->getColumn(i);
+    assert(col != 0);
+
+    Uint32 lb1, len1;
+    bool ok1 = NdbSqlUtil::get_var_length(col->m_type, dptr1, bytesize1, lb1, len1);
+    Uint32 lb2, len2;
+    bool ok2 = NdbSqlUtil::get_var_length(col->m_type, dptr2, bytesize2, lb2, len2);
+    assert(ok1 && ok2 && lb1 == lb2);
+
+    CHARSET_INFO* cs = col->m_cs ? col->m_cs : &my_charset_bin;
+    int res = (cs->coll->strnncollsp)(cs, dptr1 + lb1, len1, dptr2 + lb2, len2, false);
+    if (res != 0)
+      return false;
+    dptr1 += bytesize1;
+    dptr2 += bytesize2;
+  }
+  return true;
+}
+
+void
+EventBufData_hash::search(Pos& hpos, NdbEventOperationImpl* op, LinearSectionPtr ptr[3])
+{
+  Uint32 pkhash = getpkhash(op, ptr);
+  Uint32 index = (op->m_oid ^ pkhash) % GCI_EVENT_HASH_SIZE;
+  EventBufData* data = m_hash[index];
+  while (data != 0)
+  {
+    if (data->m_event_op == op &&
+        data->m_pkhash == pkhash &&
+        getpkequal(op, data->ptr, ptr))
+      break;
+    data = data->m_next_hash;
+  }
+  hpos.index = index;
+  hpos.data = data;
+  hpos.pkhash = pkhash;
+}
+
 template class Vector<Gci_container>;
 template class Vector<NdbEventBuffer::EventBufData_chunk*>;
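getpkhash(), getpkequal() and search() above implement a chained hash keyed by (event operation, primary key): insertDataL() searches the GCI bucket first and either merges into the hit or appends a new entry. A standalone sketch of that search-then-merge-or-append pattern follows; the EventRec type, the hash function and the driver are illustrative, and only the control flow and the 101-slot bucket count mirror the routines above.

// Illustrative chained hash keyed by (op id, pk), as used per GCI bucket.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

struct EventRec {
  int op_id;            // which event operation produced the record
  std::string pk;       // primary key image
  int merged;           // how many raw changes were folded into this record
  EventRec* next_hash;  // chain within one hash slot
};

struct GciHash {
  static const unsigned SIZE = 101;           // same bucket count as the hunk
  EventRec* slot[SIZE] = {};

  EventRec* search(int op_id, const std::string& pk, unsigned& index) {
    index = (std::hash<std::string>()(pk) ^ (unsigned)op_id) % SIZE;
    for (EventRec* r = slot[index]; r != 0; r = r->next_hash)
      if (r->op_id == op_id && r->pk == pk)
        return r;                             // same op and same PK: merge target
    return 0;
  }
  void append(unsigned index, EventRec* r) {  // append-only, like the real hash
    r->next_hash = slot[index];
    slot[index] = r;
  }
};

int main() {
  GciHash h;
  std::vector<EventRec*> owned;
  const char* keys[] = {"pk=1", "pk=2", "pk=1"};  // third change hits the first
  for (const char* k : keys) {
    unsigned idx;
    EventRec* hit = h.search(7, k, idx);
    if (hit != 0)
      hit->merged++;                          // merge path
    else {
      EventRec* r = new EventRec{7, k, 1, 0};
      h.append(idx, r);                       // append path
      owned.push_back(r);
    }
  }
  for (EventRec* r : owned)
    std::printf("%s folded %d change(s)\n", r->pk.c_str(), r->merged);
  for (EventRec* r : owned) delete r;
  return 0;
}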
@@ -25,16 +25,19 @@
 #define NDB_EVENT_OP_MAGIC_NUMBER 0xA9F301B4

 class NdbEventOperationImpl;

 struct EventBufData
 {
   union {
     SubTableData *sdata;
-    char *memory;
+    Uint32 *memory;
   };
   LinearSectionPtr ptr[3];
   unsigned sz;
   NdbEventOperationImpl *m_event_op;
   EventBufData *m_next; // Next wrt to global order
+  EventBufData *m_next_hash; // Next in per-GCI hash
+  Uint32 m_pkhash; // PK hash (without op) for fast compare
 };

 class EventBufData_list
@@ -116,6 +119,34 @@ void EventBufData_list::append(const EventBufData_list &list)
   m_sz+= list.m_sz;
 }

+// GCI bucket has also a hash over data, with key event op, table PK.
+// It can only be appended to and is invalid after remove_first().
+class EventBufData_hash
+{
+public:
+  struct Pos { // search result
+    Uint32 index;       // index into hash array
+    EventBufData* data; // non-zero if found
+    Uint32 pkhash;      // PK hash
+  };
+
+  static Uint32 getpkhash(NdbEventOperationImpl* op, LinearSectionPtr ptr[3]);
+  static bool getpkequal(NdbEventOperationImpl* op, LinearSectionPtr ptr1[3], LinearSectionPtr ptr2[3]);
+
+  void search(Pos& hpos, NdbEventOperationImpl* op, LinearSectionPtr ptr[3]);
+  void append(Pos& hpos, EventBufData* data);
+
+  enum { GCI_EVENT_HASH_SIZE = 101 };
+  EventBufData* m_hash[GCI_EVENT_HASH_SIZE];
+};
+
+inline
+void EventBufData_hash::append(Pos& hpos, EventBufData* data)
+{
+  data->m_next_hash = m_hash[hpos.index];
+  m_hash[hpos.index] = data;
+}
+
 struct Gci_container
 {
   enum State
@@ -127,6 +158,7 @@ struct Gci_container
   Uint32 m_gcp_complete_rep_count; // Remaining SUB_GCP_COMPLETE_REP until done
   Uint64 m_gci; // GCI
   EventBufData_list m_data;
+  EventBufData_hash m_data_hash;
 };

 class NdbEventOperationImpl : public NdbEventOperation {
@@ -174,6 +206,8 @@ public:
   Uint32 m_eventId;
   Uint32 m_oid;

+  bool m_separateEvents;
+
   EventBufData *m_data_item;

   void *m_custom_data;
@@ -212,7 +246,6 @@ public:
   void add_op();
   void remove_op();
   void init_gci_containers();
-  Uint32 m_active_op_count;

   // accessed from the "receive thread"
   int insertDataL(NdbEventOperationImpl *op,
@@ -233,10 +266,15 @@ public:

   NdbEventOperationImpl *move_data();

-  // used by both user thread and receive thread
-  int copy_data_alloc(const SubTableData * const f_sdata,
-                      LinearSectionPtr f_ptr[3],
-                      EventBufData *ev_buf);
+  // routines to copy/merge events
+  EventBufData* alloc_data();
+  int alloc_mem(EventBufData* data, LinearSectionPtr ptr[3]);
+  int copy_data(const SubTableData * const sdata,
+                LinearSectionPtr ptr[3],
+                EventBufData* data);
+  int merge_data(const SubTableData * const sdata,
+                 LinearSectionPtr ptr[3],
+                 EventBufData* data);

   void free_list(EventBufData_list &list);

@@ -290,6 +328,8 @@ private:
   // dropped event operations that have not yet
   // been deleted
   NdbEventOperationImpl *m_dropped_ev_op;
+
+  Uint32 m_active_op_count;
 };

 inline
@@ -169,6 +169,7 @@ eventOperation(Ndb* pNdb, const NdbDictionary::Table &tab, void* pstats, int rec
     g_err << function << "Event operation creation failed\n";
     return NDBT_FAILED;
   }
+  pOp->separateEvents(true);

   g_info << function << "get values\n";
   NdbRecAttr* recAttr[1024];
@@ -380,6 +381,7 @@ int runCreateDropEventOperation(NDBT_Context* ctx, NDBT_Step* step)
     g_err << "Event operation creation failed\n";
     return NDBT_FAILED;
   }
+  pOp->separateEvents(true);

   g_info << "dropping event operation" << endl;
   int res = pNdb->dropEventOperation(pOp);
@@ -550,6 +552,7 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
     g_err << "Event operation creation failed on %s" << buf << endl;
     DBUG_RETURN(NDBT_FAILED);
   }
+  pOp->separateEvents(true);

   int i;
   int n_columns= table->getNoOfColumns();
@@ -1195,6 +1198,7 @@ static int createEventOperations(Ndb * ndb)
   {
     DBUG_RETURN(NDBT_FAILED);
   }
+  pOp->separateEvents(true);

   int n_columns= pTabs[i]->getNoOfColumns();
   for (int j = 0; j < n_columns; j++)
@@ -473,9 +473,9 @@ struct Op { // single or composite
   Kind kind;
   Type type;
   Op* next_op; // within one commit
-  Op* next_com; // next commit chain or next event
+  Op* next_com; // next commit chain
   Op* next_gci; // groups commit chains (unless --separate-events)
-  Op* next_ev;
+  Op* next_ev; // next event
   Op* next_free; // free list
   bool free; // on free list
   uint num_op;
@@ -564,6 +564,8 @@ static NdbRecAttr* g_ev_ra[2][g_maxcol]; // 0-post 1-pre
 static NdbBlob* g_ev_bh[2][g_maxcol]; // 0-post 1-pre
 static Op* g_rec_ev;
 static uint g_ev_pos[g_maxpk];
+static uint g_num_gci = 0;
+static uint g_num_ev = 0;

 static Op*
 getop(Op::Kind a_kind)
@@ -651,6 +653,7 @@ resetmem()
     }
   }
   assert(g_usedops == 0);
+  g_num_gci = g_num_ev = 0;
 }

 struct Comp {
@@ -877,9 +880,8 @@ createeventop()
   chkdb((g_evt_op = g_ndb->createEventOperation(g_evt->getName(), bsz)) != 0);
 #else
   chkdb((g_evt_op = g_ndb->createEventOperation(g_evt->getName())) != 0);
-#ifdef version51rbr
+  // available in gci merge changeset
   g_evt_op->separateEvents(g_opts.separate_events); // not yet inherited
-#endif
 #endif
   uint i;
   for (i = 0; i < ncol(); i++) {
@@ -1203,8 +1205,9 @@ makeops()
     // copy to gci level
     copyop(com_op, gci_op);
     tot_op->num_com += 1;
+    g_num_gci += 1;
   }
-  ll1("makeops: used ops = " << g_usedops);
+  ll1("makeops: used ops = " << g_usedops << " com ops = " << g_num_gci);
 }

 static int
@@ -1341,12 +1344,13 @@ mergeops()
         gci_op2 = gci_op2->next_gci;
         freeop(tmp_op);
         mergecnt++;
+        assert(g_num_gci != 0);
+        g_num_gci--;
       }
       gci_op = gci_op->next_gci = gci_op2;
     }
   }
-  ll1("mergeops: used ops = " << g_usedops);
-  ll1("mergeops: merged " << mergecnt << " gci entries");
+  ll1("mergeops: used ops = " << g_usedops << " gci ops = " << g_num_gci);
   return 0;
 }
@@ -1504,27 +1508,37 @@ matchevents()
 static int
 matchops()
 {
+  ll1("matchops");
+  uint nomatch = 0;
   Uint32 pk1;
   for (pk1 = 0; pk1 < g_opts.maxpk; pk1++) {
     Op* tot_op = g_pk_op[pk1];
     if (tot_op == 0)
       continue;
-    Op* com_op = tot_op->next_com;
-    while (com_op != 0) {
-      if (com_op->type != Op::NUL && ! com_op->match) {
+    Op* gci_op = tot_op->next_gci;
+    while (gci_op != 0) {
+      if (gci_op->type == Op::NUL) {
+        ll2("GCI: " << *gci_op << " [skip NUL]");
+      } else if (gci_op->match) {
+        ll2("GCI: " << *gci_op << " [match OK]");
+      } else {
+        ll0("GCI: " << *gci_op);
+        Op* com_op = gci_op->next_com;
+        assert(com_op != 0);
         ll0("COM: " << *com_op);
         Op* op = com_op->next_op;
         assert(op != 0);
         while (op != 0) {
-          ll0("---: " << *op);
+          ll0("OP : " << *op);
           op = op->next_op;
         }
         ll0("no matching event");
-        return -1;
+        nomatch++;
       }
-      com_op = com_op->next_com;
+      gci_op = gci_op->next_gci;
     }
   }
+  chkrc(nomatch == 0);
   return 0;
 }
@@ -1619,9 +1633,10 @@ runevents()
       Op* ev = getop(Op::EV);
       copyop(g_rec_ev, ev);
       last_ev->next_ev = ev;
+      g_num_ev++;
     }
   }
-  ll1("runevents: used ops = " << g_usedops);
+  ll1("runevents: used ops = " << g_usedops << " events = " << g_num_ev);
   return 0;
 }
@@ -1666,6 +1681,7 @@ runtest()
   chkrc(mergeops() == 0);
   cmppostpre();
   chkrc(runevents() == 0);
+  ll0("counts: gci = " << g_num_gci << " ev = " << g_num_ev);
   chkrc(matchevents() == 0);
   chkrc(matchops() == 0);
   chkrc(dropeventop() == 0);