mirror of
https://github.com/MariaDB/server.git
Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb
commit 08438b6054
27 changed files with 459 additions and 242 deletions
@ -237,7 +237,7 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
if test X"$have_ndb_binlog" = Xyes
then
AC_DEFINE([HAVE_NDB_BINLOG], [1],
AC_DEFINE([WITH_NDB_BINLOG], [1],
[Including Ndb Cluster Binlog])
AC_MSG_RESULT([Including Ndb Cluster Binlog])
else
@ -15,7 +15,7 @@ AC_DEFUN([MYSQL_CHECK_REPLICATION], [
case "$row_based" in
yes )
AC_DEFINE([HAVE_ROW_BASED_REPLICATION], [1], [Define to have row-based replication])
AC_DEFINE([WITH_ROW_BASED_REPLICATION], [1], [Define to have row-based replication])
AC_MSG_RESULT([-- including row-based replication])
[have_row_based=yes]
;;
@ -78,6 +78,15 @@
#endif
#endif /* _WIN32... */
#ifndef EMBEDDED_LIBRARY
#ifdef WITH_ROW_BASED_REPLICATION
#define HAVE_ROW_BASED_REPLICATION 1
#endif
#ifdef WITH_NDB_BINLOG
#define HAVE_NDB_BINLOG 1
#endif
#endif /* !EMBEDDED_LIBRARY */
/* Some defines to avoid ifdefs in the code */
#ifndef NETWARE_YIELD
#define NETWARE_YIELD
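Editor's note: the added block above maps the configure-time WITH_* switches onto the HAVE_* names the server sources test, and only for non-embedded builds. A minimal standalone sketch of the same pattern follows; the feature name and macros are illustrative, not the server's real config headers.

// sketch.cc - illustrative only; assumes a hypothetical WITH_FEATURE_X define.
#define WITH_FEATURE_X 1            /* what the build system would define */

#ifndef EMBEDDED_LIBRARY            /* feature is compiled out of embedded builds */
#ifdef WITH_FEATURE_X
#define HAVE_FEATURE_X 1            /* what the source code actually tests */
#endif
#endif

#include <cstdio>
int main() {
#ifdef HAVE_FEATURE_X
  std::puts("feature X compiled in");
#else
  std::puts("feature X compiled out");
#endif
  return 0;
}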
@ -167,10 +167,13 @@ link_sources:
rm -f $$f; \
@LN_CP_F@ $(top_srcdir)/libmysql/$$f $$f; \
done; \
for f in $(sqlstoragesources); do \
rm -f $$f; \
@LN_CP_F@ `find $(srcdir)/../sql -name $$f` $$f; \
done; \
if test -n "$(sqlstoragesources)" ; \
then \
for f in "$(sqlstoragesources)"; do \
rm -f "$$f"; \
@LN_CP_F@ `find $(srcdir)/../sql -name "$$f"` "$$f"; \
done; \
fi; \
rm -f client_settings.h; \
@LN_CP_F@ $(top_srcdir)/libmysql/client_settings.h client_settings.h
@ -6,13 +6,6 @@ attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
drop table t1;
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 0 PRIMARY 1 pk1 A 0 NULL NULL BTREE
@ -56,86 +56,90 @@ INSERT INTO test.t1 VALUES (8,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4
UNLOCK TABLES;
UPDATE test.t1 set x034 = 50 where bit3 = b'000000';
UPDATE test.t1 set VNotSupp = 33 where bit1 = b'0';
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034 FROM test.t1;
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034
FROM test.t1
ORDER BY oSupp, sSuppD, GSuppDf, VNotSupp, x034;
oSupp sSuppD GSuppDf VNotSupp x034
5 5 3 NULL 1
5 5 3 2 1
5 5 3 33 1
5 5 3 2 50
5 5 3 33 1
5 5 3 33 1
5 5 3 33 1
5 5 3 33 1
SELECT hex(bit1) from test.t1;
5 5 3 33 1
SELECT hex(bit1) from test.t1 ORDER BY bit1;
hex(bit1)
0
0
0
0
0
2A
3F
3F
0
2A
0
0
0
0
SELECT hex(bit2) from test.t1;
SELECT hex(bit2) from test.t1 ORDER BY bit2;
hex(bit2)
3E
0
1
1
1
1
2A
3E
3F
1
1
1
1
SELECT hex(bit3) from test.t1;
SELECT hex(bit3) from test.t1 ORDER BY bit3;
hex(bit3)
35
24
15
0
1
1
1
1
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034 FROM test.t1;
15
24
35
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034
FROM test.t1
ORDER BY oSupp, sSuppD, GSuppDf, VNotSupp, x034;
oSupp sSuppD GSuppDf VNotSupp x034
5 5 3 NULL 1
5 5 3 2 1
5 5 3 33 1
5 5 3 2 50
5 5 3 33 1
5 5 3 33 1
5 5 3 33 1
5 5 3 33 1
SELECT hex(bit1) from test.t1;
5 5 3 33 1
SELECT hex(bit1) from test.t1 ORDER BY bit1;
hex(bit1)
0
0
0
0
0
2A
3F
3F
0
2A
0
0
0
0
SELECT hex(bit2) from test.t1;
SELECT hex(bit2) from test.t1 ORDER BY bit2;
hex(bit2)
3E
0
1
1
1
1
2A
3E
3F
1
1
1
1
SELECT hex(bit3) from test.t1;
SELECT hex(bit3) from test.t1 ORDER BY bit3;
hex(bit3)
35
24
15
0
1
1
1
1
15
24
35
CREATE TABLE test.t2 (a INT, b BIT(1));
INSERT INTO test.t2 VALUES (1, b'0');
INSERT INTO test.t2 VALUES (1, b'1');
@ -144,19 +148,19 @@ CREATE TABLE test.t3 (a INT, b INT);
INSERT INTO test.t3 VALUES (1, NULL);
INSERT INTO test.t3 VALUES (1, 0);
UPDATE test.t3 SET a = 2 WHERE b = 0;
SELECT a, hex(b) FROM test.t2;
SELECT a, hex(b) FROM test.t2 ORDER BY a,b;
a hex(b)
1 0
2 1
SELECT * FROM test.t3;
SELECT * FROM test.t3 ORDER BY a,b;
a b
1 NULL
2 0
SELECT a, hex(b) FROM test.t2;
SELECT a, hex(b) FROM test.t2 ORDER BY a,b;
a hex(b)
1 0
2 1
SELECT * FROM test.t3;
SELECT * FROM test.t3 ORDER BY a,b;
a b
1 NULL
2 0
@ -27,9 +27,9 @@ rpl_ndb_auto_inc : BUG#17086 2006-02-16 jmiller CR: auto_increment_incre
rpl_ndb_commit_afterflush : BUG#19328 2006-05-04 tomas Slave timeout with COM_REGISTER_SLAVE error causing stop
rpl_ndb_dd_partitions : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on s/AMD
rpl_ndb_ddl : BUG#18946 result file needs update + test needs to checked
rpl_ndb_innodb2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_ndb_innodb2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement
rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ
rpl_ndb_myisam2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_ndb_myisam2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement
rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian
rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly
rpl_row_func003 : BUG#19074 2006-13-04 andrei test failed
@ -6,17 +6,16 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
drop database if exists mysqltest;
--enable_warnings
# workaround for bug#16445
# remove to reproduce bug and run tests from ndb start
# and with ndb_autodiscover disabled. Fails on Linux 50 % of the times
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
drop table t1;
## workaround for bug#16445
## remove to reproduce bug and run tests from ndb start
## and with ndb_autodiscover disabled. Fails on Linux 50 % of the times
#CREATE TABLE t1 (
# pk1 INT NOT NULL PRIMARY KEY,
# attr1 INT NOT NULL,
# attr2 INT,
# attr3 VARCHAR(10)
#) ENGINE=ndbcluster;
#drop table t1;
#
# Basic test to show that the NDB
@ -70,18 +70,22 @@ UNLOCK TABLES;
UPDATE test.t1 set x034 = 50 where bit3 = b'000000';
UPDATE test.t1 set VNotSupp = 33 where bit1 = b'0';
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034 FROM test.t1;
SELECT hex(bit1) from test.t1;
SELECT hex(bit2) from test.t1;
SELECT hex(bit3) from test.t1;
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034
FROM test.t1
ORDER BY oSupp, sSuppD, GSuppDf, VNotSupp, x034;
SELECT hex(bit1) from test.t1 ORDER BY bit1;
SELECT hex(bit2) from test.t1 ORDER BY bit2;
SELECT hex(bit3) from test.t1 ORDER BY bit3;
save_master_pos;
connection slave;
sync_with_master;
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034 FROM test.t1;
SELECT hex(bit1) from test.t1;
SELECT hex(bit2) from test.t1;
SELECT hex(bit3) from test.t1;
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034
FROM test.t1
ORDER BY oSupp, sSuppD, GSuppDf, VNotSupp, x034;
SELECT hex(bit1) from test.t1 ORDER BY bit1;
SELECT hex(bit2) from test.t1 ORDER BY bit2;
SELECT hex(bit3) from test.t1 ORDER BY bit3;
connection master;
CREATE TABLE test.t2 (a INT, b BIT(1));
@ -94,14 +98,14 @@ INSERT INTO test.t3 VALUES (1, NULL);
INSERT INTO test.t3 VALUES (1, 0);
UPDATE test.t3 SET a = 2 WHERE b = 0;
SELECT a, hex(b) FROM test.t2;
SELECT * FROM test.t3;
SELECT a, hex(b) FROM test.t2 ORDER BY a,b;
SELECT * FROM test.t3 ORDER BY a,b;
save_master_pos;
connection slave;
sync_with_master;
SELECT a, hex(b) FROM test.t2;
SELECT * FROM test.t3;
SELECT a, hex(b) FROM test.t2 ORDER BY a,b;
SELECT * FROM test.t3 ORDER BY a,b;
connection master;
DROP TABLE IF EXISTS test.t1;
@ -1 +1 @@
--binlog-format=row --default-storage-engine=ndb
--binlog-format=row --default-storage-engine=ndbcluster
@ -1 +1 @@
--default-storage-engine=ndb --binlog-format=row
--default-storage-engine=ndbcluster --binlog-format=row
@ -14,6 +14,54 @@
fun:pthread_create@@GLIBC_2.1
}
{
pthread allocate_tls memory loss 2
Memcheck:Leak
fun:calloc
fun:_dl_allocate_tls
fun:pthread_create@@GLIBC_2.1
}
{
pthead_exit memory loss 1
Memcheck:Leak
fun:malloc
fun:_dl_new_object
fun:_dl_map_object_from_fd
}
{
pthread_exit memory loss 2
Memcheck:Leak
fun:malloc
fun:_dl_map_object
fun:dl_open_worker
}
{
pthread_exit memory loss 3
Memcheck:Leak
fun:malloc
fun:_dl_map_object_deps
fun:dl_open_worker
}
{
pthread_exit memory loss 4
Memcheck:Leak
fun:calloc
fun:_dl_check_map_versions
fun:dl_open_worker
}
{
pthread_exit memory loss 5
Memcheck:Leak
fun:calloc
fun:_dl_new_object
fun:_dl_map_object_from_fd
}
{
pthread allocate_dtv memory loss
Memcheck:Leak
@ -134,6 +182,24 @@
fun:compress2
}
{
libz longest_match3
Memcheck:Cond
fun:longest_match
fun:deflate_slow
fun:deflate
fun:azflush
}
{
libz longest_match3
Memcheck:Cond
fun:longest_match
fun:deflate_slow
fun:deflate
fun:azclose
}
{
libz deflate
Memcheck:Cond
@ -173,3 +239,10 @@
fun:my_thread_init
fun:kill_server_thread
}
# Red Hat AS 4 32 bit
{
dl_relocate_object
Memcheck:Cond
fun:_dl_relocate_object
}
@ -2875,7 +2875,7 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
MY_BITMAP *defined, byte *buf)
{
Field **p_field= table->field, *field= *p_field;
uint row_offset= (uint) (buf - table->record[0]);
my_ptrdiff_t row_offset= buf - table->record[0];
DBUG_ENTER("ndb_unpack_record");
// Set null flag(s)
@ -2906,24 +2906,34 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
}
else if (field->type() == MYSQL_TYPE_BIT)
{
byte *save_field_ptr= field->ptr;
field->ptr= save_field_ptr + row_offset;
Field_bit *field_bit= static_cast<Field_bit*>(field);
/*
Move internal field pointer to point to 'buf'. Calling
the correct member function directly since we know the
type of the object.
*/
field_bit->Field_bit::move_field_offset(row_offset);
if (field->pack_length() < 5)
{
DBUG_PRINT("info", ("bit field H'%.8X",
(*value).rec->u_32_value()));
((Field_bit*) field)->store((longlong)
(*value).rec->u_32_value(), FALSE);
field_bit->Field_bit::store((longlong) (*value).rec->u_32_value(),
FALSE);
}
else
{
DBUG_PRINT("info", ("bit field H'%.8X%.8X",
*(Uint32*) (*value).rec->aRef(),
*((Uint32*) (*value).rec->aRef()+1)));
((Field_bit*) field)->store((longlong)
(*value).rec->u_64_value(),TRUE);
field_bit->Field_bit::store((longlong) (*value).rec->u_64_value(),
TRUE);
}
field->ptr= save_field_ptr;
/*
Move back internal field pointer to point to original
value (usually record[0]).
*/
field_bit->Field_bit::move_field_offset(-row_offset);
DBUG_PRINT("info",("[%u] SET",
(*value).rec->getColumn()->getColumnNo()));
DBUG_DUMP("info", (const char*) field->ptr, field->field_length);
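Editor's note: the hunk above swaps a manual save/restore of field->ptr for Field_bit::move_field_offset(), which shifts the field's internal pointer by the distance between record[0] and the row buffer being unpacked, then shifts it back. A simplified, self-contained sketch of that idea follows; the toy struct stands in for Field_bit and is not the server's class.

// Toy stand-in for a field that caches a pointer into the "current" row buffer.
#include <cstddef>
#include <cstdio>

struct ToyBitField {
  unsigned char *ptr;                                   // points into record[0] by default
  void move_field_offset(std::ptrdiff_t d) { ptr += d; } // same idea as Field_bit::move_field_offset
  void store(unsigned char v) { *ptr = v; }             // unpack a value through the pointer
};

int main() {
  unsigned char mem[16] = {0};
  unsigned char *record0 = mem;                         // table->record[0]
  unsigned char *rowbuf  = mem + 8;                     // 'buf' handed to the unpack routine
  ToyBitField f{record0 + 2};                           // field lives at offset 2 in the row

  std::ptrdiff_t row_offset = rowbuf - record0;         // my_ptrdiff_t in the patch
  f.move_field_offset(row_offset);                      // temporarily aim the field at rowbuf
  f.store(42);                                          // store lands in rowbuf, not record[0]
  f.move_field_offset(-row_offset);                     // restore the original pointer

  std::printf("rowbuf[2] = %u\n", rowbuf[2]);           // prints 42
  return 0;
}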
@ -1689,10 +1689,15 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
pthread_mutex_lock(&LOCK_open);
if (ndb_create_table_from_engine(thd, schema->db, schema->name))
{
sql_print_error("Could not discover table '%s.%s' from "
"binlog schema event '%s' from node %d",
sql_print_error("NDB binlog: Could not discover table '%s.%s' from "
"binlog schema event '%s' from node %d. "
"my_errno: %d",
schema->db, schema->name, schema->query,
schema->node_id);
schema->node_id, my_errno);
List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
MYSQL_ERROR *err;
while ((err= it++))
sql_print_warning("NDB binlog: (%d)%s", err->code, err->msg);
}
pthread_mutex_unlock(&LOCK_open);
log_query= 1;
@ -1916,7 +1921,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
// fall through
case SOT_RENAME_TABLE_NEW:
log_query= 1;
if (ndb_binlog_running)
if (ndb_binlog_running && (!share || !share->op))
{
/*
we need to free any share here as command below
@ -1930,10 +1935,14 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
pthread_mutex_lock(&LOCK_open);
if (ndb_create_table_from_engine(thd, schema->db, schema->name))
{
sql_print_error("Could not discover table '%s.%s' from "
"binlog schema event '%s' from node %d",
sql_print_error("NDB binlog: Could not discover table '%s.%s' from "
"binlog schema event '%s' from node %d. my_errno: %d",
schema->db, schema->name, schema->query,
schema->node_id);
schema->node_id, my_errno);
List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
MYSQL_ERROR *err;
while ((err= it++))
sql_print_warning("NDB binlog: (%d)%s", err->code, err->msg);
}
pthread_mutex_unlock(&LOCK_open);
}
sql/log.cc
@ -1090,12 +1090,16 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev)
were, we would have to ensure that we're not ending a statement
inside a stored function.
*/
#ifdef HAVE_ROW_BASED_REPLICATION
thd->binlog_flush_pending_rows_event(TRUE);
#endif
error= mysql_bin_log.write(thd, trans_log, end_ev);
}
else
{
#ifdef HAVE_ROW_BASED_REPLICATION
thd->binlog_delete_pending_rows_event();
#endif
}
/*
@ -2620,6 +2624,7 @@ bool MYSQL_LOG::is_query_in_union(THD *thd, query_id_t query_id_param)
}
#ifdef HAVE_ROW_BASED_REPLICATION
/*
These functions are placed in this file since they need access to
binlog_hton, which has internal linkage.
@ -2797,6 +2802,7 @@ int MYSQL_LOG::flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event)
DBUG_RETURN(error);
}
#endif /*HAVE_ROW_BASED_REPLICATION*/
/*
Write an event to the binary log
@ -2831,7 +2837,9 @@ bool MYSQL_LOG::write(Log_event *event_info)
*/
bool const end_stmt=
thd->prelocked_mode && thd->lex->requires_prelocking();
#ifdef HAVE_ROW_BASED_REPLICATION
thd->binlog_flush_pending_rows_event(end_stmt);
#endif /*HAVE_ROW_BASED_REPLICATION*/
pthread_mutex_lock(&LOCK_log);
@ -2873,8 +2881,10 @@ bool MYSQL_LOG::write(Log_event *event_info)
*/
if (opt_using_transactions && thd)
{
#ifdef HAVE_ROW_BASED_REPLICATION
if (thd->binlog_setup_trx_data())
goto err;
#endif /*HAVE_ROW_BASED_REPLICATION*/
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton.slot];
sql/log_event.cc
@ -5350,6 +5350,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
slave_print_msg(ERROR_LEVEL, rli, error,
"Error in %s event: when locking tables",
get_type_str());
rli->clear_tables_to_lock();
DBUG_RETURN(error);
}
@ -5385,6 +5386,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
"unexpected success or fatal error"));
thd->query_error= 1;
}
rli->clear_tables_to_lock();
DBUG_RETURN(error);
}
}
@ -5393,19 +5395,17 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
the table map and remove them from tables to lock.
*/
TABLE_LIST *ptr= rli->tables_to_lock;
while (ptr)
TABLE_LIST *ptr;
for (ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global)
{
rli->m_table_map.set_table(ptr->table_id, ptr->table);
rli->touching_table(ptr->db, ptr->table_name, ptr->table_id);
char *to_free= reinterpret_cast<char*>(ptr);
ptr= ptr->next_global;
my_free(to_free, MYF(MY_WME));
}
rli->tables_to_lock= 0;
rli->tables_to_lock_count= 0;
rli->clear_tables_to_lock();
}
DBUG_ASSERT(rli->tables_to_lock == NULL && rli->tables_to_lock_count == 0);
TABLE* table= rli->m_table_map.get_table(m_table_id);
if (table)
@ -5816,12 +5816,8 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli)
&tname_mem, NAME_LEN + 1,
NULL);
/*
If memory is allocated, it the pointer to it should be stored in
table_list. If this is not true, the memory will not be correctly
free:ed later.
*/
DBUG_ASSERT(memory == NULL || memory == table_list);
if (memory == NULL)
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
uint32 dummy_len;
bzero(table_list, sizeof(*table_list));
@ -5836,8 +5832,12 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli)
int error= 0;
if (rpl_filter->db_ok(table_list->db) &&
(!rpl_filter->is_on() || rpl_filter->tables_ok("", table_list)))
if (!rpl_filter->db_ok(table_list->db) ||
(rpl_filter->is_on() && !rpl_filter->tables_ok("", table_list)))
{
my_free((gptr) memory, MYF(MY_WME));
}
else
{
/*
Check if the slave is set to use SBR. If so, it should switch
@ -6416,12 +6416,17 @@ static int find_and_fetch_row(TABLE *table, byte *key)
if (table->s->keys > 0)
{
int error;
/*
We need to set the null bytes to ensure that the filler bit
are all set when returning. There are storage engines that
just set the necessary bits on the bytes and don't set the
filler bits correctly.
*/
/* We have a key: search the table using the index */
if (!table->file->inited)
if ((error= table->file->ha_index_init(0, FALSE)))
return error;
/*
We need to set the null bytes to ensure that the filler bit are
all set when returning. There are storage engines that just set
the necessary bits on the bytes and don't set the filler bits
correctly.
*/
my_ptrdiff_t const pos=
table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
table->record[1][pos]= 0xFF;
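Editor's note: the added lines preset the trailing null-flag byte of record[1] to 0xFF before the index lookup, because some engines only touch the significant null bits and leave the filler bits of that byte undefined, which would make a later byte-wise record comparison fail spuriously. A standalone illustration of why presetting the filler bits matters; the 3-column row with a one-byte null bitmap is hypothetical, not the server's record format.

#include <cstring>
#include <cstdio>

// Toy row image: byte 0 is the null bitmap (3 significant bits, 5 filler bits),
// bytes 1..3 are the column values.
static void engine_fetch(unsigned char *rec) {
  // An engine that sets only the meaningful null bits and the data,
  // leaving the filler bits of the bitmap byte as whatever was there before.
  rec[0] = (unsigned char)((rec[0] & 0xF8) | 0x02);   // col1 NOT NULL, col2 NULL, col3 NOT NULL
  rec[1] = 10; rec[2] = 0; rec[3] = 30;
}

int main() {
  unsigned char before[4] = {0xFA, 10, 0, 30};        // reference image, filler bits all set
  unsigned char fetched[4];

  std::memset(fetched, 0x00, sizeof fetched);         // stale filler bits = 0
  engine_fetch(fetched);
  std::printf("without preset: %s\n",
              std::memcmp(before, fetched, 4) ? "MISMATCH" : "match");

  std::memset(fetched, 0x00, sizeof fetched);
  fetched[0] = 0xFF;                                  // the patch's trick: preset the filler bits
  engine_fetch(fetched);
  std::printf("with preset:    %s\n",
              std::memcmp(before, fetched, 4) ? "MISMATCH" : "match");
  return 0;
}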
@ -6430,6 +6435,7 @@ static int find_and_fetch_row(TABLE *table, byte *key)
HA_READ_KEY_EXACT)))
{
table->file->print_error(error, MYF(0));
table->file->ha_index_end();
DBUG_RETURN(error);
}
@ -6448,7 +6454,10 @@ static int find_and_fetch_row(TABLE *table, byte *key)
chose the row to change only using a PK or an UNNI.
*/
if (table->key_info->flags & HA_NOSAME)
{
table->file->ha_index_end();
DBUG_RETURN(0);
}
while (record_compare(table))
{
@ -6465,15 +6474,26 @@ static int find_and_fetch_row(TABLE *table, byte *key)
if ((error= table->file->index_next(table->record[1])))
{
table->file->print_error(error, MYF(0));
table->file->ha_index_end();
DBUG_RETURN(error);
}
}
/*
Have to restart the scan to be able to fetch the next row.
*/
table->file->ha_index_end();
}
else
{
/* Continue until we find the right record or have made a full loop */
int restart_count= 0; // Number of times scanning has restarted from top
int error= 0;
int error;
/* We don't have a key: search the table using rnd_next() */
if ((error= table->file->ha_rnd_init(1)))
return error;
/* Continue until we find the right record or have made a full loop */
do
{
/*
@ -6499,11 +6519,17 @@ static int find_and_fetch_row(TABLE *table, byte *key)
default:
table->file->print_error(error, MYF(0));
table->file->ha_rnd_end();
DBUG_RETURN(error);
}
}
while (restart_count < 2 && record_compare(table));
/*
Have to restart the scan to be able to fetch the next row.
*/
table->file->ha_rnd_end();
DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
DBUG_RETURN(error);
}
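Editor's note: the rewritten branch above initializes the random scan with ha_rnd_init(), ends it with ha_rnd_end() on every exit path, and keeps reading until record_compare() matches or the scan has restarted from the top twice. A compact sketch of that "scan, restart at most once more, then give up" control flow over a plain array; the toy cursor stands in for the handler calls and is not the handler API.

#include <cstdio>

// Toy 'handler': a cursor over an int array, standing in for ha_rnd_init()/rnd_next().
struct ToyScan {
  const int *rows; int n; int pos;
  void init()          { pos = 0; }                         // ha_rnd_init()
  bool next(int &out)  { if (pos >= n) return false; out = rows[pos++]; return true; }
};

// Look for 'key', allowing the scan to restart from the top at most once,
// mirroring the restart_count < 2 logic in the patch.
static bool find_with_restart(ToyScan &s, int key) {
  int restart_count = 0;
  int v;
  s.init();
  for (;;) {
    if (!s.next(v)) {                  // end of scan reached
      if (++restart_count >= 2)        // already restarted once: give up
        return false;
      s.init();                        // restart from the top
      continue;
    }
    if (v == key)
      return true;
  }
}

int main() {
  const int data[] = {7, 3, 9};
  ToyScan s{data, 3, 0};
  std::printf("%d\n", find_with_restart(s, 9));   // 1: found
  std::printf("%d\n", find_with_restart(s, 4));   // 0: gave up after one restart
  return 0;
}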
@ -6626,20 +6652,6 @@ int Delete_rows_log_event::do_exec_row(TABLE *table)
{
DBUG_ASSERT(table != NULL);
if (table->s->keys > 0)
{
/* We have a key: search the table using the index */
if (!table->file->inited)
if (int error= table->file->ha_index_init(0, FALSE))
return error;
}
else
{
/* We doesn't have a key: search the table using rnd_next() */
if (int error= table->file->ha_rnd_init(1))
return error;
}
int error= find_and_fetch_row(table, m_key);
if (error)
return error;
@ -6651,11 +6663,6 @@ int Delete_rows_log_event::do_exec_row(TABLE *table)
*/
error= table->file->ha_delete_row(table->record[0]);
/*
Have to restart the scan to be able to fetch the next row.
*/
table->file->ha_index_or_rnd_end();
return error;
}
@ -6736,17 +6743,6 @@ int Update_rows_log_event::do_before_row_operations(TABLE *table)
if (!m_memory)
return HA_ERR_OUT_OF_MEM;
if (table->s->keys > 0)
{
/* We have a key: search the table using the index */
if (!table->file->inited)
error= table->file->ha_index_init(0, FALSE);
}
else
{
/* We doesn't have a key: search the table using rnd_next() */
error= table->file->ha_rnd_init(1);
}
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
return error;
@ -3092,7 +3092,7 @@ with --log-bin instead.");
}
if (global_system_variables.binlog_format == BINLOG_FORMAT_UNSPEC)
{
#ifdef HAVE_NDB_BINLOG
#if defined(HAVE_NDB_BINLOG) && defined(HAVE_ROW_BASED_REPLICATION)
if (opt_bin_log && have_ndbcluster == SHOW_OPTION_YES)
global_system_variables.binlog_format= BINLOG_FORMAT_ROW;
else
@ -309,15 +309,14 @@ typedef struct st_relay_log_info
void cleanup_context(THD *, bool);
void clear_tables_to_lock() {
TABLE_LIST *ptr= tables_to_lock;
while (ptr)
while (tables_to_lock)
{
char *to_free= reinterpret_cast<char*>(ptr);
ptr= ptr->next_global;
char *to_free= reinterpret_cast<gptr>(tables_to_lock);
tables_to_lock= tables_to_lock->next_global;
tables_to_lock_count--;
my_free(to_free, MYF(MY_WME));
}
tables_to_lock= 0;
tables_to_lock_count= 0;
DBUG_ASSERT(tables_to_lock == NULL && tables_to_lock_count == 0);
}
time_t unsafe_to_stop_at;
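Editor's note: the rewritten clear_tables_to_lock() above walks the list through the member itself rather than a local cursor, so tables_to_lock and tables_to_lock_count stay consistent at every step and the final assertion can check both. The underlying pattern is the usual "pop from the head until empty" way of freeing a singly linked list; a small self-contained sketch with a generic node type (not the server's TABLE_LIST) follows.

#include <cassert>
#include <cstdlib>

struct Node { Node *next_global; /* payload omitted */ };

struct ListOwner {
  Node *head = nullptr;
  unsigned count = 0;

  void clear() {
    // Pop from the head until the list is empty; head/count are valid
    // after every iteration, mirroring clear_tables_to_lock().
    while (head) {
      Node *to_free = head;
      head = head->next_global;
      count--;
      std::free(to_free);
    }
    assert(head == nullptr && count == 0);
  }
};

int main() {
  ListOwner l;
  for (int i = 0; i < 3; i++) {                                   // build a 3-node list with malloc,
    Node *n = static_cast<Node*>(std::malloc(sizeof(Node)));      // freed again by clear()
    n->next_global = l.head;
    l.head = n;
    l.count++;
  }
  l.clear();
  return 0;
}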
@ -1270,12 +1270,14 @@ bool sys_var_thd_binlog_format::is_readonly() const
And this test will also prevent switching from RBR to RBR (a no-op which
should not happen too often).
*/
#ifdef HAVE_ROW_BASED_REPLICATION
if ((thd->variables.binlog_format == BINLOG_FORMAT_ROW) &&
thd->temporary_tables)
{
my_error(ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR, MYF(0));
return 1;
}
#endif /*HAVE_ROW_BASED_REPLICATION*/
/*
if in a stored function, it's too late to change mode
*/
@ -1299,7 +1301,9 @@ bool sys_var_thd_binlog_format::is_readonly() const
void fix_binlog_format_after_update(THD *thd, enum_var_type type)
{
#ifdef HAVE_ROW_BASED_REPLICATION
thd->reset_current_stmt_binlog_row_based();
#endif /*HAVE_ROW_BASED_REPLICATION*/
}
static void fix_max_binlog_size(THD *thd, enum_var_type type)
@ -1069,7 +1069,9 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
handled either before writing a query log event (inside
binlog_query()) or when preparing a pending event.
*/
#ifdef HAVE_ROW_BASED_REPLICATION
thd->binlog_flush_pending_rows_event(TRUE);
#endif /*HAVE_ROW_BASED_REPLICATION*/
mysql_unlock_tables(thd, thd->lock);
thd->lock=0;
}
@ -197,7 +197,10 @@ THD::THD()
:Statement(CONVENTIONAL_EXECUTION, 0, ALLOC_ROOT_MIN_BLOCK_SIZE, 0),
Open_tables_state(refresh_version), rli_fake(0),
lock_id(&main_lock_id),
user_time(0), in_sub_stmt(0), binlog_table_maps(0),
user_time(0), in_sub_stmt(0),
#ifdef HAVE_ROW_BASED_REPLICATION
binlog_table_maps(0),
#endif /*HAVE_ROW_BASED_REPLICATION*/
global_read_lock(0), is_fatal_error(0),
rand_used(0), time_zone_used(0),
last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0),
@ -333,7 +336,9 @@ void THD::init(void)
bzero((char*) warn_count, sizeof(warn_count));
total_warn_count= 0;
update_charset();
#ifdef HAVE_ROW_BASED_REPLICATION
reset_current_stmt_binlog_row_based();
#endif /*HAVE_ROW_BASED_REPLICATION*/
bzero((char *) &status_var, sizeof(status_var));
}
@ -2713,8 +2718,10 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype,
the flushing will be done inside the top-most
close_thread_tables().
*/
#ifdef HAVE_ROW_BASED_REPLICATION
if (this->lock)
DBUG_RETURN(binlog_flush_pending_rows_event(TRUE));
#endif /*HAVE_ROW_BASED_REPLICATION*/
DBUG_RETURN(0);
}
/* Otherwise, we fall through */
@ -2733,7 +2740,9 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype,
table maps were written.
*/
int error= mysql_bin_log.write(&qinfo);
#ifdef HAVE_ROW_BASED_REPLICATION
binlog_table_maps= 0;
#endif /*HAVE_ROW_BASED_REPLICATION*/
DBUG_RETURN(error);
}
break;
@ -1380,6 +1380,7 @@ public:
void restore_sub_statement_state(Sub_statement_state *backup);
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
void restore_active_arena(Query_arena *set, Query_arena *backup);
#ifdef HAVE_ROW_BASED_REPLICATION
inline void set_current_stmt_binlog_row_based_if_mixed()
{
if (variables.binlog_format == BINLOG_FORMAT_MIXED)
@ -1393,6 +1394,7 @@ public:
{
current_stmt_binlog_row_based= test(variables.binlog_format == BINLOG_FORMAT_ROW);
}
#endif /*HAVE_ROW_BASED_REPLICATION*/
};
@ -2468,8 +2468,10 @@ mysql_execute_command(THD *thd)
statistic_increment(thd->status_var.com_stat[lex->sql_command],
&LOCK_status);
#ifdef HAVE_ROW_BASED_REPLICATION
if (lex->binlog_row_based_if_mixed)
thd->set_current_stmt_binlog_row_based_if_mixed();
#endif /*HAVE_ROW_BASED_REPLICATION*/
switch (lex->sql_command) {
case SQLCOM_SELECT:
@ -5133,7 +5135,9 @@ end:
*/
if (thd->one_shot_set && lex->sql_command != SQLCOM_SET_OPTION)
reset_one_shot_variables(thd);
#ifdef HAVE_ROW_BASED_REPLICATION
thd->reset_current_stmt_binlog_row_based();
#endif /*HAVE_ROW_BASED_REPLICATION*/
/*
The return value for ROW_COUNT() is "implementation dependent" if the
@ -134,8 +134,8 @@ void GlobalDictCache::printCache()
const unsigned sz = vers->size();
for(unsigned i = 0; i<sz ; i++){
TableVersion tv= (*vers)[i];
DBUG_PRINT(" ", ("vers[%d]: ver: %d, refCount: %d, status: %d",
sz, tv.m_version, tv.m_refCount, tv.m_status));
DBUG_PRINT(" ", ("impl: %p vers[%d]: ver: %d, refCount: %d, status: %d",
tv.m_impl, i, tv.m_version, tv.m_refCount, tv.m_status));
if(tv.m_impl != 0)
{
DBUG_PRINT(" ", ("m_impl: internalname: %s",
@ -79,18 +79,26 @@ is_ndb_blob_table(const NdbTableImpl* t)
NdbColumnImpl::NdbColumnImpl()
: NdbDictionary::Column(* this), m_attrId(-1), m_facade(this)
{
DBUG_ENTER("NdbColumnImpl::NdbColumnImpl");
DBUG_PRINT("info", ("this: %p", this));
init();
DBUG_VOID_RETURN;
}
NdbColumnImpl::NdbColumnImpl(NdbDictionary::Column & f)
: NdbDictionary::Column(* this), m_attrId(-1), m_facade(&f)
{
DBUG_ENTER("NdbColumnImpl::NdbColumnImpl");
DBUG_PRINT("info", ("this: %p", this));
init();
DBUG_VOID_RETURN;
}
NdbColumnImpl&
NdbColumnImpl::operator=(const NdbColumnImpl& col)
{
DBUG_ENTER("NdbColumnImpl::operator=");
DBUG_PRINT("info", ("this: %p &col: %p", this, &col));
m_attrId = col.m_attrId;
m_name = col.m_name;
m_type = col.m_type;
@ -112,13 +120,14 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
if (col.m_blobTable == NULL)
m_blobTable = NULL;
else {
m_blobTable = new NdbTableImpl();
if (m_blobTable == NULL)
m_blobTable = new NdbTableImpl();
m_blobTable->assign(*col.m_blobTable);
}
m_column_no = col.m_column_no;
// Do not copy m_facade !!
return *this;
DBUG_RETURN(*this);
}
void
@ -261,15 +270,19 @@ NdbColumnImpl::init(Type t)
NdbColumnImpl::~NdbColumnImpl()
{
DBUG_ENTER("NdbColumnImpl::~NdbColumnImpl");
DBUG_PRINT("info", ("this: %p", this));
if (m_blobTable != NULL)
delete m_blobTable;
m_blobTable = NULL;
DBUG_VOID_RETURN;
}
bool
NdbColumnImpl::equal(const NdbColumnImpl& col) const
{
DBUG_ENTER("NdbColumnImpl::equal");
DBUG_PRINT("info", ("this: %p &col: %p", this, &col));
if(strcmp(m_name.c_str(), col.m_name.c_str()) != 0){
DBUG_RETURN(false);
}
@ -377,24 +390,33 @@ NdbTableImpl::NdbTableImpl()
: NdbDictionary::Table(* this),
NdbDictObjectImpl(NdbDictionary::Object::UserTable), m_facade(this)
{
DBUG_ENTER("NdbTableImpl::NdbTableImpl");
DBUG_PRINT("info", ("this: %p", this));
init();
DBUG_VOID_RETURN;
}
NdbTableImpl::NdbTableImpl(NdbDictionary::Table & f)
: NdbDictionary::Table(* this),
NdbDictObjectImpl(NdbDictionary::Object::UserTable), m_facade(&f)
{
DBUG_ENTER("NdbTableImpl::NdbTableImpl");
DBUG_PRINT("info", ("this: %p", this));
init();
DBUG_VOID_RETURN;
}
NdbTableImpl::~NdbTableImpl()
{
DBUG_ENTER("NdbTableImpl::~NdbTableImpl");
DBUG_PRINT("info", ("this: %p", this));
if (m_index != 0) {
delete m_index;
m_index = 0;
}
for (unsigned i = 0; i < m_columns.size(); i++)
delete m_columns[i];
delete m_columns[i];
DBUG_VOID_RETURN;
}
void
@ -636,6 +658,8 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const
void
NdbTableImpl::assign(const NdbTableImpl& org)
{
DBUG_ENTER("NdbColumnImpl::assign");
DBUG_PRINT("info", ("this: %p &org: %p", this, &org));
/* m_changeMask intentionally not copied */
m_primaryTableId = org.m_primaryTableId;
m_internalName.assign(org.m_internalName);
@ -662,7 +686,14 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_columnHashMask, m_columnHash, m_hashValueMask, m_hashpointerValue
is state calculated by computeAggregates and buildColumnHash
*/
for(unsigned i = 0; i<org.m_columns.size(); i++){
unsigned i;
for(i = 0; i < m_columns.size(); i++)
{
delete m_columns[i];
}
m_columns.clear();
for(i = 0; i < org.m_columns.size(); i++)
{
NdbColumnImpl * col = new NdbColumnImpl();
const NdbColumnImpl * iorg = org.m_columns[i];
(* col) = (* iorg);
@ -702,6 +733,7 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_tablespace_name = org.m_tablespace_name;
m_tablespace_id= org.m_tablespace_id;
m_tablespace_version = org.m_tablespace_version;
DBUG_VOID_RETURN;
}
void NdbTableImpl::setName(const char * name)
@ -1085,14 +1117,20 @@ NdbEventImpl::NdbEventImpl() :
NdbDictionary::Event(* this),
NdbDictObjectImpl(NdbDictionary::Object::TypeUndefined), m_facade(this)
{
DBUG_ENTER("NdbEventImpl::NdbEventImpl");
DBUG_PRINT("info", ("this: %p", this));
init();
DBUG_VOID_RETURN;
}
NdbEventImpl::NdbEventImpl(NdbDictionary::Event & f) :
NdbDictionary::Event(* this),
NdbDictObjectImpl(NdbDictionary::Object::TypeUndefined), m_facade(&f)
{
DBUG_ENTER("NdbEventImpl::NdbEventImpl");
DBUG_PRINT("info", ("this: %p", this));
init();
DBUG_VOID_RETURN;
}
void NdbEventImpl::init()
@ -1108,10 +1146,13 @@ void NdbEventImpl::init()
NdbEventImpl::~NdbEventImpl()
{
DBUG_ENTER("NdbEventImpl::~NdbEventImpl");
DBUG_PRINT("info", ("this: %p", this));
for (unsigned i = 0; i < m_columns.size(); i++)
delete m_columns[i];
if (m_tableImpl)
delete m_tableImpl;
DBUG_VOID_RETURN;
}
void NdbEventImpl::setName(const char * name)
@ -1134,11 +1175,14 @@ NdbEventImpl::setTable(const NdbDictionary::Table& table)
void
NdbEventImpl::setTable(NdbTableImpl *tableImpl)
{
DBUG_ENTER("NdbEventImpl::setTable");
DBUG_PRINT("info", ("this: %p tableImpl: %p", this, tableImpl));
DBUG_ASSERT(tableImpl->m_status != NdbDictionary::Object::Invalid);
if (!m_tableImpl)
m_tableImpl = new NdbTableImpl();
// Copy table, since event might be accessed from different threads
m_tableImpl->assign(*tableImpl);
DBUG_VOID_RETURN;
}
const NdbDictionary::Table *
@ -3681,9 +3725,12 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
DBUG_RETURN(NULL);
}
}
ev->setTable(tab);
releaseTableGlobal(*tab, 0);
}
ev->setTable(tab);
else
ev->setTable(tab);
tab = 0;
ev->setTable(m_ndb.externalizeTableName(ev->getTableName()));
// get the columns from the attrListBitmask
@ -3944,6 +3991,7 @@ NdbDictionaryImpl::dropBlobEvents(const NdbEventImpl& evnt)
if (blob_evnt == NULL)
continue;
(void)dropEvent(*blob_evnt);
delete blob_evnt;
}
} else {
// loop over MAX_ATTRIBUTES_IN_TABLE ...
@ -70,21 +70,6 @@ print_std(const SubTableData * sdata, LinearSectionPtr ptr[3])
*
*/
//#define EVENT_DEBUG
#ifdef EVENT_DEBUG
#define DBUG_ENTER_EVENT(A) DBUG_ENTER(A)
#define DBUG_RETURN_EVENT(A) DBUG_RETURN(A)
#define DBUG_VOID_RETURN_EVENT DBUG_VOID_RETURN
#define DBUG_PRINT_EVENT(A,B) DBUG_PRINT(A,B)
#define DBUG_DUMP_EVENT(A,B,C) DBUG_DUMP(A,B,C)
#else
#define DBUG_ENTER_EVENT(A)
#define DBUG_RETURN_EVENT(A) return(A)
#define DBUG_VOID_RETURN_EVENT return
#define DBUG_PRINT_EVENT(A,B)
#define DBUG_DUMP_EVENT(A,B,C)
#endif
// todo handle several ndb objects
// todo free allocated data when closing NdbEventBuffer
@ -189,6 +174,17 @@ NdbEventOperationImpl::~NdbEventOperationImpl()
// m_bufferHandle->dropSubscribeEvent(m_bufferId);
; // ToDo? We should send stop signal here
if (theMainOp == NULL)
{
NdbEventOperationImpl* tBlobOp = theBlobOpList;
while (tBlobOp != NULL)
{
NdbEventOperationImpl *op = tBlobOp;
tBlobOp = tBlobOp->m_next;
delete op;
}
}
m_ndb->theImpl->theNdbObjectIdMap.unmap(m_oid, this);
DBUG_PRINT("exit",("this: %p/%p oid: %u main: %p",
this, m_facade, m_oid, theMainOp));
@ -973,7 +969,15 @@ NdbEventBuffer::~NdbEventBuffer()
delete op->m_facade;
}
for (unsigned j= 0; j < m_allocated_data.size(); j++)
unsigned j;
Uint32 sz= m_active_gci.size();
Gci_container* array = (Gci_container*)m_active_gci.getBase();
for(j = 0; j < sz; j++)
{
array[j].~Gci_container();
}
for (j= 0; j < m_allocated_data.size(); j++)
{
unsigned sz= m_allocated_data[j]->sz;
EventBufData *data= m_allocated_data[j]->data;
@ -1476,6 +1480,7 @@ NdbEventBuffer::report_node_failure(Uint32 node_id)
data.req_nodeid = (Uint8)node_id;
data.ndbd_nodeid = (Uint8)node_id;
data.logType = SubTableData::LOG;
data.gci = m_latestGCI + 1;
/**
* Insert this event for each operation
*/
@ -1492,8 +1497,11 @@ NdbEventBuffer::report_node_failure(Uint32 node_id)
void
NdbEventBuffer::completeClusterFailed()
{
DBUG_ENTER("NdbEventBuffer::completeClusterFailed");
NdbEventOperation* op= m_ndb->getEventOperation(0);
if (op == 0)
return;
DBUG_ENTER("NdbEventBuffer::completeClusterFailed");
SubTableData data;
LinearSectionPtr ptr[3];
bzero(&data, sizeof(data));
@ -1502,73 +1510,66 @@ NdbEventBuffer::completeClusterFailed()
data.tableId = ~0;
data.operation = NdbDictionary::Event::_TE_CLUSTER_FAILURE;
data.logType = SubTableData::LOG;
/**
* Find min not completed GCI
*/
Uint32 sz= m_active_gci.size();
Uint64 gci= ~0;
Gci_container* bucket = 0;
Gci_container* array = (Gci_container*)m_active_gci.getBase();
for(Uint32 i = 0; i<sz; i++)
{
if(array[i].m_gcp_complete_rep_count && array[i].m_gci < gci)
{
bucket= array + i;
gci = bucket->m_gci;
}
}
if(bucket == 0)
{
/**
* Did not find any not completed GCI's
* lets fake one...
*/
gci = m_latestGCI + 1;
bucket = array + ( gci & ACTIVE_GCI_MASK );
bucket->m_gcp_complete_rep_count = 1;
}
const Uint32 cnt= bucket->m_gcp_complete_rep_count = 1;
/**
* Release all GCI's
*/
for(Uint32 i = 0; i<sz; i++)
{
Gci_container* tmp = array + i;
if(!tmp->m_data.is_empty())
{
free_list(tmp->m_data);
#if 0
m_free_data_count++;
EventBufData* loop= tmp->m_head;
while(loop != tmp->m_tail)
{
m_free_data_count++;
loop = loop->m_next;
}
#endif
}
bzero(tmp, sizeof(Gci_container));
}
bucket->m_gci = gci;
bucket->m_gcp_complete_rep_count = cnt;
data.gci = gci;
data.gci = m_latestGCI + 1;
/**
* Insert this event for each operation
*/
NdbEventOperation* op= 0;
while((op = m_ndb->getEventOperation(op)))
do
{
NdbEventOperationImpl* impl= &op->m_impl;
data.senderData = impl->m_oid;
insertDataL(impl, &data, ptr);
insertDataL(impl, &data, ptr);
} while((op = m_ndb->getEventOperation(op)));
/**
* Release all GCI's with m_gci > gci
*/
Uint32 i;
Uint32 sz= m_active_gci.size();
Uint64 gci= data.gci;
Gci_container* bucket = 0;
Gci_container* array = (Gci_container*)m_active_gci.getBase();
for(i = 0; i < sz; i++)
{
Gci_container* tmp = array + i;
if (tmp->m_gci > gci)
{
if(!tmp->m_data.is_empty())
{
free_list(tmp->m_data);
}
tmp->~Gci_container();
bzero(tmp, sizeof(Gci_container));
}
else if (tmp->m_gcp_complete_rep_count)
{
if (tmp->m_gci == gci)
{
bucket= tmp;
continue;
}
// we have found an old not-completed gci
// something is wrong, assert in debug, but try so salvage
// in release
ndbout_c("out of order bucket detected at cluster disconnect, "
"data.gci: %u. tmp->m_gci: %u",
(unsigned)data.gci, (unsigned)tmp->m_gci);
assert(false);
if(!tmp->m_data.is_empty())
{
free_list(tmp->m_data);
}
tmp->~Gci_container();
bzero(tmp, sizeof(Gci_container));
}
}
assert(bucket != 0);
const Uint32 cnt= bucket->m_gcp_complete_rep_count = 1;
bucket->m_gci = gci;
bucket->m_gcp_complete_rep_count = cnt;
/**
* And finally complete this GCI
@ -2262,8 +2263,12 @@ EventBufData_list::add_gci_op(Gci_op g, bool del)
if (m_gci_op_alloc != 0) {
Uint32 bytes = m_gci_op_alloc * sizeof(Gci_op);
memcpy(m_gci_op_list, old_list, bytes);
DBUG_PRINT_EVENT("info", ("this: %p delete m_gci_op_list: %p",
this, old_list));
delete [] old_list;
}
DBUG_PRINT_EVENT("info", ("this: %p new m_gci_op_list: %p",
this, m_gci_op_list));
m_gci_op_alloc = n;
}
assert(m_gci_op_count < m_gci_op_alloc);
@ -2275,6 +2280,9 @@ EventBufData_list::add_gci_op(Gci_op g, bool del)
void
EventBufData_list::move_gci_ops(EventBufData_list *list, Uint64 gci)
{
DBUG_ENTER_EVENT("EventBufData_list::move_gci_ops");
DBUG_PRINT_EVENT("info", ("this: %p list: %p gci: %llu",
this, list, gci));
assert(!m_is_not_multi_list);
if (!list->m_is_not_multi_list)
{
@ -2290,6 +2298,8 @@ EventBufData_list::move_gci_ops(EventBufData_list *list, Uint64 gci)
}
{
Gci_ops *new_gci_ops = new Gci_ops;
DBUG_PRINT_EVENT("info", ("this: %p m_gci_op_list: %p",
new_gci_ops, list->m_gci_op_list));
if (m_gci_ops_list_tail)
m_gci_ops_list_tail->m_next = new_gci_ops;
else
@ -2308,6 +2318,7 @@ end:
list->m_gci_op_list = 0;
list->m_gci_ops_list_tail = 0;
list->m_gci_op_alloc = 0;
DBUG_VOID_RETURN_EVENT;
}
NdbEventOperation*
@ -25,6 +25,20 @@
#include <UtilBuffer.hpp>
#define NDB_EVENT_OP_MAGIC_NUMBER 0xA9F301B4
//#define EVENT_DEBUG
#ifdef EVENT_DEBUG
#define DBUG_ENTER_EVENT(A) DBUG_ENTER(A)
#define DBUG_RETURN_EVENT(A) DBUG_RETURN(A)
#define DBUG_VOID_RETURN_EVENT DBUG_VOID_RETURN
#define DBUG_PRINT_EVENT(A,B) DBUG_PRINT(A,B)
#define DBUG_DUMP_EVENT(A,B,C) DBUG_DUMP(A,B,C)
#else
#define DBUG_ENTER_EVENT(A)
#define DBUG_RETURN_EVENT(A) return(A)
#define DBUG_VOID_RETURN_EVENT return
#define DBUG_PRINT_EVENT(A,B)
#define DBUG_DUMP_EVENT(A,B,C)
#endif
class NdbEventOperationImpl;
@ -149,19 +163,29 @@ EventBufData_list::EventBufData_list()
m_gci_ops_list_tail(0),
m_gci_op_alloc(0)
{
DBUG_ENTER_EVENT("EventBufData_list::EventBufData_list");
DBUG_PRINT_EVENT("info", ("this: %p", this));
DBUG_VOID_RETURN_EVENT;
}
inline
EventBufData_list::~EventBufData_list()
{
DBUG_ENTER_EVENT("EventBufData_list::~EventBufData_list");
DBUG_PRINT_EVENT("info", ("this: %p m_is_not_multi_list: %u",
this, m_is_not_multi_list));
if (m_is_not_multi_list)
{
DBUG_PRINT_EVENT("info", ("delete m_gci_op_list: %p", m_gci_op_list));
delete [] m_gci_op_list;
}
else
{
Gci_ops *op = first_gci_ops();
while (op)
op = next_gci_ops();
}
DBUG_VOID_RETURN_EVENT;
}
inline
@ -223,7 +247,11 @@ EventBufData_list::next_gci_ops()
Gci_ops *first = m_gci_ops_list;
m_gci_ops_list = first->m_next;
if (first->m_gci_op_list)
{
DBUG_PRINT_EVENT("info", ("this: %p delete m_gci_op_list: %p",
this, first->m_gci_op_list));
delete [] first->m_gci_op_list;
}
delete first;
if (m_gci_ops_list == 0)
m_gci_ops_list_tail = 0;