Merge perch.ndb.mysql.com:/home/jonas/src/51-new

into  perch.ndb.mysql.com:/home/jonas/src/51-ndb
jonas@perch.ndb.mysql.com 2006-01-19 15:05:57 +01:00
commit 60f25a4c1b
63 changed files with 2350 additions and 363 deletions


@ -359,6 +359,8 @@ int main(int argc, char **argv)
static struct my_option my_long_options[] =
{
{"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG,
0, 0, 0, 0, 0, 0},
{"auto-generate-sql", 'a',
"Generate SQL where not supplied by file or command line.",
(gptr*) &auto_generate_sql, (gptr*) &auto_generate_sql,
@ -388,8 +390,6 @@ static struct my_option my_long_options[] =
{"engine", 'e', "Storage engine to use for creating the table.",
(gptr*) &default_engine, (gptr*) &default_engine, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG,
0, 0, 0, 0, 0, 0},
{"host", 'h', "Connect to host.", (gptr*) &host, (gptr*) &host, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"iterations", 'i', "Number of times too run the tests.", (gptr*) &iterations,
@ -402,28 +402,28 @@ static struct my_option my_long_options[] =
(gptr*) &num_char_cols, (gptr*) &num_char_cols, 0, GET_UINT, REQUIRED_ARG,
1, 0, 0, 0, 0, 0},
{"number-int-cols", 'y',
"Number of VARCHAR columns to create table with if specifying \
--sql-generate-sql.", (gptr*) &num_int_cols, (gptr*) &num_int_cols, 0,
"Number of VARCHAR columns to create table with if specifying "
"--sql-generate-sql.", (gptr*) &num_int_cols, (gptr*) &num_int_cols, 0,
GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0},
{"number-of-query", OPT_MYSQL_NUMBER_OF_QUERY,
{"number-of-queries", OPT_MYSQL_NUMBER_OF_QUERY,
"Limit each client to this number of queries (this is not exact).",
(gptr*) &num_of_query, (gptr*) &num_of_query, 0,
GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"only-print", OPT_MYSQL_ONLY_PRINT,
"This causes mysqlslap to not connect to the databases, but instead print \
out what it would have done instead.",
"This causes mysqlslap to not connect to the databases, but instead print "
"out what it would have done instead.",
(gptr*) &opt_only_print, (gptr*) &opt_only_print, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"password", 'p',
"Password to use when connecting to server. If password is not given it's \
asked from the tty.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"port", 'P', "Port number to use for connection.", (gptr*) &opt_mysql_port,
(gptr*) &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0,
0},
"Password to use when connecting to server. If password is not given it's "
"asked from the tty.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef __WIN__
{"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"port", 'P', "Port number to use for connection.", (gptr*) &opt_mysql_port,
(gptr*) &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0,
0},
{"preserve-schema", OPT_MYSQL_PRESERVE_SCHEMA,
"Preserve the schema from the mysqlslap run.",
(gptr*) &opt_preserve, (gptr*) &opt_preserve, 0, GET_BOOL,
@ -434,33 +434,33 @@ static struct my_option my_long_options[] =
{"query", 'q', "Query to run or file containing query to run.",
(gptr*) &user_supplied_query, (gptr*) &user_supplied_query,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"silent", 's', "Run program in silent mode - no output.",
(gptr*) &opt_silent, (gptr*) &opt_silent, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
{"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name,
(gptr*) &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
#endif
{"silent", 's', "Run program in silent mode - no output.",
(gptr*) &opt_silent, (gptr*) &opt_silent, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"slave", OPT_MYSQL_SLAP_SLAVE, "Follow master locks for other slap clients",
(gptr*) &opt_slave, (gptr*) &opt_slave, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"socket", 'S', "Socket file to use for connection.",
(gptr*) &opt_mysql_unix_port, (gptr*) &opt_mysql_unix_port, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#include <sslopt-longopts.h>
{"use-threads", OPT_USE_THREADS,
"Use pthread calls instead of fork() calls (default on Windows)",
(gptr*) &opt_use_threads, (gptr*) &opt_use_threads, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#include <sslopt-longopts.h>
#ifndef DONT_ALLOW_USER_CHANGE
{"user", 'u', "User for login if not current user.", (gptr*) &user,
(gptr*) &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"verbose", 'v',
"More verbose output; You can use this multiple times to get even more \
verbose output.", (gptr*) &verbose, (gptr*) &verbose, 0,
"More verbose output; You can use this multiple times to get even more "
"verbose output.", (gptr*) &verbose, (gptr*) &verbose, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
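Several of the help strings above (for example for --number-int-cols, --only-print, --password and --verbose) are converted from a backslash line continuation inside one string literal to adjacent string literals. A small standalone C++ sketch (hypothetical, not part of the patch) of why the two forms are not equivalent:

#include <cstdio>

int main()
{
  /* Backslash continuation: the next source line's leading spaces end up
     inside the string that --help prints. */
  const char *old_style= "Number of VARCHAR columns to create table with if specifying \
     --sql-generate-sql.";

  /* Adjacent string literals: concatenated by the compiler, no stray
     indentation is embedded. */
  const char *new_style= "Number of VARCHAR columns to create table with if specifying "
                         "--sql-generate-sql.";

  printf("old: %s\nnew: %s\n", old_style, new_style);
  return 0;
}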


@ -2437,7 +2437,8 @@ MYSQL_STORAGE_ENGINE(archive,,,,,,storage/archive,,
\$(top_builddir)/storage/archive/libarchive.a, [
AC_CONFIG_FILES(storage/archive/Makefile)
])
MYSQL_STORAGE_ENGINE(csv,,,,,no,storage/csv,,,[
MYSQL_STORAGE_ENGINE(csv,,,"yes",,tina_hton,storage/csv,
../storage/csv/ha_tina.o,,[
AC_CONFIG_FILES(storage/csv/Makefile)
])
MYSQL_STORAGE_ENGINE(blackhole)


@ -66,7 +66,8 @@ extern MY_UNICASE_INFO *my_unicase_turkish[256];
#define MY_CS_UNICODE 128 /* if a charset is full unicode */
#define MY_CS_READY 256 /* if a charset is initialized */
#define MY_CS_AVAILABLE 512 /* If either compiled-in or loaded*/
#define MY_CS_CSSORT 1024 /* if case sensitive sort order */
#define MY_CS_CSSORT 1024 /* if case sensitive sort order */
#define MY_CS_HIDDEN 2048 /* don't display in SHOW */
#define MY_CHARSET_UNDEFINED 0


@ -155,13 +155,18 @@ enum ha_extra_function {
*/
HA_EXTRA_KEYREAD_PRESERVE_FIELDS,
HA_EXTRA_MMAP,
/*
/*
Ignore if a tuple is not found, continue processing the
transaction and ignore that 'row'. Needed for idempotency
handling on the slave
*/
HA_EXTRA_IGNORE_NO_KEY,
HA_EXTRA_NO_IGNORE_NO_KEY
HA_EXTRA_NO_IGNORE_NO_KEY,
/*
Mark the table as a log table. For some handlers (e.g. CSV) this results
in a special locking for the table.
*/
HA_EXTRA_MARK_AS_LOG_TABLE
};
/* The following is parameter to ha_panic() */


@ -3,5 +3,12 @@
--source include/not_windows.inc
# check that the CSV engine was compiled in, as the IM test suite uses
# a log-tables-specific option, and the option is not present if CSV
# (and hence the log tables) is not compiled in.
# NOTE: In the future we should remove this check and make the test suite
# pass correct options to IM depending on the CSV presence
--source include/have_csv.inc
--connection default
--disconnect dflt_server_con


@ -745,3 +745,4 @@ if ($fixed_bug16370)
--source include/partition_12.inc
}
DROP TABLE t1;
DROP TABLE if exists t0_template;


@ -13,3 +13,5 @@ show create table columns_priv;
show create table procs_priv;
show create table proc;
show create table event;
show create table general_log;
show create table slow_log;


@ -570,6 +570,10 @@ CREATE TABLE proc (
) character set utf8 comment='Stored Procedures';
CREATE PROCEDURE create_log_tables() BEGIN DECLARE is_csv_enabled int DEFAULT 0; SELECT @@have_csv = 'YES' INTO is_csv_enabled; IF (is_csv_enabled) THEN CREATE TABLE general_log (event_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT, thread_id INTEGER, server_id INTEGER, command_type VARCHAR(64), argument MEDIUMTEXT) engine=CSV CHARACTER SET utf8 comment='General log'; CREATE TABLE slow_log (start_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT NOT NULL, query_time TIME NOT NULL, lock_time TIME NOT NULL, rows_sent INTEGER NOT NULL, rows_examined INTEGER NOT NULL, db VARCHAR(512), last_insert_id INTEGER, insert_id INTEGER, server_id INTEGER, sql_text MEDIUMTEXT NOT NULL) engine=CSV CHARACTER SET utf8 comment='Slow log'; END IF; END;
CALL create_log_tables();
DROP PROCEDURE create_log_tables;
CREATE TABLE event (
db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '',
name char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '',


@ -909,6 +909,7 @@ sub command_line_setup () {
path_datadir => "$opt_vardir/im_mysqld_1.data",
path_sock => "$sockdir/mysqld_1.sock",
path_pid => "$opt_vardir/run/mysqld_1.pid",
old_log_format => 1
};
$instance_manager->{'instances'}->[1]=
@ -919,6 +920,7 @@ sub command_line_setup () {
path_sock => "$sockdir/mysqld_2.sock",
path_pid => "$opt_vardir/run/mysqld_2.pid",
nonguarded => 1,
old_log_format => 1
};
if ( $opt_extern )
@ -1825,6 +1827,7 @@ EOF
;
print OUT "nonguarded\n" if $instance->{'nonguarded'};
print OUT "old-log-format\n" if $instance->{'old_log_format'};
print OUT "\n";
}


@ -5,6 +5,7 @@ columns_priv
db
event
func
general_log
help_category
help_keyword
help_relation
@ -13,6 +14,7 @@ host
plugin
proc
procs_priv
slow_log
tables_priv
time_zone
time_zone_leap_second
@ -34,6 +36,7 @@ columns_priv
db
event
func
general_log
help_category
help_keyword
help_relation
@ -42,6 +45,7 @@ host
plugin
proc
procs_priv
slow_log
tables_priv
time_zone
time_zone_leap_second
@ -71,6 +75,7 @@ columns_priv
db
event
func
general_log
help_category
help_keyword
help_relation
@ -79,6 +84,7 @@ host
plugin
proc
procs_priv
slow_log
tables_priv
time_zone
time_zone_leap_second


@ -4976,6 +4976,23 @@ c1
4
5
DROP TABLE bug14672;
CREATE TABLE test_concurrent_insert ( val integer ) ENGINE = CSV;
LOCK TABLES test_concurrent_insert READ LOCAL;
INSERT INTO test_concurrent_insert VALUES (1);
SELECT * FROM test_concurrent_insert;
val
1
SELECT * FROM test_concurrent_insert;
val
UNLOCK TABLES;
LOCK TABLES test_concurrent_insert WRITE;
INSERT INTO test_concurrent_insert VALUES (2);
SELECT * FROM test_concurrent_insert;
val
1
2
UNLOCK TABLES;
DROP TABLE test_concurrent_insert;
create table t1 (a int) engine=csv;
insert t1 values (1);
delete from t1;


@ -21,6 +21,7 @@ skip-stack-trace VALUE
skip-innodb VALUE
skip-bdb VALUE
skip-ndbcluster VALUE
old-log-format VALUE
SHOW INSTANCE OPTIONS mysqld2;
option_name value
instance_name VALUE
@ -41,6 +42,7 @@ skip-stack-trace VALUE
skip-innodb VALUE
skip-bdb VALUE
skip-ndbcluster VALUE
old-log-format VALUE
START INSTANCE mysqld2;
STOP INSTANCE mysqld2;
SHOW mysqld1 LOG FILES;


@ -62,6 +62,7 @@ columns_priv
db
event
func
general_log
help_category
help_keyword
help_relation
@ -70,6 +71,7 @@ host
plugin
proc
procs_priv
slow_log
tables_priv
time_zone
time_zone_leap_second
@ -732,7 +734,7 @@ CREATE TABLE t_crashme ( f1 BIGINT);
CREATE VIEW a1 (t_CRASHME) AS SELECT f1 FROM t_crashme GROUP BY f1;
CREATE VIEW a2 AS SELECT t_CRASHME FROM a1;
count(*)
107
109
drop view a2, a1;
drop table t_crashme;
select table_schema,table_name, column_name from
@ -816,7 +818,7 @@ SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHE
table_schema count(*)
cluster_replication 1
information_schema 19
mysql 19
mysql 21
create table t1 (i int, j int);
create trigger trg1 before insert on t1 for each row
begin


@ -0,0 +1,54 @@
use mysql;
truncate table general_log;
select * from general_log;
event_time user_host thread_id server_id command_type argument
TIMESTAMP root[root] @ localhost [] 1 1 Query select * from general_log
truncate table slow_log;
select * from slow_log;
start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
truncate table general_log;
select * from general_log where argument like '%general_log%';
event_time user_host thread_id server_id command_type argument
TIMESTAMP root[root] @ localhost [] 1 1 Query select * from general_log where argument like '%general_log%'
create table join_test (verbose_comment varchar (80), command_type varchar(64));
insert into join_test values ("User performed a usual SQL query", "Query");
insert into join_test values ("New DB connection was registered", "Connect");
insert into join_test values ("Get the table info", "Field List");
select verbose_comment, user_host, argument
from mysql.general_log join join_test
on (mysql.general_log.command_type = join_test.command_type);
verbose_comment user_host argument
User performed a usual SQL query root[root] @ localhost [] select * from general_log where argument like '%general_log%'
User performed a usual SQL query root[root] @ localhost [] create table join_test (verbose_comment varchar (80), command_type varchar(64))
User performed a usual SQL query root[root] @ localhost [] insert into join_test values ("User performed a usual SQL query", "Query")
User performed a usual SQL query root[root] @ localhost [] insert into join_test values ("New DB connection was registered", "Connect")
User performed a usual SQL query root[root] @ localhost [] insert into join_test values ("Get the table info", "Field List")
User performed a usual SQL query root[root] @ localhost [] select verbose_comment, user_host, argument
from mysql.general_log join join_test
on (mysql.general_log.command_type = join_test.command_type)
drop table join_test;
flush logs;
lock tables mysql.general_log WRITE;
ERROR HY000: You can't write-lock a log table. Only read access is possible.
lock tables mysql.slow_log WRITE;
ERROR HY000: You can't write-lock a log table. Only read access is possible.
lock tables mysql.general_log READ;
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
lock tables mysql.slow_log READ;
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
unlock tables;
lock tables mysql.general_log READ LOCAL;
flush logs;
unlock tables;
select "Mark that we woke up from flush logs in the test"
as "test passed";
test passed
Mark that we woke up from flush logs in the test
lock tables mysql.general_log READ LOCAL;
truncate mysql.general_log;
unlock tables;
select "Mark that we woke up from TRUNCATE in the test"
as "test passed";
test passed
Mark that we woke up from TRUNCATE in the test


@ -3,6 +3,8 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
mysql.general_log
note : The storage engine for the table doesn't support optimize
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
@ -11,6 +13,8 @@ mysql.host OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.slow_log
note : The storage engine for the table doesn't support optimize
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@ -22,6 +26,8 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
mysql.general_log
note : The storage engine for the table doesn't support optimize
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
@ -30,6 +36,8 @@ mysql.host OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.slow_log
note : The storage engine for the table doesn't support optimize
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK


@ -1722,3 +1722,4 @@ INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1;
ALTER TABLE t1 ADD PARTITION (PARTITION part2 VALUES LESS THAN (2147483647)
(SUBPARTITION subpart21));
DROP TABLE t1;
DROP TABLE if exists t0_template;


@ -147,10 +147,14 @@ drop table t1;
flush tables;
show open tables;
Database Table In_use Name_locked
mysql general_log 1 0
mysql slow_log 1 0
create table t1(n int);
insert into t1 values (1);
show open tables;
Database Table In_use Name_locked
mysql general_log 1 0
mysql slow_log 1 0
test t1 0 0
drop table t1;
create table t1 (a int not null, b VARCHAR(10), INDEX (b) ) AVG_ROW_LENGTH=10 CHECKSUM=1 COMMENT="test" ENGINE=MYISAM MIN_ROWS=10 MAX_ROWS=100 PACK_KEYS=1 DELAY_KEY_WRITE=1 ROW_FORMAT=fixed;
@ -564,20 +568,24 @@ SELECT 1 FROM mysql.db, mysql.proc, mysql.user, mysql.time_zone, mysql.time_zone
1
SHOW OPEN TABLES;
Database Table In_use Name_locked
mysql db 0 0
mysql proc 0 0
test urkunde 0 0
mysql time_zone 0 0
mysql user 0 0
mysql db 0 0
test txt1 0 0
mysql proc 0 0
mysql slow_log 1 0
test tyt2 0 0
mysql general_log 1 0
mysql user 0 0
mysql time_zone_name 0 0
SHOW OPEN TABLES FROM mysql;
Database Table In_use Name_locked
mysql db 0 0
mysql time_zone 0 0
mysql user 0 0
mysql proc 0 0
mysql time_zone 0 0
mysql db 0 0
mysql slow_log 1 0
mysql general_log 1 0
mysql user 0 0
mysql time_zone_name 0 0
SHOW OPEN TABLES FROM mysql LIKE 'u%';
Database Table In_use Name_locked
@ -590,12 +598,16 @@ test tyt2 0 0
mysql time_zone_name 0 0
SHOW OPEN TABLES LIKE '%o%';
Database Table In_use Name_locked
mysql time_zone 0 0
mysql proc 0 0
mysql time_zone 0 0
mysql slow_log 1 0
mysql general_log 1 0
mysql time_zone_name 0 0
FLUSH TABLES;
SHOW OPEN TABLES;
Database Table In_use Name_locked
mysql general_log 1 0
mysql slow_log 1 0
DROP TABLE txt1;
DROP TABLE tyt2;
DROP TABLE urkunde;


@ -5,6 +5,7 @@ columns_priv
db
event
func
general_log
help_category
help_keyword
help_relation
@ -13,6 +14,7 @@ host
plugin
proc
procs_priv
slow_log
tables_priv
time_zone
time_zone_leap_second
@ -184,6 +186,31 @@ proc CREATE TABLE `proc` (
`comment` char(64) character set utf8 collate utf8_bin NOT NULL default '',
PRIMARY KEY (`db`,`name`,`type`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Stored Procedures'
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
`user_host` mediumtext,
`thread_id` int(11) default NULL,
`server_id` int(11) default NULL,
`command_type` varchar(64) default NULL,
`argument` mediumtext
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log'
show create table slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
`start_time` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
`user_host` mediumtext NOT NULL,
`query_time` time NOT NULL,
`lock_time` time NOT NULL,
`rows_sent` int(11) NOT NULL,
`rows_examined` int(11) NOT NULL,
`db` varchar(512) default NULL,
`last_insert_id` int(11) default NULL,
`insert_id` int(11) default NULL,
`server_id` int(11) default NULL,
`sql_text` mediumtext NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
show create table event;
Table Create Table
event CREATE TABLE `event` (


@ -5,6 +5,10 @@
# This test makes no sense with the embedded server
--source include/not_embedded.inc
# check that CSV engine was compiled in, as the test relies on the presence
# of the log tables (which are CSV-based), as seen by "connect mysql; show tables;"
--source include/have_csv.inc
--disable_warnings
drop table if exists t1,t2;
--enable_warnings


@ -2,7 +2,7 @@
# Test for the CSV engine
#
-- source include/have_csv.inc
--source include/have_csv.inc
#
# Simple select test
@ -1353,6 +1353,40 @@ DROP TABLE bug14672;
# End of 4.1 tests
#
# Test CONCURRENT INSERT (5.1)
#
CREATE TABLE test_concurrent_insert ( val integer ) ENGINE = CSV;
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection con1;
# obtain TL_READ lock on the table
LOCK TABLES test_concurrent_insert READ LOCAL;
connection con2;
# should pass despite the lock
INSERT INTO test_concurrent_insert VALUES (1);
SELECT * FROM test_concurrent_insert;
connection con1;
# first connection should not notice the changes
SELECT * FROM test_concurrent_insert;
UNLOCK TABLES;
# Now check that we see our own changes
LOCK TABLES test_concurrent_insert WRITE;
INSERT INTO test_concurrent_insert VALUES (2);
SELECT * FROM test_concurrent_insert;
UNLOCK TABLES;
# cleanup
DROP TABLE test_concurrent_insert;
#
# BUG#13406 - incorrect amount of "records deleted"
#


@ -26,3 +26,6 @@ rpl_ndb_basic : Bug#16228
rpl_sp : Bug #16456
ndb_autodiscover : Needs to be fixed w.r.t binlog
ndb_autodiscover2 : Needs to be fixed w.r.t binlog
ndb_restore : Needs fixing
system_mysql_db : Needs fixing
system_mysql_db_fix : Needs fixing


@ -1,6 +1,10 @@
# This test uses grants, which can't get tested for embedded server
-- source include/not_embedded.inc
# check that CSV engine was compiled in, as the result of the test
# depends on the presence of the log tables (which are CSV-based).
--source include/have_csv.inc
# Test for information_schema.schemata &
# show databases


@ -0,0 +1,146 @@
#
# Basic log tables test
#
# check that CSV engine was compiled in
--source include/have_csv.inc
use mysql;
#
# Check that log tables work and we can do basic selects. This also
# tests truncate, which works in a special mode with the log tables
#
truncate table general_log;
--replace_column 1 TIMESTAMP
select * from general_log;
truncate table slow_log;
--replace_column 1 TIMESTAMP
select * from slow_log;
#
# We want to check that a record newly written to a log table shows up for
# the query: since log tables use the concurrent insert machinery and log
# tables are always locked by an artificial THD, this feature requires an
# additional check in ha_tina::write_row. This simple test should prove that
# the log table flag in the table handler is triggered and working.
#
truncate table general_log;
--replace_column 1 TIMESTAMP
select * from general_log where argument like '%general_log%';
#
# Check some basic queries interfering with the log tables.
# In our test we'll use a table with verbose comments for the short
# command type names used in the log tables
#
create table join_test (verbose_comment varchar (80), command_type varchar(64));
insert into join_test values ("User performed a usual SQL query", "Query");
insert into join_test values ("New DB connection was registered", "Connect");
insert into join_test values ("Get the table info", "Field List");
select verbose_comment, user_host, argument
from mysql.general_log join join_test
on (mysql.general_log.command_type = join_test.command_type);
drop table join_test;
#
# check that flush of the log tables works fine
#
flush logs;
#
# check locking of the log tables
#
--error 1532
lock tables mysql.general_log WRITE;
--error 1532
lock tables mysql.slow_log WRITE;
#
# This attempts to get a TL_READ_NO_INSERT lock, which is incompatible with
# TL_WRITE_CONCURRENT_INSERT. This should fail. We issue this error as log
# tables are always opened and locked by the logger.
#
--error 1533
lock tables mysql.general_log READ;
--error 1533
lock tables mysql.slow_log READ;
#
# This call should result in TL_READ lock on the log table. This is ok and
# should pass.
#
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
unlock tables;
#
# check that FLUSH LOGS waits for all readers of the log table to vanish
#
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection con1;
lock tables mysql.general_log READ LOCAL;
connection con2;
# this should wait for log tables to unlock
send flush logs;
connection con1;
unlock tables;
# this connection should still be alive at this point
connection con2;
reap;
select "Mark that we woke up from flush logs in the test"
as "test passed";
#
# perform the same check for TRUNCATE: it should also wait for readers
# to disappear
#
connection con1;
lock tables mysql.general_log READ LOCAL;
connection con2;
# this should wait for log tables to unlock
send truncate mysql.general_log;
connection con1;
unlock tables;
# this connection should still be alive at this point
connection con2;
reap;
select "Mark that we woke up from TRUNCATE in the test"
as "test passed";
disconnect con2;
disconnect con1;


@ -1,6 +1,10 @@
# Embedded server doesn't support external clients
--source include/not_embedded.inc
# check that CSV engine was compiled in, as the result of the test
# depends on the presence of the log tables (which are CSV-based).
--source include/have_csv.inc
#
# Bug #13783 mysqlcheck tries to optimize and analyze information_schema
#


@ -2,6 +2,10 @@
# embedded server testing
-- source include/not_embedded.inc
# check that CSV engine was compiled in, as the result of the test
# depends on the presence of the log tables (which are CSV-based).
--source include/have_csv.inc
#
# Test of some show commands
#


@ -2,6 +2,10 @@
# This test must examine integrity of system database "mysql"
#
# check that CSV engine was compiled in, as the result of the test
# depends on the presence of the log tables (which are CSV-based).
--source include/have_csv.inc
# First delete some tables maybe left over from previous tests
--disable_warnings
drop table if exists t1,t1aa,t2aa;


@ -1,6 +1,10 @@
# Embedded server doesn't support external clients
--source include/not_embedded.inc
# check that CSV engine was compiled in, as the test relies on the presence
# of the log tables (which are CSV-based)
--source include/have_csv.inc
#
# This is the test for mysql_fix_privilege_tables
#
@ -85,7 +89,10 @@ INSERT INTO user VALUES ('localhost','', '','N','N','N','N','N','N','N','N','
-- disable_query_log
DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, event;
DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv,
procs_priv, help_category, help_keyword, help_relation, help_topic, proc,
time_zone, time_zone_leap_second, time_zone_name, time_zone_transition,
time_zone_transition_type, general_log, slow_log, event;
-- enable_query_log


@ -78,6 +78,7 @@ my_bool init_compiled_charsets(myf flags __attribute__((unused)))
CHARSET_INFO *cs;
add_compiled_collation(&my_charset_bin);
add_compiled_collation(&my_charset_filename);
add_compiled_collation(&my_charset_latin1);
add_compiled_collation(&my_charset_latin1_bin);


@ -153,12 +153,12 @@ int packfrm(const void *data, uint len,
if (!(blob= (struct frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME))))
goto err;
// Store compressed blob in machine independent format
/* Store compressed blob in machine independent format */
int4store((char*)(&blob->head.ver), 1);
int4store((char*)(&blob->head.orglen), comp_len);
int4store((char*)(&blob->head.complen), org_len);
// Copy frm data into blob, already in machine independent format
/* Copy frm data into blob, already in machine independent format */
memcpy(blob->data, data, org_len);
*pack_data= blob;


@ -42,6 +42,7 @@ i_ht=""
c_tzn="" c_tz="" c_tzt="" c_tztt="" c_tzls="" c_pl=""
i_tzn="" i_tz="" i_tzt="" i_tztt="" i_tzls="" i_pl=""
c_p="" c_pp=""
c_gl="" c_sl=""
# Check for old tables
if test ! -f $mdata/db.frm
@ -354,6 +355,7 @@ then
c_hr="$c_hr comment='keyword-topic relation';"
fi
if test ! -f $mdata/time_zone_name.frm
then
if test "$1" = "verbose" ; then
@ -744,6 +746,27 @@ then
fi
if test ! -f $mdata/general_log.frm
then
if test "$1" = "verbose" ; then
echo "Preparing general_log table" 1>&2;
fi
c_gl="$c_gl CREATE PROCEDURE create_general_log_table() BEGIN DECLARE is_csv_enabled int DEFAULT 0; SELECT @@have_csv = 'YES' INTO is_csv_enabled; IF (is_csv_enabled) THEN CREATE TABLE general_log (event_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT, thread_id INTEGER, server_id INTEGER, command_type VARCHAR(64), argument MEDIUMTEXT) engine=CSV CHARACTER SET utf8 comment='General log'; END IF; END;
CALL create_general_log_table();
DROP PROCEDURE create_general_log_table;"
fi
if test ! -f $mdata/slow_log.frm
then
if test "$1" = "verbose" ; then
echo "Preparing slow_log table" 1>&2;
fi
c_sl="$c_sl CREATE PROCEDURE create_slow_log_table() BEGIN DECLARE is_csv_enabled int DEFAULT 0; SELECT @@have_csv = 'YES' INTO is_csv_enabled; IF (is_csv_enabled) THEN CREATE TABLE slow_log (start_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT NOT NULL, query_time TIME NOT NULL, lock_time TIME NOT NULL, rows_sent INTEGER NOT NULL, rows_examined INTEGER NOT NULL, db VARCHAR(512), last_insert_id INTEGER, insert_id INTEGER, server_id INTEGER, sql_text MEDIUMTEXT NOT NULL) engine=CSV CHARACTER SET utf8 comment='Slow log'; END IF; END;
CALL create_slow_log_table();
DROP PROCEDURE create_slow_log_table;"
fi
if test ! -f $mdata/event.frm
then
c_ev="$c_ev CREATE TABLE event ("
@ -812,6 +835,8 @@ $i_tzls
$c_p
$c_pp
$c_gl
$c_sl
$c_ev
CREATE DATABASE IF NOT EXISTS cluster_replication;
CREATE TABLE IF NOT EXISTS cluster_replication.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;


@ -527,6 +527,42 @@ ALTER TABLE proc MODIFY db
MODIFY comment
char(64) collate utf8_bin DEFAULT '' NOT NULL;
--
-- Create missing log tables (5.1)
--
delimiter //
CREATE PROCEDURE create_log_tables()
BEGIN
DECLARE is_csv_enabled int DEFAULT 0;
SELECT @@have_csv = 'YES' INTO is_csv_enabled;
IF (is_csv_enabled) THEN
CREATE TABLE IF NOT EXISTS general_log (
event_time TIMESTAMP NOT NULL,
user_host MEDIUMTEXT,
thread_id INTEGER,
server_id INTEGER,
command_type VARCHAR(64),
argument MEDIUMTEXT
) engine=CSV CHARACTER SET utf8 comment='General log';
CREATE TABLE IF NOT EXISTS slow_log (
start_time TIMESTAMP NOT NULL,
user_host MEDIUMTEXT NOT NULL,
query_time TIME NOT NULL,
lock_time TIME NOT NULL,
rows_sent INTEGER NOT NULL,
rows_examined INTEGER NOT NULL,
db VARCHAR(512),
last_insert_id INTEGER,
insert_id INTEGER,
server_id INTEGER,
sql_text MEDIUMTEXT NOT NULL
) engine=CSV CHARACTER SET utf8 comment='Slow log';
END IF;
END//
delimiter ;
CALL create_log_tables();
DROP PROCEDURE create_log_tables;
#
# EVENT table
#


@ -295,6 +295,25 @@ err:
}
#endif /* HAVE_REPLICATION */
bool ha_myisam::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
{
/*
To be able to open and lock for reading system tables like 'mysql.proc',
when we already have some tables opened and locked, and avoid deadlocks
we have to disallow write-locking of these tables with any other tables.
*/
if (table->s->system_table &&
table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE &&
count != 1)
{
my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table->s->db.str,
table->s->table_name.str);
return FALSE;
}
return TRUE;
}
/* Name is here without an extension */
int ha_myisam::open(const char *name, int mode, uint test_if_locked)


@ -60,6 +60,7 @@ class ha_myisam: public handler
uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
uint checksum() const;
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);


@ -987,7 +987,7 @@ int ha_ndbcluster::get_metadata(const char *path)
DBUG_RETURN(1);
}
if (cmp_frm(tab, pack_data, pack_length))
if (m_share->state != NSS_ALTERED && cmp_frm(tab, pack_data, pack_length))
{
if (!invalidating_ndb_table)
{
@ -1059,6 +1059,36 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
DBUG_RETURN(0);
}
int ha_ndbcluster::table_changed(const void *pack_frm_data, uint pack_frm_len)
{
Ndb *ndb;
NDBDICT *dict;
const NDBTAB *orig_tab;
NdbDictionary::Table new_tab;
int result;
DBUG_ENTER("ha_ndbcluster::table_changed");
DBUG_PRINT("info", ("Modifying frm for table %s", m_tabname));
if (check_ndb_connection())
DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION);
ndb= get_ndb();
dict= ndb->getDictionary();
if (!(orig_tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
// Check if thread has stale local cache
if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
dict->removeCachedTable(m_tabname);
if (!(orig_tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
}
new_tab= *orig_tab;
new_tab.setFrm(pack_frm_data, pack_frm_len);
if (dict->alterTable(new_tab) != 0)
ERR_RETURN(dict->getNdbError());
DBUG_RETURN(0);
}
/*
Create all the indexes for a table.
If any index should fail to be created,
@ -4316,6 +4346,47 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}
int ha_ndbcluster::create_handler_files(const char *file)
{
const char *name;
Ndb* ndb;
const NDBTAB *tab;
const void *data, *pack_data;
uint length, pack_length;
int error= 0;
DBUG_ENTER("create_handler_files");
if (!(ndb= get_ndb()))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
NDBDICT *dict= ndb->getDictionary();
if (!(tab= dict->getTable(m_tabname)))
DBUG_RETURN(0); // Must be a create, ignore since frm is saved in create
name= table->s->normalized_path.str;
DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, name));
if (readfrm(name, &data, &length) ||
packfrm(data, length, &pack_data, &pack_length))
{
DBUG_PRINT("info", ("Missing frm for %s", m_tabname));
my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN(1);
}
if (cmp_frm(tab, pack_data, pack_length))
{
DBUG_PRINT("info", ("Table %s has changed, altering frm in ndb",
m_tabname));
error= table_changed(pack_data, pack_length);
m_share->state= NSS_INITIAL;
}
my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN(error);
}
int ha_ndbcluster::create_index(const char *name, KEY *key_info,
NDB_INDEX_TYPE idx_type, uint idx_no)
{
@ -4443,7 +4514,7 @@ int ha_ndbcluster::add_index(TABLE *table_arg,
if((error= create_index(key_info[idx].name, key, idx_type, idx)))
break;
}
m_share->state= NSS_ALTERED;
DBUG_RETURN(error);
}
@ -4478,6 +4549,7 @@ int ha_ndbcluster::prepare_drop_index(TABLE *table_arg,
THD *thd= current_thd;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
Ndb *ndb= thd_ndb->ndb;
m_share->state= NSS_ALTERED;
DBUG_RETURN(renumber_indexes(ndb, table_arg));
}
@ -4488,14 +4560,11 @@ int ha_ndbcluster::final_drop_index(TABLE *table_arg)
{
DBUG_ENTER("ha_ndbcluster::final_drop_index");
DBUG_PRINT("info", ("ha_ndbcluster::final_drop_index"));
int error= 0;
// Really drop indexes
THD *thd= current_thd;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
Ndb *ndb= thd_ndb->ndb;
error= drop_indexes(ndb, table_arg);
DBUG_RETURN(error);
DBUG_RETURN(drop_indexes(ndb, table_arg));
}
/*
@ -5331,9 +5400,15 @@ int ndbcluster_find_all_files(THD *thd)
}
else if (cmp_frm(ndbtab, pack_data, pack_length))
{
discover= 1;
sql_print_information("NDB: mismatch in frm for %s.%s, discovering...",
elmt.database, elmt.name);
NDB_SHARE *share= get_share(key, 0, false);
if (!share || share->state != NSS_ALTERED)
{
discover= 1;
sql_print_information("NDB: mismatch in frm for %s.%s, discovering...",
elmt.database, elmt.name);
}
if (share)
free_share(&share);
}
my_free((char*) data, MYF(MY_ALLOW_ZERO_PTR));
my_free((char*) pack_data, MYF(MY_ALLOW_ZERO_PTR));
@ -6529,7 +6604,7 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
MEM_ROOT *old_root= *root_ptr;
init_sql_alloc(&share->mem_root, 1024, 0);
*root_ptr= &share->mem_root; // remember to reset before return
share->state= NSS_INITIAL;
/* enough space for key, db, and table_name */
share->key= alloc_root(*root_ptr, 2 * (length + 1));
share->key_length= length;
@ -9086,13 +9161,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
/*
TODO: Remove the dummy return below, when cluster gets
signal from alter table when only .frm is changed. Cluster
needs it to manage the copies.
*/
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_NO; // Disable fast add/drop index
if (table_changes != IS_EQUAL_YES)
return COMPATIBLE_DATA_NO;


@ -80,10 +80,12 @@ typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
typedef enum {
NSS_INITIAL= 0,
NSS_DROPPED
NSS_DROPPED,
NSS_ALTERED
} NDB_SHARE_STATE;
typedef struct st_ndbcluster_share {
NDB_SHARE_STATE state;
MEM_ROOT mem_root;
THR_LOCK lock;
pthread_mutex_t mutex;
@ -97,7 +99,6 @@ typedef struct st_ndbcluster_share {
char *table_name;
#ifdef HAVE_NDB_BINLOG
uint32 flags;
NDB_SHARE_STATE state;
NdbEventOperation *op;
NdbEventOperation *op_old; // for rename table
char *old_names; // for rename table
@ -579,6 +580,7 @@ class ha_ndbcluster: public handler
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
int create_handler_files(const char *file);
int get_default_no_partitions(ulonglong max_rows);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
@ -669,6 +671,7 @@ private:
int create_index(const char *name, KEY *key_info,
NDB_INDEX_TYPE idx_type, uint idx_no);
int drop_ndb_index(const char *name);
int table_changed(const void *pack_frm_data, uint pack_frm_len);
// Index list management
int create_indexes(Ndb *ndb, TABLE *tab);
void clear_index(int i);


@ -5009,8 +5009,11 @@ void ha_partition::print_error(int error, myf errflag)
DBUG_PRINT("enter", ("error = %d", error));
if (error == HA_ERR_NO_PARTITION_FOUND)
{
char buf[100];
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
m_part_info->part_expr->val_int());
llstr(m_part_info->part_expr->val_int(), buf));
}
else
m_file[0]->print_error(error, errflag);
DBUG_VOID_RETURN;


@ -1089,6 +1089,30 @@ public:
{
/* TODO: DBUG_ASSERT(inited == NONE); */
}
/*
Check whether a handler allows locking the table.
SYNOPSIS
check_if_locking_is_allowed()
thd Handler of the thread, trying to lock the table
table Table handler to check
count Number of locks already granted to the table
DESCRIPTION
Check whether a handler allows locking the table. For instance,
MyISAM does not allow locking mysql.proc along with other tables.
This limitation stems from the fact that MyISAM does not support
row-level locking, and we have to add this limitation to avoid
deadlocks.
RETURN
TRUE Locking is allowed
FALSE Locking is not allowed. The error was thrown.
*/
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
{
return TRUE;
}
virtual int ha_initialise();
int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
bool update_auto_increment();


@ -614,18 +614,12 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
lock_count++;
}
/*
To be able to open and lock for reading system tables like 'mysql.proc',
when we already have some tables opened and locked, and avoid deadlocks
we have to disallow write-locking of these tables with any other tables.
Check if we can lock the table. For some tables we cannot do that
because of handler-specific locking issues.
*/
if (table_ptr[i]->s->system_table &&
table_ptr[i]->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE &&
count != 1)
{
my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db.str,
table_ptr[i]->s->table_name.str);
DBUG_RETURN(0);
}
if (!table_ptr[i]->file->check_if_locking_is_allowed(thd, table_ptr[i],
count))
return 0;
}
if (!(sql_lock= (MYSQL_LOCK*)

sql/log.cc (1235 changes)

File diff suppressed because it is too large.

sql/log.h (178 changes)

@ -132,6 +132,21 @@ typedef struct st_log_info
~st_log_info() { pthread_mutex_destroy(&lock);}
} LOG_INFO;
/*
Currently we have only 3 kinds of logging functions: old-fashioned
logs, stdout and csv logging routines.
*/
#define MAX_LOG_HANDLERS_NUM 3
enum enum_printer
{
NONE,
LEGACY,
CSV,
LEGACY_AND_CSV
};
class Log_event;
class Rows_log_event;
@ -276,10 +291,18 @@ public:
bool open_index_file(const char *index_file_name_arg,
const char *log_name);
void new_file(bool need_lock);
bool write(THD *thd, enum enum_server_command command,
const char *format,...);
bool write(THD *thd, const char *query, uint query_length,
time_t query_start=0);
/* log a command to the old-fashioned general log */
bool write(time_t event_time, const char *user_host,
uint user_host_len, int thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len);
/* log a query to the old-fashioned slow query log */
bool write(THD *thd, time_t current_time, time_t query_start_arg,
const char *user_host, uint user_host_len,
longlong query_time, longlong lock_time, bool is_command,
const char *sql_text, uint sql_text_len);
bool write(Log_event* event_info); // binary log write
bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event);
@ -329,4 +352,151 @@ public:
inline uint32 get_open_count() { return open_count; }
};
class Log_event_handler
{
public:
virtual bool init()= 0;
virtual void cleanup()= 0;
virtual bool log_slow(THD *thd, time_t current_time,
time_t query_start_arg, const char *user_host,
uint user_host_len, longlong query_time,
longlong lock_time, bool is_command,
const char *sql_text, uint sql_text_len)= 0;
virtual bool log_error(enum loglevel level, const char *format,
va_list args)= 0;
virtual bool log_general(time_t event_time, const char *user_host,
uint user_host_len, int thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len)= 0;
virtual ~Log_event_handler() {}
};
class Log_to_csv_event_handler: public Log_event_handler
{
/*
We create an artificial THD for each of the logs. This is to avoid
locking issues: we don't want locks on the log tables to reside in the
THDs of the query. The reason is the locking order and duration.
*/
THD *general_log_thd, *slow_log_thd;
friend class LOGGER;
TABLE_LIST general_log, slow_log;
private:
bool open_log_table(uint log_type);
public:
Log_to_csv_event_handler();
~Log_to_csv_event_handler();
virtual bool init();
virtual void cleanup();
virtual bool log_slow(THD *thd, time_t current_time,
time_t query_start_arg, const char *user_host,
uint user_host_len, longlong query_time,
longlong lock_time, bool is_command,
const char *sql_text, uint sql_text_len);
virtual bool log_error(enum loglevel level, const char *format,
va_list args);
virtual bool log_general(time_t event_time, const char *user_host,
uint user_host_len, int thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len);
bool flush(THD *thd, TABLE_LIST *close_slow_Log,
TABLE_LIST* close_general_log);
void close_log_table(uint log_type, bool lock_in_use);
bool reopen_log_table(uint log_type);
};
class Log_to_file_event_handler: public Log_event_handler
{
MYSQL_LOG mysql_log, mysql_slow_log;
bool is_initialized;
public:
Log_to_file_event_handler(): is_initialized(FALSE)
{}
virtual bool init();
virtual void cleanup();
virtual bool log_slow(THD *thd, time_t current_time,
time_t query_start_arg, const char *user_host,
uint user_host_len, longlong query_time,
longlong lock_time, bool is_command,
const char *sql_text, uint sql_text_len);
virtual bool log_error(enum loglevel level, const char *format,
va_list args);
virtual bool log_general(time_t event_time, const char *user_host,
uint user_host_len, int thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len);
void flush();
void init_pthread_objects();
};
/* Class which manages slow, general and error log event handlers */
class LOGGER
{
pthread_mutex_t LOCK_logger;
/* flag to check whether logger mutex is initialized */
uint inited;
/* available log handlers */
Log_to_csv_event_handler *table_log_handler;
Log_to_file_event_handler *file_log_handler;
/* NULL-terminated arrays of log handlers */
Log_event_handler *error_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
Log_event_handler *slow_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
Log_event_handler *general_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
public:
bool is_log_tables_initialized;
LOGGER() : inited(0), table_log_handler(NULL),
file_log_handler(NULL), is_log_tables_initialized(FALSE)
{}
void lock() { (void) pthread_mutex_lock(&LOCK_logger); }
void unlock() { (void) pthread_mutex_unlock(&LOCK_logger); }
/*
We want to initialize all log mutexes as soon as possible,
but we cannot do it in the constructor, as safe_mutex relies on
initialization performed by MY_INIT(). This is why it is done in
this function.
*/
void init_base();
void init_log_tables();
bool flush_logs(THD *thd);
THD *get_general_log_thd()
{
return (THD *) table_log_handler->general_log_thd;
}
THD *get_slow_log_thd()
{
return (THD *) table_log_handler->slow_log_thd;
}
void cleanup();
bool error_log_print(enum loglevel level, const char *format,
va_list args);
bool slow_log_print(THD *thd, const char *query, uint query_length,
time_t query_start_arg);
bool general_log_print(THD *thd,enum enum_server_command command,
const char *format, va_list args);
void close_log_table(uint log_type, bool lock_in_use);
bool reopen_log_table(uint log_type);
/* we use this function to setup all enabled log event handlers */
int set_handlers(enum enum_printer error_log_printer,
enum enum_printer slow_log_printer,
enum enum_printer general_log_printer);
void init_error_log(enum enum_printer error_log_printer);
void init_slow_log(enum enum_printer slow_log_printer);
void init_general_log(enum enum_printer general_log_printer);
};
#endif /* LOG_H */
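
The declarations above route each log message through NULL-terminated arrays of Log_event_handler objects, so an event can go to the old-fashioned file log, the CSV log table, or both. A standalone, simplified C++ sketch of that dispatch pattern, using hypothetical names and stripped-down signatures rather than the real server classes:

#include <cstdio>

/* Stand-in for Log_event_handler: one virtual per log kind (only the
   general log is shown here). */
class Event_handler
{
public:
  virtual bool log_general(const char *sql_text)= 0;
  virtual ~Event_handler() {}
};

/* Old-fashioned file log back end. */
class File_handler : public Event_handler
{
public:
  bool log_general(const char *sql_text)
  { printf("file log: %s\n", sql_text); return 0; }
};

/* CSV log table back end. */
class Table_handler : public Event_handler
{
public:
  bool log_general(const char *sql_text)
  { printf("log table: %s\n", sql_text); return 0; }
};

/* Stand-in for LOGGER: set_handlers() fills a NULL-terminated list,
   general_log_print() walks it. */
class Dispatcher
{
  Event_handler *general_list[3];
public:
  Dispatcher() { general_list[0]= 0; }
  void set_handlers(Event_handler *first, Event_handler *second)
  {
    int i= 0;
    if (first)  general_list[i++]= first;
    if (second) general_list[i++]= second;
    general_list[i]= 0;
  }
  bool general_log_print(const char *sql_text)
  {
    bool error= 0;
    for (Event_handler **current= general_list; *current; current++)
      error|= (*current)->log_general(sql_text);
    return error;
  }
};

int main()
{
  File_handler file;
  Table_handler table;
  Dispatcher logger;
  logger.set_handlers(&file, &table);      /* roughly LEGACY_AND_CSV */
  logger.general_log_print("SELECT 1");
  return 0;
}

In the patch itself, set_handlers() is driven by --old-log-format and --both-log-formats (see the mysqld.cc changes further below) to choose between the LEGACY, CSV and LEGACY_AND_CSV handler sets.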


@ -1795,7 +1795,7 @@ START SLAVE; . Query: '%s'", expected_error, thd->query);
/* If the query was not ignored, it is printed to the general log */
if (thd->net.last_errno != ER_SLAVE_IGNORED_TABLE)
mysql_log.write(thd,COM_QUERY,"%s",thd->query);
general_log_print(thd, COM_QUERY, "%s", thd->query);
compare_errors:
@ -3513,7 +3513,8 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
int Xid_log_event::exec_event(struct st_relay_log_info* rli)
{
/* For a slave Xid_log_event is COMMIT */
mysql_log.write(thd,COM_QUERY,"COMMIT /* implicit, from Xid_log_event */");
general_log_print(thd, COM_QUERY,
"COMMIT /* implicit, from Xid_log_event */");
return end_trans(thd, COMMIT) || Log_event::exec_event(rli);
}
#endif /* !MYSQL_CLIENT */


@ -897,7 +897,7 @@ void free_status_vars();
/* information schema */
extern LEX_STRING information_schema_name;
const extern LEX_STRING partition_keywords[];
extern const LEX_STRING partition_keywords[];
LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
const char* str, uint length,
bool allocate_lex_string);
@ -1171,12 +1171,23 @@ int key_rec_cmp(void *key_info, byte *a, byte *b);
bool init_errmessage(void);
void sql_perror(const char *message);
void vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
int vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
void sql_print_error(const char *format, ...);
void sql_print_warning(const char *format, ...);
void sql_print_information(const char *format, ...);
/* type of the log table */
#define LOG_SLOW 1
#define LOG_GENERAL 2
int error_log_print(enum loglevel level, const char *format,
va_list args);
bool slow_log_print(THD *thd, const char *query, uint query_length,
time_t query_start_arg);
bool general_log_print(THD *thd, enum enum_server_command command,
const char *format,...);
bool fn_format_relative_to_data_home(my_string to, const char *name,
const char *dir, const char *extension);
@ -1217,7 +1228,7 @@ extern char *mysql_data_home,server_version[SERVER_VERSION_LENGTH],
def_ft_boolean_syntax[sizeof(ft_boolean_syntax)];
#define mysql_tmpdir (my_tmpdir(&mysql_tmpdir_list))
extern MY_TMPDIR mysql_tmpdir_list;
extern const char *command_name[];
extern LEX_STRING command_name[];
extern const char *first_keyword, *my_localhost, *delayed_user, *binary_keyword;
extern const char **errmesg; /* Error messages */
extern const char *myisam_recover_options_str;
@ -1279,6 +1290,7 @@ extern my_bool locked_in_memory;
extern bool opt_using_transactions, mysqld_embedded;
extern bool using_update_log, opt_large_files, server_id_supplied;
extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log;
extern bool opt_old_log_format;
extern bool opt_disable_networking, opt_skip_show_db;
extern my_bool opt_character_set_client_handshake;
extern bool volatile abort_loop, shutdown_in_progress, grant_option;
@ -1300,7 +1312,9 @@ extern char *default_tz_name;
extern my_bool opt_large_pages;
extern uint opt_large_page_size;
extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log;
extern MYSQL_LOG mysql_bin_log;
extern LOGGER logger;
extern TABLE_LIST general_log, slow_log;
extern FILE *bootstrap_file;
extern int bootstrap_error;
extern FILE *stderror_file;


@ -331,6 +331,9 @@ static my_bool opt_sync_bdb_logs;
bool opt_log, opt_update_log, opt_bin_log, opt_slow_log;
bool opt_error_log= IF_WIN(1,0);
#ifdef WITH_CSV_STORAGE_ENGINE
bool opt_old_log_format, opt_both_log_formats;
#endif
bool opt_disable_networking=0, opt_skip_show_db=0;
my_bool opt_character_set_client_handshake= 1;
bool server_id_supplied = 0;
@ -603,6 +606,7 @@ char *opt_relay_logname = 0, *opt_relaylog_index_name=0;
my_bool master_ssl;
char *master_ssl_key, *master_ssl_cert;
char *master_ssl_ca, *master_ssl_capath, *master_ssl_cipher;
char *opt_logname, *opt_slow_logname;
/* Static variables */
@ -610,8 +614,8 @@ static bool kill_in_progress, segfaulted;
static my_bool opt_do_pstack, opt_bootstrap, opt_myisam_log;
static int cleanup_done;
static ulong opt_specialflag, opt_myisam_block_size;
static char *opt_logname, *opt_update_logname, *opt_binlog_index_name;
static char *opt_slow_logname, *opt_tc_heuristic_recover;
static char *opt_update_logname, *opt_binlog_index_name;
static char *opt_tc_heuristic_recover;
static char *mysql_home_ptr, *pidfile_name_ptr;
static char **defaults_argv;
static char *opt_bin_logname;
@ -1138,8 +1142,7 @@ void clean_up(bool print_message)
if (cleanup_done++)
return; /* purecov: inspected */
mysql_log.cleanup();
mysql_slow_log.cleanup();
logger.cleanup();
/*
make sure that handlers finish up
what they have that is dependent on the binlog
@ -2389,6 +2392,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
#ifdef EXTRA_DEBUG
sql_print_information("Got signal %d to shutdown mysqld",sig);
#endif
/* switch to the old log message processing */
logger.set_handlers(LEGACY, opt_slow_log ? LEGACY:NONE,
opt_log ? LEGACY:NONE);
DBUG_PRINT("info",("Got signal: %d abort_loop: %d",sig,abort_loop));
if (!abort_loop)
{
@ -2416,6 +2422,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
REFRESH_THREADS | REFRESH_HOSTS),
(TABLE_LIST*) 0, &not_used); // Flush logs
}
/* reenable logs after the options were reloaded */
logger.set_handlers(LEGACY, opt_slow_log ? CSV:NONE,
opt_log ? CSV:NONE);
break;
#ifdef USE_ONE_SIGNAL_HAND
case THR_SERVER_ALARM:
@ -2680,8 +2689,6 @@ static int init_common_variables(const char *conf_file_name, int argc,
global MYSQL_LOGs in their constructors, because then they would be inited
before MY_INIT(). So we do it here.
*/
mysql_log.init_pthread_objects();
mysql_slow_log.init_pthread_objects();
mysql_bin_log.init_pthread_objects();
if (gethostname(glob_hostname,sizeof(glob_hostname)-4) < 0)
@ -3047,9 +3054,48 @@ static int init_server_components()
#ifdef HAVE_REPLICATION
init_slave_list();
#endif
/* Setup log files */
if (opt_log)
mysql_log.open_query_log(opt_logname);
/* Setup logs */
/* enable old-fashioned error log */
if (opt_error_log)
{
if (!log_error_file_ptr[0])
fn_format(log_error_file, glob_hostname, mysql_data_home, ".err",
MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */
else
fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err",
MY_UNPACK_FILENAME | MY_SAFE_PATH);
if (!log_error_file[0])
opt_error_log= 1; // Too long file name
else
{
#ifndef EMBEDDED_LIBRARY
if (freopen(log_error_file, "a+", stdout))
#endif
freopen(log_error_file, "a+", stderr);
}
}
#ifdef WITH_CSV_STORAGE_ENGINE
logger.init_log_tables();
if (opt_old_log_format || (have_csv_db != SHOW_OPTION_YES))
logger.set_handlers(LEGACY, opt_slow_log ? LEGACY:NONE,
opt_log ? LEGACY:NONE);
else
if (opt_both_log_formats)
logger.set_handlers(LEGACY,
opt_slow_log ? LEGACY_AND_CSV:NONE,
opt_log ? LEGACY_AND_CSV:NONE);
else
/* the default is CSV log tables */
logger.set_handlers(LEGACY, opt_slow_log ? CSV:NONE,
opt_log ? CSV:NONE);
#else
logger.set_handlers(LEGACY, opt_slow_log ? LEGACY:NONE,
opt_log ? LEGACY:NONE);
#endif
if (opt_update_log)
{
/*
@ -3146,9 +3192,6 @@ with --log-bin instead.");
array_elements(binlog_format_names)-1);
opt_binlog_format= binlog_format_names[opt_binlog_format_id];
if (opt_slow_log)
mysql_slow_log.open_slow_log(opt_slow_logname);
#ifdef HAVE_REPLICATION
if (opt_log_slave_updates && replicate_same_server_id)
{
@ -3160,25 +3203,6 @@ server.");
}
#endif
if (opt_error_log)
{
if (!log_error_file_ptr[0])
fn_format(log_error_file, glob_hostname, mysql_data_home, ".err",
MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */
else
fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err",
MY_UNPACK_FILENAME | MY_SAFE_PATH);
if (!log_error_file[0])
opt_error_log= 1; // Too long file name
else
{
#ifndef EMBEDDED_LIBRARY
if (freopen(log_error_file, "a+", stdout))
#endif
stderror_file= freopen(log_error_file, "a+", stderr);
}
}
if (opt_bin_log)
{
char buf[FN_REFLEN];
@ -3432,6 +3456,12 @@ int main(int argc, char **argv)
MY_INIT(argv[0]); // init my_sys library & pthreads
/*
Perform basic logger initialization. Should be called after
MY_INIT, as it initializes mutexes. Log tables are inited later.
*/
logger.init_base();
#ifdef _CUSTOMSTARTUPCONFIG_
if (_cust_check_startup())
{
@ -3577,6 +3607,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
*/
error_handler_hook= my_message_sql;
start_signal_handler(); // Creates pidfile
if (acl_init(opt_noacl) ||
my_tz_init((THD *)0, default_tz_name, opt_bootstrap))
{
@ -3701,7 +3732,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
clean_up_mutexes();
shutdown_events();
my_end(opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0);
exit(0);
return(0); /* purecov: deadcode */
}
@ -4639,7 +4670,7 @@ enum options_mysqld
OPT_REPLICATE_IGNORE_TABLE, OPT_REPLICATE_WILD_DO_TABLE,
OPT_REPLICATE_WILD_IGNORE_TABLE, OPT_REPLICATE_SAME_SERVER_ID,
OPT_DISCONNECT_SLAVE_EVENT_COUNT, OPT_TC_HEURISTIC_RECOVER,
OPT_ABORT_SLAVE_EVENT_COUNT,
OPT_ABORT_SLAVE_EVENT_COUNT, OPT_OLD_LOG_FORMAT, OPT_BOTH_LOG_FORMATS,
OPT_INNODB_DATA_HOME_DIR,
OPT_INNODB_DATA_FILE_PATH,
OPT_INNODB_LOG_GROUP_HOME_DIR,
@ -5195,6 +5226,16 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
"Log slow queries to this log file. Defaults logging to hostname-slow.log file. Must be enabled to activate other slow log options.",
(gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
#ifdef WITH_CSV_STORAGE_ENGINE
{"old-log-format", OPT_OLD_LOG_FORMAT,
"Enable old log file format. (No SELECT * FROM logs)",
(gptr*) &opt_old_log_format, 0, 0, GET_BOOL, OPT_ARG,
0, 0, 0, 0, 0, 0},
{"both-log-formats", OPT_BOTH_LOG_FORMATS,
"Enable old log file format along with log tables",
(gptr*) &opt_both_log_formats, 0, 0, GET_BOOL, OPT_ARG,
0, 0, 0, 0, 0, 0},
#endif
{"log-tc", OPT_LOG_TC,
"Path to transaction coordinator log (used for transactions that affect "
"more than one storage engine, when binary log is disabled)",
@ -6886,6 +6927,10 @@ static void mysql_init_variables(void)
opt_skip_slave_start= opt_reckless_slave = 0;
mysql_home[0]= pidfile_name[0]= log_error_file[0]= 0;
opt_log= opt_update_log= opt_slow_log= 0;
#ifdef WITH_CSV_STORAGE_ENGINE
opt_old_log_format= 0;
opt_both_log_formats= 0;
#endif
opt_bin_log= 0;
opt_disable_networking= opt_skip_show_db=0;
opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0;
@ -7294,8 +7339,16 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
}
#endif /* HAVE_REPLICATION */
case (int) OPT_SLOW_QUERY_LOG:
opt_slow_log=1;
opt_slow_log= 1;
break;
#ifdef WITH_CSV_STORAGE_ENGINE
case (int) OPT_OLD_LOG_FORMAT:
opt_old_log_format= 1;
break;
case (int) OPT_BOTH_LOG_FORMATS:
opt_both_log_formats= 1;
break;
#endif
case (int) OPT_SKIP_NEW:
opt_specialflag|= SPECIAL_NO_NEW_FUNC;
delay_key_write_options= (uint) DELAY_KEY_WRITE_NONE;


@ -5738,7 +5738,7 @@ ER_PLUGIN_IS_NOT_LOADED
ER_WRONG_VALUE
eng "Incorrect %-.32s value: '%-.128s'"
ER_NO_PARTITION_FOR_GIVEN_VALUE
eng "Table has no partition for value %ld"
eng "Table has no partition for value %-.64s"
ER_TABLESPACE_OPTION_ONLY_ONCE
eng "It is not allowed to specify %s more than once"
ER_CREATE_TABLESPACE_FAILED
@ -5794,3 +5794,7 @@ ER_EVENT_DATA_TOO_LONG
ER_DROP_INDEX_FK
eng "Cannot drop index '%-.64s': needed in a foreign key constraint"
ger "Kann Index '%-.64s' nicht löschen: wird für einen einen Fremdschlüssel benötigt"
ER_CANT_WRITE_LOCK_LOG_TABLE
eng "You can't write-lock a log table. Only read access is possible."
ER_CANT_READ_LOCK_LOG_TABLE
eng "You can't use usual read lock with log tables. Try READ LOCAL instead."


@ -4354,8 +4354,8 @@ replication resumed in log '%s' at position %s", mi->user,
else
{
change_rpl_status(RPL_IDLE_SLAVE,RPL_ACTIVE_SLAVE);
mysql_log.write(thd, COM_CONNECT_OUT, "%s@%s:%d",
mi->user, mi->host, mi->port);
general_log_print(thd, COM_CONNECT_OUT, "%s@%s:%d",
mi->user, mi->host, mi->port);
}
#ifdef SIGNAL_WITH_VIO_CLOSE
thd->set_active_vio(mysql->net.vio);

View file

@ -835,7 +835,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
bool found=0;
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
if (remove_table_from_cache(thd, table->db, table->table_name,
if ((!table->table || !table->table->s->log_table) &&
remove_table_from_cache(thd, table->db, table->table_name,
RTFC_OWNED_BY_THD_FLAG))
found=1;
}
@ -869,7 +870,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
for (uint idx=0 ; idx < open_cache.records ; idx++)
{
TABLE *table=(TABLE*) hash_element(&open_cache,idx);
if ((table->s->version) < refresh_version && table->db_stat)
if (!table->s->log_table &&
((table->s->version) < refresh_version && table->db_stat))
{
found=1;
pthread_cond_wait(&COND_refresh,&LOCK_open);
@ -1852,7 +1854,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
if (!thd->open_tables)
thd->version=refresh_version;
else if ((thd->version != refresh_version) &&
! (flags & MYSQL_LOCK_IGNORE_FLUSH))
! (flags & MYSQL_LOCK_IGNORE_FLUSH) && !table->s->log_table)
{
/* Someone did a refresh while thread was opening tables */
if (refresh)
@ -1873,7 +1875,11 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
{
if (table->s->version != refresh_version)
{
if (flags & MYSQL_LOCK_IGNORE_FLUSH)
/*
Don't close tables if we are working with a log table or were
asked not to close the table explicitly
*/
if (flags & MYSQL_LOCK_IGNORE_FLUSH || table->s->log_table)
{
/* Force close at once after usage */
thd->version= table->s->version;
@ -2236,6 +2242,10 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
Wait until all threads have closed the tables in the list.
We also have to wait if a thread has a lock on this table even
if the table is closed.
NOTE: log tables are handled differently by the logging routines.
E.g. general_log is always opened and locked by the logger,
and the table handler used by the logger will be skipped by
this check.
*/
bool table_is_used(TABLE *table, bool wait_for_name_lock)
@ -2254,9 +2264,10 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock)
search= (TABLE*) hash_next(&open_cache, (byte*) key,
key_length, &state))
{
DBUG_PRINT("info", ("share: 0x%lx locked_by_flush: %d "
"locked_by_name: %d db_stat: %u version: %u",
(ulong) search->s,
DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d "
"locked_by_flush: %d locked_by_name: %d "
"db_stat: %u version: %u",
(ulong) search->s, search->locked_by_logger,
search->locked_by_flush, search->locked_by_name,
search->db_stat,
search->s->version));
@ -2267,12 +2278,15 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock)
- There is a name lock on it (Table is to be deleted or altered)
- If we are in flush table and we didn't execute the flush
- If the table engine is open and it's an old version
(We must wait until all engines are shut down to use the table)
(We must wait until all engines are shut down to use the table)
However we do not wait if we encounter a table locked by the logger.
Log tables are managed separately by logging routines.
*/
if (search->locked_by_name && wait_for_name_lock ||
search->locked_by_flush ||
(search->db_stat && search->s->version < refresh_version))
return 1;
if (!search->locked_by_logger &&
(search->locked_by_name && wait_for_name_lock ||
search->locked_by_flush ||
(search->db_stat && search->s->version < refresh_version)))
return 1;
}
} while ((table=table->next));
DBUG_RETURN(0);
@ -5867,6 +5881,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
&state))
{
THD *in_use;
table->s->version=0L; /* Free when thread is ready */
if (!(in_use=table->in_use))
{

View file

@ -1158,8 +1158,8 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
sctx->priv_user,
sctx->priv_host,
dbname);
mysql_log.write(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR),
sctx->priv_user, sctx->priv_host, dbname);
general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR),
sctx->priv_user, sctx->priv_host, dbname);
my_free(dbname,MYF(0));
DBUG_RETURN(1);
}

View file

@ -857,6 +857,8 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
char path[FN_REFLEN];
TABLE *table;
bool error;
uint closed_log_tables= 0, lock_logger= 0;
TABLE_LIST *tmp_table_list;
uint path_length;
DBUG_ENTER("mysql_truncate");
@ -905,13 +907,36 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
HTON_CAN_RECREATE)
|| thd->lex->sphead)
goto trunc_by_del;
if (lock_and_wait_for_table_name(thd, table_list))
DBUG_RETURN(TRUE);
}
// Remove the .frm extension
// AIX 5.2 64-bit compiler bug (BUG#16155): this crashes, replacement works.
// *(path + path_length - reg_ext_length)= '\0';
/* close log tables in use */
if (!my_strcasecmp(system_charset_info, table_list->db, "mysql"))
{
if (!my_strcasecmp(system_charset_info, table_list->table_name,
"general_log"))
{
lock_logger= 1;
logger.lock();
logger.close_log_table(LOG_GENERAL, FALSE);
closed_log_tables= closed_log_tables | LOG_GENERAL;
}
else
if (!my_strcasecmp(system_charset_info, table_list->table_name,
"slow_log"))
{
lock_logger= 1;
logger.lock();
logger.close_log_table(LOG_SLOW, FALSE);
closed_log_tables= closed_log_tables | LOG_SLOW;
}
}
// Remove the .frm extension.
// AIX 5.2 64-bit compiler bug (BUG#16155): *(path + path_length -
// reg_ext_length)= '\0'; crashes, the replacement below works.
path[path_length - reg_ext_length] = 0;
error= ha_create_table(thd, path, table_list->db, table_list->table_name,
&create_info, 1);
@ -937,6 +962,14 @@ end:
VOID(pthread_mutex_lock(&LOCK_open));
unlock_table_name(thd, table_list);
VOID(pthread_mutex_unlock(&LOCK_open));
if (closed_log_tables & LOG_SLOW)
logger.reopen_log_table(LOG_SLOW);
if (closed_log_tables & LOG_GENERAL)
logger.reopen_log_table(LOG_GENERAL);
if (lock_logger)
logger.unlock();
}
else if (error)
{
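TRUNCATE on mysql.general_log or mysql.slow_log must first close the table that the logger keeps permanently open and reopen it once the files have been recreated. The LOGGER calls used above are declared in the logging subsystem (not part of this excerpt); the pairing they are expected to follow is, schematically:

/* sketch only, assuming the LOGGER interface used in the hunk above */
logger.lock();                               /* serialize against the log writers */
logger.close_log_table(LOG_GENERAL, FALSE);  /* release the permanently open table */
/* ... recreate the table files via ha_create_table() ... */
logger.reopen_log_table(LOG_GENERAL);        /* must happen before unlock() */
logger.unlock();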

View file

@ -179,6 +179,7 @@ void lex_start(THD *thd, const uchar *buf, uint length)
lex->query_tables_own_last= 0;
lex->escape_used= lex->et_compile_phase= FALSE;
lex->name= 0;
lex->et= NULL;
if (lex->sroutines.records)

View file

@ -73,14 +73,23 @@ static bool append_file_to_dir(THD *thd, const char **filename_ptr,
const char *any_db="*any*"; // Special symbol for check_access
const char *command_name[]={
"Sleep", "Quit", "Init DB", "Query", "Field List", "Create DB",
"Drop DB", "Refresh", "Shutdown", "Statistics", "Processlist",
"Connect","Kill","Debug","Ping","Time","Delayed insert","Change user",
"Binlog Dump","Table Dump", "Connect Out", "Register Slave",
"Prepare", "Execute", "Long Data", "Close stmt",
"Reset stmt", "Set option", "Fetch", "Daemon",
"Error" // Last command number
LEX_STRING command_name[]={
STRING_WITH_LEN("Sleep"), STRING_WITH_LEN("Quit"),
STRING_WITH_LEN("Init DB"), STRING_WITH_LEN("Query"),
STRING_WITH_LEN("Field List"), STRING_WITH_LEN("Create DB"),
STRING_WITH_LEN("Drop DB"), STRING_WITH_LEN("Refresh"),
STRING_WITH_LEN("Shutdown"), STRING_WITH_LEN("Statistics"),
STRING_WITH_LEN("Processlist"), STRING_WITH_LEN("Connect"),
STRING_WITH_LEN("Kill"), STRING_WITH_LEN("Debug"),
STRING_WITH_LEN("Ping"), STRING_WITH_LEN("Time"),
STRING_WITH_LEN("Delayed insert"), STRING_WITH_LEN("Change user"),
STRING_WITH_LEN("Binlog Dump"), STRING_WITH_LEN("Table Dump"),
STRING_WITH_LEN("Connect Out"), STRING_WITH_LEN("Register Slave"),
STRING_WITH_LEN("Prepare"), STRING_WITH_LEN("Execute"),
STRING_WITH_LEN("Long Data"), STRING_WITH_LEN("Close stmt"),
STRING_WITH_LEN("Reset stmt"), STRING_WITH_LEN("Set option"),
STRING_WITH_LEN("Fetch"), STRING_WITH_LEN("Daemon"),
STRING_WITH_LEN("Error") // Last command number
};
const char *xa_state_names[]={
@ -322,7 +331,7 @@ int check_user(THD *thd, enum enum_server_command command,
if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323)
{
net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE);
mysql_log.write(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
DBUG_RETURN(-1);
}
if (passwd_len != 0 &&
@ -356,9 +365,9 @@ int check_user(THD *thd, enum enum_server_command command,
net_printf_error(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE,
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip);
mysql_log.write(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE),
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip);
general_log_print(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE),
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip);
DBUG_RETURN(-1);
}
/* We have to read very specific packet size */
@ -406,14 +415,14 @@ int check_user(THD *thd, enum enum_server_command command,
}
/* Why is logging performed before all checks have passed? */
mysql_log.write(thd, command,
(thd->main_security_ctx.priv_user ==
thd->main_security_ctx.user ?
(char*) "%s@%s on %s" :
(char*) "%s@%s as anonymous on %s"),
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
db ? db : (char*) "");
general_log_print(thd, command,
(thd->main_security_ctx.priv_user ==
thd->main_security_ctx.user ?
(char*) "%s@%s on %s" :
(char*) "%s@%s as anonymous on %s"),
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
db ? db : (char*) "");
/*
This is the default access rights for the current database. It's
@ -460,17 +469,17 @@ int check_user(THD *thd, enum enum_server_command command,
else if (res == 2) // client gave short hash, server has long hash
{
net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE);
mysql_log.write(thd,COM_CONNECT,ER(ER_NOT_SUPPORTED_AUTH_MODE));
general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
DBUG_RETURN(-1);
}
net_printf_error(thd, ER_ACCESS_DENIED_ERROR,
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
passwd_len ? ER(ER_YES) : ER(ER_NO));
mysql_log.write(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR),
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
passwd_len ? ER(ER_YES) : ER(ER_NO));
general_log_print(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR),
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
passwd_len ? ER(ER_YES) : ER(ER_NO));
DBUG_RETURN(-1);
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
@ -1570,7 +1579,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
packet, strlen(packet), thd->charset());
if (!mysql_change_db(thd, tmp.str, FALSE))
{
mysql_log.write(thd,command,"%s",thd->db);
general_log_print(thd, command, "%s",thd->db);
send_ok(thd);
}
break;
@ -1703,7 +1712,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (alloc_query(thd, packet, packet_length))
break; // fatal error is set
char *packet_end= thd->query + thd->query_length;
mysql_log.write(thd,command,"%s",thd->query);
general_log_print(thd, command, "%s", thd->query);
DBUG_PRINT("query",("%-.4096s",thd->query));
if (!(specialflag & SPECIAL_NO_PRIOR))
@ -1812,7 +1821,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->query_length= strlen(packet); // for simplicity: don't optimize
if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1)))
break;
mysql_log.write(thd,command,"%s %s",table_list.table_name, fields);
general_log_print(thd, command, "%s %s", table_list.table_name, fields);
if (lower_case_table_names)
my_casedn_str(files_charset_info, table_list.table_name);
remove_escape(table_list.table_name); // This can't have wildcards
@ -1841,7 +1850,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#endif
case COM_QUIT:
/* We don't calculate statistics for this command */
mysql_log.write(thd,command,NullS);
general_log_print(thd, command, NullS);
net->error=0; // Don't give 'abort' message
error=TRUE; // End server
break;
@ -1861,7 +1870,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
if (check_access(thd,CREATE_ACL,db,0,1,0,is_schema_db(db)))
break;
mysql_log.write(thd,command,packet);
general_log_print(thd, command, packet);
bzero(&create_info, sizeof(create_info));
mysql_create_db(thd, (lower_case_table_names == 2 ? alias : db),
&create_info, 0);
@ -1886,7 +1895,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
break;
}
mysql_log.write(thd,command,db);
general_log_print(thd, command, db);
mysql_rm_db(thd, db, 0, 0);
break;
}
@ -1910,7 +1919,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
kill_zombie_dump_threads(slave_server_id);
thd->server_id = slave_server_id;
mysql_log.write(thd, command, "Log: '%s' Pos: %ld", packet+10,
general_log_print(thd, command, "Log: '%s' Pos: %ld", packet+10,
(long) pos);
mysql_binlog_send(thd, thd->strdup(packet + 10), (my_off_t) pos, flags);
unregister_slave(thd,1,1);
@ -1928,7 +1937,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
ulong options= (ulong) (uchar) packet[0];
if (check_global_access(thd,RELOAD_ACL))
break;
mysql_log.write(thd,command,NullS);
general_log_print(thd, command, NullS);
if (!reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, &not_used))
send_ok(thd);
break;
@ -1956,7 +1965,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
}
DBUG_PRINT("quit",("Got shutdown command for level %u", level));
mysql_log.write(thd,command,NullS);
general_log_print(thd, command, NullS);
send_eof(thd);
#ifdef __WIN__
sleep(1); // must wait after eof()
@ -1973,7 +1982,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#endif
case COM_STATISTICS:
{
mysql_log.write(thd,command,NullS);
general_log_print(thd, command, NullS);
statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS],
&LOCK_status);
#ifndef EMBEDDED_LIBRARY
@ -2013,7 +2022,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (!thd->security_ctx->priv_user[0] &&
check_global_access(thd, PROCESS_ACL))
break;
mysql_log.write(thd,command,NullS);
general_log_print(thd, command, NullS);
mysqld_list_processes(thd,
thd->security_ctx->master_access & PROCESS_ACL ?
NullS : thd->security_ctx->priv_user, 0);
@ -2050,7 +2059,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (check_global_access(thd, SUPER_ACL))
break; /* purecov: inspected */
mysql_print_status();
mysql_log.write(thd,command,NullS);
general_log_print(thd, command, NullS);
send_eof(thd);
break;
case COM_SLEEP:
@ -2132,7 +2141,7 @@ void log_slow_statement(THD *thd)
(specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES)))
{
thd->status_var.long_query_count++;
mysql_slow_log.write(thd, thd->query, thd->query_length, start_of_query);
slow_log_print(thd, thd->query, thd->query_length, start_of_query);
}
}
}
@ -6541,7 +6550,8 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
{
/*
Flush the normal query log, the update log, the binary log,
the slow query log, and the relay log (if it exists).
the slow query log, the relay log (if it exists) and the log
tables.
*/
/*
@ -6551,14 +6561,16 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
than it would help them)
*/
tmp_write_to_binlog= 0;
mysql_log.new_file(1);
mysql_slow_log.new_file(1);
mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE);
#ifdef HAVE_REPLICATION
pthread_mutex_lock(&LOCK_active_mi);
rotate_relay_log(active_mi);
pthread_mutex_unlock(&LOCK_active_mi);
#endif
/* flush slow and general logs */
logger.flush_logs(thd);
if (ha_flush_logs(NULL))
result=1;
if (flush_error_log())
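The mysql_log.write() and mysql_slow_log.write() calls in this file are replaced throughout by general_log_print() and slow_log_print(). Their definitions live in sql/log.cc, which is not shown in this excerpt; presumably they are thin varargs wrappers that forward to the new unified logger, roughly along these lines (the logger member name is an assumption):

bool general_log_print(THD *thd, enum enum_server_command command,
                       const char *format, ...)
{
  va_list args;
  va_start(args, format);
  /* assumed: the logger decides between the file log and the log table */
  bool error= logger.general_log_print(thd, command, format, args);
  va_end(args);
  return error;
}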

View file

@ -3867,7 +3867,9 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
DBUG_PRINT("info", ("Successful parse"));
part_info= lex.part_info;
DBUG_PRINT("info", ("default engine = %d", ha_legacy_type(part_info->default_engine_type)));
DBUG_PRINT("info", ("default engine = %d, default_db_type = %d",
ha_legacy_type(part_info->default_engine_type),
ha_legacy_type(default_db_type)));
if (is_create_table_ind)
{
if (old_lex->name)
@ -3877,10 +3879,6 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
old_lex->name contains the t2 and the table we are opening has
name t1.
*/
Table_ident *ti= (Table_ident*)old_lex->name;
const char *db_name= ti->db.str ? ti->db.str : thd->db;
const char *table_name= ti->table.str;
handler *file;
if (partition_default_handling(table, part_info))
{
DBUG_RETURN(TRUE);
@ -3892,7 +3890,9 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
table->part_info= part_info;
table->file->set_part_info(part_info);
if (part_info->default_engine_type == NULL)
{
part_info->default_engine_type= default_db_type;
}
else
{
DBUG_ASSERT(part_info->default_engine_type == default_db_type);

View file

@ -1866,7 +1866,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
thd->stmt_map.erase(stmt);
}
else
mysql_log.write(thd, COM_STMT_PREPARE, "[%lu] %s", stmt->id, packet);
general_log_print(thd, COM_STMT_PREPARE, "[%lu] %s", stmt->id, packet);
/* check_prepared_statemnt sends the metadata packet in case of success */
DBUG_VOID_RETURN;
@ -2228,7 +2228,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
if (!(specialflag & SPECIAL_NO_PRIOR))
my_pthread_setprio(pthread_self(), WAIT_PRIOR);
if (error == 0)
mysql_log.write(thd, COM_STMT_EXECUTE, "[%lu] %s", stmt->id, thd->query);
general_log_print(thd, COM_STMT_EXECUTE, "[%lu] %s", stmt->id, thd->query);
DBUG_VOID_RETURN;
@ -2607,7 +2607,7 @@ void Prepared_statement::setup_set_params()
{
/* Setup binary logging */
if (mysql_bin_log.is_open() && is_update_query(lex->sql_command) ||
mysql_log.is_open() || mysql_slow_log.is_open())
opt_log || opt_slow_log)
{
set_params_from_vars= insert_params_from_vars_with_log;
#ifndef EMBEDDED_LIBRARY

View file

@ -605,8 +605,8 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
{
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->host_or_ip, dbname);
mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR),
sctx->priv_user, sctx->host_or_ip, dbname);
general_log_print(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR),
sctx->priv_user, sctx->host_or_ip, dbname);
DBUG_RETURN(TRUE);
}
#endif
@ -1502,7 +1502,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
if (thd_info->proc_info)
protocol->store(thd_info->proc_info, system_charset_info);
else
protocol->store(command_name[thd_info->command], system_charset_info);
protocol->store(command_name[thd_info->command].str, system_charset_info);
if (thd_info->start_time)
protocol->store((uint32) (now - thd_info->start_time));
else
@ -2835,6 +2835,7 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond)
CHARSET_INFO *tmp_cs= cs[0];
if (tmp_cs && (tmp_cs->state & MY_CS_PRIMARY) &&
(tmp_cs->state & MY_CS_AVAILABLE) &&
!(tmp_cs->state & MY_CS_HIDDEN) &&
!(wild && wild[0] &&
wild_case_compare(scs, tmp_cs->csname,wild)))
{
@ -2904,6 +2905,7 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
CHARSET_INFO **cl;
CHARSET_INFO *tmp_cs= cs[0];
if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) ||
(tmp_cs->state & MY_CS_HIDDEN) ||
!(tmp_cs->state & MY_CS_PRIMARY))
continue;
for (cl= all_charsets; cl < all_charsets+255 ;cl ++)

View file

@ -2928,7 +2928,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
}
/* Close all instances of the table to allow repair to rename files */
if (lock_type == TL_WRITE && table->table->s->version)
if (lock_type == TL_WRITE && table->table->s->version &&
!table->table->s->log_table)
{
pthread_mutex_lock(&LOCK_open);
const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
@ -3098,9 +3099,10 @@ send_result_message:
}
if (table->table)
{
/* In the check below we do not refresh the log tables */
if (fatal_error)
table->table->s->version=0; // Force close of table
else if (open_for_modify)
else if (open_for_modify && !table->table->s->log_table)
{
pthread_mutex_lock(&LOCK_open);
remove_table_from_cache(thd, table->table->s->db.str,
@ -3842,6 +3844,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
uint *index_drop_buffer;
uint index_add_count;
uint *index_add_buffer;
bool committed= 0;
DBUG_ENTER("mysql_alter_table");
thd->proc_info="init";
@ -4759,6 +4762,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
DBUG_PRINT("info", ("Committing after add/drop index"));
if (ha_commit_stmt(thd) || ha_commit(thd))
goto err;
committed= 1;
}
}
/*end of if (! new_table) for add/drop index*/
@ -4890,7 +4894,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
#ifdef XXX_TO_BE_DONE_LATER_BY_WL1892
if (! need_copy_table)
{
if (! table)
@ -4907,7 +4910,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
goto err;
}
}
#endif
if (thd->lock || new_name != table_name) // True if WIN32
{
/*
@ -4957,11 +4959,14 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
wait_if_global_read_lock(), which could create a deadlock if called
with LOCK_open.
*/
error = ha_commit_stmt(thd);
if (ha_commit(thd))
error=1;
if (error)
goto err;
if (!committed)
{
error = ha_commit_stmt(thd);
if (ha_commit(thd))
error=1;
if (error)
goto err;
}
thd->proc_info="end";
DBUG_ASSERT(!(mysql_bin_log.is_open() && binlog_row_based &&

View file

@ -310,16 +310,29 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags)
error= open_binary_frm(thd, share, head, file);
*root_ptr= old_root;
/*
We can't mark all tables in 'mysql' database as system since we don't
allow to lock such tables for writing with any other tables (even with
other system tables) and some privilege tables need this.
*/
if (share->db.length == 5 &&
!my_strcasecmp(system_charset_info, share->db.str, "mysql") &&
(!my_strcasecmp(system_charset_info, share->table_name.str, "proc") ||
!my_strcasecmp(system_charset_info, share->table_name.str, "event")))
share->system_table= 1;
!my_strcasecmp(system_charset_info, share->db.str, "mysql"))
{
/*
We can't mark all tables in the 'mysql' database as system tables, since we
don't allow locking such tables for writing together with any other tables
(even with other system tables), and some privilege tables need this.
*/
if (!my_strcasecmp(system_charset_info, share->table_name.str, "proc")
|| !my_strcasecmp(system_charset_info, share->table_name.str,
"event"))
share->system_table= 1;
else
{
if (!my_strcasecmp(system_charset_info, share->table_name.str,
"general_log"))
share->log_table= LOG_GENERAL;
else
if (!my_strcasecmp(system_charset_info, share->table_name.str,
"slow_log"))
share->log_table= LOG_SLOW;
}
}
error_given= 1;
}

View file

@ -198,6 +198,11 @@ typedef struct st_table_share
locking of this table for writing. FALSE - otherwise.
*/
bool system_table;
/*
This flag is set for the log tables. It is used during FLUSH to skip log
table instances while closing tables (since logs must always be available).
*/
bool log_table;
#ifdef WITH_PARTITION_STORAGE_ENGINE
const uchar *partition_info;
uint partition_info_len;
@ -286,6 +291,7 @@ struct st_table {
my_bool distinct,const_table,no_rows;
my_bool key_read, no_keyread;
my_bool locked_by_flush;
my_bool locked_by_logger;
my_bool locked_by_name;
my_bool fulltext_searched;
my_bool no_cache;

View file

@ -197,8 +197,16 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
char *tmp_name;
uint length;
if (!tina_init)
tina_init_func();
pthread_mutex_lock(&tina_mutex);
length=(uint) strlen(table_name);
/*
If share is not present in the hash, create a new share and
initialize its members.
*/
if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
(byte*) table_name,
length)))
@ -214,6 +222,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
}
share->use_count= 0;
share->is_log_table= FALSE;
share->table_name_length= length;
share->table_name= tmp_name;
strmov(share->table_name, table_name);
@ -238,6 +247,9 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
share->mapped_file= NULL; // We don't know the state as we just allocated it
if (get_mmap(share, 0) > 0)
goto error3;
/* init file length value used by readers */
share->saved_data_file_length= share->file_stat.st_size;
}
share->use_count++;
pthread_mutex_unlock(&tina_mutex);
@ -311,14 +323,16 @@ ha_tina::ha_tina(TABLE_SHARE *table_arg)
These definitions are found in handler.h
They are not probably completely right.
*/
current_position(0), next_position(0), chain_alloced(0),
chain_size(DEFAULT_CHAIN_LENGTH), records_is_known(0)
current_position(0), next_position(0), local_saved_data_file_length(0),
chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH),
records_is_known(0)
{
/* Set our original buffers from pre-allocated memory */
buffer.set(byte_buffer, IO_SIZE, system_charset_info);
chain= chain_buffer;
}
/*
Encode a buffer into the quoted format.
*/
@ -427,13 +441,18 @@ int ha_tina::chain_append()
*/
int ha_tina::find_current_row(byte *buf)
{
byte *mapped_ptr= (byte *)share->mapped_file + current_position;
byte *mapped_ptr;
byte *end_ptr;
DBUG_ENTER("ha_tina::find_current_row");
/* EOF should be counted as new line */
mapped_ptr= (byte *)share->mapped_file + current_position;
/*
We do not read further than local_saved_data_file_length in order
not to conflict with an ongoing concurrent insert.
*/
if ((end_ptr= find_eoln(share->mapped_file, current_position,
share->file_stat.st_size)) == 0)
local_saved_data_file_length)) == 0)
DBUG_RETURN(HA_ERR_END_OF_FILE);
for (Field **field=table->field ; *field ; field++)
@ -491,6 +510,112 @@ const char **ha_tina::bas_ext() const
return ha_tina_exts;
}
/*
The three functions below are needed to enable concurrent insert
functionality for the CSV engine. For more details see mysys/thr_lock.c.
*/
void tina_get_status(void* param, int concurrent_insert)
{
ha_tina *tina= (ha_tina*) param;
tina->get_status();
}
void tina_update_status(void* param)
{
ha_tina *tina= (ha_tina*) param;
tina->update_status();
}
/* this should exist and return 0 for concurrent insert to work */
my_bool tina_check_status(void* param)
{
return 0;
}
/*
Save the state of the table
SYNOPSIS
get_status()
DESCRIPTION
This function is used to retrieve the file length during the lock
phase of a concurrent insert. For more details see the comment to
ha_tina::update_status below.
*/
void ha_tina::get_status()
{
if (share->is_log_table)
{
/*
We have to use a mutex to follow the pthreads memory visibility
rules for share->saved_data_file_length.
*/
pthread_mutex_lock(&share->mutex);
local_saved_data_file_length= share->saved_data_file_length;
pthread_mutex_unlock(&share->mutex);
return;
}
local_saved_data_file_length= share->saved_data_file_length;
}
/*
Correct the state of the table. Called by unlock routines
before the write lock is released.
SYNOPSIS
update_status()
DESCRIPTION
When we employ a concurrent insert lock, we save the current length of the
file during the lock phase. We do not read beyond that saved value, as we
don't want to interfere with an ongoing concurrent insert. Writers update
the file length info during unlock with update_status().
NOTE
For log tables concurrent insert works differently. The reason is that
log tables are always opened and locked, and as they never unlock
the tables, the file length after writes has to be updated in a different
way. For this purpose we need the is_log_table flag. When this flag is set
we call update_status() explicitly after each row write.
*/
void ha_tina::update_status()
{
/* correct local_saved_data_file_length for writers */
share->saved_data_file_length= share->file_stat.st_size;
}
bool ha_tina::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
{
/*
Deny locking of the log tables, which is incompatible with
concurrent insert, unless we are called from a logger THD
(general_log_thd or slow_log_thd).
*/
if (table->s->log_table &&
thd->lex->sql_command != SQLCOM_TRUNCATE &&
!(thd->lex->sql_command == SQLCOM_FLUSH &&
thd->lex->type & REFRESH_LOG) &&
(thd != logger.get_general_log_thd()) &&
(thd != logger.get_slow_log_thd()) &&
(table->reginfo.lock_type >= TL_READ_NO_INSERT))
{
/*
The check >= TL_READ_NO_INSERT denies all write locks
plus the only read lock (TL_READ_NO_INSERT itself)
*/
table->reginfo.lock_type == TL_READ_NO_INSERT ?
my_error(ER_CANT_READ_LOCK_LOG_TABLE, MYF(0)) :
my_error(ER_CANT_WRITE_LOCK_LOG_TABLE, MYF(0));
return FALSE;
}
return TRUE;
}
/*
Open a database file. Keep in mind that tables are caches, so
@ -503,9 +628,19 @@ int ha_tina::open(const char *name, int mode, uint test_if_locked)
if (!(share= get_share(name, table)))
DBUG_RETURN(1);
thr_lock_data_init(&share->lock,&lock,NULL);
/*
Init locking. Pass the handler object to the locking routines
so that they can save/update the local_saved_data_file_length value
during locking. This is needed to enable concurrent inserts.
*/
thr_lock_data_init(&share->lock, &lock, (void*) this);
ref_length=sizeof(off_t);
share->lock.get_status= tina_get_status;
share->lock.update_status= tina_update_status;
share->lock.check_status= tina_check_status;
DBUG_RETURN(0);
}
@ -549,6 +684,18 @@ int ha_tina::write_row(byte * buf)
*/
if (get_mmap(share, 0) > 0)
DBUG_RETURN(-1);
/* update local copy of the max position to see our own changes */
local_saved_data_file_length= share->file_stat.st_size;
/* update status for the log tables */
if (share->is_log_table)
{
pthread_mutex_lock(&share->mutex);
update_status();
pthread_mutex_unlock(&share->mutex);
}
records++;
DBUG_RETURN(0);
}
@ -567,6 +714,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int size;
DBUG_ENTER("ha_tina::update_row");
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
@ -580,6 +728,13 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
DBUG_RETURN(-1);
/* UPDATE should never happen on the log tables */
DBUG_ASSERT(!share->is_log_table);
/* update local copy of the max position to see our own changes */
local_saved_data_file_length= share->file_stat.st_size;
DBUG_RETURN(0);
}
@ -604,6 +759,9 @@ int ha_tina::delete_row(const byte * buf)
--records;
/* DELETE should never happen on the log table */
DBUG_ASSERT(!share->is_log_table);
DBUG_RETURN(0);
}
@ -811,6 +969,12 @@ void ha_tina::info(uint flag)
int ha_tina::extra(enum ha_extra_function operation)
{
DBUG_ENTER("ha_tina::extra");
if (operation == HA_EXTRA_MARK_AS_LOG_TABLE)
{
pthread_mutex_lock(&share->mutex);
share->is_log_table= TRUE;
pthread_mutex_unlock(&share->mutex);
}
DBUG_RETURN(0);
}
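The get_status()/update_status() comments above describe the invariant that makes concurrent insert safe for the CSV log tables: a reader scans only up to the file length snapshotted when it acquired its lock, so rows appended afterwards stay invisible to that scan, while log tables (which are never unlocked) publish the new length right after every write. A minimal, self-contained illustration of that invariant, using toy names that are not MySQL APIs:

#include <cstddef>
#include <vector>

struct toy_share                              /* stand-in for TINA_SHARE */
{
  std::vector<char> file;                     /* the data file */
  std::size_t saved_data_file_length= 0;      /* EOF as published to readers */
};

struct toy_reader                             /* stand-in for ha_tina (reader side) */
{
  std::size_t local_saved_data_file_length= 0; /* snapshot taken at lock time */

  void get_status(const toy_share &s)          /* mirrors ha_tina::get_status() */
  { local_saved_data_file_length= s.saved_data_file_length; }

  bool row_visible(std::size_t row_end) const  /* bound used by the row scan */
  { return row_end <= local_saved_data_file_length; }
};

void toy_write_row(toy_share &s, const char *row, std::size_t len,
                   bool is_log_table)          /* mirrors ha_tina::write_row() */
{
  s.file.insert(s.file.end(), row, row + len);
  if (is_log_table)
    s.saved_data_file_length= s.file.size();   /* update_status() equivalent */
}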

View file

@ -23,9 +23,20 @@
typedef struct st_tina_share {
char *table_name;
byte *mapped_file; /* mapped region of file */
uint table_name_length,use_count;
uint table_name_length, use_count;
/*
The flag below is needed to make log tables work with concurrent insert.
For more details see the comment to ha_tina::update_status.
*/
my_bool is_log_table;
MY_STAT file_stat; /* Stat information for the data file */
File data_file; /* Current open data file */
/*
Here we save the length of the file for readers. This is updated by
inserts, updates and deletes. The var is initialized along with the
share initialization.
*/
off_t saved_data_file_length;
pthread_mutex_t mutex;
THR_LOCK lock;
} TINA_SHARE;
@ -41,6 +52,7 @@ class ha_tina: public handler
TINA_SHARE *share; /* Shared lock info */
off_t current_position; /* Current position in the file during a file scan */
off_t next_position; /* Next position in the file scan */
off_t local_saved_data_file_length; /* save position for reads */
byte byte_buffer[IO_SIZE];
String buffer;
/*
@ -92,6 +104,7 @@ public:
*/
ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; }
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
@ -120,6 +133,13 @@ public:
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
/*
These functions are used to get/update the status of the handler.
They are needed to enable concurrent inserts.
*/
void get_status();
void update_status();
/* The following methods were added just for TINA */
int encode_quote(byte *buf);
int find_current_row(byte *buf);

View file

@ -3,7 +3,7 @@ LDADD += \
$(top_builddir)/storage/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
$(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ @ZLIB_LIBS@
INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \
-I$(top_srcdir)/storage/ndb/include \

View file

@ -916,7 +916,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
req_struct->m_tuple_ptr= save;
ndbrequire(ret != -1);
noBeforeWords = ret;
if ((noAfterWords == noBeforeWords) &&
if (trigPtr->m_receiverBlock != SUMA &&
(noAfterWords == noBeforeWords) &&
(memcmp(afterBuffer, beforeBuffer, noAfterWords << 2) == 0)) {
//--------------------------------------------------------------------
// Although a trigger was fired it was not necessary since the old

View file

@ -1392,6 +1392,8 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
DBUG_PRINT("info",("Suma::Table[%u,i=%u]::n_subscribers: %u",
tabPtr.p->m_tableId, tabPtr.i, tabPtr.p->n_subscribers));
tabPtr.p->m_reportAll = false;
tabPtr.p->m_error = 0;
tabPtr.p->m_schemaVersion = RNIL;
tabPtr.p->m_state = Table::DEFINING;

View file

@ -55,7 +55,7 @@ ndb_drop_index_LDFLAGS = @ndb_bin_am_ldflags@
ndb_show_tables_LDFLAGS = @ndb_bin_am_ldflags@
ndb_select_all_LDFLAGS = @ndb_bin_am_ldflags@
ndb_select_count_LDFLAGS = @ndb_bin_am_ldflags@
ndb_restore_LDFLAGS = @ndb_bin_am_ldflags@ @ZLIB_LIBS@
ndb_restore_LDFLAGS = @ndb_bin_am_ldflags@
ndb_config_LDFLAGS = @ndb_bin_am_ldflags@
# Don't update the files from bitkeeper

View file

@ -4048,8 +4048,8 @@ static MY_CHARSET_HANDLER my_charset_filename_handler=
CHARSET_INFO my_charset_filename=
{
33,0,0, /* number */
MY_CS_COMPILED|MY_CS_PRIMARY|MY_CS_STRNXFRM|MY_CS_UNICODE, /* state */
17,0,0, /* number */
MY_CS_COMPILED|MY_CS_PRIMARY|MY_CS_STRNXFRM|MY_CS_UNICODE|MY_CS_HIDDEN,
"filename", /* cs name */
"filename", /* name */
"", /* comment */