Merge quad.:/mnt/raid/alik/MySQL/devel/5.1

into  quad.:/mnt/raid/alik/MySQL/devel/5.1-rt-merged-5.0-rt


mysql-test/r/innodb_mysql.result:
  Auto merged
mysql-test/t/disabled.def:
  Auto merged
sql/sql_insert.cc:
  Auto merged
sql/sql_select.cc:
  Auto merged
sql/sql_update.cc:
  Auto merged
Author: unknown
Date:   2008-01-24 17:40:16 +03:00
Commit: 5785aba2f8
38 changed files with 1894 additions and 655 deletions


@ -1688,6 +1688,30 @@ case "$with_atomic_ops" in
*) AC_MSG_ERROR(["$with_atomic_ops" is not a valid value for --with-atomic-ops]) ;;
esac
AC_CACHE_CHECK([whether the compiler provides atomic builtins],
[mysql_cv_gcc_atomic_builtins], [AC_TRY_RUN([
int main()
{
int foo= -10; int bar= 10;
if (!__sync_fetch_and_add(&foo, bar) || foo)
return -1;
bar= __sync_lock_test_and_set(&foo, bar);
if (bar || foo != 10)
return -1;
bar= __sync_val_compare_and_swap(&bar, foo, 15);
if (bar)
return -1;
return 0;
}
], [mysql_cv_gcc_atomic_builtins=yes],
[mysql_cv_gcc_atomic_builtins=no],
[mysql_cv_gcc_atomic_builtins=no])])
if test "x$mysql_cv_gcc_atomic_builtins" = xyes; then
AC_DEFINE(HAVE_GCC_ATOMIC_BUILTINS, 1,
[Define to 1 if compiler provides atomic builtins.])
fi
# Force static compilation to avoid linking problems/get more speed
AC_ARG_WITH(mysqld-ldflags,
[ --with-mysqld-ldflags Extra linking arguments for mysqld],
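
The three builtins probed by the AC_TRY_RUN above are GCC's legacy __sync intrinsics. A minimal standalone sketch of how code guarded by the resulting HAVE_GCC_ATOMIC_BUILTINS define can use them follows; the names counter, add_and_fetch_old and cas are illustrative, not from the patch:

#include <cstdio>

#ifdef HAVE_GCC_ATOMIC_BUILTINS
static int counter= 0;

/* Atomically add 'delta' and return the value 'counter' held before the add. */
static int add_and_fetch_old(int delta)
{
  return __sync_fetch_and_add(&counter, delta);
}

/* Return true when 'counter' equalled 'expected' and was replaced by 'desired'. */
static bool cas(int expected, int desired)
{
  return __sync_bool_compare_and_swap(&counter, expected, desired);
}
#endif

int main()
{
#ifdef HAVE_GCC_ATOMIC_BUILTINS
  add_and_fetch_old(10);            /* counter: 0 -> 10 */
  if (cas(10, 42))                  /* succeeds, counter becomes 42 */
    std::printf("counter is now 42\n");
#endif
  return 0;
}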


@ -36,7 +36,7 @@ noinst_HEADERS = config-win.h config-netware.h \
mysql_version.h.in my_handler.h my_time.h \
my_vle.h my_user.h my_atomic.h atomic/nolock.h \
atomic/rwlock.h atomic/x86-gcc.h atomic/x86-msvc.h \
my_libwrap.h
atomic/gcc_builtins.h my_libwrap.h
# Remove built files and the symlinked directories
CLEANFILES = $(BUILT_SOURCES) readline openssl


@ -0,0 +1,33 @@
/* Copyright (C) 2008 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define make_atomic_add_body(S) \
v= __sync_fetch_and_add(a, v);
#define make_atomic_swap_body(S) \
v= __sync_lock_test_and_set(a, v);
#define make_atomic_cas_body(S) \
int ## S sav; \
sav= __sync_val_compare_and_swap(a, *cmp, set); \
if (!(ret= (sav == *cmp))) *cmp= sav;
#ifdef MY_ATOMIC_MODE_DUMMY
#define make_atomic_load_body(S) ret= *a
#define make_atomic_store_body(S) *a= v
#else
#define make_atomic_load_body(S) \
ret= __sync_fetch_and_or(a, 0);
#define make_atomic_store_body(S) \
(void) __sync_lock_test_and_set(a, v);
#endif
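
As a reading aid, make_atomic_cas_body above behaves like the following standalone helper (a sketch, not part of the patch; the function name is illustrative): on success it returns 1, on failure it stores the value actually observed back into *cmp, which is the contract my_atomic_cas32() callers rely on.

/* Sketch of what make_atomic_cas_body(32) amounts to. */
static inline int gcc_builtins_cas32(volatile int *a, int *cmp, int set)
{
  int sav= __sync_val_compare_and_swap(a, *cmp, set);
  int ret= (sav == *cmp);   /* 1 if the swap happened */
  if (!ret)
    *cmp= sav;              /* otherwise report the value we saw */
  return ret;
}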


@ -13,7 +13,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#if defined(__i386__) || defined(_M_IX86)
#if defined(__i386__) || defined(_M_IX86) || defined(HAVE_GCC_ATOMIC_BUILTINS)
#ifdef MY_ATOMIC_MODE_DUMMY
# define LOCK ""
@ -21,7 +21,9 @@
# define LOCK "lock"
#endif
#ifdef __GNUC__
#ifdef HAVE_GCC_ATOMIC_BUILTINS
#include "gcc_builtins.h"
#elif __GNUC__
#include "x86-gcc.h"
#elif defined(_MSC_VER)
#include "x86-msvc.h"


@ -527,3 +527,23 @@ SELECT * FROM t1 WHERE a = INTERVAL(3,2,1) + 1;
a b
3 1998-01-01 00:00:00
DROP TABLE t1;
DROP TABLE IF EXISTS t1,t2,t3;
CREATE TABLE t1 (a1 INT, a2 INT, a3 INT, a4 DATETIME);
CREATE TABLE t2 LIKE t1;
CREATE TABLE t3 LIKE t1;
SELECT t1.* FROM t1 AS t0, { OJ t2 INNER JOIN t1 ON (t1.a1=t2.a1) } WHERE t0.a3=2;
a1 a2 a3 a4
SELECT t1.*,t2.* FROM { OJ ((t1 INNER JOIN t2 ON (t1.a1=t2.a2)) LEFT OUTER JOIN t3 ON t3.a3=t2.a1)};
a1 a2 a3 a4 a1 a2 a3 a4
SELECT t1.*,t2.* FROM { OJ ((t1 LEFT OUTER JOIN t2 ON t1.a3=t2.a2) INNER JOIN t3 ON (t3.a1=t2.a2))};
a1 a2 a3 a4 a1 a2 a3 a4
SELECT t1.*,t2.* FROM { OJ (t1 LEFT OUTER JOIN t2 ON t1.a1=t2.a2) CROSS JOIN t3 ON (t3.a2=t2.a3)};
a1 a2 a3 a4 a1 a2 a3 a4
SELECT * FROM {oj t1 LEFT OUTER JOIN t2 ON t1.a1=t2.a3} WHERE t1.a2 > 10;
a1 a2 a3 a4 a1 a2 a3 a4
SELECT {fn CONCAT(a1,a2)} FROM t1;
{fn CONCAT(a1,a2)}
UPDATE t3 SET a4={d '1789-07-14'} WHERE a1=0;
SELECT a1, a4 FROM t2 WHERE a4 LIKE {fn UCASE('1789-07-14')};
a1 a4
DROP TABLE t1, t2, t3;


@ -0,0 +1,24 @@
flush status;
set query_cache_type=DEMAND;
set global query_cache_size= 1024*1024*512;
drop table if exists t1;
create table t1 (a varchar(100));
insert into t1 values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
Activate debug hook and attempt to retrieve the statement from the cache.
set session debug='+d,wait_in_query_cache_insert';
select SQL_CACHE * from t1;;
On a second connection; clear the query cache.
show status like 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 1
set global query_cache_size= 0;
Signal the debug hook to release the lock.
select id from information_schema.processlist where state='wait_in_query_cache_insert' into @thread_id;
kill query @thread_id;
Show query cache status.
show status like 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 0
set global query_cache_size= 0;
use test;
drop table t1;


@ -733,6 +733,115 @@ optimizer: keep hreturn
drop table t1;
drop procedure proc_26977_broken;
drop procedure proc_26977_works;
drop procedure if exists proc_33618_h;
drop procedure if exists proc_33618_c;
create procedure proc_33618_h(num int)
begin
declare count1 int default '0';
declare vb varchar(30);
declare last_row int;
while(num>=1) do
set num=num-1;
begin
declare cur1 cursor for select `a` from t_33618;
declare continue handler for not found set last_row = 1;
set last_row:=0;
open cur1;
rep1:
repeat
begin
declare exit handler for 1062 begin end;
fetch cur1 into vb;
if (last_row = 1) then
## should generate a hpop instruction here
leave rep1;
end if;
end;
until last_row=1
end repeat;
close cur1;
end;
end while;
end//
create procedure proc_33618_c(num int)
begin
declare count1 int default '0';
declare vb varchar(30);
declare last_row int;
while(num>=1) do
set num=num-1;
begin
declare cur1 cursor for select `a` from t_33618;
declare continue handler for not found set last_row = 1;
set last_row:=0;
open cur1;
rep1:
repeat
begin
declare cur2 cursor for select `b` from t_33618;
fetch cur1 into vb;
if (last_row = 1) then
## should generate a cpop instruction here
leave rep1;
end if;
end;
until last_row=1
end repeat;
close cur1;
end;
end while;
end//
show procedure code proc_33618_h;
Pos Instruction
0 set count1@1 _latin1'0'
1 set vb@2 NULL
2 set last_row@3 NULL
3 jump_if_not 24(24) (num@0 >= 1)
4 set num@0 (num@0 - 1)
5 cpush cur1@0
6 hpush_jump 9 4 CONTINUE
7 set last_row@3 1
8 hreturn 4
9 set last_row@3 0
10 copen cur1@0
11 hpush_jump 13 4 EXIT
12 hreturn 0 17
13 cfetch cur1@0 vb@2
14 jump_if_not 17(17) (last_row@3 = 1)
15 hpop 1
16 jump 19
17 hpop 1
18 jump_if_not 11(19) (last_row@3 = 1)
19 cclose cur1@0
20 hpop 1
21 cpop 1
22 jump 3
show procedure code proc_33618_c;
Pos Instruction
0 set count1@1 _latin1'0'
1 set vb@2 NULL
2 set last_row@3 NULL
3 jump_if_not 23(23) (num@0 >= 1)
4 set num@0 (num@0 - 1)
5 cpush cur1@0
6 hpush_jump 9 4 CONTINUE
7 set last_row@3 1
8 hreturn 4
9 set last_row@3 0
10 copen cur1@0
11 cpush cur2@1
12 cfetch cur1@0 vb@2
13 jump_if_not 16(16) (last_row@3 = 1)
14 cpop 1
15 jump 18
16 cpop 1
17 jump_if_not 11(18) (last_row@3 = 1)
18 cclose cur1@0
19 hpop 1
20 cpop 1
21 jump 3
drop procedure proc_33618_h;
drop procedure proc_33618_c;
End of 5.0 tests.
CREATE PROCEDURE p1()
BEGIN


@ -1579,3 +1579,51 @@ drop function f2;
drop table t2;
ERROR 42S02: Unknown table 't2'
End of 5.1 tests
drop procedure if exists proc_33983_a;
drop procedure if exists proc_33983_b;
drop procedure if exists proc_33983_c;
drop procedure if exists proc_33983_d;
create procedure proc_33983_a()
begin
label1:
begin
label2:
begin
select 1;
end label1;
end;
end|
ERROR 42000: End-label label1 without match
create procedure proc_33983_b()
begin
label1:
repeat
label2:
repeat
select 1;
until FALSE end repeat label1;
until FALSE end repeat;
end|
ERROR 42000: End-label label1 without match
create procedure proc_33983_c()
begin
label1:
while TRUE do
label2:
while TRUE do
select 1;
end while label1;
end while;
end|
ERROR 42000: End-label label1 without match
create procedure proc_33983_d()
begin
label1:
loop
label2:
loop
select 1;
end loop label1;
end loop;
end|
ERROR 42000: End-label label1 without match


@ -6812,7 +6812,59 @@ DROP PROCEDURE db28318_b.t2;
DROP DATABASE db28318_a;
DROP DATABASE db28318_b;
use test;
End of 5.0 tests
DROP TABLE IF EXISTS t1;
DROP PROCEDURE IF EXISTS bug29770;
CREATE TABLE t1(a int);
CREATE PROCEDURE bug29770()
BEGIN
DECLARE CONTINUE HANDLER FOR SQLSTATE '42S22' SET @state:= 'run';
DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET @exception:= 'run';
SELECT x FROM t1;
END|
CALL bug29770();
SELECT @state, @exception;
@state @exception
run NULL
DROP TABLE t1;
DROP PROCEDURE bug29770;
use test;
drop table if exists t_33618;
drop procedure if exists proc_33618;
create table t_33618 (`a` int, unique(`a`), `b` varchar(30)) engine=myisam;
insert into t_33618 (`a`,`b`) values (1,'1'),(2,'2');
create procedure proc_33618(num int)
begin
declare count1 int default '0';
declare vb varchar(30);
declare last_row int;
while(num>=1) do
set num=num-1;
begin
declare cur1 cursor for select `a` from t_33618;
declare continue handler for not found set last_row = 1;
set last_row:=0;
open cur1;
rep1:
repeat
begin
declare exit handler for 1062 begin end;
fetch cur1 into vb;
if (last_row = 1) then
leave rep1;
end if;
end;
until last_row=1
end repeat;
close cur1;
end;
end while;
end//
call proc_33618(20);
drop table t_33618;
drop procedure proc_33618;
# ------------------------------------------------------------------
# -- End of 5.0 tests
# ------------------------------------------------------------------
#
# Bug#20550.
@ -6911,4 +6963,6 @@ END latin1 latin1_swedish_ci latin1_swedish_ci
DROP FUNCTION f1;
End of 5.1 tests
# ------------------------------------------------------------------
# -- End of 5.1 tests
# ------------------------------------------------------------------


@ -162,3 +162,24 @@ Variable_name Value
Com_show_status 8
rnd_diff tmp_table_diff
20 8
show global status like 'Com%function%';
Variable_name Value
Com_alter_function 0
Com_create_function 0
Com_drop_function 0
Com_show_function_code 0
Com_show_function_status 0
create function f1 (x INTEGER) returns integer
begin
declare ret integer;
set ret = x * 10;
return ret;
end //
drop function f1;
show global status like 'Com%function%';
Variable_name Value
Com_alter_function 0
Com_create_function 1
Com_drop_function 1
Com_show_function_code 0
Com_show_function_status 0


@ -657,3 +657,23 @@ CREATE TABLE t1 (a INT, b DATETIME);
INSERT INTO t1 VALUES (INTERVAL(3,2,1) + 1, "1997-12-31 23:59:59" + INTERVAL 1 SECOND);
SELECT * FROM t1 WHERE a = INTERVAL(3,2,1) + 1;
DROP TABLE t1;
#
# Bug#28317 Left Outer Join with {oj outer-join}
#
--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3;
--enable_warnings
CREATE TABLE t1 (a1 INT, a2 INT, a3 INT, a4 DATETIME);
CREATE TABLE t2 LIKE t1;
CREATE TABLE t3 LIKE t1;
SELECT t1.* FROM t1 AS t0, { OJ t2 INNER JOIN t1 ON (t1.a1=t2.a1) } WHERE t0.a3=2;
SELECT t1.*,t2.* FROM { OJ ((t1 INNER JOIN t2 ON (t1.a1=t2.a2)) LEFT OUTER JOIN t3 ON t3.a3=t2.a1)};
SELECT t1.*,t2.* FROM { OJ ((t1 LEFT OUTER JOIN t2 ON t1.a3=t2.a2) INNER JOIN t3 ON (t3.a1=t2.a2))};
SELECT t1.*,t2.* FROM { OJ (t1 LEFT OUTER JOIN t2 ON t1.a1=t2.a2) CROSS JOIN t3 ON (t3.a2=t2.a3)};
SELECT * FROM {oj t1 LEFT OUTER JOIN t2 ON t1.a1=t2.a3} WHERE t1.a2 > 10;
SELECT {fn CONCAT(a1,a2)} FROM t1;
UPDATE t3 SET a4={d '1789-07-14'} WHERE a1=0;
SELECT a1, a4 FROM t2 WHERE a4 LIKE {fn UCASE('1789-07-14')};
DROP TABLE t1, t2, t3;


@ -1316,6 +1316,5 @@ SELECT 1 FROM t1 GROUP BY
(SELECT LAST_INSERT_ID() FROM t1 ORDER BY MIN(a) ASC LIMIT 1);
DROP TABLE t1;
--echo End of 5.1 tests


@ -0,0 +1,46 @@
--source include/not_embedded.inc
--source include/have_query_cache.inc
--source include/have_debug.inc
#
# Bug #30887 Server crashes on SET GLOBAL query_cache_size=0
#
flush status;
set query_cache_type=DEMAND;
set global query_cache_size= 1024*1024*512;
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (a varchar(100));
insert into t1 values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
connect (bug30887con1, localhost, root, ,test);
connect (bug30887con2, localhost, root, ,test);
connection bug30887con1;
--echo Activate debug hook and attempt to retrieve the statement from the cache.
set session debug='+d,wait_in_query_cache_insert';
--send select SQL_CACHE * from t1;
connection default;
let $wait_condition= select count(*)= 1 from information_schema.processlist where state= 'wait_in_query_cache_insert';
--source include/wait_condition.inc
connection bug30887con2;
--echo On a second connection; clear the query cache.
show status like 'Qcache_queries_in_cache';
set global query_cache_size= 0;
connection default;
--echo Signal the debug hook to release the lock.
select id from information_schema.processlist where state='wait_in_query_cache_insert' into @thread_id;
kill query @thread_id;
--echo Show query cache status.
show status like 'Qcache_queries_in_cache';
disconnect bug30887con1;
disconnect bug30887con2;
set global query_cache_size= 0;
use test;
drop table t1;
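
The test drives a debug keyword ('wait_in_query_cache_insert') that parks the inserting connection inside the cache-insert path until it is released, here by KILL QUERY from another connection. As a rough, self-contained sketch of that wait/signal shape only (not MySQL's actual implementation, which uses the DBUG facility inside query_cache.cc; the class and method names below are assumptions for illustration):

#include <condition_variable>
#include <mutex>
#include <set>
#include <string>

/* Illustrative only: a thread parks on a named keyword until another
   thread releases it. */
class Named_wait_point
{
  std::mutex m;
  std::condition_variable cv;
  std::set<std::string> released;
public:
  void wait(const std::string &keyword)
  {
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [&]{ return released.count(keyword) != 0; });
  }
  void signal(const std::string &keyword)
  {
    { std::lock_guard<std::mutex> lk(m); released.insert(keyword); }
    cv.notify_all();
  }
};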


@ -520,6 +520,83 @@ drop table t1;
drop procedure proc_26977_broken;
drop procedure proc_26977_works;
#
# Bug#33618 Crash in sp_rcontext
#
--disable_warnings
drop procedure if exists proc_33618_h;
drop procedure if exists proc_33618_c;
--enable_warnings
delimiter //;
create procedure proc_33618_h(num int)
begin
declare count1 int default '0';
declare vb varchar(30);
declare last_row int;
while(num>=1) do
set num=num-1;
begin
declare cur1 cursor for select `a` from t_33618;
declare continue handler for not found set last_row = 1;
set last_row:=0;
open cur1;
rep1:
repeat
begin
declare exit handler for 1062 begin end;
fetch cur1 into vb;
if (last_row = 1) then
## should generate a hpop instruction here
leave rep1;
end if;
end;
until last_row=1
end repeat;
close cur1;
end;
end while;
end//
create procedure proc_33618_c(num int)
begin
declare count1 int default '0';
declare vb varchar(30);
declare last_row int;
while(num>=1) do
set num=num-1;
begin
declare cur1 cursor for select `a` from t_33618;
declare continue handler for not found set last_row = 1;
set last_row:=0;
open cur1;
rep1:
repeat
begin
declare cur2 cursor for select `b` from t_33618;
fetch cur1 into vb;
if (last_row = 1) then
## should generate a cpop instruction here
leave rep1;
end if;
end;
until last_row=1
end repeat;
close cur1;
end;
end while;
end//
delimiter ;//
show procedure code proc_33618_h;
show procedure code proc_33618_c;
drop procedure proc_33618_h;
drop procedure proc_33618_c;
--echo End of 5.0 tests.


@ -2305,6 +2305,69 @@ drop table t2;
--echo End of 5.1 tests
#
# Bug#33983 (Stored Procedures: wrong end <label> syntax is accepted)
#
--disable_warnings
drop procedure if exists proc_33983_a;
drop procedure if exists proc_33983_b;
drop procedure if exists proc_33983_c;
drop procedure if exists proc_33983_d;
--enable_warnings
delimiter |;
--error ER_SP_LABEL_MISMATCH
create procedure proc_33983_a()
begin
label1:
begin
label2:
begin
select 1;
end label1;
end;
end|
--error ER_SP_LABEL_MISMATCH
create procedure proc_33983_b()
begin
label1:
repeat
label2:
repeat
select 1;
until FALSE end repeat label1;
until FALSE end repeat;
end|
--error ER_SP_LABEL_MISMATCH
create procedure proc_33983_c()
begin
label1:
while TRUE do
label2:
while TRUE do
select 1;
end while label1;
end while;
end|
--error ER_SP_LABEL_MISMATCH
create procedure proc_33983_d()
begin
label1:
loop
label2:
loop
select 1;
end loop label1;
end loop;
end|
delimiter ;|
#
# BUG#NNNN: New bug synopsis
#


@ -7902,7 +7902,86 @@ use test;
###########################################################################
--echo End of 5.0 tests
#
# Bug#29770 Two handlers are allowed to catch an error in an stored procedure.
#
--disable_warnings
DROP TABLE IF EXISTS t1;
DROP PROCEDURE IF EXISTS bug29770;
--enable_warnings
CREATE TABLE t1(a int);
delimiter |;
CREATE PROCEDURE bug29770()
BEGIN
DECLARE CONTINUE HANDLER FOR SQLSTATE '42S22' SET @state:= 'run';
DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET @exception:= 'run';
SELECT x FROM t1;
END|
delimiter ;|
CALL bug29770();
SELECT @state, @exception;
DROP TABLE t1;
DROP PROCEDURE bug29770;
#
# Bug#33618 Crash in sp_rcontext
#
use test;
--disable_warnings
drop table if exists t_33618;
drop procedure if exists proc_33618;
--enable_warnings
create table t_33618 (`a` int, unique(`a`), `b` varchar(30)) engine=myisam;
insert into t_33618 (`a`,`b`) values (1,'1'),(2,'2');
delimiter //;
create procedure proc_33618(num int)
begin
declare count1 int default '0';
declare vb varchar(30);
declare last_row int;
while(num>=1) do
set num=num-1;
begin
declare cur1 cursor for select `a` from t_33618;
declare continue handler for not found set last_row = 1;
set last_row:=0;
open cur1;
rep1:
repeat
begin
declare exit handler for 1062 begin end;
fetch cur1 into vb;
if (last_row = 1) then
leave rep1;
end if;
end;
until last_row=1
end repeat;
close cur1;
end;
end while;
end//
delimiter ;//
call proc_33618(20);
drop table t_33618;
drop procedure proc_33618;
###########################################################################
--echo # ------------------------------------------------------------------
--echo # -- End of 5.0 tests
--echo # ------------------------------------------------------------------
###########################################################################
@ -8056,4 +8135,6 @@ DROP FUNCTION f1;
###########################################################################
--echo End of 5.1 tests
--echo # ------------------------------------------------------------------
--echo # -- End of 5.1 tests
--echo # ------------------------------------------------------------------


@ -242,4 +242,23 @@ let $tmp_table2 = `show global status like 'Created_tmp_tables'`;
eval select substring_index('$rnd_next2',0x9,-1)-substring_index('$rnd_next',0x9,-1) as rnd_diff, substring_index('$tmp_table2',0x9,-1)-substring_index('$tmp_table',0x9,-1) as tmp_table_diff;
--enable_query_log
#
# Bug#30252 Com_create_function is not incremented.
#
show global status like 'Com%function%';
DELIMITER //;
create function f1 (x INTEGER) returns integer
begin
declare ret integer;
set ret = x * 10;
return ret;
end //
DELIMITER ;//
drop function f1;
show global status like 'Com%function%';
# End of 5.1 tests


@ -367,7 +367,7 @@ bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPLICATE_POS,
HA_CAN_INSERT_DELAYED is disabled until further investigated.
*/
m_table_flags= (ulong)m_file[0]->table_flags();
m_table_flags= (ulong)m_file[0]->ha_table_flags();
m_low_byte_first= m_file[0]->low_byte_first();
m_pkey_is_clustered= TRUE;
file_array= m_file;
@ -382,7 +382,7 @@ bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
}
if (!file->primary_key_is_clustered())
m_pkey_is_clustered= FALSE;
m_table_flags&= file->table_flags();
m_table_flags&= file->ha_table_flags();
} while (*(++file_array));
m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED |
@ -616,7 +616,7 @@ int ha_partition::drop_partitions(const char *path)
sub_elem->partition_name, name_variant);
file= m_file[part];
DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
if ((ret_error= file->delete_table((const char *) part_name_buff)))
if ((ret_error= file->ha_delete_table(part_name_buff)))
error= ret_error;
if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
@ -629,7 +629,7 @@ int ha_partition::drop_partitions(const char *path)
TRUE);
file= m_file[i];
DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
if ((ret_error= file->delete_table((const char *) part_name_buff)))
if ((ret_error= file->ha_delete_table(part_name_buff)))
error= ret_error;
if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
error= 1;
@ -707,7 +707,7 @@ int ha_partition::rename_partitions(const char *path)
sub_elem->partition_name,
NORMAL_PART_NAME);
DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
if ((ret_error= file->ha_delete_table(norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
@ -722,7 +722,7 @@ int ha_partition::rename_partitions(const char *path)
part_elem->partition_name, NORMAL_PART_NAME,
TRUE);
DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
if ((ret_error= file->ha_delete_table(norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
error= 1;
@ -778,7 +778,7 @@ int ha_partition::rename_partitions(const char *path)
{
file= m_reorged_file[part_count++];
DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
if ((ret_error= file->ha_delete_table(norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
@ -791,8 +791,8 @@ int ha_partition::rename_partitions(const char *path)
TEMP_PART_NAME);
DBUG_PRINT("info", ("Rename subpartition from %s to %s",
part_name_buff, norm_name_buff));
if ((ret_error= file->rename_table((const char *) part_name_buff,
(const char *) norm_name_buff)))
if ((ret_error= file->ha_rename_table(part_name_buff,
norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
@ -809,7 +809,7 @@ int ha_partition::rename_partitions(const char *path)
{
file= m_reorged_file[part_count++];
DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
if ((ret_error= file->ha_delete_table(norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
error= 1;
@ -821,8 +821,8 @@ int ha_partition::rename_partitions(const char *path)
TRUE);
DBUG_PRINT("info", ("Rename partition from %s to %s",
part_name_buff, norm_name_buff));
if ((ret_error= file->rename_table((const char *) part_name_buff,
(const char *) norm_name_buff)))
if ((ret_error= file->ha_rename_table(part_name_buff,
norm_name_buff)))
error= ret_error;
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
error= 1;
@ -1036,9 +1036,9 @@ static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
DBUG_PRINT("enter", ("flag = %u", flag));
if (flag == OPTIMIZE_PARTS)
error= file->optimize(thd, check_opt);
error= file->ha_optimize(thd, check_opt);
else if (flag == ANALYZE_PARTS)
error= file->analyze(thd, check_opt);
error= file->ha_analyze(thd, check_opt);
else if (flag == CHECK_PARTS)
error= file->ha_check(thd, check_opt);
else if (flag == REPAIR_PARTS)
@ -1139,7 +1139,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
if ((error= set_up_table_before_create(tbl, part_name, create_info,
0, p_elem)))
goto error;
if ((error= file->create(part_name, tbl, create_info)))
if ((error= file->ha_create(part_name, tbl, create_info)))
goto error;
create_flag= TRUE;
if ((error= file->ha_open(tbl, part_name, m_mode, m_open_test_lock)))
@ -1150,13 +1150,13 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
assumes that external_lock() is last call that may fail here.
Otherwise see description for cleanup_new_partition().
*/
if ((error= file->external_lock(current_thd, m_lock_type)))
if ((error= file->ha_external_lock(current_thd, m_lock_type)))
goto error;
DBUG_RETURN(0);
error:
if (create_flag)
VOID(file->delete_table(part_name));
VOID(file->ha_delete_table(part_name));
DBUG_RETURN(error);
}
@ -1574,14 +1574,18 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
}
else
{
THD *thd= ha_thd();
/* Copy record to new handler */
copied++;
if ((result= m_new_file[new_part]->write_row(m_rec0)))
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
result= m_new_file[new_part]->ha_write_row(m_rec0);
reenable_binlog(thd);
if (result)
goto error;
}
}
late_extra_no_cache(reorg_part);
file->rnd_end();
file->ha_rnd_end();
reorg_part++;
}
DBUG_RETURN(FALSE);
@ -1698,16 +1702,15 @@ uint ha_partition::del_ren_cre_table(const char *from,
{ // Rename branch
create_partition_name(to_buff, to, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
error= (*file)->rename_table((const char*) from_buff,
(const char*) to_buff);
error= (*file)->ha_rename_table(from_buff, to_buff);
}
else if (table_arg == NULL) // delete branch
error= (*file)->delete_table((const char*) from_buff);
error= (*file)->ha_delete_table(from_buff);
else
{
if ((error= set_up_table_before_create(table_arg, from_buff,
create_info, i, NULL)) ||
((error= (*file)->create(from_buff, table_arg, create_info))))
((error= (*file)->ha_create(from_buff, table_arg, create_info))))
goto create_error;
}
name_buffer_ptr= strend(name_buffer_ptr) + 1;
@ -1722,7 +1725,7 @@ create_error:
{
create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
VOID((*file)->delete_table((const char*) from_buff));
VOID((*file)->ha_delete_table((const char*) from_buff));
name_buffer_ptr= strend(name_buffer_ptr) + 1;
}
DBUG_RETURN(error);
@ -2314,7 +2317,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
}
/* Recalculate table flags as they may change after open */
m_table_flags= m_file[0]->table_flags();
m_table_flags= m_file[0]->ha_table_flags();
file= m_file;
do
{
@ -2326,7 +2329,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_no_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
set_if_bigger(ref_length, ((*file)->ref_length));
m_table_flags&= (*file)->table_flags();
m_table_flags&= (*file)->ha_table_flags();
} while (*(++file));
m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
@ -2482,7 +2485,7 @@ repeat:
{
DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d",
lock_type, (int) (file - m_file)));
if ((error= (*file)->external_lock(thd, lock_type)))
if ((error= (*file)->ha_external_lock(thd, lock_type)))
{
if (F_UNLCK != lock_type)
goto err_handler;
@ -2501,7 +2504,7 @@ repeat:
err_handler:
while (file-- != m_file)
{
(*file)->external_lock(thd, F_UNLCK);
(*file)->ha_external_lock(thd, F_UNLCK);
}
DBUG_RETURN(error);
}
@ -2694,6 +2697,7 @@ int ha_partition::write_row(uchar * buf)
longlong func_value;
bool autoincrement_lock= FALSE;
my_bitmap_map *old_map;
THD *thd= ha_thd();
#ifdef NOT_NEEDED
uchar *rec0= m_rec0;
#endif
@ -2765,7 +2769,9 @@ int ha_partition::write_row(uchar * buf)
}
m_last_part= part_id;
DBUG_PRINT("info", ("Insert in partition %d", part_id));
error= m_file[part_id]->write_row(buf);
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[part_id]->ha_write_row(buf);
reenable_binlog(thd);
exit:
if (autoincrement_lock)
pthread_mutex_unlock(&table_share->mutex);
@ -2806,6 +2812,7 @@ exit:
int ha_partition::update_row(const uchar *old_data, uchar *new_data)
{
THD *thd= ha_thd();
uint32 new_part_id, old_part_id;
int error= 0;
longlong func_value;
@ -2840,16 +2847,25 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
if (new_part_id == old_part_id)
{
DBUG_PRINT("info", ("Update in partition %d", new_part_id));
error= m_file[new_part_id]->update_row(old_data, new_data);
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[new_part_id]->ha_update_row(old_data, new_data);
reenable_binlog(thd);
goto exit;
}
else
{
DBUG_PRINT("info", ("Update from partition %d to partition %d",
old_part_id, new_part_id));
if ((error= m_file[new_part_id]->write_row(new_data)))
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[new_part_id]->ha_write_row(new_data);
reenable_binlog(thd);
if (error)
goto exit;
if ((error= m_file[old_part_id]->delete_row(old_data)))
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[old_part_id]->ha_delete_row(old_data);
reenable_binlog(thd);
if (error)
{
#ifdef IN_THE_FUTURE
(void) m_file[new_part_id]->delete_last_inserted_row(new_data);
@ -2896,6 +2912,7 @@ int ha_partition::delete_row(const uchar *buf)
{
uint32 part_id;
int error;
THD *thd= ha_thd();
DBUG_ENTER("ha_partition::delete_row");
if ((error= get_part_for_delete(buf, m_rec0, m_part_info, &part_id)))
@ -2903,7 +2920,10 @@ int ha_partition::delete_row(const uchar *buf)
DBUG_RETURN(error);
}
m_last_part= part_id;
DBUG_RETURN(m_file[part_id]->delete_row(buf));
tmp_disable_binlog(thd);
error= m_file[part_id]->ha_delete_row(buf);
reenable_binlog(thd);
DBUG_RETURN(error);
}
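
The tmp_disable_binlog(thd)/reenable_binlog(thd) pairs added around ha_write_row(), ha_update_row() and ha_delete_row() keep the per-partition row operations out of the binary log, since only the top-level ha_partition row change should be replicated (see the "Do not replicate the low-level changes" comments above). Schematically the pair saves thd->options, clears the binlog bit, and restores it afterwards; a self-contained sketch under that assumption, with illustrative names:

/* Stand-in for the save/clear/restore done by the macro pair. */
struct Thd_sketch { unsigned long long options; };
static const unsigned long long OPTION_BIN_LOG_BIT= 1ULL << 0;   /* illustrative bit */

static int call_without_binlog(Thd_sketch *thd, int (*engine_call)(Thd_sketch*))
{
  unsigned long long saved= thd->options;   /* tmp_disable_binlog(thd) */
  thd->options&= ~OPTION_BIN_LOG_BIT;
  int error= engine_call(thd);              /* e.g. the per-partition ha_write_row() */
  thd->options= saved;                      /* reenable_binlog(thd) */
  return error;
}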
@ -2938,7 +2958,7 @@ int ha_partition::delete_all_rows()
file= m_file;
do
{
if ((error= (*file)->delete_all_rows()))
if ((error= (*file)->ha_delete_all_rows()))
DBUG_RETURN(error);
} while (*(++file));
DBUG_RETURN(0);
@ -3980,7 +4000,7 @@ int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
{
handler *file= file= m_file[m_part_spec.start_part];
handler *file= m_file[m_part_spec.start_part];
int error;
DBUG_ENTER("ha_partition::handle_unordered_next");
@ -3999,7 +4019,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
}
else if (!(error= file->index_next(buf)))
{
if (!(file->table_flags() & HA_READ_ORDER) ||
if (!(file->ha_table_flags() & HA_READ_ORDER) ||
compare_key(end_range) <= 0)
{
m_last_part= m_part_spec.start_part;
@ -4077,7 +4097,7 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
}
if (!error)
{
if (!(file->table_flags() & HA_READ_ORDER) ||
if (!(file->ha_table_flags() & HA_READ_ORDER) ||
compare_key(end_range) <= 0)
{
m_last_part= i;
@ -4999,7 +5019,7 @@ int ha_partition::reset(void)
file= m_file;
do
{
if ((tmp= (*file)->reset()))
if ((tmp= (*file)->ha_reset()))
result= tmp;
} while (*(++file));
DBUG_RETURN(result);
@ -5641,7 +5661,7 @@ void ha_partition::release_auto_increment()
for (uint i= 0; i < m_tot_parts; i++)
{
m_file[i]->release_auto_increment();
m_file[i]->ha_release_auto_increment();
}
DBUG_VOID_RETURN;
}
@ -5677,7 +5697,7 @@ int ha_partition::disable_indexes(uint mode)
for (file= m_file; *file; file++)
{
if ((error= (*file)->disable_indexes(mode)))
if ((error= (*file)->ha_disable_indexes(mode)))
break;
}
return error;
@ -5701,7 +5721,7 @@ int ha_partition::enable_indexes(uint mode)
for (file= m_file; *file; file++)
{
if ((error= (*file)->enable_indexes(mode)))
if ((error= (*file)->ha_enable_indexes(mode)))
break;
}
return error;


@ -1468,7 +1468,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
DBUG_RETURN(ENOENT);
path= check_lowercase_names(file, path, tmp_path);
if ((error= file->delete_table(path)) && generate_warning)
if ((error= file->ha_delete_table(path)) && generate_warning)
{
/*
Because file->print_error() use my_error() to generate the error message
@ -1487,8 +1487,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
dummy_share.table_name.length= strlen(alias);
dummy_table.alias= alias;
file->table_share= &dummy_share;
file->table= &dummy_table;
file->change_table_ptr(&dummy_table, &dummy_share);
thd->push_internal_handler(&ha_delete_table_error_handler);
file->print_error(error, 0);
@ -2502,6 +2501,12 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
}
/**
Repair table: public interface.
@sa handler::repair()
*/
int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt)
{
int result;
@ -2511,6 +2516,328 @@ int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt)
}
/**
Bulk update row: public interface.
@sa handler::bulk_update_row()
*/
int
handler::ha_bulk_update_row(const uchar *old_data, uchar *new_data,
uint *dup_key_found)
{
return bulk_update_row(old_data, new_data, dup_key_found);
}
/**
Delete all rows: public interface.
@sa handler::delete_all_rows()
*/
int
handler::ha_delete_all_rows()
{
return delete_all_rows();
}
/**
Reset auto increment: public interface.
@sa handler::reset_auto_increment()
*/
int
handler::ha_reset_auto_increment(ulonglong value)
{
return reset_auto_increment(value);
}
/**
Backup table: public interface.
@sa handler::backup()
*/
int
handler::ha_backup(THD* thd, HA_CHECK_OPT* check_opt)
{
return backup(thd, check_opt);
}
/**
Restore table: public interface.
@sa handler::restore()
*/
int
handler::ha_restore(THD* thd, HA_CHECK_OPT* check_opt)
{
return restore(thd, check_opt);
}
/**
Optimize table: public interface.
@sa handler::optimize()
*/
int
handler::ha_optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
return optimize(thd, check_opt);
}
/**
Analyze table: public interface.
@sa handler::analyze()
*/
int
handler::ha_analyze(THD* thd, HA_CHECK_OPT* check_opt)
{
return analyze(thd, check_opt);
}
/**
Check and repair table: public interface.
@sa handler::check_and_repair()
*/
bool
handler::ha_check_and_repair(THD *thd)
{
return check_and_repair(thd);
}
/**
Disable indexes: public interface.
@sa handler::disable_indexes()
*/
int
handler::ha_disable_indexes(uint mode)
{
return disable_indexes(mode);
}
/**
Enable indexes: public interface.
@sa handler::enable_indexes()
*/
int
handler::ha_enable_indexes(uint mode)
{
return enable_indexes(mode);
}
/**
Discard or import tablespace: public interface.
@sa handler::discard_or_import_tablespace()
*/
int
handler::ha_discard_or_import_tablespace(my_bool discard)
{
return discard_or_import_tablespace(discard);
}
/**
Prepare for alter: public interface.
Called to prepare an *online* ALTER.
@sa handler::prepare_for_alter()
*/
void
handler::ha_prepare_for_alter()
{
prepare_for_alter();
}
/**
Rename table: public interface.
@sa handler::rename_table()
*/
int
handler::ha_rename_table(const char *from, const char *to)
{
return rename_table(from, to);
}
/**
Delete table: public interface.
@sa handler::delete_table()
*/
int
handler::ha_delete_table(const char *name)
{
return delete_table(name);
}
/**
Drop table in the engine: public interface.
@sa handler::drop_table()
*/
void
handler::ha_drop_table(const char *name)
{
return drop_table(name);
}
/**
Create a table in the engine: public interface.
@sa handler::create()
*/
int
handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info)
{
return create(name, form, info);
}
/**
Create handler files for CREATE TABLE: public interface.
@sa handler::create_handler_files()
*/
int
handler::ha_create_handler_files(const char *name, const char *old_name,
int action_flag, HA_CREATE_INFO *info)
{
return create_handler_files(name, old_name, action_flag, info);
}
/**
Change partitions: public interface.
@sa handler::change_partitions()
*/
int
handler::ha_change_partitions(HA_CREATE_INFO *create_info,
const char *path,
ulonglong *copied,
ulonglong *deleted,
const uchar *pack_frm_data,
size_t pack_frm_len)
{
return change_partitions(create_info, path, copied, deleted,
pack_frm_data, pack_frm_len);
}
/**
Drop partitions: public interface.
@sa handler::drop_partitions()
*/
int
handler::ha_drop_partitions(const char *path)
{
return drop_partitions(path);
}
/**
Rename partitions: public interface.
@sa handler::rename_partitions()
*/
int
handler::ha_rename_partitions(const char *path)
{
return rename_partitions(path);
}
/**
Optimize partitions: public interface.
@sa handler::optimize_partitions()
*/
int
handler::ha_optimize_partitions(THD *thd)
{
return optimize_partitions(thd);
}
/**
Analyze partitions: public interface.
@sa handler::analyze_partitions()
*/
int
handler::ha_analyze_partitions(THD *thd)
{
return analyze_partitions(thd);
}
/**
Check partitions: public interface.
@sa handler::check_partitions()
*/
int
handler::ha_check_partitions(THD *thd)
{
return check_partitions(thd);
}
/**
Repair partitions: public interface.
@sa handler::repair_partitions()
*/
int
handler::ha_repair_partitions(THD *thd)
{
return repair_partitions(thd);
}
/** @brief
Tell the storage engine that it is allowed to "disable transaction" in the
handler. It is a hint that ACID is not required - it is used in NDB for
@ -2654,7 +2981,7 @@ int ha_create_table(THD *thd, const char *path,
name= check_lowercase_names(table.file, share.path.str, name_buff);
error= table.file->create(name, &table, create_info);
error= table.file->ha_create(name, &table, create_info);
VOID(closefrm(&table, 0));
if (error)
{
@ -2724,7 +3051,7 @@ int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE;
check_lowercase_names(table.file, path, path);
error=table.file->create(path,&table,&create_info);
error=table.file->ha_create(path, &table, &create_info);
VOID(closefrm(&table, 1));
DBUG_RETURN(error != 0);
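
Every ha_*() method added in this hunk follows the same non-virtual-interface pattern: a public, non-virtual wrapper is the only entry point callers use, and it forwards to the virtual method a storage engine overrides, so common checks or instrumentation can later be added in one place. Reduced to its essentials (a sketch with illustrative names, not the real handler class):

/* Callers use ha_optimize(); engines override optimize(). */
class Handler_sketch
{
public:
  virtual ~Handler_sketch() {}
  int ha_optimize() { return optimize(); }              /* public wrapper */
protected:
  virtual int optimize() { return NOT_IMPLEMENTED; }    /* engine-specific */
  enum { NOT_IMPLEMENTED= -1 };                         /* stand-in constant */
};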


@ -959,46 +959,22 @@ uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map);
*/
#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1)
/*
/**
The handler class is the interface for dynamically loadable
storage engines. Do not add ifdefs and take care when adding or
changing virtual functions to avoid vtable confusion
*/
*/
class handler :public Sql_alloc
{
friend class ha_partition;
friend int ha_delete_table(THD*,handlerton*,const char*,const char*,
const char*,bool);
public:
typedef ulonglong Table_flags;
protected:
protected:
struct st_table_share *table_share; /* The table definition */
struct st_table *table; /* The current open table */
Table_flags cached_table_flags; /* Set on init() and open() */
virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
/*
rnd_init() can be called two times without rnd_end() in between
(it only makes sense if scan=1).
then the second call should prepare for the new table scan (e.g
if rnd_init allocates the cursor, second call should position it
to the start of the table, no need to deallocate and allocate it again
*/
virtual int rnd_init(bool scan) =0;
virtual int rnd_end() { return 0; }
virtual Table_flags table_flags(void) const =0;
void ha_statistic_increment(ulong SSV::*offset) const;
void **ha_data(THD *) const;
THD *ha_thd(void) const;
ha_rows estimation_rows_to_insert;
virtual void start_bulk_insert(ha_rows rows) {}
virtual int end_bulk_insert() {return 0; }
public:
handlerton *ht; /* storage engine of this handler */
uchar *ref; /* Pointer to current row */
@ -1006,13 +982,13 @@ public:
ha_statistics stats;
/* The following are for read_multi_range */
/** The following are for read_multi_range */
bool multi_range_sorted;
KEY_MULTI_RANGE *multi_range_curr;
KEY_MULTI_RANGE *multi_range_end;
HANDLER_BUFFER *multi_range_buffer;
/* The following are for read_range() */
/** The following are for read_range() */
key_range save_end_range, *end_range;
KEY_PART_INFO *range_key_part;
int key_compare_result_on_equal;
@ -1021,14 +997,14 @@ public:
uint errkey; /* Last dup key */
uint key_used_on_scan;
uint active_index;
/* Length of ref (1-8 or the clustered key length) */
/** Length of ref (1-8 or the clustered key length) */
uint ref_length;
FT_INFO *ft_handler;
enum {NONE=0, INDEX, RND} inited;
bool locked;
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond;
/*
/**
next_insert_id is the next value which should be inserted into the
auto_increment column: in a inserting-multi-row statement (like INSERT
SELECT), for the first row where the autoinc value is not specified by the
@ -1038,14 +1014,14 @@ public:
get_auto_increment().
*/
ulonglong next_insert_id;
/*
/**
insert id for the current row (*autogenerated*; if not
autogenerated, it's 0).
At first successful insertion, this variable is stored into
THD::first_successful_insert_id_in_cur_stmt.
*/
ulonglong insert_id_for_cur_row;
/*
/**
Interval returned by get_auto_increment() and being consumed by the
inserter.
*/
@ -1066,74 +1042,14 @@ public:
/* TODO: DBUG_ASSERT(inited == NONE); */
}
virtual handler *clone(MEM_ROOT *mem_root);
/* This is called after create to allow us to set up cached variables */
/** This is called after create to allow us to set up cached variables */
void init()
{
cached_table_flags= table_flags();
}
/* ha_ methods: public wrappers for private virtual API */
int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
void adjust_next_insert_id_after_explicit_value(ulonglong nr);
int update_auto_increment();
void print_keydup_error(uint key_nr, const char *msg);
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
table= table_arg;
table_share= share;
}
virtual double scan_time()
{ return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
virtual double read_time(uint index, uint ranges, ha_rows rows)
{ return rows2double(ranges+rows); }
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
bool has_transactions()
{ return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
virtual uint extra_rec_buf_length() const { return 0; }
/*
This method is used to analyse the error to see whether the error
is ignorable or not, certain handlers can have more error that are
ignorable than others. E.g. the partition handler can get inserts
into a range where there is no partition and this is an ignorable
error.
HA_ERR_FOUND_DUP_UNIQUE is a special case in MyISAM that means the
same thing as HA_ERR_FOUND_DUP_KEY but can in some cases lead to
a slightly different error message.
*/
virtual bool is_fatal_error(int error, uint flags)
{
if (!error ||
((flags & HA_CHECK_DUP_KEY) &&
(error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOUND_DUPP_UNIQUE)))
return FALSE;
return TRUE;
}
/*
Number of rows in table. It will only be called if
(table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
*/
virtual ha_rows records() { return stats.records; }
/*
Return upper bound of current number of records in the table
(max. of how many records one will retrieve when doing a full table scan)
If upper bound is not known, HA_POS_ERROR should be returned as a max
possible upper bound.
*/
virtual ha_rows estimate_rows_upper_bound()
{ return stats.records+EXTRA_RECORDS; }
/*
Get the row type from the storage engine. If this method returns
ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used.
*/
virtual enum row_type get_row_type() const { return ROW_TYPE_NOT_USED; }
virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";}
int ha_index_init(uint idx, bool sorted)
{
int result;
@ -1166,15 +1082,137 @@ public:
DBUG_RETURN(rnd_end());
}
int ha_reset();
/* this is necessary in many places, e.g. in HANDLER command */
int ha_index_or_rnd_end()
{
return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
}
Table_flags ha_table_flags() const { return cached_table_flags; }
/**
These functions represent the public interface to *users* of the
handler class, hence they are *not* virtual. For the inheritance
interface, see the (private) functions write_row(), update_row(),
and delete_row() below.
*/
int ha_external_lock(THD *thd, int lock_type);
int ha_write_row(uchar * buf);
int ha_update_row(const uchar * old_data, uchar * new_data);
int ha_delete_row(const uchar * buf);
void ha_release_auto_increment();
/*
int ha_check_for_upgrade(HA_CHECK_OPT *check_opt);
/** to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
int ha_repair(THD* thd, HA_CHECK_OPT* check_opt);
void ha_start_bulk_insert(ha_rows rows)
{
estimation_rows_to_insert= rows;
start_bulk_insert(rows);
}
int ha_end_bulk_insert()
{
estimation_rows_to_insert= 0;
return end_bulk_insert();
}
int ha_bulk_update_row(const uchar *old_data, uchar *new_data,
uint *dup_key_found);
int ha_delete_all_rows();
int ha_reset_auto_increment(ulonglong value);
int ha_backup(THD* thd, HA_CHECK_OPT* check_opt);
int ha_restore(THD* thd, HA_CHECK_OPT* check_opt);
int ha_optimize(THD* thd, HA_CHECK_OPT* check_opt);
int ha_analyze(THD* thd, HA_CHECK_OPT* check_opt);
bool ha_check_and_repair(THD *thd);
int ha_disable_indexes(uint mode);
int ha_enable_indexes(uint mode);
int ha_discard_or_import_tablespace(my_bool discard);
void ha_prepare_for_alter();
int ha_rename_table(const char *from, const char *to);
int ha_delete_table(const char *name);
void ha_drop_table(const char *name);
int ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info);
int ha_create_handler_files(const char *name, const char *old_name,
int action_flag, HA_CREATE_INFO *info);
int ha_change_partitions(HA_CREATE_INFO *create_info,
const char *path,
ulonglong *copied,
ulonglong *deleted,
const uchar *pack_frm_data,
size_t pack_frm_len);
int ha_drop_partitions(const char *path);
int ha_rename_partitions(const char *path);
int ha_optimize_partitions(THD *thd);
int ha_analyze_partitions(THD *thd);
int ha_check_partitions(THD *thd);
int ha_repair_partitions(THD *thd);
void adjust_next_insert_id_after_explicit_value(ulonglong nr);
int update_auto_increment();
void print_keydup_error(uint key_nr, const char *msg);
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
table= table_arg;
table_share= share;
}
virtual double scan_time()
{ return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
virtual double read_time(uint index, uint ranges, ha_rows rows)
{ return rows2double(ranges+rows); }
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
bool has_transactions()
{ return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
virtual uint extra_rec_buf_length() const { return 0; }
/**
This method is used to analyse the error to see whether the error
is ignorable or not, certain handlers can have more error that are
ignorable than others. E.g. the partition handler can get inserts
into a range where there is no partition and this is an ignorable
error.
HA_ERR_FOUND_DUP_UNIQUE is a special case in MyISAM that means the
same thing as HA_ERR_FOUND_DUP_KEY but can in some cases lead to
a slightly different error message.
*/
virtual bool is_fatal_error(int error, uint flags)
{
if (!error ||
((flags & HA_CHECK_DUP_KEY) &&
(error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOUND_DUPP_UNIQUE)))
return FALSE;
return TRUE;
}
/**
Number of rows in table. It will only be called if
(table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
*/
virtual ha_rows records() { return stats.records; }
/**
Return upper bound of current number of records in the table
(max. of how many records one will retrieve when doing a full table scan)
If upper bound is not known, HA_POS_ERROR should be returned as a max
possible upper bound.
*/
virtual ha_rows estimate_rows_upper_bound()
{ return stats.records+EXTRA_RECORDS; }
/**
Get the row type from the storage engine. If this method returns
ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used.
*/
virtual enum row_type get_row_type() const { return ROW_TYPE_NOT_USED; }
virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";}
/**
Signal that the table->read_set and table->write_set table maps changed
The handler is allowed to set additional bits in the above map in this
call. Normally the handler should ignore all calls until we have done
@ -1183,104 +1221,50 @@ public:
*/
virtual void column_bitmaps_signal();
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
virtual int close(void)=0;
/*
These functions represent the public interface to *users* of the
handler class, hence they are *not* virtual. For the inheritance
interface, see the (private) functions write_row(), update_row(),
and delete_row() below.
*/
int ha_external_lock(THD *thd, int lock_type);
int ha_write_row(uchar * buf);
int ha_update_row(const uchar * old_data, uchar * new_data);
int ha_delete_row(const uchar * buf);
/*
SYNOPSIS
start_bulk_update()
RETURN
0 Bulk update used by handler
1 Bulk update not used, normal operation used
/**
@retval 0 Bulk update used by handler
@retval 1 Bulk update not used, normal operation used
*/
virtual bool start_bulk_update() { return 1; }
/*
SYNOPSIS
start_bulk_delete()
RETURN
0 Bulk delete used by handler
1 Bulk delete not used, normal operation used
/**
@retval 0 Bulk delete used by handler
@retval 1 Bulk delete not used, normal operation used
*/
virtual bool start_bulk_delete() { return 1; }
/*
SYNOPSIS
This method is similar to update_row, however the handler doesn't need
to execute the updates at this point in time. The handler can be certain
that another call to bulk_update_row will occur OR a call to
exec_bulk_update before the set of updates in this query is concluded.
bulk_update_row()
old_data Old record
new_data New record
dup_key_found Number of duplicate keys found
RETURN
0 Bulk delete used by handler
1 Bulk delete not used, normal operation used
*/
virtual int bulk_update_row(const uchar *old_data, uchar *new_data,
uint *dup_key_found)
{
DBUG_ASSERT(FALSE);
return HA_ERR_WRONG_COMMAND;
}
/*
SYNOPSIS
/**
After this call all outstanding updates must be performed. The number
of duplicate key errors are reported in the duplicate key parameter.
It is allowed to continue to the batched update after this call, the
handler has to wait until end_bulk_update with changing state.
exec_bulk_update()
dup_key_found Number of duplicate keys found
RETURN
0 Success
>0 Error code
@param dup_key_found Number of duplicate keys found
@retval 0 Success
@retval >0 Error code
*/
virtual int exec_bulk_update(uint *dup_key_found)
{
DBUG_ASSERT(FALSE);
return HA_ERR_WRONG_COMMAND;
}
/*
SYNOPSIS
/**
Perform any needed clean-up, no outstanding updates are there at the
moment.
end_bulk_update()
RETURN
Nothing
*/
virtual void end_bulk_update() { return; }
/*
SYNOPSIS
/**
Execute all outstanding deletes and close down the bulk delete.
end_bulk_delete()
RETURN
0 Success
>0 Error code
@retval 0 Success
@retval >0 Error code
*/
virtual int end_bulk_delete()
{
DBUG_ASSERT(FALSE);
return HA_ERR_WRONG_COMMAND;
}
private:
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
enum ha_rkey_function find_flag)
{ return HA_ERR_WRONG_COMMAND; }
public:
/**
@brief
Positions an index cursor to the index specified in the handle. Fetches the
@ -1312,10 +1296,6 @@ public:
virtual int index_last(uchar * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_next_same(uchar *buf, const uchar *key, uint keylen);
private:
virtual int index_read_last(uchar * buf, const uchar * key, uint key_len)
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
public:
/**
@brief
The following functions works like index_read, but it find the last
@ -1343,11 +1323,11 @@ public:
virtual int ft_read(uchar *buf) { return HA_ERR_WRONG_COMMAND; }
virtual int rnd_next(uchar *buf)=0;
virtual int rnd_pos(uchar * buf, uchar *pos)=0;
/*
one has to use this method when to find
/**
One has to use this method when to find
random position by record as the plain
position() call doesn't work for some
handlers for random position
handlers for random position.
*/
virtual int rnd_pos_by_record(uchar *record)
{
@ -1355,9 +1335,9 @@ public:
return rnd_pos(record, ref);
}
virtual int read_first_row(uchar *buf, uint primary_key);
/*
/**
The following function is only needed for tables that may be temporary
tables during joins
tables during joins.
*/
virtual int restart_rnd_next(uchar *buf, uchar *pos)
{ return HA_ERR_WRONG_COMMAND; }
@ -1374,13 +1354,7 @@ public:
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
{ return extra(operation); }
/*
Reset state of file to after 'open'
This function is called after every statement for all tables used
by that statement.
*/
virtual int reset() { return 0; }
/*
/**
In an UPDATE or DELETE, if the row under the cursor was locked by another
transaction, and the engine used an optimistic read of the last
committed row value under the cursor, then the engine returns 1 from this
@ -1393,7 +1367,7 @@ public:
engine that the next read will be a locking re-read of the row.
*/
virtual bool was_semi_consistent_read() { return 0; }
/*
/**
Tell the engine whether it should avoid unnecessary lock waits.
If yes, in an UPDATE or DELETE, if the row under the cursor was locked
by another transaction, the engine may try an optimistic read of
@ -1402,22 +1376,10 @@ public:
virtual void try_semi_consistent_read(bool) {}
virtual void unlock_row() {}
virtual int start_stmt(THD *thd, thr_lock_type lock_type) {return 0;}
/*
This is called to delete all rows in a table
If the handler don't support this, then this function will
return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
by one.
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values);
private:
virtual void release_auto_increment() { return; };
public:
void ha_release_auto_increment();
void set_next_insert_id(ulonglong id)
{
DBUG_PRINT("info",("auto_increment: next value %lu", (ulong)id));
@ -1438,91 +1400,30 @@ public:
next_insert_id= (prev_insert_id > 0) ? prev_insert_id :
insert_id_for_cur_row;
}
/*
Reset the auto-increment counter to the given value, i.e. the next row
inserted will get the given value. This is called e.g. after TRUNCATE
is emulated by doing a 'DELETE FROM t'. HA_ERR_WRONG_COMMAND is
returned by storage engines that don't support this operation.
*/
virtual int reset_auto_increment(ulonglong value)
{ return HA_ERR_WRONG_COMMAND; }
virtual void update_create_info(HA_CREATE_INFO *create_info) {}
protected:
/* to be implemented in handlers */
/* admin commands - called from mysql_admin_table */
virtual int check(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/*
in these two methods check_opt can be modified
to specify CHECK option to use to call check()
upon the table
*/
virtual int check_for_upgrade(HA_CHECK_OPT *check_opt)
{ return 0; }
public:
int ha_check_for_upgrade(HA_CHECK_OPT *check_opt);
int check_old_types();
/* to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
virtual int backup(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/*
restore assumes .frm file must exist, and that generate_table() has been
called; It will just copy the data file and run repair.
*/
virtual int restore(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
protected:
virtual int repair(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
public:
int ha_repair(THD* thd, HA_CHECK_OPT* check_opt);
virtual int optimize(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/* end of the list of admin commands */
virtual bool check_and_repair(THD *thd) { return HA_ERR_WRONG_COMMAND; }
virtual int dump(THD* thd, int fd = -1) { return HA_ERR_WRONG_COMMAND; }
virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int indexes_are_disabled(void) {return 0;}
void ha_start_bulk_insert(ha_rows rows)
{
estimation_rows_to_insert= rows;
start_bulk_insert(rows);
}
int ha_end_bulk_insert()
{
estimation_rows_to_insert= 0;
return end_bulk_insert();
}
virtual int discard_or_import_tablespace(my_bool discard)
{return HA_ERR_WRONG_COMMAND;}
virtual int net_read_dump(NET* net) { return HA_ERR_WRONG_COMMAND; }
virtual char *update_table_comment(const char * comment)
{ return (char*) comment;}
virtual void append_create_info(String *packet) {}
/*
SYNOPSIS
is_fk_defined_on_table_or_index()
index Index to check if foreign key uses it
RETURN VALUE
TRUE Foreign key defined on table or index
FALSE No foreign key defined
DESCRIPTION
If index == MAX_KEY then a check for table is made and if index <
MAX_KEY then a check is made if the table has foreign keys and if
a foreign key uses this index (and thus the index cannot be dropped).
/**
If index == MAX_KEY, the check is made for the table as a whole; if index <
MAX_KEY, the check is whether the table has foreign keys and whether
a foreign key uses this index (and thus the index cannot be dropped).
@param index Index to check if foreign key uses it
@retval TRUE Foreign key defined on table or index
@retval FALSE No foreign key defined
*/
virtual bool is_fk_defined_on_table_or_index(uint index)
{ return FALSE; }
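/*
  Illustration (hypothetical engine, sketch only): an engine with foreign
  key support would typically override this along the lines of

    bool ha_example::is_fk_defined_on_table_or_index(uint index)
    {
      if (index == MAX_KEY)           // table-level check
        return fk_count > 0;          // 'fk_count' is an assumed member
      return fk_uses_index(index);    // assumed helper: does any FK use it?
    }

  so that DROP INDEX can be refused while a foreign key still needs the index.
*/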
@ -1530,18 +1431,18 @@ public:
{ return(NULL);} /* gets foreign key create string from InnoDB */
virtual char* get_tablespace_name(THD *thd, char *name, uint name_len)
{ return(NULL);} /* gets tablespace name from handler */
/* used in ALTER TABLE; 1 if changing storage engine is allowed */
/** used in ALTER TABLE; 1 if changing storage engine is allowed */
virtual bool can_switch_engines() { return 1; }
/* used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */
/** used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */
virtual int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
{ return 0; }
virtual uint referenced_by_foreign_key() { return 0;}
virtual void init_table_handle_for_HANDLER()
{ return; } /* prepare InnoDB for HANDLER */
virtual void free_foreign_key_create_info(char* str) {}
/* The following can be called without an open handler */
/** The following can be called without an open handler */
virtual const char *table_type() const =0;
/*
/**
If frm_error() is called then we will use this to find out what file
extensions exist for the storage engine. This is also used by the default
rename_table and delete_table methods in handler.cc.
@ -1566,7 +1467,6 @@ public:
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
virtual void prepare_for_alter() { return; }
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
{ return (HA_ERR_WRONG_COMMAND); }
virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
@ -1598,44 +1498,12 @@ public:
virtual bool is_crashed() const { return 0; }
virtual bool auto_repair() const { return 0; }
/*
default rename_table() and delete_table() rename/delete files with a
given name and extensions from bas_ext()
*/
virtual int rename_table(const char *from, const char *to);
virtual int delete_table(const char *name);
virtual void drop_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
#define CHF_CREATE_FLAG 0
#define CHF_DELETE_FLAG 1
#define CHF_RENAME_FLAG 2
#define CHF_INDEX_FLAG 3
virtual int create_handler_files(const char *name, const char *old_name,
int action_flag, HA_CREATE_INFO *info)
{ return FALSE; }
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
ulonglong *copied,
ulonglong *deleted,
const uchar *pack_frm_data,
size_t pack_frm_len)
{ return HA_ERR_WRONG_COMMAND; }
virtual int drop_partitions(const char *path)
{ return HA_ERR_WRONG_COMMAND; }
virtual int rename_partitions(const char *path)
{ return HA_ERR_WRONG_COMMAND; }
virtual int optimize_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
virtual int analyze_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
virtual int check_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
virtual int repair_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
/**
@note lock_count() can return > 1 if the table is MERGE or partitioned.
@ -1658,7 +1526,7 @@ public:
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)=0;
/* Type of table for caching query */
/** Type of table for caching query */
virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; }
@ -1704,56 +1572,94 @@ public:
/*
RETURN
true Primary key (if there is one) is clustered key covering all fields
false otherwise
@retval TRUE Primary key (if there is one) is clustered
key covering all fields
@retval FALSE otherwise
*/
virtual bool primary_key_is_clustered() { return FALSE; }
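/*
  Illustration (sketch only): an engine whose primary key is the clustered,
  covering key would simply override this as

    bool ha_example::primary_key_is_clustered() { return TRUE; }

  telling the optimizer that reading a row via the primary key also reads
  every column.
*/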
virtual int cmp_ref(const uchar *ref1, const uchar *ref2)
{
return memcmp(ref1, ref2, ref_length);
}
/*
Condition pushdown to storage engines
*/
/*
/**
Push condition down to the table handler.
SYNOPSIS
cond_push()
cond Condition to be pushed. The condition tree must not be
modified by the caller.
RETURN
@param cond Condition to be pushed. The condition tree must not be
modified by the caller.
@return
The 'remainder' condition that caller must use to filter out records.
NULL means the handler will not return rows that do not match the
passed condition.
NOTES
@note
The pushed conditions form a stack (from which one can remove the
last pushed condition using cond_pop).
The table handler filters out rows using (pushed_cond1 AND pushed_cond2
AND ... AND pushed_condN)
or a less restrictive condition, depending on the handler's capabilities.
A call to handler->ha_reset() empties the condition stack.
Calls to rnd_init/rnd_end, index_init/index_end etc. do not affect the
condition stack.
*/
virtual const COND *cond_push(const COND *cond) { return cond; };
/*
/**
Pop the top condition from the condition stack of the handler instance.
SYNOPSIS
cond_pop()
Pops the top if condition stack, if stack is not empty
Pops the top of the condition stack, if the stack is not empty.
*/
virtual void cond_pop() { return; };
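/*
  Illustration (hypothetical engine, sketch only): an engine honouring the
  stack semantics described above might keep the pushed conditions in a
  simple engine-side container:

    const COND *ha_example::cond_push(const COND *cond)
    {
      pushed_conds.push_back(cond);   // assumed engine-side container
      return NULL;                    // NULL: the engine filters fully
    }

    void ha_example::cond_pop()
    {
      if (!pushed_conds.empty())
        pushed_conds.pop_back();
    }

  Returning 'cond' unchanged, as the default above does, tells the server
  to re-check every row itself.
*/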
virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{ return COMPATIBLE_DATA_NO; }
/* These are only called from sql_select for internal temporary tables */
/**
use_hidden_primary_key() is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key
*/
virtual void use_hidden_primary_key();
protected:
/* Service methods for use by storage engines. */
void ha_statistic_increment(ulong SSV::*offset) const;
void **ha_data(THD *) const;
THD *ha_thd(void) const;
/**
Default rename_table() and delete_table() rename/delete files with a
given name and extensions from bas_ext().
These methods can be overridden, but their default implementations
provide useful functionality.
*/
virtual int rename_table(const char *from, const char *to);
virtual int delete_table(const char *name);
private:
/*
Low-level primitives for storage engines. These should be
overridden by the storage engine class. To call these methods, use
the corresponding 'ha_*' method above.
*/
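/*
  Calling convention in a sketch (hypothetical engine): an engine subclass
  overrides these private virtuals, while server code is expected to call
  only the public ha_* wrappers declared above, e.g.

    class ha_example: public handler
    {
    private:
      int open(const char *name, int mode, uint test_if_locked);
      int rnd_init(bool scan);
      int write_row(uchar *buf);
      // ...
    };

    // callers:  table->file->ha_write_row(buf);  never write_row(buf)

  The wrapper bodies themselves live in handler.cc.
*/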
virtual int open(const char *name, int mode, uint test_if_locked)=0;
virtual int index_init(uint idx, bool sorted) { active_index= idx; return 0; }
virtual int index_end() { active_index= MAX_KEY; return 0; }
/**
rnd_init() can be called two times without rnd_end() in between
(it only makes sense if scan=1).
Then the second call should prepare for the new table scan (e.g.
if rnd_init allocates the cursor, the second call should position it
at the start of the table; there is no need to deallocate and allocate it again).
*/
virtual int rnd_init(bool scan)= 0;
virtual int rnd_end() { return 0; }
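/*
  Sketch (hypothetical engine): honouring the double-rnd_init() contract
  described above usually means "allocate once, reposition on repeat":

    int ha_example::rnd_init(bool scan)
    {
      if (!cursor)                    // first call: create the scan cursor
        cursor= acquire_cursor();     // assumed engine helper
      else if (scan)
        cursor->seek_to_start();      // repeated call: just rewind
      return cursor ? 0 : HA_ERR_OUT_OF_MEM;
    }
*/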
virtual int write_row(uchar *buf __attribute__((unused)))
{
return HA_ERR_WRONG_COMMAND;
@ -1769,20 +1675,13 @@ public:
{
return HA_ERR_WRONG_COMMAND;
}
/*
use_hidden_primary_key() is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key
/**
Reset state of file to after 'open'.
This function is called after every statement for all tables used
by that statement.
*/
virtual void use_hidden_primary_key();
private:
/*
Row-level primitives for storage engines. These should be
overridden by the storage engine class. To call these methods, use
the corresponding 'ha_*' method above.
*/
virtual int reset() { return 0; }
virtual Table_flags table_flags(void) const= 0;
/**
Is not invoked for non-transactional temporary tables.
@ -1810,8 +1709,109 @@ private:
{
return 0;
}
virtual void release_auto_increment() { return; };
/** admin commands - called from mysql_admin_table */
virtual int check_for_upgrade(HA_CHECK_OPT *check_opt)
{ return 0; }
virtual int check(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/**
In this method check_opt can be modified
to specify the CHECK option to use when calling check()
upon the table.
*/
virtual int repair(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual void start_bulk_insert(ha_rows rows) {}
virtual int end_bulk_insert() { return 0; }
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
enum ha_rkey_function find_flag)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_read_last(uchar * buf, const uchar * key, uint key_len)
{ return (my_errno= HA_ERR_WRONG_COMMAND); }
/**
This method is similar to update_row; however, the handler doesn't need
to execute the updates at this point in time. The handler can be certain
that another call to bulk_update_row will occur OR a call to
exec_bulk_update before the set of updates in this query is concluded.
@param old_data Old record
@param new_data New record
@param dup_key_found Number of duplicate keys found
@retval 0 Bulk update used by handler
@retval 1 Bulk update not used, normal operation used
*/
virtual int bulk_update_row(const uchar *old_data, uchar *new_data,
uint *dup_key_found)
{
DBUG_ASSERT(FALSE);
return HA_ERR_WRONG_COMMAND;
}
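/*
  Sketch (hypothetical engine): a handler implementing the deferred
  semantics above could queue each change and apply the whole batch when
  exec_bulk_update() is eventually invoked:

    int ha_example::bulk_update_row(const uchar *old_data, uchar *new_data,
                                    uint *dup_key_found)
    {
      *dup_key_found= 0;
      pending_updates.push_back(make_update(old_data, new_data)); // assumed
      return 0;                       // 0: the bulk path was used
    }
*/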
/**
This is called to delete all rows in a table
If the handler doesn't support this, then this function will
return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
by one.
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
/**
Reset the auto-increment counter to the given value, i.e. the next row
inserted will get the given value. This is called e.g. after TRUNCATE
is emulated by doing a 'DELETE FROM t'. HA_ERR_WRONG_COMMAND is
returned by storage engines that don't support this operation.
*/
virtual int reset_auto_increment(ulonglong value)
{ return HA_ERR_WRONG_COMMAND; }
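/*
  Sketch (hypothetical engine): supporting the emulated TRUNCATE means
  remembering 'value' as the next counter value to hand out:

    int ha_example::reset_auto_increment(ulonglong value)
    {
      lock_auto_increment();          // assumed engine-side mutex helpers
      next_auto_increment= value;
      unlock_auto_increment();
      return 0;                       // instead of HA_ERR_WRONG_COMMAND
    }
*/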
virtual int backup(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/**
Restore assumes the .frm file exists and that generate_table() has been
called; it will just copy the data file and run repair.
*/
virtual int restore(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int optimize(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual bool check_and_repair(THD *thd) { return HA_ERR_WRONG_COMMAND; }
virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int discard_or_import_tablespace(my_bool discard)
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
virtual void prepare_for_alter() { return; }
virtual void drop_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
virtual int create_handler_files(const char *name, const char *old_name,
int action_flag, HA_CREATE_INFO *info)
{ return FALSE; }
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
ulonglong *copied,
ulonglong *deleted,
const uchar *pack_frm_data,
size_t pack_frm_len)
{ return HA_ERR_WRONG_COMMAND; }
virtual int drop_partitions(const char *path)
{ return HA_ERR_WRONG_COMMAND; }
virtual int rename_partitions(const char *path)
{ return HA_ERR_WRONG_COMMAND; }
virtual int optimize_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
virtual int analyze_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
virtual int check_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
virtual int repair_partitions(THD *thd)
{ return HA_ERR_WRONG_COMMAND; }
};
/* Some extern variables used with handlers */
extern const char *ha_row_type[];

View file

@ -2603,7 +2603,7 @@ void Item_sum_count_distinct::clear()
else if (table)
{
table->file->extra(HA_EXTRA_NO_CACHE);
table->file->delete_all_rows();
table->file->ha_delete_all_rows();
table->file->extra(HA_EXTRA_WRITE_CACHE);
}
}

View file

@ -607,7 +607,10 @@ char *opt_logname, *opt_slow_logname;
/* Static variables */
static bool kill_in_progress, segfaulted;
static my_bool opt_do_pstack, opt_bootstrap, opt_myisam_log;
#ifdef HAVE_STACK_TRACE_ON_SEGV
static my_bool opt_do_pstack;
#endif /* HAVE_STACK_TRACE_ON_SEGV */
static my_bool opt_bootstrap, opt_myisam_log;
static int cleanup_done;
static ulong opt_specialflag, opt_myisam_block_size;
static char *opt_update_logname, *opt_binlog_index_name;
@ -1375,7 +1378,6 @@ static void set_ports()
char *env;
if (!mysqld_port && !opt_disable_networking)
{ // Get port if not from commandline
struct servent *serv_ptr;
mysqld_port= MYSQL_PORT;
/*
@ -1389,6 +1391,7 @@ static void set_ports()
*/
#if MYSQL_PORT_DEFAULT == 0
struct servent *serv_ptr;
if ((serv_ptr= getservbyname("mysql", "tcp")))
mysqld_port= ntohs((u_short) serv_ptr->s_port); /* purecov: inspected */
#endif
@ -2763,6 +2766,154 @@ static bool init_global_datetime_format(timestamp_type format_type,
return 0;
}
SHOW_VAR com_status_vars[]= {
{"admin_commands", (char*) offsetof(STATUS_VAR, com_other), SHOW_LONG_STATUS},
{"assign_to_keycache", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ASSIGN_TO_KEYCACHE]), SHOW_LONG_STATUS},
{"alter_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB]), SHOW_LONG_STATUS},
{"alter_db_upgrade", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB_UPGRADE]), SHOW_LONG_STATUS},
{"alter_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_EVENT]), SHOW_LONG_STATUS},
{"alter_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_FUNCTION]), SHOW_LONG_STATUS},
{"alter_procedure", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_PROCEDURE]), SHOW_LONG_STATUS},
{"alter_server", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_SERVER]), SHOW_LONG_STATUS},
{"alter_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLE]), SHOW_LONG_STATUS},
{"alter_tablespace", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLESPACE]), SHOW_LONG_STATUS},
{"analyze", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ANALYZE]), SHOW_LONG_STATUS},
{"backup_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BACKUP_TABLE]), SHOW_LONG_STATUS},
{"begin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BEGIN]), SHOW_LONG_STATUS},
{"binlog", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BINLOG_BASE64_EVENT]), SHOW_LONG_STATUS},
{"call_procedure", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CALL]), SHOW_LONG_STATUS},
{"change_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_DB]), SHOW_LONG_STATUS},
{"change_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_MASTER]), SHOW_LONG_STATUS},
{"check", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECK]), SHOW_LONG_STATUS},
{"checksum", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECKSUM]), SHOW_LONG_STATUS},
{"commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_COMMIT]), SHOW_LONG_STATUS},
{"create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_DB]), SHOW_LONG_STATUS},
{"create_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_EVENT]), SHOW_LONG_STATUS},
{"create_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_SPFUNCTION]), SHOW_LONG_STATUS},
{"create_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_INDEX]), SHOW_LONG_STATUS},
{"create_procedure", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_PROCEDURE]), SHOW_LONG_STATUS},
{"create_server", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_SERVER]), SHOW_LONG_STATUS},
{"create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TABLE]), SHOW_LONG_STATUS},
{"create_trigger", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TRIGGER]), SHOW_LONG_STATUS},
{"create_udf", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_FUNCTION]), SHOW_LONG_STATUS},
{"create_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_USER]), SHOW_LONG_STATUS},
{"create_view", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_VIEW]), SHOW_LONG_STATUS},
{"dealloc_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DEALLOCATE_PREPARE]), SHOW_LONG_STATUS},
{"delete", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE]), SHOW_LONG_STATUS},
{"delete_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE_MULTI]), SHOW_LONG_STATUS},
{"do", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DO]), SHOW_LONG_STATUS},
{"drop_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_DB]), SHOW_LONG_STATUS},
{"drop_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_EVENT]), SHOW_LONG_STATUS},
{"drop_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_FUNCTION]), SHOW_LONG_STATUS},
{"drop_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_INDEX]), SHOW_LONG_STATUS},
{"drop_procedure", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_PROCEDURE]), SHOW_LONG_STATUS},
{"drop_server", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_SERVER]), SHOW_LONG_STATUS},
{"drop_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TABLE]), SHOW_LONG_STATUS},
{"drop_trigger", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TRIGGER]), SHOW_LONG_STATUS},
{"drop_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_USER]), SHOW_LONG_STATUS},
{"drop_view", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_VIEW]), SHOW_LONG_STATUS},
{"empty_query", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EMPTY_QUERY]), SHOW_LONG_STATUS},
{"execute_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS},
{"flush", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS},
{"grant", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS},
{"ha_close", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS},
{"ha_open", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS},
{"ha_read", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_READ]), SHOW_LONG_STATUS},
{"help", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HELP]), SHOW_LONG_STATUS},
{"insert", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT]), SHOW_LONG_STATUS},
{"insert_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT_SELECT]), SHOW_LONG_STATUS},
{"install_plugin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSTALL_PLUGIN]), SHOW_LONG_STATUS},
{"kill", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_KILL]), SHOW_LONG_STATUS},
{"load", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD]), SHOW_LONG_STATUS},
{"load_master_data", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_DATA]), SHOW_LONG_STATUS},
{"load_master_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_TABLE]), SHOW_LONG_STATUS},
{"lock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOCK_TABLES]), SHOW_LONG_STATUS},
{"optimize", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_OPTIMIZE]), SHOW_LONG_STATUS},
{"preload_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PRELOAD_KEYS]), SHOW_LONG_STATUS},
{"prepare_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PREPARE]), SHOW_LONG_STATUS},
{"purge", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE]), SHOW_LONG_STATUS},
{"purge_before_date", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE_BEFORE]), SHOW_LONG_STATUS},
{"release_savepoint", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RELEASE_SAVEPOINT]), SHOW_LONG_STATUS},
{"rename_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_TABLE]), SHOW_LONG_STATUS},
{"rename_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_USER]), SHOW_LONG_STATUS},
{"repair", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPAIR]), SHOW_LONG_STATUS},
{"replace", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE]), SHOW_LONG_STATUS},
{"replace_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE_SELECT]), SHOW_LONG_STATUS},
{"reset", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESET]), SHOW_LONG_STATUS},
{"restore_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESTORE_TABLE]), SHOW_LONG_STATUS},
{"revoke", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE]), SHOW_LONG_STATUS},
{"revoke_all", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE_ALL]), SHOW_LONG_STATUS},
{"rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ROLLBACK]), SHOW_LONG_STATUS},
{"rollback_to_savepoint",(char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ROLLBACK_TO_SAVEPOINT]), SHOW_LONG_STATUS},
{"savepoint", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SAVEPOINT]), SHOW_LONG_STATUS},
{"select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SELECT]), SHOW_LONG_STATUS},
{"set_option", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SET_OPTION]), SHOW_LONG_STATUS},
{"show_authors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_AUTHORS]), SHOW_LONG_STATUS},
{"show_binlog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS},
{"show_binlogs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS},
{"show_charsets", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS},
{"show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS},
{"show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS},
{"show_contributors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CONTRIBUTORS]), SHOW_LONG_STATUS},
{"show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS},
{"show_create_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_EVENT]), SHOW_LONG_STATUS},
{"show_create_func", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_FUNC]), SHOW_LONG_STATUS},
{"show_create_proc", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_PROC]), SHOW_LONG_STATUS},
{"show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS},
{"show_create_trigger", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_TRIGGER]), SHOW_LONG_STATUS},
{"show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS},
{"show_engine_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_LOGS]), SHOW_LONG_STATUS},
{"show_engine_mutex", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_MUTEX]), SHOW_LONG_STATUS},
{"show_engine_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_STATUS]), SHOW_LONG_STATUS},
{"show_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_EVENTS]), SHOW_LONG_STATUS},
{"show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS},
{"show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS},
#ifndef DBUG_OFF
{"show_function_code", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FUNC_CODE]), SHOW_LONG_STATUS},
#endif
{"show_function_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS_FUNC]), SHOW_LONG_STATUS},
{"show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS},
{"show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS},
{"show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS},
{"show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS},
{"show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS},
{"show_plugins", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PLUGINS]), SHOW_LONG_STATUS},
{"show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS},
#ifndef DBUG_OFF
{"show_procedure_code", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROC_CODE]), SHOW_LONG_STATUS},
#endif
{"show_procedure_status",(char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS_PROC]), SHOW_LONG_STATUS},
{"show_processlist", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS},
{"show_slave_hosts", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS},
{"show_slave_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS},
{"show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS},
{"show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS},
{"show_table_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLE_STATUS]), SHOW_LONG_STATUS},
{"show_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS},
{"show_triggers", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TRIGGERS]), SHOW_LONG_STATUS},
{"show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS},
{"show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS},
{"slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
{"slave_stop", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS},
{"stmt_close", (char*) offsetof(STATUS_VAR, com_stmt_close), SHOW_LONG_STATUS},
{"stmt_execute", (char*) offsetof(STATUS_VAR, com_stmt_execute), SHOW_LONG_STATUS},
{"stmt_fetch", (char*) offsetof(STATUS_VAR, com_stmt_fetch), SHOW_LONG_STATUS},
{"stmt_prepare", (char*) offsetof(STATUS_VAR, com_stmt_prepare), SHOW_LONG_STATUS},
{"stmt_reset", (char*) offsetof(STATUS_VAR, com_stmt_reset), SHOW_LONG_STATUS},
{"stmt_send_long_data", (char*) offsetof(STATUS_VAR, com_stmt_send_long_data), SHOW_LONG_STATUS},
{"truncate", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_TRUNCATE]), SHOW_LONG_STATUS},
{"uninstall_plugin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNINSTALL_PLUGIN]), SHOW_LONG_STATUS},
{"unlock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNLOCK_TABLES]), SHOW_LONG_STATUS},
{"update", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE]), SHOW_LONG_STATUS},
{"update_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE_MULTI]), SHOW_LONG_STATUS},
{"xa_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_COMMIT]),SHOW_LONG_STATUS},
{"xa_end", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_END]),SHOW_LONG_STATUS},
{"xa_prepare", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_PREPARE]),SHOW_LONG_STATUS},
{"xa_recover", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_RECOVER]),SHOW_LONG_STATUS},
{"xa_rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_ROLLBACK]),SHOW_LONG_STATUS},
{"xa_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_START]),SHOW_LONG_STATUS},
{NullS, NullS, SHOW_LONG}
};
static int init_common_variables(const char *conf_file_name, int argc,
char **argv, const char **groups)
@ -2776,7 +2927,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
server_start_time= my_time(0);
rpl_filter= new Rpl_filter;
binlog_filter= new Rpl_filter;
if (!rpl_filter || !binlog_filter)
if (!rpl_filter || !binlog_filter)
{
sql_perror("Could not allocate replication and binlog filters");
exit(1);
@ -2796,13 +2947,13 @@ static int init_common_variables(const char *conf_file_name, int argc,
}
#endif
/*
We set SYSTEM time zone as reasonable default and
We set SYSTEM time zone as reasonable default and
also for failure of my_tz_init() and bootstrap mode.
If user explicitly set time zone with --default-time-zone
option we will change this value in my_tz_init().
*/
global_system_variables.time_zone= my_tz_SYSTEM;
/*
Init mutexes for the global MYSQL_BIN_LOG objects.
As safe_mutex depends on what MY_INIT() does, we can't init the mutexes of
@ -2831,6 +2982,31 @@ static int init_common_variables(const char *conf_file_name, int argc,
if (add_status_vars(status_vars))
return 1; // an error was already reported
#ifndef DBUG_OFF
/*
We have a few debug-only commands in com_status_vars, only visible in debug
builds. For simplicity we enable the assert only in debug builds.
There are 7 Com_ variables which don't have corresponding SQLCOM_ values:
(TODO strictly speaking they shouldn't be here, should not have Com_ prefix
that is. Perhaps Stmt_ ? Comstmt_ ? Prepstmt_ ?)
Com_admin_commands => com_other
Com_stmt_close => com_stmt_close
Com_stmt_execute => com_stmt_execute
Com_stmt_fetch => com_stmt_fetch
Com_stmt_prepare => com_stmt_prepare
Com_stmt_reset => com_stmt_reset
Com_stmt_send_long_data => com_stmt_send_long_data
With this correction the number of Com_ variables (number of elements in
the array, excluding the last element - terminator) must match the number
of SQLCOM_ constants.
*/
compile_time_assert(sizeof(com_status_vars)/sizeof(com_status_vars[0]) - 1 ==
SQLCOM_END + 7);
#endif
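/*
  For reference, a compile-time assertion of this kind can be built from
  nothing but a negative-sized array type; the macro actually used above is
  defined elsewhere in the tree, so treat this as a generic sketch:

    #define example_compile_time_assert(expr) \
      do { typedef char assert_failed[(expr) ? 1 : -1]; } while (0)

  If the element count of com_status_vars drifts out of sync with the
  SQLCOM_ enum, the array type gets a negative size and compilation stops.
*/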
load_defaults(conf_file_name, groups, &argc, &argv);
defaults_argv=argv;
defaults_argc=argc;
@ -2887,7 +3063,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
*/
table_cache_size= (ulong) min(max((files-10-max_connections)/2,
TABLE_OPEN_CACHE_MIN),
table_cache_size);
table_cache_size);
DBUG_PRINT("warning",
("Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld",
files, max_connections, table_cache_size));
@ -5312,9 +5488,11 @@ struct my_option my_long_options[] =
(uchar**) &opt_enable_named_pipe, (uchar**) &opt_enable_named_pipe, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
#ifdef HAVE_STACK_TRACE_ON_SEGV
{"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure.",
(uchar**) &opt_do_pstack, (uchar**) &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0,
0, 0, 0, 0},
#endif /* HAVE_STACK_TRACE_ON_SEGV */
{"engine-condition-pushdown",
OPT_ENGINE_CONDITION_PUSHDOWN,
"Push supported query conditions to the storage engine.",
@ -6796,115 +6974,7 @@ SHOW_VAR status_vars[]= {
{"Binlog_cache_use", (char*) &binlog_cache_use, SHOW_LONG},
{"Bytes_received", (char*) offsetof(STATUS_VAR, bytes_received), SHOW_LONGLONG_STATUS},
{"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONGLONG_STATUS},
{"Com_admin_commands", (char*) offsetof(STATUS_VAR, com_other), SHOW_LONG_STATUS},
{"Com_alter_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB]), SHOW_LONG_STATUS},
{"Com_alter_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_EVENT]), SHOW_LONG_STATUS},
{"Com_alter_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLE]), SHOW_LONG_STATUS},
{"Com_analyze", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ANALYZE]), SHOW_LONG_STATUS},
{"Com_backup_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BACKUP_TABLE]), SHOW_LONG_STATUS},
{"Com_begin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BEGIN]), SHOW_LONG_STATUS},
{"Com_call_procedure", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CALL]), SHOW_LONG_STATUS},
{"Com_change_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_DB]), SHOW_LONG_STATUS},
{"Com_change_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_MASTER]), SHOW_LONG_STATUS},
{"Com_check", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECK]), SHOW_LONG_STATUS},
{"Com_checksum", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECKSUM]), SHOW_LONG_STATUS},
{"Com_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_COMMIT]), SHOW_LONG_STATUS},
{"Com_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_DB]), SHOW_LONG_STATUS},
{"Com_create_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_EVENT]), SHOW_LONG_STATUS},
{"Com_create_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_FUNCTION]), SHOW_LONG_STATUS},
{"Com_create_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_INDEX]), SHOW_LONG_STATUS},
{"Com_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TABLE]), SHOW_LONG_STATUS},
{"Com_create_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_USER]), SHOW_LONG_STATUS},
{"Com_dealloc_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DEALLOCATE_PREPARE]), SHOW_LONG_STATUS},
{"Com_delete", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE]), SHOW_LONG_STATUS},
{"Com_delete_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE_MULTI]), SHOW_LONG_STATUS},
{"Com_do", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DO]), SHOW_LONG_STATUS},
{"Com_drop_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_DB]), SHOW_LONG_STATUS},
{"Com_drop_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_EVENT]), SHOW_LONG_STATUS},
{"Com_drop_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_FUNCTION]), SHOW_LONG_STATUS},
{"Com_drop_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_INDEX]), SHOW_LONG_STATUS},
{"Com_drop_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TABLE]), SHOW_LONG_STATUS},
{"Com_drop_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_USER]), SHOW_LONG_STATUS},
{"Com_execute_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS},
{"Com_flush", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS},
{"Com_grant", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS},
{"Com_ha_close", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS},
{"Com_ha_open", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS},
{"Com_ha_read", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_READ]), SHOW_LONG_STATUS},
{"Com_help", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HELP]), SHOW_LONG_STATUS},
{"Com_insert", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT]), SHOW_LONG_STATUS},
{"Com_insert_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT_SELECT]), SHOW_LONG_STATUS},
{"Com_kill", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_KILL]), SHOW_LONG_STATUS},
{"Com_load", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD]), SHOW_LONG_STATUS},
{"Com_load_master_data", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_DATA]), SHOW_LONG_STATUS},
{"Com_load_master_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_TABLE]), SHOW_LONG_STATUS},
{"Com_lock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOCK_TABLES]), SHOW_LONG_STATUS},
{"Com_optimize", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_OPTIMIZE]), SHOW_LONG_STATUS},
{"Com_preload_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PRELOAD_KEYS]), SHOW_LONG_STATUS},
{"Com_prepare_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PREPARE]), SHOW_LONG_STATUS},
{"Com_purge", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE]), SHOW_LONG_STATUS},
{"Com_purge_before_date", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE_BEFORE]), SHOW_LONG_STATUS},
{"Com_rename_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_TABLE]), SHOW_LONG_STATUS},
{"Com_repair", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPAIR]), SHOW_LONG_STATUS},
{"Com_replace", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE]), SHOW_LONG_STATUS},
{"Com_replace_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE_SELECT]), SHOW_LONG_STATUS},
{"Com_reset", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESET]), SHOW_LONG_STATUS},
{"Com_restore_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESTORE_TABLE]), SHOW_LONG_STATUS},
{"Com_revoke", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE]), SHOW_LONG_STATUS},
{"Com_revoke_all", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE_ALL]), SHOW_LONG_STATUS},
{"Com_rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ROLLBACK]), SHOW_LONG_STATUS},
{"Com_savepoint", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SAVEPOINT]), SHOW_LONG_STATUS},
{"Com_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SELECT]), SHOW_LONG_STATUS},
{"Com_set_option", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SET_OPTION]), SHOW_LONG_STATUS},
{"Com_show_binlog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS},
{"Com_show_binlogs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS},
{"Com_show_charsets", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS},
{"Com_show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS},
{"Com_show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS},
{"Com_show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS},
{"Com_show_create_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_EVENT]), SHOW_LONG_STATUS},
{"Com_show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS},
{"Com_show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS},
{"Com_show_engine_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_LOGS]), SHOW_LONG_STATUS},
{"Com_show_engine_mutex", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_MUTEX]), SHOW_LONG_STATUS},
{"Com_show_engine_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_STATUS]), SHOW_LONG_STATUS},
{"Com_show_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_EVENTS]), SHOW_LONG_STATUS},
{"Com_show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS},
{"Com_show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS},
{"Com_show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS},
{"Com_show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS},
{"Com_show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS},
{"Com_show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS},
{"Com_show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS},
{"Com_show_plugins", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PLUGINS]), SHOW_LONG_STATUS},
{"Com_show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS},
{"Com_show_processlist", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS},
{"Com_show_slave_hosts", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS},
{"Com_show_slave_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS},
{"Com_show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS},
{"Com_show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS},
{"Com_show_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS},
{"Com_show_triggers", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TRIGGERS]), SHOW_LONG_STATUS},
{"Com_show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS},
{"Com_show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS},
{"Com_slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
{"Com_slave_stop", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS},
{"Com_stmt_close", (char*) offsetof(STATUS_VAR, com_stmt_close), SHOW_LONG_STATUS},
{"Com_stmt_execute", (char*) offsetof(STATUS_VAR, com_stmt_execute), SHOW_LONG_STATUS},
{"Com_stmt_fetch", (char*) offsetof(STATUS_VAR, com_stmt_fetch), SHOW_LONG_STATUS},
{"Com_stmt_prepare", (char*) offsetof(STATUS_VAR, com_stmt_prepare), SHOW_LONG_STATUS},
{"Com_stmt_reset", (char*) offsetof(STATUS_VAR, com_stmt_reset), SHOW_LONG_STATUS},
{"Com_stmt_send_long_data", (char*) offsetof(STATUS_VAR, com_stmt_send_long_data), SHOW_LONG_STATUS},
{"Com_truncate", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_TRUNCATE]), SHOW_LONG_STATUS},
{"Com_unlock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNLOCK_TABLES]), SHOW_LONG_STATUS},
{"Com_update", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE]), SHOW_LONG_STATUS},
{"Com_update_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE_MULTI]), SHOW_LONG_STATUS},
{"Com_xa_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_COMMIT]),SHOW_LONG_STATUS},
{"Com_xa_end", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_END]),SHOW_LONG_STATUS},
{"Com_xa_prepare", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_PREPARE]),SHOW_LONG_STATUS},
{"Com_xa_recover", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_RECOVER]),SHOW_LONG_STATUS},
{"Com_xa_rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_ROLLBACK]),SHOW_LONG_STATUS},
{"Com_xa_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_START]),SHOW_LONG_STATUS},
{"Com", (char*) com_status_vars, SHOW_ARRAY},
{"Compression", (char*) &show_net_compression, SHOW_FUNC},
{"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH},
{"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, created_tmp_disk_tables), SHOW_LONG_STATUS},

View file

@ -2074,11 +2074,17 @@ sp_head::backpatch(sp_label_t *lab)
uint dest= instructions();
List_iterator_fast<bp_t> li(m_backpatch);
DBUG_ENTER("sp_head::backpatch");
while ((bp= li++))
{
if (bp->lab == lab)
{
DBUG_PRINT("info", ("backpatch: (m_ip %d, label 0x%lx <%s>) to dest %d",
bp->instr->m_ip, (ulong) lab, lab->name, dest));
bp->instr->backpatch(dest, lab->ctx);
}
}
DBUG_VOID_RETURN;
}
/*

View file

@ -877,8 +877,9 @@ public:
virtual void backpatch(uint dest, sp_pcontext *dst_ctx)
{
if (m_dest == 0) // Don't reset
m_dest= dest;
/* Calling backpatch twice is a logic flaw in jump resolution. */
DBUG_ASSERT(m_dest == 0);
m_dest= dest;
}
/*

View file

@ -314,17 +314,91 @@ sp_rcontext::handle_error(uint sql_errno,
void
sp_rcontext::push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i)
{
DBUG_ENTER("sp_rcontext::push_cursor");
DBUG_ASSERT(m_ccount < m_root_parsing_ctx->max_cursor_index());
m_cstack[m_ccount++]= new sp_cursor(lex_keeper, i);
DBUG_PRINT("info", ("m_ccount: %d", m_ccount));
DBUG_VOID_RETURN;
}
void
sp_rcontext::pop_cursors(uint count)
{
DBUG_ENTER("sp_rcontext::pop_cursors");
DBUG_ASSERT(m_ccount >= count);
while (count--)
{
delete m_cstack[--m_ccount];
}
DBUG_PRINT("info", ("m_ccount: %d", m_ccount));
DBUG_VOID_RETURN;
}
void
sp_rcontext::push_handler(struct sp_cond_type *cond, uint h, int type, uint f)
{
DBUG_ENTER("sp_rcontext::push_handler");
DBUG_ASSERT(m_hcount < m_root_parsing_ctx->max_handler_index());
m_handler[m_hcount].cond= cond;
m_handler[m_hcount].handler= h;
m_handler[m_hcount].type= type;
m_handler[m_hcount].foffset= f;
m_hcount+= 1;
DBUG_PRINT("info", ("m_hcount: %d", m_hcount));
DBUG_VOID_RETURN;
}
void
sp_rcontext::pop_handlers(uint count)
{
DBUG_ENTER("sp_rcontext::pop_handlers");
DBUG_ASSERT(m_hcount >= count);
m_hcount-= count;
DBUG_PRINT("info", ("m_hcount: %d", m_hcount));
DBUG_VOID_RETURN;
}
void
sp_rcontext::push_hstack(uint h)
{
DBUG_ENTER("sp_rcontext::push_hstack");
DBUG_ASSERT(m_hsp < m_root_parsing_ctx->max_handler_index());
m_hstack[m_hsp++]= h;
DBUG_PRINT("info", ("m_hsp: %d", m_hsp));
DBUG_VOID_RETURN;
}
uint
sp_rcontext::pop_hstack()
{
uint handler;
DBUG_ENTER("sp_rcontext::pop_hstack");
DBUG_ASSERT(m_hsp);
handler= m_hstack[--m_hsp];
DBUG_PRINT("info", ("m_hsp: %d", m_hsp));
DBUG_RETURN(handler);
}
void
sp_rcontext::enter_handler(int hid)
{
DBUG_ENTER("sp_rcontext::enter_handler");
DBUG_ASSERT(m_ihsp < m_root_parsing_ctx->max_handler_index());
m_in_handler[m_ihsp++]= hid;
DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp));
DBUG_VOID_RETURN;
}
void
sp_rcontext::exit_handler()
{
DBUG_ENTER("sp_rcontext::exit_handler");
DBUG_ASSERT(m_ihsp);
m_ihsp-= 1;
DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp));
DBUG_VOID_RETURN;
}

View file

@ -107,21 +107,9 @@ class sp_rcontext : public Sql_alloc
return m_return_value_set;
}
inline void
push_handler(struct sp_cond_type *cond, uint h, int type, uint f)
{
m_handler[m_hcount].cond= cond;
m_handler[m_hcount].handler= h;
m_handler[m_hcount].type= type;
m_handler[m_hcount].foffset= f;
m_hcount+= 1;
}
void push_handler(struct sp_cond_type *cond, uint h, int type, uint f);
inline void
pop_handlers(uint count)
{
m_hcount-= count;
}
void pop_handlers(uint count);
// Returns 1 if a handler was found, 0 otherwise.
bool
@ -158,29 +146,13 @@ class sp_rcontext : public Sql_alloc
m_hfound= -1;
}
inline void
push_hstack(uint h)
{
m_hstack[m_hsp++]= h;
}
void push_hstack(uint h);
inline uint
pop_hstack()
{
return m_hstack[--m_hsp];
}
uint pop_hstack();
inline void
enter_handler(int hid)
{
m_in_handler[m_ihsp++]= hid;
}
void enter_handler(int hid);
inline void
exit_handler()
{
m_ihsp-= 1;
}
void exit_handler();
void
push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i);

View file

@ -3934,7 +3934,7 @@ retry:
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
ha_open_options | HA_OPEN_FOR_REPAIR,
entry, FALSE) || ! entry->file ||
(entry->file->is_crashed() && entry->file->check_and_repair(thd)))
(entry->file->is_crashed() && entry->file->ha_check_and_repair(thd)))
{
/* Give right error message */
thd->clear_error();
@ -5400,7 +5400,7 @@ bool rm_temporary_table(handlerton *base, char *path)
error=1; /* purecov: inspected */
*ext= 0; // remove extension
file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base);
if (file && file->delete_table(path))
if (file && file->ha_delete_table(path))
{
error=1;
sql_print_warning("Could not remove temporary table: '%s', error: %d",
@ -8099,7 +8099,7 @@ my_bool mysql_rm_tmp_tables(void)
((handler_file= get_new_handler(&share, thd->mem_root,
share.db_type()))))
{
handler_file->delete_table(filePathCopy);
handler_file->ha_delete_table(filePathCopy);
delete handler_file;
}
free_table_share(&share);

View file

@ -371,6 +371,32 @@ TODO list:
__LINE__,(ulong)(B)));B->query()->unlock_reading();}
#define DUMP(C) DBUG_EXECUTE("qcache", {\
(C)->cache_dump(); (C)->queries_dump();(C)->tables_dump();})
/**
Causes the thread to wait in a spin lock for a query kill signal.
This function is used by the test framework to identify race conditions.
The signal is caught and ignored and the thread is not killed.
*/
static void debug_wait_for_kill(const char *info)
{
DBUG_ENTER("debug_wait_for_kill");
const char *prev_info;
THD *thd;
thd= current_thd;
prev_info= thd->proc_info;
thd->proc_info= info;
sql_print_information(info);
while(!thd->killed)
my_sleep(1000);
thd->killed= THD::NOT_KILLED;
sql_print_information("Exit debug_wait_for_kill");
thd->proc_info= prev_info;
DBUG_VOID_RETURN;
}
#else
#define MUTEX_LOCK(M) pthread_mutex_lock(M)
#define MUTEX_UNLOCK(M) pthread_mutex_unlock(M)
@ -647,13 +673,16 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
if (net->query_cache_query == 0)
DBUG_VOID_RETURN;
DBUG_EXECUTE_IF("wait_in_query_cache_insert",
debug_wait_for_kill("wait_in_query_cache_insert"); );
STRUCT_LOCK(&query_cache.structure_guard_mutex);
bool interrupt;
query_cache.wait_while_table_flush_is_in_progress(&interrupt);
if (interrupt)
{
STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
return;
DBUG_VOID_RETURN;
}
Query_cache_block *query_block= (Query_cache_block*)net->query_cache_query;
@ -667,11 +696,11 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
DBUG_VOID_RETURN;
}
BLOCK_LOCK_WR(query_block);
Query_cache_query *header= query_block->query();
Query_cache_block *result= header->result();
DUMP(&query_cache);
BLOCK_LOCK_WR(query_block);
DBUG_PRINT("qcache", ("insert packet %lu bytes long",length));
/*
@ -687,6 +716,7 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
DBUG_PRINT("qcache", ("free query 0x%lx", (ulong) query_block));
// The following call will remove the lock on query_block
query_cache.free_query(query_block);
query_cache.refused++;
// append_result_data no success => we need unlock
STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
DBUG_VOID_RETURN;
@ -883,6 +913,31 @@ ulong Query_cache::resize(ulong query_cache_size_arg)
m_cache_status= Query_cache::FLUSH_IN_PROGRESS;
STRUCT_UNLOCK(&structure_guard_mutex);
/*
Wait for all readers and writers to exit. When the list of all queries
is iterated over with a block level lock, we are done.
*/
Query_cache_block *block= queries_blocks;
if (block)
{
do
{
BLOCK_LOCK_WR(block);
Query_cache_query *query= block->query();
if (query && query->writer())
{
/*
Drop the writer; this will cancel any attempts to store
the processed statement associated with this writer.
*/
query->writer()->query_cache_query= 0;
query->writer(0);
refused++;
}
BLOCK_UNLOCK_WR(block);
block= block->next;
} while (block != queries_blocks);
}
free_cache();
query_cache_size= query_cache_size_arg;

View file

@ -123,7 +123,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
ha_rows const maybe_deleted= table->file->stats.records;
DBUG_PRINT("debug", ("Trying to use delete_all_rows()"));
if (!(error=table->file->delete_all_rows()))
if (!(error=table->file->ha_delete_all_rows()))
{
error= -1; // ok
deleted= maybe_deleted;
@ -328,7 +328,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
We're really doing a truncate and need to reset the table's
auto-increment counter.
*/
int error2= table->file->reset_auto_increment(0);
int error2= table->file->ha_reset_auto_increment(0);
if (error2 && (error2 != HA_ERR_WRONG_COMMAND))
{

View file

@ -1863,7 +1863,6 @@ bool delayed_get_table(THD *thd, TABLE_LIST *table_list)
{
if (!(di= new Delayed_insert()))
{
my_error(ER_OUTOFMEMORY,MYF(0),sizeof(Delayed_insert));
thd->fatal_error();
goto end_create;
}

View file

@ -5108,9 +5108,9 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
DBUG_ENTER("mysql_change_partitions");
build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "", 0);
if ((error= file->change_partitions(lpt->create_info, path, &lpt->copied,
&lpt->deleted, lpt->pack_frm_data,
lpt->pack_frm_len)))
if ((error= file->ha_change_partitions(lpt->create_info, path, &lpt->copied,
&lpt->deleted, lpt->pack_frm_data,
lpt->pack_frm_len)))
{
if (error != ER_OUTOFMEMORY)
file->print_error(error, MYF(0));
@ -5148,7 +5148,7 @@ static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
DBUG_ENTER("mysql_rename_partitions");
build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "", 0);
if ((error= lpt->table->file->rename_partitions(path)))
if ((error= lpt->table->file->ha_rename_partitions(path)))
{
if (error != 1)
lpt->table->file->print_error(error, MYF(0));
@ -5189,7 +5189,7 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
DBUG_ENTER("mysql_drop_partitions");
build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "", 0);
if ((error= lpt->table->file->drop_partitions(path)))
if ((error= lpt->table->file->ha_drop_partitions(path)))
{
lpt->table->file->print_error(error, MYF(0));
DBUG_RETURN(TRUE);
@ -6105,13 +6105,13 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
int error;
written_bin_log= FALSE;
if (((alter_info->flags & ALTER_OPTIMIZE_PARTITION) &&
(error= table->file->optimize_partitions(thd))) ||
(error= table->file->ha_optimize_partitions(thd))) ||
((alter_info->flags & ALTER_ANALYZE_PARTITION) &&
(error= table->file->analyze_partitions(thd))) ||
(error= table->file->ha_analyze_partitions(thd))) ||
((alter_info->flags & ALTER_CHECK_PARTITION) &&
(error= table->file->check_partitions(thd))) ||
(error= table->file->ha_check_partitions(thd))) ||
((alter_info->flags & ALTER_REPAIR_PARTITION) &&
(error= table->file->repair_partitions(thd))))
(error= table->file->ha_repair_partitions(thd))))
{
table->file->print_error(error, MYF(0));
goto err;

View file

@ -1534,14 +1534,14 @@ JOIN::reinit()
if (exec_tmp_table1)
{
exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE);
exec_tmp_table1->file->delete_all_rows();
exec_tmp_table1->file->ha_delete_all_rows();
free_io_cache(exec_tmp_table1);
filesort_free_buffers(exec_tmp_table1,0);
}
if (exec_tmp_table2)
{
exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE);
exec_tmp_table2->file->delete_all_rows();
exec_tmp_table2->file->ha_delete_all_rows();
free_io_cache(exec_tmp_table2);
filesort_free_buffers(exec_tmp_table2,0);
}
@ -10470,9 +10470,9 @@ free_tmp_table(THD *thd, TABLE *entry)
if (entry->file)
{
if (entry->db_stat)
entry->file->drop_table(entry->s->table_name.str);
entry->file->ha_drop_table(entry->s->table_name.str);
else
entry->file->delete_table(entry->s->table_name.str);
entry->file->ha_delete_table(entry->s->table_name.str);
delete entry->file;
}
@ -10528,7 +10528,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
if (open_tmp_table(&new_table))
goto err1;
if (table->file->indexes_are_disabled())
new_table.file->disable_indexes(HA_KEY_SWITCH_ALL);
new_table.file->ha_disable_indexes(HA_KEY_SWITCH_ALL);
table->file->ha_index_or_rnd_end();
table->file->ha_rnd_init(1);
if (table->no_rows)
@ -10557,13 +10557,13 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
*/
while (!table->file->rnd_next(new_table.record[1]))
{
write_err= new_table.file->write_row(new_table.record[1]);
write_err= new_table.file->ha_write_row(new_table.record[1]);
DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
if (write_err)
goto err;
}
/* copy row that filled HEAP table */
if ((write_err=new_table.file->write_row(table->record[0])))
if ((write_err=new_table.file->ha_write_row(table->record[0])))
{
if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
!ignore_last_dupp_key_error)
@ -10594,7 +10594,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
(void) table->file->ha_rnd_end();
(void) new_table.file->close();
err1:
new_table.file->delete_table(new_table.s->table_name.str);
new_table.file->ha_delete_table(new_table.s->table_name.str);
err2:
delete new_table.file;
thd->proc_info=save_proc_info;
@ -12026,7 +12026,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
join->found_records++;
if ((error=table->file->write_row(table->record[0])))
if ((error=table->file->ha_write_row(table->record[0])))
{
if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
goto end;
@ -12088,8 +12088,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{ /* Update old record */
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
if ((error=table->file->update_row(table->record[1],
table->record[0])))
if ((error=table->file->ha_update_row(table->record[1],
table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@ -12112,7 +12112,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
init_tmptable_sum_functions(join->sum_funcs);
copy_funcs(join->tmp_table_param.items_to_copy);
if ((error=table->file->write_row(table->record[0])))
if ((error=table->file->ha_write_row(table->record[0])))
{
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error, 0))
@ -12148,7 +12148,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
copy_fields(&join->tmp_table_param); // Groups are copied twice.
copy_funcs(join->tmp_table_param.items_to_copy);
if (!(error=table->file->write_row(table->record[0])))
if (!(error=table->file->ha_write_row(table->record[0])))
join->send_records++; // New group
else
{
@ -12164,8 +12164,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
if ((error=table->file->update_row(table->record[1],
table->record[0])))
if ((error=table->file->ha_update_row(table->record[1],
table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@ -12208,7 +12208,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->sum_funcs_end[send_group_parts]);
if (!join->having || join->having->val_int())
{
int error= table->file->write_row(table->record[0]);
int error= table->file->ha_write_row(table->record[0]);
if (error && create_myisam_from_heap(join->thd, table,
&join->tmp_table_param,
error, 0))
@ -13437,7 +13437,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (having && !having->val_int())
{
if ((error=file->delete_row(record)))
if ((error=file->ha_delete_row(record)))
goto err;
error=file->rnd_next(record);
continue;
@ -13464,7 +13464,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (compare_record(table, first_field) == 0)
{
if ((error=file->delete_row(record)))
if ((error=file->ha_delete_row(record)))
goto err;
}
else if (!found)
@ -13561,7 +13561,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
}
if (having && !having->val_int())
{
if ((error=file->delete_row(record)))
if ((error=file->ha_delete_row(record)))
goto err;
continue;
}
@ -13578,7 +13578,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (hash_search(&hash, org_key_pos, key_length))
{
/* Duplicated found ; Remove the row */
if ((error=file->delete_row(record)))
if ((error=file->ha_delete_row(record)))
goto err;
}
else
@ -15586,7 +15586,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
item->save_in_result_field(1);
}
copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
if ((write_error= table_arg->file->write_row(table_arg->record[0])))
if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
{
if (create_myisam_from_heap(thd, table_arg, &tmp_table_param,
write_error, 0))

View file

@ -5864,7 +5864,7 @@ bool get_schema_tables_result(JOIN *join,
{
table_list->table->file->extra(HA_EXTRA_NO_CACHE);
table_list->table->file->extra(HA_EXTRA_RESET_STATE);
table_list->table->file->delete_all_rows();
table_list->table->file->ha_delete_all_rows();
free_io_cache(table_list->table);
filesort_free_buffers(table_list->table,1);
table_list->table->null_row= 0;

View file

@ -630,7 +630,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
}
else
{
if ((error= file->delete_table(ddl_log_entry->name)))
if ((error= file->ha_delete_table(ddl_log_entry->name)))
{
if (error != ENOENT && error != HA_ERR_NO_SUCH_TABLE)
break;
@ -667,8 +667,8 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
}
else
{
if (file->rename_table(ddl_log_entry->from_name,
ddl_log_entry->name))
if (file->ha_rename_table(ddl_log_entry->from_name,
ddl_log_entry->name))
break;
}
if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
@ -1299,9 +1299,9 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
lpt->table_name, lpt->create_info,
lpt->alter_info->create_list, lpt->key_count,
lpt->key_info_buffer, lpt->table->file)) ||
lpt->table->file->create_handler_files(shadow_path, NULL,
CHF_CREATE_FLAG,
lpt->create_info))
lpt->table->file->ha_create_handler_files(shadow_path, NULL,
CHF_CREATE_FLAG,
lpt->create_info))
{
my_delete(shadow_frm_name, MYF(0));
error= 1;
@ -1353,15 +1353,15 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
VOID(pthread_mutex_lock(&LOCK_open));
if (my_delete(frm_name, MYF(MY_WME)) ||
#ifdef WITH_PARTITION_STORAGE_ENGINE
lpt->table->file->create_handler_files(path, shadow_path,
CHF_DELETE_FLAG, NULL) ||
lpt->table->file->ha_create_handler_files(path, shadow_path,
CHF_DELETE_FLAG, NULL) ||
deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos) ||
(sync_ddl_log(), FALSE) ||
#endif
#ifdef WITH_PARTITION_STORAGE_ENGINE
my_rename(shadow_frm_name, frm_name, MYF(MY_WME)) ||
lpt->table->file->create_handler_files(path, shadow_path,
CHF_RENAME_FLAG, NULL))
lpt->table->file->ha_create_handler_files(path, shadow_path,
CHF_RENAME_FLAG, NULL))
#else
my_rename(shadow_frm_name, frm_name, MYF(MY_WME)))
#endif
@ -3718,14 +3718,14 @@ mysql_rename_table(handlerton *base, const char *old_db,
to_base= lc_to;
}
if (!file || !(error=file->rename_table(from_base, to_base)))
if (!file || !(error=file->ha_rename_table(from_base, to_base)))
{
if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext))
{
error=my_errno;
/* Restore old file name */
if (file)
file->rename_table(to_base, from_base);
file->ha_rename_table(to_base, from_base);
}
}
delete file;
@ -4376,7 +4376,7 @@ send_result_message:
if (!result_code) // recreation went ok
{
if ((table->table= open_ltable(thd, table, lock_type, 0)) &&
((result_code= table->table->file->analyze(thd, check_opt)) > 0))
((result_code= table->table->file->ha_analyze(thd, check_opt)) > 0))
result_code= 0; // analyze went ok
}
if (result_code) // either mysql_recreate_table or analyze failed
@ -4486,7 +4486,7 @@ bool mysql_backup_table(THD* thd, TABLE_LIST* table_list)
"MySQL Administrator (mysqldump, mysql)");
DBUG_RETURN(mysql_admin_table(thd, table_list, 0,
"backup", TL_READ, 0, 0, 0, 0,
&handler::backup, 0));
&handler::ha_backup, 0));
}
@ -4498,7 +4498,7 @@ bool mysql_restore_table(THD* thd, TABLE_LIST* table_list)
DBUG_RETURN(mysql_admin_table(thd, table_list, 0,
"restore", TL_WRITE, 1, 1, 0,
&prepare_for_restore,
&handler::restore, 0));
&handler::ha_restore, 0));
}
@ -4519,7 +4519,7 @@ bool mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
DBUG_ENTER("mysql_optimize_table");
DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
"optimize", TL_WRITE, 1,0,0,0,
&handler::optimize, 0));
&handler::ha_optimize, 0));
}
@ -4933,7 +4933,7 @@ bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
DBUG_ENTER("mysql_analyze_table");
DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
"analyze", lock_type, 1, 0, 0, 0,
&handler::analyze, 0));
&handler::ha_analyze, 0));
}
@ -4980,7 +4980,7 @@ mysql_discard_or_import_tablespace(THD *thd,
DBUG_RETURN(-1);
}
error=table->file->discard_or_import_tablespace(discard);
error= table->file->ha_discard_or_import_tablespace(discard);
thd->proc_info="end";
@ -5353,14 +5353,14 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
switch (keys_onoff) {
case ENABLE:
error= table->file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
break;
case LEAVE_AS_IS:
if (!indexes_were_disabled)
break;
/* fall-through: disabled indexes */
case DISABLE:
error= table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
}
if (error == HA_ERR_WRONG_COMMAND)
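alter_table_manage_keys() maps the keys_onoff request carried by ALTER TABLE onto the ha_enable_indexes()/ha_disable_indexes() wrappers above. At the SQL level the two branches correspond to statements like these (table name hypothetical):

    ALTER TABLE t1 DISABLE KEYS;  -- keys_onoff == DISABLE
    ALTER TABLE t1 ENABLE KEYS;   -- keys_onoff == ENABLE

Engines that cannot toggle non-unique indexes return HA_ERR_WRONG_COMMAND, which is why that error code gets special handling right below.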
@ -6130,14 +6130,14 @@ view_err:
wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN);
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_EXECUTE_IF("sleep_alter_enable_indexes", my_sleep(6000000););
error= table->file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
/* COND_refresh will be signaled in close_thread_tables() */
break;
case DISABLE:
VOID(pthread_mutex_lock(&LOCK_open));
wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN);
VOID(pthread_mutex_unlock(&LOCK_open));
error=table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
error=table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
/* COND_refresh will be signaled in close_thread_tables() */
break;
default:
@ -6545,7 +6545,7 @@ view_err:
KEY_PART_INFO *part_end;
DBUG_PRINT("info", ("No new_table, checking add/drop index"));
table->file->prepare_for_alter();
table->file->ha_prepare_for_alter();
if (index_add_count)
{
/* The add_index() method takes an array of KEY structs. */
@ -6763,8 +6763,8 @@ view_err:
t_table= table;
}
/* Tell the handler that a new frm file is in place. */
if (t_table->file->create_handler_files(path, NULL, CHF_INDEX_FLAG,
create_info))
if (t_table->file->ha_create_handler_files(path, NULL, CHF_INDEX_FLAG,
create_info))
goto err_with_placeholders;
if (thd->locked_tables && new_name == table_name && new_db == db)
{
@ -7064,7 +7064,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
copy_ptr->do_copy(copy_ptr);
}
prev_insert_id= to->file->next_insert_id;
error=to->file->write_row(to->record[0]);
error=to->file->ha_write_row(to->record[0]);
to->auto_increment_field_not_null= FALSE;
if (error)
{

View file

@ -440,10 +440,10 @@ bool st_select_lex_unit::exec()
{
item->assigned(0); // We will reinit & rexecute unit
item->reset();
table->file->delete_all_rows();
table->file->ha_delete_all_rows();
}
/* re-enabling indexes for next subselect iteration */
if (union_distinct && table->file->enable_indexes(HA_KEY_SWITCH_ALL))
if (union_distinct && table->file->ha_enable_indexes(HA_KEY_SWITCH_ALL))
{
DBUG_ASSERT(0);
}
@ -485,7 +485,7 @@ bool st_select_lex_unit::exec()
sl->join->exec();
if (sl == union_distinct)
{
if (table->file->disable_indexes(HA_KEY_SWITCH_ALL))
if (table->file->ha_disable_indexes(HA_KEY_SWITCH_ALL))
DBUG_RETURN(TRUE);
table->no_keyread=1;
}
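This reset matters chiefly when the UNION is re-executed, e.g. inside a dependent (correlated) subquery: st_select_lex_unit::exec() then runs once per outer row, so the materialized temporary table has to be emptied and the unique index used for UNION DISTINCT re-enabled before every round. A hypothetical query of that shape (t1, t2, t3 are assumptions, not from this commit):

    SELECT t1.a,
           (SELECT b FROM t2 WHERE t2.a = t1.a
            UNION
            SELECT c FROM t3 WHERE t3.a = t1.a) AS u
    FROM t1;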

View file

@ -625,9 +625,9 @@ int mysql_update(THD *thd,
call then it should be included in the count of dup_key_found
and error should be set to 0 (only if these errors are ignored).
*/
error= table->file->bulk_update_row(table->record[1],
table->record[0],
&dup_key_found);
error= table->file->ha_bulk_update_row(table->record[1],
table->record[0],
&dup_key_found);
limit+= dup_key_found;
updated-= dup_key_found;
}
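As the comment above explains, dup_key_found lets an ignored duplicate-key conflict count against LIMIT instead of aborting the update; ha_bulk_update_row() is the batched variant that some storage engines provide. A hypothetical statement exercising that accounting (names are assumptions):

    UPDATE IGNORE t1 SET uniq_col = uniq_col + 1 LIMIT 100;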

View file

@ -508,10 +508,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%pure_parser /* We have threads */
/*
Currently there are 177 shift/reduce conflicts.
Currently there are 169 shift/reduce conflicts.
We should not introduce new conflicts any more.
*/
%expect 177
%expect 169
/*
Comments for TOKENS.
@ -1193,7 +1193,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <table_list>
join_table_list join_table
table_factor table_ref
table_factor table_ref esc_table_ref
select_derived derived_table_list
%type <date_time_type> date_time_type;
@ -1284,7 +1284,9 @@ END_OF_INPUT
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
%type <NONE> sp_proc_stmt_statement sp_proc_stmt_return
%type <NONE> sp_proc_stmt_if
%type <NONE> sp_labeled_control sp_proc_stmt_unlabeled sp_proc_stmt_leave
%type <NONE> sp_labeled_control sp_proc_stmt_unlabeled
%type <NONE> sp_labeled_block sp_unlabeled_block
%type <NONE> sp_proc_stmt_leave
%type <NONE> sp_proc_stmt_iterate
%type <NONE> sp_proc_stmt_open sp_proc_stmt_fetch sp_proc_stmt_close
%type <NONE> case_stmt_specification simple_case_stmt searched_case_stmt
@ -1945,6 +1947,8 @@ ev_sql_stmt_inner:
| sp_proc_stmt_return
| sp_proc_stmt_if
| case_stmt_specification
| sp_labeled_block
| sp_unlabeled_block
| sp_labeled_control
| sp_proc_stmt_unlabeled
| sp_proc_stmt_leave
@ -2519,6 +2523,8 @@ sp_proc_stmt:
| sp_proc_stmt_return
| sp_proc_stmt_if
| case_stmt_specification
| sp_labeled_block
| sp_unlabeled_block
| sp_labeled_control
| sp_proc_stmt_unlabeled
| sp_proc_stmt_leave
@ -2645,14 +2651,35 @@ sp_proc_stmt_leave:
sp_instr_jump *i;
uint ip= sp->instructions();
uint n;
/*
When jumping to a BEGIN-END block end, the target jump
points to the block hpop/cpop cleanup instructions,
so we should exclude the block context here.
When jumping to something else (i.e., SP_LAB_ITER),
there are no hpop/cpop at the jump destination,
so we should include the block context here for cleanup.
*/
bool exclusive= (lab->type == SP_LAB_BEGIN);
n= ctx->diff_handlers(lab->ctx, TRUE); /* Exclusive the dest. */
n= ctx->diff_handlers(lab->ctx, exclusive);
if (n)
sp->add_instr(new sp_instr_hpop(ip++, ctx, n));
n= ctx->diff_cursors(lab->ctx, TRUE); /* Exclusive the dest. */
{
sp_instr_hpop *hpop= new sp_instr_hpop(ip++, ctx, n);
if (hpop == NULL)
MYSQL_YYABORT;
sp->add_instr(hpop);
}
n= ctx->diff_cursors(lab->ctx, exclusive);
if (n)
sp->add_instr(new sp_instr_cpop(ip++, ctx, n));
{
sp_instr_cpop *cpop= new sp_instr_cpop(ip++, ctx, n);
if (cpop == NULL)
MYSQL_YYABORT;
sp->add_instr(cpop);
}
i= new sp_instr_jump(ip, ctx);
if (i == NULL)
MYSQL_YYABORT;
sp->push_backpatch(i, lab); /* Jumping forward */
sp->add_instr(i);
}
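The exclusive/inclusive distinction in the comment comes down to where LEAVE lands: leaving a labelled BEGIN ... END block jumps to the block's own hpop/cpop cleanup, while leaving a loop label has no such cleanup at the target, so it must be emitted with the jump itself. Both kinds of target in one hypothetical procedure (the DELIMITER lines are only for the mysql client):

    DELIMITER //
    CREATE PROCEDURE p1()
    blk: BEGIN
      DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET @err = 1;
      lp: LOOP
        LEAVE lp;   -- loop label: handler/cursor cleanup emitted at the LEAVE
      END LOOP lp;
      LEAVE blk;    -- block label: cleanup already sits at the block end
    END//
    DELIMITER ;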
@ -2680,10 +2707,20 @@ sp_proc_stmt_iterate:
n= ctx->diff_handlers(lab->ctx, FALSE); /* Inclusive the dest. */
if (n)
sp->add_instr(new sp_instr_hpop(ip++, ctx, n));
{
sp_instr_hpop *hpop= new sp_instr_hpop(ip++, ctx, n);
if (hpop == NULL)
MYSQL_YYABORT;
sp->add_instr(hpop);
}
n= ctx->diff_cursors(lab->ctx, FALSE); /* Inclusive the dest. */
if (n)
sp->add_instr(new sp_instr_cpop(ip++, ctx, n));
{
sp_instr_cpop *cpop= new sp_instr_cpop(ip++, ctx, n);
if (cpop == NULL)
MYSQL_YYABORT;
sp->add_instr(cpop);
}
i= new sp_instr_jump(ip, ctx, lab->ip); /* Jump back */
sp->add_instr(i);
}
@ -2967,19 +3004,17 @@ sp_labeled_control:
sp_unlabeled_control sp_opt_label
{
LEX *lex= Lex;
sp_label_t *lab= lex->spcont->pop_label();
if ($5.str)
{
sp_label_t *lab= lex->spcont->find_label($5.str);
if (!lab ||
my_strcasecmp(system_charset_info, $5.str, lab->name) != 0)
if (my_strcasecmp(system_charset_info, $5.str, lab->name) != 0)
{
my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str);
MYSQL_YYABORT;
}
}
lex->sphead->backpatch(lex->spcont->pop_label());
lex->sphead->backpatch(lab);
}
;
@ -2988,15 +3023,59 @@ sp_opt_label:
| label_ident { $$= $1; }
;
sp_unlabeled_control:
sp_labeled_block:
label_ident ':'
{
LEX *lex= Lex;
sp_pcontext *ctx= lex->spcont;
sp_label_t *lab= ctx->find_label($1.str);
if (lab)
{
my_error(ER_SP_LABEL_REDEFINE, MYF(0), $1.str);
MYSQL_YYABORT;
}
lab= lex->spcont->push_label($1.str,
lex->sphead->instructions());
lab->type= SP_LAB_BEGIN;
}
sp_block_content sp_opt_label
{
LEX *lex= Lex;
sp_label_t *lab= lex->spcont->pop_label();
if ($5.str)
{
if (my_strcasecmp(system_charset_info, $5.str, lab->name) != 0)
{
my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str);
MYSQL_YYABORT;
}
}
}
;
sp_unlabeled_block:
{ /* Unlabeled blocks get a secret label. */
LEX *lex= Lex;
uint ip= lex->sphead->instructions();
sp_label_t *lab= lex->spcont->push_label((char *)"", ip);
lab->type= SP_LAB_BEGIN;
}
sp_block_content
{
LEX *lex= Lex;
lex->spcont->pop_label();
}
;
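The new rules give BEGIN ... END blocks their own productions: a labelled block records its label (and any trailing END label must match it, otherwise ER_SP_LABEL_MISMATCH), while an unlabelled block is pushed with a hidden empty label so the context bookkeeping stays uniform. A hypothetical example of both forms:

    DELIMITER //
    CREATE PROCEDURE p2()
    outer_blk: BEGIN        -- labelled block
      BEGIN                 -- unlabelled block, gets the "secret" label
        SELECT 1;
      END;
    END outer_blk//         -- trailing label must match the opening one
    DELIMITER ;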
sp_block_content:
BEGIN_SYM
{ /* QQ This is just a dummy for grouping declarations and statements
together. No [[NOT] ATOMIC] yet, and we need to figure out how
make it coexist with the existing BEGIN COMMIT/ROLLBACK. */
LEX *lex= Lex;
sp_label_t *lab= lex->spcont->last_label();
lab->type= SP_LAB_BEGIN;
lex->spcont= lex->spcont->push_context(LABEL_DEFAULT_SCOPE);
}
sp_decls
@ -3016,7 +3095,10 @@ sp_unlabeled_control:
$3.curs));
lex->spcont= ctx->pop_context();
}
| LOOP_SYM
;
sp_unlabeled_control:
LOOP_SYM
sp_proc_stmts1 END LOOP_SYM
{
LEX *lex= Lex;
@ -7452,10 +7534,22 @@ join_table_list:
derived_table_list { MYSQL_YYABORT_UNLESS($$=$1); }
;
/*
The ODBC escape syntax for Outer Join is: '{' OJ join_table '}'
The parser does not define OJ as a token, any ident is accepted
instead in $2 (ident). Also, all productions from table_ref can
be escaped, not only join_table. Both syntax extensions are safe
and are ignored.
*/
esc_table_ref:
table_ref { $$=$1; }
| '{' ident table_ref '}' { $$=$3; }
;
/* Warning - may return NULL in case of incomplete SELECT */
derived_table_list:
table_ref { $$=$1; }
| derived_table_list ',' table_ref
esc_table_ref { $$=$1; }
| derived_table_list ',' esc_table_ref
{
MYSQL_YYABORT_UNLESS($1 && ($$=$3));
}
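As the comment above says, the braces-and-OJ wrapper is ODBC escape syntax: any identifier is accepted in place of OJ and the braces are ignored, so every table_ref can now be escaped, not only an outer join. For example (table names hypothetical), these two parse to the same join:

    SELECT * FROM { OJ t1 LEFT OUTER JOIN t2 ON t1.a = t2.a };
    SELECT * FROM t1 LEFT OUTER JOIN t2 ON t1.a = t2.a;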
@ -7620,25 +7714,6 @@ table_factor:
MYSQL_YYABORT;
Select->add_joined_table($$);
}
| '{' ident table_ref LEFT OUTER JOIN_SYM table_ref
ON
{
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $3, $7))
MYSQL_YYABORT;
}
expr '}'
{
LEX *lex= Lex;
MYSQL_YYABORT_UNLESS($3 && $7);
add_join_on($7,$10);
Lex->pop_context();
$7->outer_join|=JOIN_TYPE_LEFT;
$$=$7;
if (!($$= lex->current_select->nest_last_join(lex->thd)))
MYSQL_YYABORT;
}
| select_derived_init get_select_lex select_derived2
{
LEX *lex= Lex;

View file

@ -401,7 +401,7 @@ int rea_create_table(THD *thd, const char *path,
DBUG_ASSERT(*fn_rext(frm_name));
if (thd->variables.keep_files_on_create)
create_info->options|= HA_CREATE_KEEP_FILES;
if (file->create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info))
if (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info))
goto err_handler;
if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
create_info,0))
@ -409,7 +409,7 @@ int rea_create_table(THD *thd, const char *path,
DBUG_RETURN(0);
err_handler:
VOID(file->create_handler_files(path, NULL, CHF_DELETE_FLAG, create_info));
VOID(file->ha_create_handler_files(path, NULL, CHF_DELETE_FLAG, create_info));
my_delete(frm_name, MYF(0));
DBUG_RETURN(1);
} /* rea_create_table */