Automatic merge
Commit c752596183
33 changed files with 2541 additions and 60 deletions
@@ -1969,3 +1969,4 @@ plugin/handler_socket/client/hsclient
client/strings_def.h
libmysql/strings_def.h
libmysql_r/strings_def.h
+scripts/mytop
@@ -18,7 +18,7 @@ dnl When merging new MySQL releases, update the version number to match the
dnl MySQL version number.
dnl
dnl Note: the following line must be parseable by win/configure.js:GetVersion()
-AC_INIT([MariaDB Server], [5.3.0-MariaDB-alpha], [], [mysql])
+AC_INIT([MariaDB Server], [5.3.0-MariaDB-beta], [], [mysql])

AC_CONFIG_SRCDIR([sql/mysqld.cc])
AC_CANONICAL_SYSTEM
@@ -156,25 +156,33 @@ Variable_name Value
Com_show_status 3
show status like 'hand%write%';
Variable_name Value
+Handler_tmp_write 0
Handler_write 0
show status like '%tmp%';
Variable_name Value
Created_tmp_disk_tables 0
Created_tmp_files 0
Created_tmp_tables 0
+Handler_tmp_update 0
+Handler_tmp_write 0
+Rows_tmp_read 5
show status like 'hand%write%';
Variable_name Value
+Handler_tmp_write 0
Handler_write 0
show status like '%tmp%';
Variable_name Value
Created_tmp_disk_tables 0
Created_tmp_files 0
Created_tmp_tables 0
+Handler_tmp_update 0
+Handler_tmp_write 0
+Rows_tmp_read 13
show status like 'com_show_status';
Variable_name Value
Com_show_status 8
rnd_diff tmp_table_diff
-20 8
+28 8
flush status;
show status like 'Com%function';
Variable_name Value
@@ -238,5 +246,57 @@ SELECT 9;
9
DROP PROCEDURE p1;
DROP FUNCTION f1;
+flush status;
+create table t1 (a int not null auto_increment primary key, g int, b blob);
+insert into t1 (g,b) values (1,'a'), (2, 'b'), (3, 'b'), (1, 'c');
+select * from t1;
+a g b
+1 1 a
+2 2 b
+3 3 b
+4 1 c
+select b, count(*) from t1 group by b;
+b count(*)
+a 1
+b 2
+c 1
+select g, count(*) from t1 group by g;
+g count(*)
+1 2
+2 1
+3 1
+show status like 'Row%';
+Variable_name Value
+Rows_read 12
+Rows_sent 10
+Rows_tmp_read 14
+show status like 'Handler%';
+Variable_name Value
+Handler_commit 0
+Handler_delete 0
+Handler_discover 0
+Handler_prepare 0
+Handler_read_first 0
+Handler_read_key 4
+Handler_read_next 0
+Handler_read_prev 0
+Handler_read_rnd 7
+Handler_read_rnd_next 23
+Handler_rollback 0
+Handler_savepoint 0
+Handler_savepoint_rollback 0
+Handler_tmp_update 2
+Handler_tmp_write 7
+Handler_update 0
+Handler_write 4
+show status like '%tmp%';
+Variable_name Value
+Created_tmp_disk_tables 1
+Created_tmp_files 0
+Created_tmp_tables 2
+Handler_tmp_update 2
+Handler_tmp_write 7
+Rows_tmp_read 34
+drop table t1;
set @@global.concurrent_insert= @old_concurrent_insert;
SET GLOBAL log_output = @old_log_output;
@@ -94,6 +94,7 @@ show status like "rows%";
Variable_name Value
Rows_read 6
Rows_sent 1
+Rows_tmp_read 0
show status like "ha%";
Variable_name Value
Handler_commit 19

@@ -109,6 +110,8 @@ Handler_read_rnd_next 5
Handler_rollback 2
Handler_savepoint 0
Handler_savepoint_rollback 0
+Handler_tmp_update 0
+Handler_tmp_write 0
Handler_update 5
Handler_write 7
select variable_value - @global_read_key as "handler_read_key" from information_schema.global_status where variable_name="handler_read_key";
@@ -133,7 +136,7 @@ CONCURRENT_CONNECTIONS 0
ROWS_READ 6
ROWS_SENT 2
ROWS_DELETED 1
-ROWS_INSERTED 8
+ROWS_INSERTED 7
ROWS_UPDATED 5
SELECT_COMMANDS 3
UPDATE_COMMANDS 11

@@ -150,7 +153,7 @@ CONCURRENT_CONNECTIONS 0
ROWS_READ 6
ROWS_SENT 2
ROWS_DELETED 1
-ROWS_INSERTED 8
+ROWS_INSERTED 7
ROWS_UPDATED 5
SELECT_COMMANDS 3
UPDATE_COMMANDS 11
@@ -22,6 +22,17 @@ CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
drop table t1;
+create table t1 (a int primary key auto_increment) engine=aria;
+insert into t1 values (1);
+update t1 set a=0 where a=1;
+check table t1;
+Table Op Msg_type Msg_text
+test.t1 check warning Found row where the auto_increment column has the value 0
+test.t1 check status OK
+select * from t1;
+a
+0
+drop table t1;
create table t1 (a tinyint not null auto_increment, b blob not null, primary key (a));
check table t1;
Table Op Msg_type Msg_text

@@ -41,6 +41,16 @@ INSERT INTO t1 VALUES ('WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW
CHECK TABLE t1;
drop table t1;

+#
+# Test auto_increment warning
+#
+create table t1 (a int primary key auto_increment) engine=aria;
+insert into t1 values (1);
+update t1 set a=0 where a=1;
+check table t1;
+select * from t1;
+drop table t1;

#
# Test problem with rows that are 65517-65520 bytes long
#
@@ -353,6 +353,23 @@ DROP FUNCTION f1;

# End of 5.1 tests

+#
+# Test of internal temporary table status variables
+#

+flush status;
+create table t1 (a int not null auto_increment primary key, g int, b blob);
+insert into t1 (g,b) values (1,'a'), (2, 'b'), (3, 'b'), (1, 'c');
+select * from t1;
+select b, count(*) from t1 group by b;
+select g, count(*) from t1 group by g;
+show status like 'Row%';
+show status like 'Handler%';
+show status like '%tmp%';
+drop table t1;

+# End of 5.3 tests

# Restore global concurrent_insert value. Keep in the end of the test file.
--connection default
set @@global.concurrent_insert= @old_concurrent_insert;
@@ -80,7 +80,10 @@ CONFIGURE_FILE(mysqldumpslow.sh
CONFIGURE_FILE(mysqlhotcopy.sh
${CMAKE_BINARY_DIR}/scripts/mysqlhotcopy.pl ESCAPE_QUOTES @ONLY)

-FOREACH(f mysqldumpslow.pl mysqlhotcopy.pl mysql_config.pl
+CONFIGURE_FILE(mytop.sh
+${CMAKE_BINARY_DIR}/scripts/mytop.pl ESCAPE_QUOTES @ONLY)

+FOREACH(f mysqldumpslow.pl mysqlhotcopy.pl mytop.pl mysql_config.pl
mysql_convert_table_format.pl mysql_install_db.pl
mysql_secure_installation.pl mysqld_multi.pl)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${f}

@@ -90,4 +93,4 @@ ENDFOREACH()
INSTALL(FILES fill_help_tables.sql mysql_system_tables.sql
mysql_system_tables_data.sql mysql_system_tables_fix.sql mysql_test_data_timezone.sql
DESTINATION share COMPONENT Server)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/mysql_fix_privilege_tables.sql DESTINATION share COMPONENT Server)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/mysql_fix_privilege_tables.sql DESTINATION share COMPONENT Server)
@@ -33,6 +33,7 @@ bin_SCRIPTS = @server_scripts@ \
mysql_convert_table_format \
mysql_find_rows \
mysqlhotcopy \
+mytop \
mysqldumpslow \
mysqld_multi

@@ -58,6 +59,7 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \
mysql_convert_table_format.sh \
mysql_find_rows.sh \
mysqlhotcopy.sh \
+mytop.sh \
mysqldumpslow.sh \
mysqld_multi.sh \
mysqld_safe.sh \

@@ -91,6 +93,7 @@ CLEANFILES = @server_scripts@ \
mysql_convert_table_format \
mysql_find_rows \
mysqlhotcopy \
+mytop \
mysqldumpslow \
mysqld_multi \
convert-debug-for-diff \
@@ -459,6 +459,7 @@ copyfileto $BASE/bin $BIN_FILES

$CP netware/*.pl $BASE/scripts
$CP scripts/mysqlhotcopy $BASE/scripts/mysqlhotcopy.pl
+$CP scripts/mytop $BASE/scripts/mytop.pl

copyfileto $BASE/lib \
libmysql/.libs/libmysqlclient.a \

@@ -373,6 +373,7 @@ mysql_secure_installation.pl \
mysqld_multi.pl \
mysqldumpslow.pl \
mysqlhotcopy.pl \
+mytop.pl \
"

mkdir -p $DESTDIR/scripts
scripts/mytop.sh (new executable file, 2340 lines): file diff suppressed because it is too large.
@@ -1301,7 +1301,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,

DBUG_RETURN(0);
error_external_lock:
-VOID(file->close());
+VOID(file->ha_close());
error_open:
VOID(file->ha_delete_table(part_name));
error_create:

@@ -1347,7 +1347,7 @@ void ha_partition::cleanup_new_partition(uint part_count)
while ((part_count > 0) && (*file))
{
(*file)->ha_external_lock(thd, F_UNLCK);
-(*file)->close();
+(*file)->ha_close();

/* Leave the (*file)->ha_delete_table(part_name) to the ddl-log */

@@ -2842,7 +2842,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
err_handler:
DEBUG_SYNC(ha_thd(), "partition_open_error");
while (file-- != m_file)
-(*file)->close();
+(*file)->ha_close();
err_alloc:
bitmap_free(&m_bulk_insert_started);
if (!m_is_clone_of)

@@ -2928,7 +2928,7 @@ int ha_partition::close(void)
repeat:
do
{
-(*file)->close();
+(*file)->ha_close();
} while (*(++file));

if (first && m_added_file && m_added_file[0])
@@ -2168,7 +2168,7 @@ THD *handler::ha_thd(void) const
Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
*/
int handler::ha_open(TABLE *table_arg, const char *name, int mode,
-int test_if_locked)
+uint test_if_locked)
{
int error;
DBUG_ENTER("handler::ha_open");

@@ -2212,11 +2212,22 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
dup_ref=ref+ALIGN_SIZE(ref_length);
cached_table_flags= table_flags();
}
-rows_read= rows_changed= 0;
-memset(index_rows_read, 0, sizeof(index_rows_read));
+reset_statistics();
+internal_tmp_table= test(test_if_locked & HA_OPEN_INTERNAL_TABLE);
DBUG_RETURN(error);
}

+int handler::ha_close()
+{
+DBUG_ENTER("ha_close");
+/*
+Increment global statistics for temporary tables.
+In_use is 0 for tables that was closed from the table cache.
+*/
+if (table->in_use)
+status_var_add(table->in_use->status_var.rows_tmp_read, rows_tmp_read);
+DBUG_RETURN(close());
+}

/* Initialize handler for random reading, with error handling */
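Note: the hunk above introduces handler::ha_close() as a public wrapper around the engine-specific close(). The following is a minimal sketch of the bookkeeping idea only, using simplified stand-in types rather than the server's real THD/handler classes:

    // Illustrative only: fold the per-handler temporary-table read counter into the
    // session statistics just before the engine-specific close() runs.
    #include <cstdint>

    struct session_stats { uint64_t rows_tmp_read= 0; };

    struct toy_handler {
      session_stats *in_use= nullptr;   // null when the table is closed from the table cache
      uint64_t rows_tmp_read= 0;        // accumulated by the read wrappers for tmp tables
      int close() { return 0; }         // stand-in for the engine's own close()
      int ha_close()
      {
        if (in_use)
          in_use->rows_tmp_read+= rows_tmp_read;
        return close();
      }
    };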
@@ -3238,7 +3249,7 @@ int handler::rename_table(const char * from, const char * to)

void handler::drop_table(const char *name)
{
-close();
+ha_close();
delete_table(name);
}

@@ -3757,6 +3768,7 @@ void handler::update_global_table_stats()
TABLE_STATS * table_stats;

status_var_add(table->in_use->status_var.rows_read, rows_read);
+DBUG_ASSERT(rows_tmp_read == 0);

if (!table->in_use->userstat_running)
{
@@ -1,4 +1,5 @@
/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+Copyright 2009-2011 Monty Program Ab

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

@@ -1599,6 +1600,7 @@ public:
KEY_PART_INFO *range_key_part;
int key_compare_result_on_equal;
bool eq_range;
+bool internal_tmp_table; /* If internal tmp table */

/*
TRUE <=> the engine guarantees that returned records are within the range

@@ -1643,6 +1645,7 @@ public:
*/
/* Statistics variables */
ulonglong rows_read;
+ulonglong rows_tmp_read;
ulonglong rows_changed;
/* One bigger than needed to avoid to test if key == MAX_KEY */
ulonglong index_rows_read[MAX_KEY+1];

@@ -1685,7 +1688,7 @@ public:
}
/* ha_ methods: pubilc wrappers for private virtual API */

-int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
+int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked);
int ha_index_init(uint idx, bool sorted)
{
int result;

@@ -1809,7 +1812,7 @@ public:
uint get_dup_key(int error);
void reset_statistics()
{
-rows_read= rows_changed= 0;
+rows_read= rows_changed= rows_tmp_read= 0;
bzero(index_rows_read, sizeof(index_rows_read));
}
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)

@@ -1894,7 +1897,7 @@ public:
*/
uint get_index(void) const
{ return inited == INDEX ? active_index : MAX_KEY; }
-virtual int close(void)=0;
+int ha_close(void);

/**
@retval 0 Bulk update used by handler
@@ -1970,10 +1973,18 @@ protected:
virtual int index_last(uchar * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_next_same(uchar *buf, const uchar *key, uint keylen);
+virtual int close(void)=0;
+inline void update_rows_read()
+{
+if (likely(!internal_tmp_table))
+rows_read++;
+else
+rows_tmp_read++;
+}
inline void update_index_statistics()
{
index_rows_read[active_index]++;
-rows_read++;
+update_rows_read();
}
public:

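Note: update_rows_read() above becomes the single dispatch point for read accounting. A self-contained sketch of the rule, with simplified types (the real handler class carries many more members):

    // Illustrative only: internal_tmp_table is latched from HA_OPEN_INTERNAL_TABLE at
    // open time; each successful row fetch then bumps exactly one of the two counters.
    #include <cstdint>

    struct row_counters {
      bool internal_tmp_table= false;   // set once, when the table is opened
      uint64_t rows_read= 0;            // rows read from user tables
      uint64_t rows_tmp_read= 0;        // rows read from internal temporary tables

      void update_rows_read()
      {
        if (!internal_tmp_table)
          rows_read++;
        else
          rows_tmp_read++;
      }
    };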
@@ -2604,6 +2615,7 @@ public:
virtual handlerton *partition_ht() const
{ return ht; }
inline int ha_write_tmp_row(uchar *buf);
inline int ha_update_tmp_row(const uchar * old_data, uchar * new_data);
};

#include "multi_range_read.h"
@@ -2587,7 +2587,7 @@ bool Item_sum_count_distinct::add()
*/
return tree->unique_add(table->record[0] + table->s->null_bytes);
}
-if ((error= table->file->ha_write_row(table->record[0])) &&
+if ((error= table->file->ha_write_tmp_row(table->record[0])) &&
table->file->is_fatal_error(error, HA_CHECK_DUP))
return TRUE;
return FALSE;
@@ -1075,7 +1075,7 @@ void DsMrr_impl::close_second_handler()
{
secondary_file->ha_index_or_rnd_end();
secondary_file->ha_external_lock(current_thd, F_UNLCK);
-secondary_file->close();
+secondary_file->ha_close();
delete secondary_file;
secondary_file= NULL;
}
@@ -3274,6 +3274,7 @@ const char *load_default_groups[]= {
#endif
"mysqld", "server", MYSQL_BASE_VERSION,
+"mariadb", MARIADB_BASE_VERSION,
"client-server",
0, 0};

#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
@@ -8265,6 +8266,8 @@ SHOW_VAR status_vars[]= {
{"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS},
{"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
{"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
+{"Handler_tmp_update", (char*) offsetof(STATUS_VAR, ha_tmp_update_count), SHOW_LONG_STATUS},
+{"Handler_tmp_write", (char*) offsetof(STATUS_VAR, ha_tmp_write_count), SHOW_LONG_STATUS},
{"Key", (char*) &show_default_keycache, SHOW_FUNC},
{"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},

@@ -8279,6 +8282,7 @@ SHOW_VAR status_vars[]= {
{"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC},
{"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS},
{"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS},
+{"Rows_tmp_read", (char*) offsetof(STATUS_VAR, rows_tmp_read), SHOW_LONGLONG_STATUS},
#ifdef HAVE_QUERY_CACHE
{"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
{"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_NOFLUSH},
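Note: the new entries follow the existing offsetof(STATUS_VAR, ...) convention. A rough sketch of why that works, with simplified names and without the SHOW_LONG/SHOW_LONGLONG type handling or locking of the real implementation:

    // Illustrative only: SHOW STATUS can read any session's copy of a counter such as
    // rows_tmp_read purely from its byte offset inside the per-session status struct.
    #include <cstddef>
    #include <cstdint>

    struct status_var { uint64_t rows_sent, rows_read, rows_tmp_read; };
    struct show_var   { const char *name; size_t offset; };

    static const show_var toy_status_vars[]= {
      { "Rows_read",     offsetof(status_var, rows_read) },
      { "Rows_tmp_read", offsetof(status_var, rows_tmp_read) },
    };

    static uint64_t read_counter(const status_var &vars, const show_var &v)
    {
      return *reinterpret_cast<const uint64_t *>(
                 reinterpret_cast<const char *>(&vars) + v.offset);
    }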
@@ -1804,7 +1804,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file,
free_file));
file->ha_external_lock(current_thd, F_UNLCK);
-file->close();
+file->ha_close();
delete file;
}
}

@@ -1999,7 +1999,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
if (init() || reset())
{
file->ha_external_lock(thd, F_UNLCK);
-file->close();
+file->ha_close();
goto failure;
}
free_file= TRUE;
@@ -675,7 +675,7 @@ void close_handle_and_leave_table_as_lock(TABLE *table)
*/
if (table->child_l || table->parent)
detach_merge_children(table, FALSE);
-table->file->close();
+table->file->ha_close();
table->db_stat= 0; // Mark file closed
release_table_share(table->s, RELEASE_NORMAL);
table->s= share;

@@ -3708,7 +3708,7 @@ TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name)
if (table->db_stat)
{
table->db_stat= 0;
-table->file->close();
+table->file->ha_close();
}
}
else
@@ -1201,6 +1201,7 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
to_var->bytes_sent+= from_var->bytes_sent;
to_var->rows_read+= from_var->rows_read;
to_var->rows_sent+= from_var->rows_sent;
+to_var->rows_tmp_read+= from_var->rows_tmp_read;
to_var->binlog_bytes_written+= from_var->binlog_bytes_written;
to_var->cpu_time+= from_var->cpu_time;
to_var->busy_time+= from_var->busy_time;

@@ -1236,6 +1237,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
to_var->bytes_sent+= from_var->bytes_sent - dec_var->bytes_sent;
to_var->rows_read+= from_var->rows_read - dec_var->rows_read;
to_var->rows_sent+= from_var->rows_sent - dec_var->rows_sent;
+to_var->rows_tmp_read+= from_var->rows_tmp_read - dec_var->rows_tmp_read;
to_var->binlog_bytes_written+= from_var->binlog_bytes_written -
dec_var->binlog_bytes_written;
to_var->cpu_time+= from_var->cpu_time - dec_var->cpu_time;
@@ -530,6 +530,9 @@ typedef struct system_status_var
ulong ha_rollback_count;
ulong ha_update_count;
ulong ha_write_count;
+/* The following are for internal temporary tables */
+ulong ha_tmp_update_count;
+ulong ha_tmp_write_count;
ulong ha_prepare_count;
ulong ha_discover_count;
ulong ha_savepoint_count;

@@ -582,6 +585,7 @@ typedef struct system_status_var
ulonglong bytes_sent;
ulonglong rows_read;
ulonglong rows_sent;
+ulonglong rows_tmp_read;
ulonglong binlog_bytes_written;
double last_query_cost;
double cpu_time, busy_time;
@@ -3610,7 +3614,7 @@ inline int handler::ha_index_read_idx_map(uchar * buf, uint index,
int error= index_read_idx_map(buf, index, key, keypart_map, find_flag);
if (!error)
{
-rows_read++;
+update_rows_read();
index_rows_read[index]++;
}
table->status=error ? STATUS_NOT_FOUND: 0;

@@ -3677,7 +3681,8 @@ inline int handler::ha_ft_read(uchar *buf)
{
int error= ft_read(buf);
if (!error)
-rows_read++;
+update_rows_read();

table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}

@@ -3687,7 +3692,7 @@ inline int handler::ha_rnd_next(uchar *buf)
increment_statistics(&SSV::ha_read_rnd_next_count);
int error= rnd_next(buf);
if (!error)
-rows_read++;
+update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}

@@ -3697,7 +3702,7 @@ inline int handler::ha_rnd_pos(uchar *buf, uchar *pos)
increment_statistics(&SSV::ha_read_rnd_count);
int error= rnd_pos(buf, pos);
if (!error)
-rows_read++;
+update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}

@@ -3706,7 +3711,7 @@ inline int handler::ha_rnd_pos_by_record(uchar *buf)
{
int error= rnd_pos_by_record(buf);
if (!error)
-rows_read++;
+update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
@@ -3715,15 +3720,21 @@ inline int handler::ha_read_first_row(uchar *buf, uint primary_key)
{
int error= read_first_row(buf, primary_key);
if (!error)
-rows_read++;
+update_rows_read();
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}

+inline int handler::ha_write_tmp_row(uchar *buf)
+{
+increment_statistics(&SSV::ha_write_count);
+increment_statistics(&SSV::ha_tmp_write_count);
+return write_row(buf);
+}

+inline int handler::ha_update_tmp_row(const uchar *old_data, uchar *new_data)
+{
+increment_statistics(&SSV::ha_tmp_update_count);
+return update_row(old_data, new_data);
+}

#endif /* MYSQL_SERVER */
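Note: the accounting in the two inline wrappers above is asymmetric: a temporary-table write is counted as Handler_write and additionally as Handler_tmp_write, while a temporary-table update is reported only under Handler_tmp_update. A condensed sketch of that bookkeeping, with simplified counter names chosen for illustration:

    // Illustrative only: mirrors the increments visible in ha_write_tmp_row() and
    // ha_update_tmp_row() above.
    struct tmp_table_counters {
      unsigned long ha_write_count= 0;
      unsigned long ha_tmp_write_count= 0;
      unsigned long ha_tmp_update_count= 0;

      void count_tmp_write()  { ha_write_count++; ha_tmp_write_count++; }
      void count_tmp_update() { ha_tmp_update_count++; }
    };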
@@ -690,6 +690,7 @@ static void update_global_user_stats_with_user(THD *thd,
user_stats->binlog_bytes_written+=
(thd->status_var.binlog_bytes_written -
thd->org_status_var.binlog_bytes_written);
+/* We are not counting rows in internal temporary tables here ! */
user_stats->rows_read+= (thd->status_var.rows_read -
thd->org_status_var.rows_read);
user_stats->rows_sent+= (thd->status_var.rows_sent -

@@ -249,7 +249,7 @@ my_bool Expression_cache_tmptable::put_value(Item *value)
if (table_thd->is_error())
goto err;;

-if ((error= cache_table->file->ha_write_row(cache_table->record[0])))
+if ((error= cache_table->file->ha_write_tmp_row(cache_table->record[0])))
{
/* create_myisam_from_heap will generate error if needed */
if (cache_table->file->is_fatal_error(error, HA_CHECK_DUP) &&
@@ -14325,7 +14325,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,

/* remove heap table and change to use myisam table */
(void) table->file->ha_rnd_end();
-(void) table->file->close(); // This deletes the table !
+(void) table->file->ha_close(); // This deletes the table !
delete table->file;
table->file=0;
plugin_unlock(0, table->s->db_plugin);

@@ -14346,7 +14346,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
table->file->print_error(write_err, MYF(0));
err_killed:
(void) table->file->ha_rnd_end();
-(void) new_table.file->close();
+(void) new_table.file->ha_close();
err1:
new_table.file->ha_delete_table(new_table.s->table_name.str);
err2:
@@ -16186,8 +16186,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{ /* Update old record */
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
-if ((error= table->file->ha_update_row(table->record[1],
-table->record[0])))
+if ((error= table->file->ha_update_tmp_row(table->record[1],
+table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */

@@ -16270,8 +16270,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
-if ((error= table->file->ha_update_row(table->record[1],
-table->record[0])))
+if ((error= table->file->ha_update_tmp_row(table->record[1],
+table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -2814,7 +2814,7 @@ typedef struct st_lookup_field_values
bool schema_table_store_record(THD *thd, TABLE *table)
{
int error;
-if ((error= table->file->ha_write_row(table->record[0])))
+if ((error= table->file->ha_write_tmp_row(table->record[0])))
{
TMP_TABLE_PARAM *param= table->pos_in_table_list->schema_table_param;
if (create_internal_tmp_table_from_heap(thd, table, param->start_recinfo,
@@ -4681,6 +4681,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
int result_code;
bool need_repair_or_alter= 0;
DBUG_ENTER("mysql_admin_table");
+DBUG_PRINT("enter", ("extra_open_options: %u", extra_open_options));

if (end_active_trans(thd))
DBUG_RETURN(1);

@@ -4705,9 +4706,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
bool fatal_error=0;

DBUG_PRINT("admin", ("table: '%s'.'%s'", table->db, table->table_name));
-DBUG_PRINT("admin", ("extra_open_options: %u", extra_open_options));
strxmov(table_name, db, ".", table->table_name, NullS);
-thd->open_options|= extra_open_options;
table->lock_type= lock_type;
/* open only one table from local list of command */
{

@@ -4734,12 +4733,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
lex->sql_command == SQLCOM_ANALYZE ||
lex->sql_command == SQLCOM_OPTIMIZE)
thd->prepare_derived_at_open= TRUE;
+thd->open_options|= extra_open_options;
open_and_lock_tables(thd, table);
+thd->open_options&= ~extra_open_options;
thd->prepare_derived_at_open= FALSE;
thd->no_warnings_for_error= 0;
table->next_global= save_next_global;
table->next_local= save_next_local;
-thd->open_options&= ~extra_open_options;
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (table->table)
{

@@ -4923,7 +4923,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
/* We use extra_open_options to be able to open crashed tables */
thd->open_options|= extra_open_options;
result_code= admin_recreate_table(thd, table);
-thd->open_options= ~extra_open_options;
+thd->open_options&= ~extra_open_options;
goto send_result;
}
if (check_old_types || check_for_upgrade)
@@ -60,7 +60,7 @@ int select_union::send_data(List<Item> &values)
if (thd->is_error())
return 1;

-if ((write_err= table->file->ha_write_row(table->record[0])))
+if ((write_err= table->file->ha_write_tmp_row(table->record[0])))
{
if (write_err == HA_ERR_FOUND_DUPP_KEY)
{

@@ -1868,7 +1868,7 @@ int multi_update::send_data(List<Item> &not_used_values)
*values_for_table[offset], TRUE, FALSE);

/* Write row, ignoring duplicated updates to a row */
-error= tmp_table->file->ha_write_row(tmp_table->record[0]);
+error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
{
if (error &&
@@ -2460,7 +2460,7 @@ int closefrm(register TABLE *table, bool free_share)
{
if (table->s->deleting)
table->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
-error=table->file->close();
+error=table->file->ha_close();
}
table->alias.free();
if (table->expr_arena)
@@ -226,7 +226,6 @@ void ha_heap::update_key_stats()
int ha_heap::write_row(uchar * buf)
{
int res;
-ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])

@@ -250,7 +249,6 @@ int ha_heap::write_row(uchar * buf)
int ha_heap::update_row(const uchar * old_data, uchar * new_data)
{
int res;
-ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
res= heap_update(file,old_data,new_data);

@@ -269,7 +267,6 @@ int ha_heap::update_row(const uchar * old_data, uchar * new_data)
int ha_heap::delete_row(const uchar * buf)
{
int res;
-ha_statistic_increment(&SSV::ha_delete_count);
res= heap_delete(file,buf);
if (!res && table->s->tmp_table == NO_TMP_TABLE &&
++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)

@@ -288,7 +285,6 @@ int ha_heap::index_read_map(uchar *buf, const uchar *key,
enum ha_rkey_function find_flag)
{
DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_key_count);
int error = heap_rkey(file,buf,active_index, key, keypart_map, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;

@@ -298,7 +294,6 @@ int ha_heap::index_read_last_map(uchar *buf, const uchar *key,
key_part_map keypart_map)
{
DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_key_count);
int error= heap_rkey(file, buf, active_index, key, keypart_map,
HA_READ_PREFIX_LAST);
table->status= error ? STATUS_NOT_FOUND : 0;

@@ -309,7 +304,6 @@ int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
-ha_statistic_increment(&SSV::ha_read_key_count);
int error = heap_rkey(file, buf, index, key, keypart_map, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;

@@ -318,7 +312,6 @@ int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
int ha_heap::index_next(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_next_count);
int error=heap_rnext(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;

@@ -327,7 +320,6 @@ int ha_heap::index_next(uchar * buf)
int ha_heap::index_prev(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_prev_count);
int error=heap_rprev(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;

@@ -336,7 +328,6 @@ int ha_heap::index_prev(uchar * buf)
int ha_heap::index_first(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_first_count);
int error=heap_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;

@@ -345,7 +336,6 @@ int ha_heap::index_first(uchar * buf)
int ha_heap::index_last(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
-ha_statistic_increment(&SSV::ha_read_last_count);
int error=heap_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;

@@ -358,7 +348,6 @@ int ha_heap::rnd_init(bool scan)

int ha_heap::rnd_next(uchar *buf)
{
-ha_statistic_increment(&SSV::ha_read_rnd_next_count);
int error=heap_scan(file, buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;

@@ -368,7 +357,6 @@ int ha_heap::rnd_pos(uchar * buf, uchar *pos)
{
int error;
HEAP_PTR heap_position;
-ha_statistic_increment(&SSV::ha_read_rnd_count);
memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR));
error=heap_rrnd(file, buf, heap_position);
table->status=error ? STATUS_NOT_FOUND: 0;
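Note: the ha_statistic_increment() calls removed from ha_heap duplicate counting that the generic handler::ha_* wrappers already perform (the increment_statistics() calls visible as context lines in the handler hunks above). A condensed sketch of the wrapper-owns-the-counter pattern, using invented names for illustration:

    // Illustrative only: the generic wrapper bumps the statistic, the engine method
    // does the actual work, so engine code no longer needs its own increments.
    struct toy_engine {
      unsigned long ha_read_rnd_next_count= 0;
      int rnd_next_impl() { return 0; }   // engine-specific scan step
      int ha_rnd_next()                   // generic wrapper
      {
        ha_read_rnd_next_count++;         // counted once, here
        return rnd_next_impl();
      }
    };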
@@ -582,7 +570,7 @@ int ha_heap::delete_table(const char *name)
void ha_heap::drop_table(const char *name)
{
file->s->delete_on_close= 1;
-close();
+ha_close();
}


@@ -2524,7 +2524,7 @@ int ha_maria::delete_table(const char *name)
void ha_maria::drop_table(const char *name)
{
DBUG_ASSERT(file->s->temporary);
-(void) close();
+(void) ha_close();
(void) maria_delete_table_files(name, 0);
}

@@ -1016,6 +1016,7 @@ fi
%attr(755, root, root) %{_bindir}/mysqld_safe
%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlhotcopy
+%attr(755, root, root) %{_bindir}/mytop
%attr(755, root, root) %{_bindir}/mysqltest
%attr(755, root, root) %{_bindir}/perror
%attr(755, root, root) %{_bindir}/replace