Merge bk-internal.mysql.com:/home/bk/mysql-5.1-new
into mysql.com:/home/my/mysql-5.1

BitKeeper/etc/ignore: auto-union
include/heap.h: Auto merged
include/my_base.h: Auto merged
include/mysql_com.h: Auto merged
mysql-test/mysql-test-run.pl: Auto merged
mysql-test/mysql-test-run.sh: Auto merged
mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test: Auto merged
mysql-test/r/binlog_stm_mix_innodb_myisam.result: Auto merged
mysql-test/r/func_time.result: Auto merged
mysql-test/r/ndb_index_unique.result: Auto merged
mysql-test/t/func_time.test: Auto merged
mysql-test/t/view_grant.test: Auto merged
sql/field.cc: Auto merged
sql/field.h: Auto merged
sql/ha_berkeley.cc: Auto merged
sql/ha_berkeley.h: Auto merged
mysql-test/r/view_grant.result: Auto merged
sql/ha_federated.cc: Auto merged
sql/ha_federated.h: Auto merged
sql/ha_heap.h: Auto merged
sql/ha_innodb.cc: Auto merged
sql/ha_innodb.h: Auto merged
sql/ha_myisam.h: Auto merged
sql/ha_myisammrg.cc: Auto merged
sql/ha_ndbcluster.h: Auto merged
sql/ha_partition.h: Auto merged
sql/item.cc: Auto merged
sql/item_cmpfunc.h: Auto merged
sql/item_func.cc: Auto merged
sql/item_func.h: Auto merged
sql/item_strfunc.h: Auto merged
sql/item_subselect.cc: Auto merged
sql/item_subselect.h: Auto merged
sql/log.cc: Auto merged
sql/mysqld.cc: Auto merged
sql/set_var.cc: Auto merged
sql/sp.cc: Auto merged
sql/sp_head.cc: Auto merged
sql/spatial.cc: Auto merged
sql/sql_acl.cc: Auto merged
sql/sql_base.cc: Auto merged
sql/sql_class.cc: Auto merged
sql/sql_class.h: Auto merged
sql/sql_bitmap.h: Auto merged
sql/sql_parse.cc: Auto merged
sql/sql_partition.cc: Auto merged
sql/sql_plugin.cc: Auto merged
sql/sql_prepare.cc: Auto merged
sql/share/errmsg.txt: Auto merged
sql/sql_handler.cc: Auto merged
sql/sql_view.cc: Auto merged
sql/table.h: Auto merged
storage/archive/ha_archive.cc: Auto merged
storage/archive/ha_archive.h: Auto merged
storage/example/ha_example.cc: Auto merged
storage/myisam/ft_boolean_search.c: Auto merged
unittest/mysys/base64-t.c: Auto merged
mysql-test/r/innodb_mysql.result: manual merge
mysql-test/t/innodb_mysql.test: manual merge
mysql-test/valgrind.supp: manual merge
sql/event.cc: manual merge
sql/ha_heap.cc: manual merge
sql/ha_myisam.cc: manual merge
sql/ha_ndbcluster.cc: manual merge
sql/ha_ndbcluster_binlog.cc: manual merge
sql/ha_partition.cc: manual merge
sql/handler.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/handler.h: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/item.h: automatic merge
sql/item_cmpfunc.cc: automatic merge
sql/log_event.cc: manual merge; trivial cleanup
sql/mysql_priv.h: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/opt_range.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/sql_delete.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/sql_insert.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/sql_load.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/sql_select.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/sql_show.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/sql_table.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/sql_update.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
sql/table.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
storage/blackhole/ha_blackhole.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
storage/csv/ha_tina.cc: manual merge; renamed print_key_dupp_error to print_key_dup_error
mysql-test/valgrind.supp.orig: Manual merge
This commit is contained in commit 555eb848f2.
137 changed files with 4249 additions and 2341 deletions.
Changed files (tree as shown on the page, truncated at sql/sql_help.cc):

.bzrignore
include
mysql-test
  extra/binlog_tests
  install_test_db.sh
  mysql-test-run.pl
  mysql-test-run.sh
  r
    binlog_row_mix_innodb_myisam.result
    binlog_stm_mix_innodb_myisam.result
    create.result
    federated.result
    func_gconcat.result
    func_time.result
    innodb_mysql.result
    insert.result
    loaddata.result
    multi_update.result
    ndb_index_unique.result
    ndb_replace.result
    rpl_ddl.result
    view_grant.result
  t
    create.test
    federated.test
    func_gconcat.test
    func_time.test
    innodb_mysql.test
    insert.test
    loaddata.test
    multi_update.test
    view_grant.test
  valgrind.supp
  valgrind.supp.orig
mysys
sql
  event.cc
  field.cc
  field.h
  filesort.cc
  ha_berkeley.cc
  ha_berkeley.h
  ha_federated.cc
  ha_federated.h
  ha_heap.cc
  ha_heap.h
  ha_innodb.cc
  ha_innodb.h
  ha_myisam.cc
  ha_myisam.h
  ha_myisammrg.cc
  ha_myisammrg.h
  ha_ndbcluster.cc
  ha_ndbcluster.h
  ha_ndbcluster_binlog.cc
  ha_partition.cc
  ha_partition.h
  handler.cc
  handler.h
  item.cc
  item.h
  item_cmpfunc.cc
  item_cmpfunc.h
  item_func.cc
  item_func.h
  item_row.cc
  item_row.h
  item_strfunc.h
  item_subselect.cc
  item_subselect.h
  item_sum.cc
  item_sum.h
  key.cc
  log.cc
  log_event.cc
  mysql_priv.h
  mysqld.cc
  opt_range.cc
  opt_range.h
  opt_sum.cc
  protocol.cc
  records.cc
  set_var.cc
  share
  sp.cc
  sp_head.cc
  spatial.cc
  sql_acl.cc
  sql_base.cc
  sql_bitmap.h
  sql_class.cc
  sql_class.h
  sql_delete.cc
  sql_do.cc
  sql_handler.cc
  sql_help.cc
.bzrignore
@@ -1,3 +1,4 @@
+*-t
 *.a
 *.bb
 *.bbg
@@ -458,6 +459,7 @@ libmysqld/emb_qcache.cpp
 libmysqld/errmsg.c
 libmysqld/event.cc
 libmysqld/event_executor.cc
+libmysqld/event_scheduler.cc
 libmysqld/event_timed.cc
 libmysqld/examples/client_test.c
 libmysqld/examples/client_test.cc
@@ -788,6 +790,8 @@ mysys/main.cc
 mysys/my_new.cpp
 mysys/raid.cpp
 mysys/ste5KbMa
+mysys/test_atomic
+mysys/test_bitmap
 mysys/test_charset
 mysys/test_dir
 mysys/test_gethwaddr
@@ -1770,6 +1774,3 @@ vio/viotest-sslconnect.cpp
 vio/viotest.cpp
 zlib/*.ds?
 zlib/*.vcproj
-libmysqld/event_scheduler.cc
-mysys/test_atomic
-*-t
include/base64.h
@@ -21,8 +21,6 @@
 extern "C" {
 #endif

-#include <my_global.h>
-
 /*
   Calculate how much memory needed for dst of base64_encode()
 */
include/heap.h
@@ -209,6 +209,7 @@ extern int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
 extern int heap_delete_table(const char *name);
 extern void heap_drop_table(HP_INFO *info);
 extern int heap_extra(HP_INFO *info,enum ha_extra_function function);
+extern int heap_reset(HP_INFO *info);
 extern int heap_rename(const char *old_name,const char *new_name);
 extern int heap_panic(enum ha_panic_function flag);
 extern int heap_rsame(HP_INFO *info,byte *record,int inx);
include/my_base.h
@@ -102,7 +102,7 @@ enum ha_key_alg {
 enum ha_extra_function {
   HA_EXTRA_NORMAL=0,        /* Optimize for space (def) */
   HA_EXTRA_QUICK=1,         /* Optimize for speed */
-  HA_EXTRA_RESET=2,         /* Reset database to after open */
+  HA_EXTRA_NOT_USED=2,
   HA_EXTRA_CACHE=3,         /* Cache record in HA_rrnd() */
   HA_EXTRA_NO_CACHE=4,      /* End caching of records (def) */
   HA_EXTRA_NO_READCHECK=5,  /* No readcheck on update */
@@ -128,15 +128,6 @@ enum ha_extra_function {
   HA_EXTRA_RESET_STATE,     /* Reset positions */
   HA_EXTRA_IGNORE_DUP_KEY,  /* Dup keys don't rollback everything*/
   HA_EXTRA_NO_IGNORE_DUP_KEY,
-  /*
-    Instructs InnoDB to retrieve all columns (except in key read), not just
-    those where field->query_id is the same as the current query id
-  */
-  HA_EXTRA_RETRIEVE_ALL_COLS,
-  /*
-    Instructs InnoDB to retrieve at least all the primary key columns
-  */
-  HA_EXTRA_RETRIEVE_PRIMARY_KEY,
   HA_EXTRA_PREPARE_FOR_DELETE,
   HA_EXTRA_PREPARE_FOR_UPDATE,  /* Remove read cache if problems */
   HA_EXTRA_PRELOAD_BUFFER_SIZE, /* Set buffer size for preloading */
include/my_bitmap.h
@@ -17,19 +17,18 @@
 #ifndef _my_bitmap_h_
 #define _my_bitmap_h_

-#ifdef THREAD
-#include <my_pthread.h>
-#endif
-
 #define MY_BIT_NONE (~(uint) 0)

+#include <m_string.h>
+
+typedef uint32 my_bitmap_map;
+
 typedef struct st_bitmap
 {
-  uint32 *bitmap;
+  my_bitmap_map *bitmap;
   uint n_bits; /* number of bits occupied by the above */
-  uint32 last_word_mask;
-  uint32 *last_word_ptr;
+  my_bitmap_map last_word_mask;
+  my_bitmap_map *last_word_ptr;
   /*
     mutex will be acquired for the duration of each bitmap operation if
     thread_safe flag in bitmap_init was set. Otherwise, we optimize by not
@@ -43,12 +42,16 @@ typedef struct st_bitmap
 #ifdef __cplusplus
 extern "C" {
 #endif
-extern my_bool bitmap_init(MY_BITMAP *map, uint32 *buf, uint n_bits, my_bool thread_safe);
+extern my_bool bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits,
+                           my_bool thread_safe);
 extern my_bool bitmap_is_clear_all(const MY_BITMAP *map);
 extern my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size);
 extern my_bool bitmap_is_set_all(const MY_BITMAP *map);
 extern my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2);
+extern my_bool bitmap_is_overlapping(const MY_BITMAP *map1,
+                                     const MY_BITMAP *map2);
 extern my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit);
 extern my_bool bitmap_test_and_clear(MY_BITMAP *map, uint bitmap_bit);
+extern my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit);
 extern uint bitmap_set_next(MY_BITMAP *map);
 extern uint bitmap_get_first(const MY_BITMAP *map);
@@ -62,6 +65,7 @@ extern void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2);
 extern void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2);
 extern void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2);
 extern void bitmap_invert(MY_BITMAP *map);
 extern void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2);
+
 extern uint bitmap_lock_set_next(MY_BITMAP *map);
 extern void bitmap_lock_clear_bit(MY_BITMAP *map, uint bitmap_bit);
@@ -88,7 +92,7 @@ extern void bitmap_lock_xor(MY_BITMAP *map, const MY_BITMAP *map2);
 extern void bitmap_lock_invert(MY_BITMAP *map);
 #endif
 /* Fast, not thread safe, bitmap functions */
-#define bitmap_buffer_size(bits) 4*(((bits)+31)/32);
+#define bitmap_buffer_size(bits) (((bits)+31)/32)*4
 #define no_bytes_in_map(map) (((map)->n_bits + 7)/8)
 #define no_words_in_map(map) (((map)->n_bits + 31)/32)
 #define bytes_word_aligned(bytes) (4*((bytes + 3)/4))
@@ -98,28 +102,28 @@ extern void bitmap_lock_invert(MY_BITMAP *map);
                                      ^= (1 << ((BIT) & 7)))
 #define _bitmap_clear_bit(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
                                      &= ~ (1 << ((BIT) & 7)))
-#define _bitmap_is_set(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
-                                  & (1 << ((BIT) & 7)))
+#define _bitmap_is_set(MAP, BIT) (uint) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
-                                         & (1 << ((BIT) & 7)))
 #ifndef DBUG_OFF
-static inline uint32
+static inline void
 bitmap_set_bit(MY_BITMAP *map,uint bit)
 {
   DBUG_ASSERT(bit < (map)->n_bits);
-  return _bitmap_set_bit(map,bit);
+  _bitmap_set_bit(map,bit);
 }
-static inline uint32
+static inline void
 bitmap_flip_bit(MY_BITMAP *map,uint bit)
 {
   DBUG_ASSERT(bit < (map)->n_bits);
-  return _bitmap_flip_bit(map,bit);
+  _bitmap_flip_bit(map,bit);
 }
-static inline uint32
+static inline void
 bitmap_clear_bit(MY_BITMAP *map,uint bit)
 {
   DBUG_ASSERT(bit < (map)->n_bits);
-  return _bitmap_clear_bit(map,bit);
+  _bitmap_clear_bit(map,bit);
 }
-static inline uint32
+static inline uint
 bitmap_is_set(const MY_BITMAP *map,uint bit)
 {
   DBUG_ASSERT(bit < (map)->n_bits);
@@ -131,11 +135,16 @@ bitmap_is_set(const MY_BITMAP *map,uint bit)
 #define bitmap_clear_bit(MAP, BIT) _bitmap_clear_bit(MAP, BIT)
 #define bitmap_is_set(MAP, BIT) _bitmap_is_set(MAP, BIT)
 #endif
-#define bitmap_cmp(MAP1, MAP2) \
-  (memcmp((MAP1)->bitmap, (MAP2)->bitmap, 4*no_words_in_map((MAP1)))==0)
+
+static inline my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2)
+{
+  *(map1)->last_word_ptr|= (map1)->last_word_mask;
+  *(map2)->last_word_ptr|= (map2)->last_word_mask;
+  return memcmp((map1)->bitmap, (map2)->bitmap, 4*no_words_in_map((map1)))==0;
+}
+
 #define bitmap_clear_all(MAP) \
-  { memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); \
-    *(MAP)->last_word_ptr|= (MAP)->last_word_mask; }
+  { memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); }
 #define bitmap_set_all(MAP) \
   (memset((MAP)->bitmap, 0xFF, 4*no_words_in_map((MAP))))
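The bitmap_buffer_size change above is more than cosmetic: the old body ended in a stray semicolon, so any use of the macro inside an expression failed to compile. A small self-contained sketch of the fixed macro in use (standalone example, not the server's build environment):

#include <stdio.h>
#include <stdlib.h>

/* Fixed definition, as in the hunk above. The old body,
     4*(((bits)+31)/32);
   could not appear inside an expression such as a malloc() argument
   because of the trailing semicolon. */
#define bitmap_buffer_size(bits) (((bits)+31)/32)*4

int main(void)
{
  unsigned n_bits= 100;
  /* 100 bits round up to four 32-bit words, i.e. 16 bytes. */
  char *buf= malloc(bitmap_buffer_size(n_bits));
  if (buf == NULL)
    return 1;
  printf("%u bits need %d bytes\n", n_bits, (int) bitmap_buffer_size(n_bits));
  free(buf);
  return 0;
}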
include/myisam.h
@@ -303,6 +303,7 @@ extern int mi_rename(const char *from, const char *to);
 extern int mi_extra(struct st_myisam_info *file,
                     enum ha_extra_function function,
                     void *extra_arg);
+extern int mi_reset(struct st_myisam_info *file);
 extern ha_rows mi_records_in_range(struct st_myisam_info *info,int inx,
                                    key_range *min_key, key_range *max_key);
 extern int mi_log(int activate_log);
include/myisammrg.h
@@ -99,6 +99,7 @@ extern int myrg_create(const char *name, const char **table_names,
                        uint insert_method, my_bool fix_names);
 extern int myrg_extra(MYRG_INFO *file,enum ha_extra_function function,
                       void *extra_arg);
+extern int myrg_reset(MYRG_INFO *info);
 extern void myrg_extrafunc(MYRG_INFO *info,invalidator_by_filename inv);
 extern ha_rows myrg_records_in_range(MYRG_INFO *info,int inx,
                                      key_range *min_key, key_range *max_key);
include/mysql_com.h
@@ -98,6 +98,7 @@ enum enum_server_command
 #define BINCMP_FLAG 131072              /* Intern: Used by sql_yacc */
 #define GET_FIXED_FIELDS_FLAG (1 << 18) /* Used to get fields in item tree */
 #define FIELD_IN_PART_FUNC_FLAG (1 << 19)/* Field part of partition func */
+#define FIELD_IN_ADD_INDEX (1<< 20)     /* Intern: Field used in ADD INDEX */

 #define REFRESH_GRANT 1                 /* Refresh grant tables */
 #define REFRESH_LOG 2                   /* Start on new log file */
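FIELD_IN_ADD_INDEX takes bit 20, next to the other field flags in the hunk. A short sketch of how such (1 << n) flags are combined and tested (the flags variable is illustrative; the #define values are copied from the hunk above):

#include <stdio.h>

/* Values as in include/mysql_com.h above. */
#define BINCMP_FLAG 131072                /* 1 << 17 */
#define GET_FIXED_FIELDS_FLAG (1 << 18)   /* Used to get fields in item tree */
#define FIELD_IN_PART_FUNC_FLAG (1 << 19) /* Field part of partition func */
#define FIELD_IN_ADD_INDEX (1 << 20)      /* Intern: Field used in ADD INDEX */

int main(void)
{
  unsigned int flags= 0;
  flags|= FIELD_IN_ADD_INDEX;             /* set the flag */
  if (flags & FIELD_IN_ADD_INDEX)         /* test it */
    printf("bit 20 set, value %u\n", (unsigned) FIELD_IN_ADD_INDEX);
  flags&= ~FIELD_IN_ADD_INDEX;            /* clear it again */
  return 0;
}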
mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
@@ -234,8 +234,8 @@ select (@after-@before) >= 2;
 drop table t1,t2;
 commit;

-# test for BUG#7947 - DO RELEASE_LOCK() not written to binlog on rollback in the middle
-# of a transaction
+# test for BUG#7947 - DO RELEASE_LOCK() not written to binlog on rollback in
+# the middle of a transaction

 connection con2;
 begin;
@@ -265,6 +265,68 @@ drop table t0,t2;

 # End of 4.1 tests

+#
+# Test behaviour of CREATE ... SELECT when mixing MyISAM and InnoDB tables
+#
+
+set autocommit=0;
+CREATE TABLE t1 (a int, b int) engine=myisam;
+reset master;
+INSERT INTO t1 values (1,1),(1,2);
+--error 1062
+CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
+# This should give warning
+DROP TABLE if exists t2;
+INSERT INTO t1 values (3,3);
+--error 1062
+CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ROLLBACK;
+# This should give warning
+DROP TABLE IF EXISTS t2;
+
+CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
+INSERT INTO t1 VALUES (4,4);
+--error 1062
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+SELECT * from t2;
+TRUNCATE table t2;
+INSERT INTO t1 VALUES (5,5);
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * FROM t2;
+DROP TABLE t2;
+
+INSERT INTO t1 values (6,6);
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb ;
+INSERT INTO t1 values (7,7);
+ROLLBACK;
+INSERT INTO t1 values (8,8);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+COMMIT;
+INSERT INTO t1 values (9,9);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ROLLBACK;
+SELECT * from t2;
+TRUNCATE table t2;
+INSERT INTO t1 values (10,10);
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t1;
+INSERT INTO t2 values (100,100);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+COMMIT;
+INSERT INTO t2 values (101,101);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ROLLBACK;
+SELECT * from t2;
+DROP TABLE t1,t2;
+--replace_regex /table_id: [0-9]+/table_id: #/
+show binlog events from 102;
+
 # Test for BUG#16559 (ROLLBACK should always have a zero error code in
 # binlog). Has to be here and not earlier, as the SELECTs influence
 # XIDs differently between normal and ps-protocol (and SHOW BINLOG
@@ -283,3 +345,4 @@ disconnect con3;

 connection con4;
 select get_lock("a",10); # wait for rollback to finish
+
mysql-test/install_test_db.sh
@@ -34,7 +34,6 @@ if [ x$1 = x"-slave" ]
 then
   shift 1
   data=var/slave-data
-  ldata=$fix_bin/var/slave-data
 else
   if [ x$1 = x"-1" ]
   then
@@ -42,8 +41,8 @@ else
   else
     data=var/master-data
   fi
-  ldata=$fix_bin/$data
 fi
+ldata=$fix_bin/$data

 mdata=$data/mysql
 EXTRA_ARG=""
@@ -81,9 +80,7 @@ basedir=.
   EXTRA_ARG="--language=../sql/share/english/ --character-sets-dir=../sql/share/charsets/"
 fi

-mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables \
-    --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb \
-    $EXTRA_ARG"
+mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb --tmpdir=. $EXTRA_ARG"
 echo "running $mysqld_boot"

 if $scriptdir/mysql_create_system_tables test $mdata $hostname | $mysqld_boot
mysql-test/mysql-test-run.pl
@@ -1943,6 +1943,7 @@ sub install_db ($$) {
   mtr_add_arg($args, "--skip-innodb");
   mtr_add_arg($args, "--skip-ndbcluster");
   mtr_add_arg($args, "--skip-bdb");
+  mtr_add_arg($args, "--tmpdir=.");

   if ( ! $opt_netware )
   {
mysql-test/mysql-test-run.sh
@@ -1121,7 +1121,10 @@ mysql_install_db () {
     if [ ! -z "$USE_NDBCLUSTER" ]
     then
       $ECHO "Installing Master Databases 1"
-      $INSTALL_DB -1
+      # $INSTALL_DB -1
+      $RM -rf var/master-data1
+      mkdir var/master-data1
+      cp -r var/master-data/* var/master-data1
       if [ $? != 0 ]; then
         error "Could not install master test DBs 1"
         exit 1
@@ -1129,7 +1132,9 @@ mysql_install_db () {
   fi
   $ECHO "Installing Slave Databases"
   $RM -rf $SLAVE_MYDDIR $MY_LOG_DIR/*
-  $INSTALL_DB -slave
+  # $INSTALL_DB -slave
+  mkdir var/slave-data
+  cp -r var/master-data/* var/slave-data
   if [ $? != 0 ]; then
     error "Could not install slave test DBs"
     exit 1
@@ -2155,6 +2160,7 @@ then

   # Remove files that can cause problems
   $RM -rf $MYSQL_TEST_DIR/var/ndbcluster
+  $RM -rf $MYSQL_TEST_DIR/var/tmp/snapshot*
   $RM -f $MYSQL_TEST_DIR/var/run/* $MYSQL_TEST_DIR/var/tmp/*

   # Remove old berkeley db log files that can confuse the server
mysql-test/r/binlog_row_mix_innodb_myisam.result
@@ -234,8 +234,6 @@ commit;
 begin;
 create temporary table ti (a int) engine=innodb;
 rollback;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
 insert into ti values(1);
 set autocommit=0;
 create temporary table t1 (a int) engine=myisam;
@@ -285,6 +283,162 @@ master-bin.000001 1260 Write_rows 1 # table_id: # flags: STMT_END_F
 master-bin.000001 1294 Query 1 # use `test`; create table t2 (n int) engine=innodb
 do release_lock("lock1");
 drop table t0,t2;
+set autocommit=0;
+CREATE TABLE t1 (a int, b int) engine=myisam;
+reset master;
+INSERT INTO t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+INSERT INTO t1 values (3,3);
+CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+DROP TABLE IF EXISTS t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
+INSERT INTO t1 VALUES (4,4);
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 VALUES (5,5);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * FROM t2;
+a b
+DROP TABLE t2;
+INSERT INTO t1 values (6,6);
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb ;
+INSERT INTO t1 values (7,7);
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+INSERT INTO t1 values (8,8);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t1 values (9,9);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 values (10,10);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t1;
+a b
+1 1
+1 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+INSERT INTO t2 values (100,100);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t2 values (101,101);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+DROP TABLE t1,t2;
+show binlog events from 102;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 102 Table_map 1 142 table_id: # (test.t1)
+master-bin.000001 142 Write_rows 1 189 table_id: # flags: STMT_END_F
+master-bin.000001 189 Query 1 257 use `test`; BEGIN
+master-bin.000001 257 Query 1 182 use `test`; CREATE TABLE `t2` (
+  `a` int(11) NOT NULL DEFAULT '0',
+  `b` int(11) DEFAULT NULL,
+  PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 439 Table_map 1 222 table_id: # (test.t2)
+master-bin.000001 479 Write_rows 1 260 table_id: # flags: STMT_END_F
+master-bin.000001 517 Xid 1 544 COMMIT /* xid= */
+master-bin.000001 544 Query 1 630 use `test`; DROP TABLE if exists t2
+master-bin.000001 630 Table_map 1 670 table_id: # (test.t1)
+master-bin.000001 670 Write_rows 1 708 table_id: # flags: STMT_END_F
+master-bin.000001 708 Query 1 776 use `test`; BEGIN
+master-bin.000001 776 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+  `a` int(11) NOT NULL DEFAULT '0',
+  `b` int(11) DEFAULT NULL,
+  PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 968 Query 1 1039 use `test`; ROLLBACK
+master-bin.000001 1039 Query 1 1125 use `test`; DROP TABLE IF EXISTS t2
+master-bin.000001 1125 Query 1 1249 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 1249 Table_map 1 1289 table_id: # (test.t1)
+master-bin.000001 1289 Write_rows 1 1327 table_id: # flags: STMT_END_F
+master-bin.000001 1327 Query 1 1395 use `test`; BEGIN
+master-bin.000001 1395 Query 1 182 use `test`; CREATE TABLE `t2` (
+  `a` int(11) NOT NULL DEFAULT '0',
+  `b` int(11) DEFAULT NULL,
+  PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 1577 Table_map 1 222 table_id: # (test.t2)
+master-bin.000001 1617 Write_rows 1 260 table_id: # flags: STMT_END_F
+master-bin.000001 1655 Xid 1 1682 COMMIT /* xid= */
+master-bin.000001 1682 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 1762 Xid 1 1789 COMMIT /* xid= */
+master-bin.000001 1789 Table_map 1 1829 table_id: # (test.t1)
+master-bin.000001 1829 Write_rows 1 1867 table_id: # flags: STMT_END_F
+master-bin.000001 1867 Query 1 1935 use `test`; BEGIN
+master-bin.000001 1935 Table_map 1 40 table_id: # (test.t2)
+master-bin.000001 1975 Write_rows 1 78 table_id: # flags: STMT_END_F
+master-bin.000001 2013 Xid 1 2040 COMMIT /* xid= */
+master-bin.000001 2040 Query 1 2116 use `test`; DROP TABLE t2
+master-bin.000001 2116 Table_map 1 2156 table_id: # (test.t1)
+master-bin.000001 2156 Write_rows 1 2194 table_id: # flags: STMT_END_F
+master-bin.000001 2194 Table_map 1 2234 table_id: # (test.t1)
+master-bin.000001 2234 Write_rows 1 2272 table_id: # flags: STMT_END_F
+master-bin.000001 2272 Table_map 1 2312 table_id: # (test.t1)
+master-bin.000001 2312 Write_rows 1 2350 table_id: # flags: STMT_END_F
+master-bin.000001 2350 Query 1 2418 use `test`; BEGIN
+master-bin.000001 2418 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+  `a` int(11) NOT NULL DEFAULT '0',
+  `b` int(11) DEFAULT NULL,
+  PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 2610 Xid 1 2637 COMMIT /* xid= */
+master-bin.000001 2637 Table_map 1 2677 table_id: # (test.t1)
+master-bin.000001 2677 Write_rows 1 2715 table_id: # flags: STMT_END_F
+master-bin.000001 2715 Query 1 2783 use `test`; BEGIN
+master-bin.000001 2783 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+  `a` int(11) NOT NULL DEFAULT '0',
+  `b` int(11) DEFAULT NULL,
+  PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 2975 Query 1 3046 use `test`; ROLLBACK
+master-bin.000001 3046 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 3126 Xid 1 3153 COMMIT /* xid= */
+master-bin.000001 3153 Table_map 1 3193 table_id: # (test.t1)
+master-bin.000001 3193 Write_rows 1 3231 table_id: # flags: STMT_END_F
+master-bin.000001 3231 Query 1 3299 use `test`; BEGIN
+master-bin.000001 3299 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+  `a` int(11) NOT NULL DEFAULT '0',
+  `b` int(11) DEFAULT NULL,
+  PRIMARY KEY (`a`)
+) ENGINE=InnoDB
+master-bin.000001 3491 Xid 1 3518 COMMIT /* xid= */
+master-bin.000001 3518 Query 1 3622 use `test`; DROP TABLE `t1` /* generated by server */
 reset master;
 create table t1 (a int) engine=innodb;
 create table t2 (a int) engine=myisam;
mysql-test/r/binlog_stm_mix_innodb_myisam.result
@@ -209,8 +209,6 @@ commit;
 begin;
 create temporary table ti (a int) engine=innodb;
 rollback;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
 insert into ti values(1);
 set autocommit=0;
 create temporary table t1 (a int) engine=myisam;
@@ -256,6 +254,107 @@ master-bin.000001 1654 Query 1 # use `test`; create table t2 (n int) engine=inno
 master-bin.000001 1754 Query 1 # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti`
 do release_lock("lock1");
 drop table t0,t2;
+set autocommit=0;
+CREATE TABLE t1 (a int, b int) engine=myisam;
+reset master;
+INSERT INTO t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+INSERT INTO t1 values (3,3);
+CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+DROP TABLE IF EXISTS t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
+INSERT INTO t1 VALUES (4,4);
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 VALUES (5,5);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * FROM t2;
+a b
+DROP TABLE t2;
+INSERT INTO t1 values (6,6);
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb ;
+INSERT INTO t1 values (7,7);
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+INSERT INTO t1 values (8,8);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t1 values (9,9);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+SELECT * from t2;
+a b
+TRUNCATE table t2;
+INSERT INTO t1 values (10,10);
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t1;
+a b
+1 1
+1 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+INSERT INTO t2 values (100,100);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+COMMIT;
+INSERT INTO t2 values (101,101);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+DROP TABLE t1,t2;
+show binlog events from 102;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 102 Query 1 198 use `test`; INSERT INTO t1 values (1,1),(1,2)
+master-bin.000001 198 Query 1 284 use `test`; DROP TABLE if exists t2
+master-bin.000001 284 Query 1 374 use `test`; INSERT INTO t1 values (3,3)
+master-bin.000001 374 Query 1 460 use `test`; DROP TABLE IF EXISTS t2
+master-bin.000001 460 Query 1 584 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 584 Query 1 674 use `test`; INSERT INTO t1 VALUES (4,4)
+master-bin.000001 674 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 754 Xid 1 781 COMMIT /* xid= */
+master-bin.000001 781 Query 1 871 use `test`; INSERT INTO t1 VALUES (5,5)
+master-bin.000001 871 Query 1 947 use `test`; DROP TABLE t2
+master-bin.000001 947 Query 1 1037 use `test`; INSERT INTO t1 values (6,6)
+master-bin.000001 1037 Query 1 1171 use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 1171 Query 1 1261 use `test`; INSERT INTO t1 values (7,7)
+master-bin.000001 1261 Query 1 1351 use `test`; INSERT INTO t1 values (8,8)
+master-bin.000001 1351 Query 1 1441 use `test`; INSERT INTO t1 values (9,9)
+master-bin.000001 1441 Query 1 80 use `test`; TRUNCATE table t2
+master-bin.000001 1521 Xid 1 1548 COMMIT /* xid= */
+master-bin.000001 1548 Query 1 1640 use `test`; INSERT INTO t1 values (10,10)
+master-bin.000001 1640 Query 1 1708 use `test`; BEGIN
+master-bin.000001 1708 Query 1 94 use `test`; INSERT INTO t2 values (100,100)
+master-bin.000001 1802 Xid 1 1829 COMMIT /* xid= */
+master-bin.000001 1829 Query 1 1908 use `test`; DROP TABLE t1,t2
 reset master;
 create table t1 (a int) engine=innodb;
 create table t2 (a int) engine=myisam;
mysql-test/r/create.result
@@ -266,6 +266,7 @@ select * from t1;
 0 1 2
 0 0 1
 drop table t1;
+flush status;
 create table t1 (a int not null, b int, primary key (a));
 insert into t1 values (1,1);
 create table if not exists t1 select 2;
@@ -281,6 +282,13 @@ Warnings:
 Note 1050 Table 't1' already exists
 create table if not exists t1 select 3 as 'a',3 as 'b';
+ERROR 23000: Duplicate entry '3' for key 'PRIMARY'
+show warnings;
+Level Code Message
+Note 1050 Table 't1' already exists
+Error 1062 Duplicate entry '3' for key 'PRIMARY'
 show status like "Opened_tables";
 Variable_name Value
 Opened_tables 2
 select * from t1;
 a b
 1 1
@@ -778,3 +786,41 @@ Warnings:
 Warning 1071 Specified key was too long; max key length is 765 bytes
 insert into t1 values('aaa');
 drop table t1;
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a));
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+drop table t2;
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+1 1
+drop table t1,t2;
mysql-test/r/federated.result
@@ -1601,6 +1601,7 @@ fld_cid fld_name fld_parentid fld_delt
 5 Torkel 0 0
 DROP TABLE federated.t1;
 DROP TABLE federated.bug_17377_table;
+DROP TABLE federated.t1;
 DROP TABLE IF EXISTS federated.t1;
 DROP DATABASE IF EXISTS federated;
 DROP TABLE IF EXISTS federated.t1;
mysql-test/r/func_gconcat.result
@@ -309,6 +309,12 @@ a grp
 1 2
 2 4,3
 3 5
+select group_concat(c order by (select concat(5-t1.c,group_concat(c order by a)) from t2 where t2.a=t1.a)) as grp from t1;
+grp
+5,4,3,2
+select group_concat(c order by (select concat(t1.c,group_concat(c)) from t2 where a=t1.a)) as grp from t1;
+grp
+2,3,4,5
 select a,c,(select group_concat(c order by a) from t2 where a=t1.a) as grp from t1 order by grp;
 a c grp
 3 5 3,3
mysql-test/r/func_time.result
@@ -763,6 +763,7 @@ time_format('100:00:00', '%H %k %h %I %l')
 100 100 04 04 4
 create table t1 (a timestamp default '2005-05-05 01:01:01',
 b timestamp default '2005-05-05 01:01:01');
+drop function if exists t_slow_sysdate;
 create function t_slow_sysdate() returns timestamp
 begin
 do sleep(2);
mysql-test/r/innodb_mysql.result
@@ -1,4 +1,4 @@
-drop table if exists t1,t2;
+drop table if exists t1,t2,t1m,t1i,t2m,t2i,t4;
 create table t1 (
 c_id int(11) not null default '0',
 org_id int(11) default null,
@@ -268,3 +268,61 @@ explain select distinct f1, f2 from t1;
 id select_type table type possible_keys key key_len ref rows Extra
 1 SIMPLE t1 range NULL PRIMARY 5 NULL 3 Using index for group-by; Using temporary
 drop table t1;
+set storage_engine=innodb;
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+CREATE TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+100 100
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+drop table t2;
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+100 100
+COMMIT;
+BEGIN;
+INSERT INTO t2 values(101,101);
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+100 100
+101 101
+ROLLBACK;
+SELECT * from t2;
+a b
+100 100
+TRUNCATE table t2;
+INSERT INTO t2 select * from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * from t2;
+a b
+drop table t1,t2;
mysql-test/r/insert.result
@@ -2,8 +2,8 @@ drop table if exists t1,t2,t3;
 create table t1 (a int not null);
 insert into t1 values (1);
 insert into t1 values (a+2);
-insert into t1 values (a+3);
-insert into t1 values (4),(a+5);
+insert into t1 values (a+3),(a+4);
+insert into t1 values (5),(a+6);
 select * from t1;
 a
 1
@@ -11,6 +11,7 @@ a
 3
 4
 5
+6
 drop table t1;
 create table t1 (id int not null auto_increment primary key, username varchar(32) not null, unique (username));
 insert into t1 values (0,"mysql");
@@ -299,3 +300,24 @@ select count(*) from t2;
 count(*)
 25500
 drop table t1,t2,t3;
+create table t1 (a int, b int);
+insert into t1 (a,b) values (a,b);
+insert into t1 SET a=1, b=a+1;
+insert into t1 (a,b) select 1,2;
+INSERT INTO t1 ( a ) SELECT 0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+prepare stmt1 from ' replace into t1 (a,a) select 100, ''hundred'' ';
+execute stmt1;
+ERROR 42000: Column 'a' specified twice
+insert into t1 (a,b,b) values (1,1,1);
+ERROR 42000: Column 'b' specified twice
+insert into t1 (a,a) values (1,1,1);
+ERROR 21S01: Column count doesn't match value count at row 1
+insert into t1 (a,a) values (1,1);
+ERROR 42000: Column 'a' specified twice
+insert into t1 SET a=1,b=2,a=1;
+ERROR 42000: Column 'a' specified twice
+insert into t1 (b,b) select 1,2;
+ERROR 42000: Column 'b' specified twice
+INSERT INTO t1 (b,b) SELECT 0,0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+ERROR 42000: Column 'b' specified twice
+drop table t1;
mysql-test/r/loaddata.result
@@ -115,6 +115,15 @@ select @a, @b;
 @a @b
 NULL 15
 truncate table t1;
+load data infile '../std_data_ln/rpl_loaddata.dat' into table t1 set c=b;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 2 doesn't contain data for all columns
+select * from t1;
+a b c
+NULL 10 10
+NULL 15 15
+truncate table t1;
 load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (a, b) set c="Wow";
 select * from t1;
 a b c
mysql-test/r/multi_update.result
@@ -519,3 +519,83 @@ a
 30
 drop view v1;
 drop table t1, t2;
+create table t1 (i1 int, i2 int, i3 int);
+create table t2 (id int, c1 varchar(20), c2 varchar(20));
+insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from t2;
+id c1 c2
+9 abc def
+5 opq lmn
+2 test t t test
+update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table t1, t2;
+create table t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1));
+create table t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id));
+insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from t2 order by id;
+id c1 c2
+2 test t t test
+5 opq lmn
+9 abc def
+update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
+select * from t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table t1, t2;
mysql-test/r/ndb_index_unique.result
@@ -144,7 +144,7 @@ b int unsigned not null,
 c int unsigned,
 UNIQUE (b, c) USING HASH
 ) engine=ndbcluster;
-ERROR 42000: Column 'c' is used with UNIQUE or INDEX but is not defined as NOT NULL
+ERROR 42000: Table handler doesn't support NULL in given index. Please change column 'c' to be NOT NULL or use another handler
 CREATE TABLE t3 (
 a int unsigned NOT NULL,
 b int unsigned not null,
mysql-test/r/ndb_replace.result
@@ -30,4 +30,4 @@ REPLACE INTO t1 (i,j) VALUES (17,2);
 SELECT * from t1 ORDER BY i;
 i j k
 3 1 42
-17 2 24
+17 2 NULL
mysql-test/r/rpl_ddl.result
@@ -359,8 +359,6 @@ MAX(f1)

 -------- switch to master -------
 ROLLBACK;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
 SELECT MAX(f1) FROM t1;
 MAX(f1)
 5
@@ -579,8 +577,6 @@ MAX(f1)

 -------- switch to master -------
 ROLLBACK;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
 SELECT MAX(f1) FROM t1;
 MAX(f1)
 8
mysql-test/r/view_grant.result
@@ -1,5 +1,5 @@
 drop database if exists mysqltest;
-drop view if exists v1;
+drop view if exists v1,v2,v3;
 grant create view on test.* to test@localhost;
 show grants for test@localhost;
 Grants for test@localhost
mysql-test/t/create.test
@@ -226,6 +226,7 @@ drop table t1;
 # Test create table if not exists with duplicate key error
 #

+flush status;
 create table t1 (a int not null, b int, primary key (a));
 insert into t1 values (1,1);
 create table if not exists t1 select 2;
@@ -233,6 +234,8 @@ select * from t1;
 create table if not exists t1 select 3 as 'a',4 as 'b';
+--error 1062
 create table if not exists t1 select 3 as 'a',3 as 'b';
+show warnings;
 show status like "Opened_tables";
 select * from t1;
 drop table t1;

@@ -676,3 +679,37 @@ insert into t1 values('aaa');
 drop table t1;

 # End of 5.0 tests
+
+#
+# Test of behaviour with CREATE ... SELECT
+#
+
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+--error 1062
+CREATE TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+--error 1062
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+CREATE TABLE t2 (a int, b int, primary key (a));
+--error 1062
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t2;
+
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t1,t2;
mysql-test/t/federated.test
@@ -1,6 +1,6 @@
 # should work with embedded server after mysqltest is fixed
--- source include/not_embedded.inc
-source include/federated.inc;
+--source include/not_embedded.inc
+--source include/federated.inc

 connection slave;
 DROP TABLE IF EXISTS federated.t1;
@@ -1310,6 +1310,57 @@ select * from federated.t1 where fld_parentid=0 and fld_delt=0;
 DROP TABLE federated.t1;
 connection slave;
 DROP TABLE federated.bug_17377_table;
 DROP TABLE federated.t1;

+#
+# Test multi updates and deletes without keys
+#
+
-source include/federated_cleanup.inc;
+# The following can be enabled when bug #19773 has been fixed
+--disable_parsing
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int);
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
+connection master;
+eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete t1.*,t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+connection slave;
+drop table federated.t1, federated.t2;
+connection master;
+
+# Test multi updates and deletes with keys
+
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1));
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id));
+connection master;
+eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete t1.*,t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+connection slave;
+drop table federated.t1, federated.t2;
+connection master;
+--enable_parsing
+
+--source include/federated_cleanup.inc
mysql-test/t/func_gconcat.test
@@ -176,8 +176,8 @@ select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5)
 select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1 group by 1;

 # The following returns random results as we are sorting on blob addresses
 # select group_concat(c order by (select group_concat(c order by a) from t2 where t2.a=t1.a)) as grp from t1;
 # select group_concat(c order by (select group_concat(c) from t2 where a=t1.a)) as grp from t1;
+select group_concat(c order by (select concat(5-t1.c,group_concat(c order by a)) from t2 where t2.a=t1.a)) as grp from t1;
+select group_concat(c order by (select concat(t1.c,group_concat(c)) from t2 where a=t1.a)) as grp from t1;

 select a,c,(select group_concat(c order by a) from t2 where a=t1.a) as grp from t1 order by grp;
 drop table t1,t2;
mysql-test/t/func_time.test
@@ -385,6 +385,7 @@ select time_format('100:00:00', '%H %k %h %I %l');
 create table t1 (a timestamp default '2005-05-05 01:01:01',
 b timestamp default '2005-05-05 01:01:01');
 delimiter //;
+drop function if exists t_slow_sysdate;
 create function t_slow_sysdate() returns timestamp
 begin
 do sleep(2);
mysql-test/t/innodb_mysql.test
@@ -1,7 +1,7 @@
 -- source include/have_innodb.inc

 --disable_warnings
-drop table if exists t1,t2;
+drop table if exists t1,t2,t1m,t1i,t2m,t2i,t4;
 --enable_warnings

 # BUG#16798: Uninitialized row buffer reads in ref-or-null optimizer
@@ -223,3 +223,52 @@ explain select distinct f1 a, f1 b from t1;
 explain select distinct f1, f2 from t1;
 drop table t1;

+
+#
+# Test of behaviour with CREATE ... SELECT
+#
+
+set storage_engine=innodb;
+CREATE TABLE t1 (a int, b int);
+insert into t1 values (1,1),(1,2);
+--error 1062
+CREATE TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+--error 1062
+CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
+# This should give warning
+drop table if exists t2;
+CREATE TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+--error 1062
+CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+ROLLBACK;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t2;
+
+CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a));
+BEGIN;
+INSERT INTO t2 values(100,100);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+COMMIT;
+BEGIN;
+INSERT INTO t2 values(101,101);
+--error 1062
+CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1;
+SELECT * from t2;
+ROLLBACK;
+SELECT * from t2;
+TRUNCATE table t2;
+--error 1062
+INSERT INTO t2 select * from t1;
+SELECT * from t2;
+drop table t1,t2;
mysql-test/t/insert.test
@@ -9,8 +9,8 @@ drop table if exists t1,t2,t3;
 create table t1 (a int not null);
 insert into t1 values (1);
 insert into t1 values (a+2);
-insert into t1 values (a+3);
-insert into t1 values (4),(a+5);
+insert into t1 values (a+3),(a+4);
+insert into t1 values (5),(a+6);
 select * from t1;
 drop table t1;

@@ -176,3 +176,28 @@ insert into t2 select t1.* from t1, t2 t, t3 where t1.id1 = t.id2 and t.id2 =
 select count(*) from t2;
 drop table t1,t2,t3;

+#
+# Test different cases of duplicate fields
+#
+
+create table t1 (a int, b int);
+insert into t1 (a,b) values (a,b);
+insert into t1 SET a=1, b=a+1;
+insert into t1 (a,b) select 1,2;
+INSERT INTO t1 ( a ) SELECT 0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+prepare stmt1 from ' replace into t1 (a,a) select 100, ''hundred'' ';
+--error 1110
+execute stmt1;
+--error 1110
+insert into t1 (a,b,b) values (1,1,1);
+--error 1136
+insert into t1 (a,a) values (1,1,1);
+--error 1110
+insert into t1 (a,a) values (1,1);
+--error 1110
+insert into t1 SET a=1,b=2,a=1;
+--error 1110
+insert into t1 (b,b) select 1,2;
+--error 1110
+INSERT INTO t1 (b,b) SELECT 0,0 ON DUPLICATE KEY UPDATE a = a + VALUES (a);
+drop table t1;
mysql-test/t/loaddata.test
@@ -92,6 +92,10 @@ load data infile '../std_data_ln/rpl_loaddata.dat' into table t1 (@a, @b);
 select * from t1;
 select @a, @b;
 truncate table t1;
+# Reading of all columns with set
+load data infile '../std_data_ln/rpl_loaddata.dat' into table t1 set c=b;
+select * from t1;
+truncate table t1;
 # now going to test fixed field-row file format
 load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (a, b) set c="Wow";
 select * from t1;
@ -532,3 +532,33 @@ select * from t1;
|
|||
select * from t2;
|
||||
drop view v1;
|
||||
drop table t1, t2;
|
||||
|
||||
#
|
||||
# Test multi updates and deletes using primary key and without.
|
||||
#
|
||||
create table t1 (i1 int, i2 int, i3 int);
|
||||
create table t2 (id int, c1 varchar(20), c2 varchar(20));
|
||||
insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
|
||||
insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
|
||||
select * from t1 order by i1;
|
||||
select * from t2;
|
||||
update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
|
||||
select * from t1 order by i1;
|
||||
select * from t2 order by id;
|
||||
delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
|
||||
select * from t1 order by i1;
|
||||
select * from t2 order by id;
|
||||
drop table t1, t2;
|
||||
create table t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1));
|
||||
create table t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id));
|
||||
insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
|
||||
insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
|
||||
select * from t1 order by i1;
|
||||
select * from t2 order by id;
|
||||
update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
|
||||
select * from t1 order by i1;
|
||||
select * from t2 order by id;
|
||||
delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
|
||||
select * from t1 order by i1;
|
||||
select * from t2 order by id;
|
||||
drop table t1, t2;

@@ -3,7 +3,7 @@

--disable_warnings
drop database if exists mysqltest;
-drop view if exists v1;
+drop view if exists v1,v2,v3;
--enable_warnings

@@ -240,6 +240,7 @@
   fun:kill_server_thread
}

+# Red Hat AS 4 32 bit
{
   dl_relocate_object

@@ -405,3 +406,17 @@
   futex(utime)
   fun:__lll_mutex_unlock_wake
}
+
+#
+# Warning when printing stack trace (to suppress some not needed warnings)
+#
+
+{
+   vprintf on stacktrace
+   Memcheck:Cond
+   fun:vfprintf
+   fun:buffered_vfprintf
+   fun:vfprintf
+   fun:fprintf
+   fun:print_stacktrace
+}

189 mysql-test/valgrind.supp.orig (new file)

@@ -0,0 +1,189 @@
#
# Suppress some common (not fatal) errors in system libraries found by valgrind
#

#
# Pthread doesn't free all thread specific memory before program exits
#
{
   pthread allocate_tls memory loss
   Memcheck:Leak
   fun:calloc
   fun:_dl_allocate_tls
   fun:allocate_stack
   fun:pthread_create@@GLIBC_2.1
}

{
   pthread allocate_dtv memory loss
   Memcheck:Leak
   fun:calloc
   fun:allocate_dtv
   fun:_dl_allocate_tls_storage
   fun:__GI__dl_allocate_tls
   fun:pthread_create
}

{
   pthread allocate_dtv memory loss second
   Memcheck:Leak
   fun:calloc
   fun:allocate_dtv
   fun:_dl_allocate_tls
   fun:pthread_create*
}

{
   pthread allocate_dtv memory loss second
   Memcheck:Leak
   fun:calloc
   fun:allocate_dtv
   fun:_dl_allocate_tls
   fun:pthread_create*
}

{
   pthread memalign memory loss
   Memcheck:Leak
   fun:memalign
   fun:_dl_allocate_tls_storage
   fun:__GI__dl_allocate_tls
   fun:pthread_create
}

{
   pthread pthread_key_create
   Memcheck:Leak
   fun:malloc
   fun:*
   fun:*
   fun:pthread_key_create
   fun:my_thread_global_init
}

{
   pthread strstr uninit
   Memcheck:Cond
   fun:strstr
   obj:/lib/tls/libpthread.so.*
   obj:/lib/tls/libpthread.so.*
   fun:call_init
   fun:_dl_init
   obj:/lib/ld-*.so
}

{
   pthread strstr uninit
   Memcheck:Cond
   fun:strstr
   obj:/lib/tls/libpthread.so.*
   obj:/lib/tls/libpthread.so.*
   fun:call_init
   fun:_dl_init
   obj:/lib/ld-*.so
}

{
   pthread errno
   Memcheck:Leak
   fun:calloc
   fun:_dlerror_run
   fun:dlsym
   fun:__errno_location
}

#
# Warnings in libz because it works with aligned memory(?)
#

{
   libz tr_flush_block
   Memcheck:Cond
   fun:_tr_flush_block
   fun:deflate_slow
   fun:deflate
   fun:do_flush
   fun:gzclose
}

{
   libz tr_flush_block2
   Memcheck:Cond
   fun:_tr_flush_block
   fun:deflate_slow
   fun:deflate
   fun:compress2
}

{
   libz longest_match
   Memcheck:Cond
   fun:longest_match
   fun:deflate_slow
   fun:deflate
   fun:do_flush
}

{
   libz longest_match2
   Memcheck:Cond
   fun:longest_match
   fun:deflate_slow
   fun:deflate
   fun:compress2
}

{
   libz deflate
   Memcheck:Cond
   obj:*/libz.so.*
   obj:*/libz.so.*
   fun:deflate
   fun:compress2
}

{
   libz deflate2
   Memcheck:Cond
   obj:*/libz.so.*
   obj:*/libz.so.*
   fun:deflate
   obj:*/libz.so.*
   fun:gzflush
}

{
   libz deflate3
   Memcheck:Cond
   obj:*/libz.so.*
   obj:*/libz.so.*
   fun:deflate
   fun:do_flush
}

#
# Warning from my_thread_init because mysqld dies before kill thread exits
#

{
   my_thread_init kill thread memory loss second
   Memcheck:Leak
   fun:calloc
   fun:my_thread_init
   fun:kill_server_thread
}

#
# Warning when printing stack trace (to suppress some not needed warnings)
#

{
   vprintf on stacktrace
   Memcheck:Cond
   fun:vfprintf
   fun:buffered_vfprintf
   fun:vfprintf
   fun:fprintf
   fun:print_stacktrace
}

@@ -14,9 +14,10 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

-#include <base64.h>
+#include <my_global.h>
#include <m_string.h>  /* strchr() */
#include <m_ctype.h>   /* my_isspace() */
+#include <base64.h>

#ifndef MAIN
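
The reordering above follows the usual rule in this codebase: my_global.h has to be the first include, because it defines the platform and feature-test macros the other headers depend on. A minimal generic sketch of that rule, with cfg.h as a hypothetical stand-in for my_global.h:

/* Hypothetical illustration, not MySQL code: cfg.h stands in for my_global.h.
   The config header must come first so later headers see the right macros. */
#include "cfg.h"     /* defines feature-test and platform macros (assumption) */
#include <stdio.h>   /* parsed with those macros already in effect */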

@@ -46,8 +46,8 @@ void create_last_word_mask(MY_BITMAP *map)
  unsigned int const used= 1U + ((map->n_bits-1U) & 0x7U);

  /*
-   * Create a mask with the upper 'unused' bits set and the lower 'used'
-   * bits clear. The bits within each byte is stored in big-endian order.
+    Create a mask with the upper 'unused' bits set and the lower 'used'
+    bits clear. The bits within each byte is stored in big-endian order.
   */
  unsigned char const mask= (~((1 << used) - 1)) & 255;
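
A standalone sketch of the mask computation the comment above describes; plain C for illustration, assuming nothing beyond 8-bit bytes:

#include <stdio.h>

/* Sketch of the last-byte mask logic above (independent of MY_BITMAP).
   For n_bits = 13: used = 5, so the mask sets the three unused high bits
   of the last byte and clears the five used low bits. */
int main(void)
{
  unsigned int n_bits= 13;
  unsigned int used= 1U + ((n_bits - 1U) & 0x7U);
  unsigned char mask= (unsigned char) ((~((1U << used) - 1U)) & 255U);
  printf("used=%u mask=0x%02x\n", used, mask);   /* prints used=5 mask=0xe0 */
  return 0;
}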

@@ -60,13 +60,11 @@ void create_last_word_mask(MY_BITMAP *map)
  unsigned char *ptr= (unsigned char*)&map->last_word_mask;

  map->last_word_ptr= map->bitmap + no_words_in_map(map)-1;
-  switch (no_bytes_in_map(map)&3)
-  {
+  switch (no_bytes_in_map(map) & 3) {
  case 1:
    map->last_word_mask= ~0U;
    ptr[0]= mask;
    return;

  case 2:
    map->last_word_mask= ~0U;
    ptr[0]= 0;

@@ -84,6 +82,7 @@ void create_last_word_mask(MY_BITMAP *map)
  }
}

+
static inline void bitmap_lock(MY_BITMAP *map __attribute__((unused)))
{
#ifdef THREAD

@@ -101,37 +100,41 @@ static inline void bitmap_unlock(MY_BITMAP *map __attribute__((unused)))
}


-my_bool bitmap_init(MY_BITMAP *map, uint32 *buf, uint n_bits,
+my_bool bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits,
                    my_bool thread_safe)
{
  DBUG_ENTER("bitmap_init");
  DBUG_ASSERT(n_bits > 0);
  if (!buf)
  {
-    uint size_in_bytes= ((n_bits+31)/32)*4
+    uint size_in_bytes= bitmap_buffer_size(n_bits);
+    uint extra= 0;
#ifdef THREAD
-    +(thread_safe ? sizeof(pthread_mutex_t) : 0)
+    if (thread_safe)
+    {
+      size_in_bytes= ALIGN_SIZE(size_in_bytes);
+      extra= sizeof(pthread_mutex_t);
+    }
+    map->mutex= 0;
#endif
-    ;
-    if (!(buf= (uint32*) my_malloc(size_in_bytes, MYF(MY_WME))))
+    if (!(buf= (my_bitmap_map*) my_malloc(size_in_bytes+extra, MYF(MY_WME))))
      DBUG_RETURN(1);
+#ifdef THREAD
+    if (thread_safe)
+    {
+      map->mutex= (pthread_mutex_t *) ((char*) buf + size_in_bytes);
+      pthread_mutex_init(map->mutex, MY_MUTEX_INIT_FAST);
+    }
+#endif
  }
+#ifdef THREAD
+  else
+    DBUG_ASSERT(thread_safe == 0);
+#endif
-#ifdef THREAD
-  if (thread_safe)
-  {
-    map->mutex=(pthread_mutex_t *)buf;
-    pthread_mutex_init(map->mutex, MY_MUTEX_INIT_FAST);
-    buf+= sizeof(pthread_mutex_t)/4;
-  }
-  else
-    map->mutex=0;
-#endif

  map->bitmap= buf;
-  map->n_bits=n_bits;
+  map->n_bits= n_bits;
  create_last_word_mask(map);
  bitmap_clear_all(map);
  DBUG_RETURN(0);
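
The rewritten bitmap_init() above packs the optional mutex into the same allocation as the bit words, placing it past the aligned word storage. A hedged model of that layout, using plain malloc and a generic ALIGN8 in place of my_malloc/ALIGN_SIZE:

#include <stdlib.h>
#include <pthread.h>

#define ALIGN8(x) (((x) + 7U) & ~7U)   /* stand-in for ALIGN_SIZE (assumption) */

typedef struct {
  unsigned int    *words;
  pthread_mutex_t *mutex;   /* NULL when not thread safe */
} tiny_bitmap;

/* Sketch only: one allocation holds the word array and, when requested,
   a mutex placed right after the aligned words. */
static int tiny_bitmap_init(tiny_bitmap *bm, unsigned n_bits, int thread_safe)
{
  size_t size= ((n_bits + 31U) / 32U) * 4U;        /* word storage in bytes */
  size_t extra= 0;
  char *buf;
  if (thread_safe)
  {
    size= ALIGN8(size);                            /* keep the mutex aligned */
    extra= sizeof(pthread_mutex_t);
  }
  if (!(buf= malloc(size + extra)))                /* one block for both */
    return 1;
  bm->words= (unsigned int*) buf;
  bm->mutex= NULL;
  if (thread_safe)
  {
    bm->mutex= (pthread_mutex_t*) (buf + size);
    pthread_mutex_init(bm->mutex, NULL);
  }
  return 0;
}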

@@ -144,15 +147,10 @@ void bitmap_free(MY_BITMAP *map)
  if (map->bitmap)
  {
#ifdef THREAD
-    char *buf= (char *)map->mutex;
-    if (buf)
+    if (map->mutex)
      pthread_mutex_destroy(map->mutex);
-    else
-      buf=(char*) map->bitmap;
-    my_free(buf, MYF(0));
-#else
-    my_free((char*) map->bitmap, MYF(0));
#endif
+    my_free((char*) map->bitmap, MYF(0));
    map->bitmap=0;
  }
  DBUG_VOID_RETURN;

@@ -205,6 +203,40 @@ my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit)
  return res;
}

+/*
+  test if bit already set and clear it if it was set(thread unsafe method)
+
+  SYNOPSIS
+    bitmap_fast_test_and_clear()
+    MAP   bit map struct
+    BIT   bit number
+
+  RETURN
+    0    bit was not set
+    !=0  bit was set
+*/
+
+my_bool bitmap_fast_test_and_clear(MY_BITMAP *map, uint bitmap_bit)
+{
+  uchar *byte= (uchar*) map->bitmap + (bitmap_bit / 8);
+  uchar bit= 1 << ((bitmap_bit) & 7);
+  uchar res= (*byte) & bit;
+  *byte&= ~bit;
+  return res;
+}
+
+
+my_bool bitmap_test_and_clear(MY_BITMAP *map, uint bitmap_bit)
+{
+  my_bool res;
+  DBUG_ASSERT(map->bitmap && bitmap_bit < map->n_bits);
+  bitmap_lock(map);
+  res= bitmap_fast_test_and_clear(map, bitmap_bit);
+  bitmap_unlock(map);
+  return res;
+}
+
+
uint bitmap_set_next(MY_BITMAP *map)
{
  uint bit_found;
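
bitmap_test_and_clear() above is only a mutex-wrapped call to the fast variant; callers that already hold the map's lock can use the fast form directly. A self-contained model of the fast path, with uchar/my_bool written as plain C types:

/* Standalone model of the fast test-and-clear added above (assumption:
   the bitmap is addressed as a byte array, as in the real code). */
static unsigned char bytes[16];                      /* a 128-bit bitmap */

static int fast_test_and_clear(unsigned bit)
{
  unsigned char *byte= bytes + (bit / 8);
  unsigned char mask= (unsigned char) (1 << (bit & 7));
  unsigned char res= (unsigned char) (*byte & mask);  /* remember old value */
  *byte&= (unsigned char) ~mask;                      /* always clear */
  return res != 0;                                    /* nonzero if it was set */
}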

@@ -230,7 +262,6 @@ void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size)
    *m++= (1 << prefix_bits)-1;
  if ((d= no_bytes_in_map(map)-prefix_bytes))
    bzero(m, d);
-  *map->last_word_ptr|= map->last_word_mask; /*Set last bits*/
}

@@ -247,7 +278,7 @@ my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size)
    if (*m++ != 0xff)
      return 0;

-  *map->last_word_ptr^= map->last_word_mask; /*Clear bits*/
+  *map->last_word_ptr&= ~map->last_word_mask; /*Clear bits*/
  res= 0;
  if (prefix_bits && *m++ != (1 << prefix_bits)-1)
    goto ret;

@@ -257,15 +288,15 @@ my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size)
    goto ret;
  res= 1;
ret:
-  *map->last_word_ptr|= map->last_word_mask; /*Set bits again*/
  return res;
}


my_bool bitmap_is_set_all(const MY_BITMAP *map)
{
-  uint32 *data_ptr= map->bitmap;
-  uint32 *end= map->last_word_ptr;
+  my_bitmap_map *data_ptr= map->bitmap;
+  my_bitmap_map *end= map->last_word_ptr;
+  *map->last_word_ptr |= map->last_word_mask;
  for (; data_ptr <= end; data_ptr++)
    if (*data_ptr != 0xFFFFFFFF)
      return FALSE;

@@ -275,9 +306,9 @@ my_bool bitmap_is_set_all(const MY_BITMAP *map)

my_bool bitmap_is_clear_all(const MY_BITMAP *map)
{
-  uint32 *data_ptr= map->bitmap;
-  uint32 *end;
-  if (*map->last_word_ptr != map->last_word_mask)
+  my_bitmap_map *data_ptr= map->bitmap;
+  my_bitmap_map *end;
+  if (*map->last_word_ptr & ~map->last_word_mask)
    return FALSE;
  end= map->last_word_ptr;
  for (; data_ptr < end; data_ptr++)

@@ -286,16 +317,18 @@ my_bool bitmap_is_clear_all(const MY_BITMAP *map)
  return TRUE;
}

/* Return TRUE if map1 is a subset of map2 */

my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2)
{
-  uint32 *m1= map1->bitmap, *m2= map2->bitmap, *end;
+  my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end;

  DBUG_ASSERT(map1->bitmap && map2->bitmap &&
              map1->n_bits==map2->n_bits);

  end= map1->last_word_ptr;
+  *map1->last_word_ptr &= ~map1->last_word_mask;
+  *map2->last_word_ptr &= ~map2->last_word_mask;
  while (m1 <= end)
  {
    if ((*m1++) & ~(*m2++))

@@ -304,16 +337,36 @@ my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2)
    return 1;
}

+/* True if bitmaps has any common bits */
+
+my_bool bitmap_is_overlapping(const MY_BITMAP *map1, const MY_BITMAP *map2)
+{
+  my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end;
+
+  DBUG_ASSERT(map1->bitmap && map2->bitmap &&
+              map1->n_bits==map2->n_bits);
+
+  end= map1->last_word_ptr;
+  *map1->last_word_ptr &= ~map1->last_word_mask;
+  *map2->last_word_ptr &= ~map2->last_word_mask;
+  while (m1 <= end)
+  {
+    if ((*m1++) & (*m2++))
+      return 1;
+  }
+  return 0;
+}
+
+
void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
{
-  uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+  my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;
  uint len= no_words_in_map(map), len2 = no_words_in_map(map2);

  DBUG_ASSERT(map->bitmap && map2->bitmap);

  end= to+min(len,len2);
-  *map2->last_word_ptr^= map2->last_word_mask; /*Clear last bits in map2*/
+  *map2->last_word_ptr&= ~map2->last_word_mask; /*Clear last bits in map2*/
  while (to < end)
    *to++ &= *from++;
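
A pattern runs through these hunks: instead of keeping the unused tail bits of the last word permanently set and XOR-toggling them around each operation, the new code forces them to a known state on demand with last_word_mask. A small sketch of why that matters, assuming a 13-bit map stored in one 32-bit word:

/* Two maps with the same 13 logical bits can still differ in the 19 unused
   bits of their last word, so word-wise comparison must first force those
   bits to a known state (sketch, not MySQL code). */
static const unsigned int last_word_mask= ~((1U << 13) - 1U); /* unused bits */

int last_words_equal(unsigned int a_last, unsigned int b_last)
{
  a_last&= ~last_word_mask;     /* clear stray tail bits */
  b_last&= ~last_word_mask;
  return a_last == b_last;      /* now only logical bits are compared */
}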

@@ -323,8 +376,6 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
    while (to < end)
      *to++=0;
  }
-  *map2->last_word_ptr|= map2->last_word_mask; /*Set last bits in map*/
-  *map->last_word_ptr|= map->last_word_mask; /*Set last bits in map2*/
}

@@ -356,13 +407,12 @@ void bitmap_set_above(MY_BITMAP *map, uint from_byte, uint use_bit)

  while (to < end)
    *to++= use_byte;
-  *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}


void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2)
{
-  uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+  my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;
  DBUG_ASSERT(map->bitmap && map2->bitmap &&
              map->n_bits==map2->n_bits);

@@ -370,13 +420,12 @@ void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2)

  while (to <= end)
    *to++ &= ~(*from++);
-  *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}


void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2)
{
-  uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+  my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;

  DBUG_ASSERT(map->bitmap && map2->bitmap &&
              map->n_bits==map2->n_bits);

@@ -389,25 +438,23 @@ void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2)

void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2)
{
-  uint32 *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr;
+  my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr;
  DBUG_ASSERT(map->bitmap && map2->bitmap &&
              map->n_bits==map2->n_bits);
  while (to <= end)
    *to++ ^= *from++;
-  *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}


void bitmap_invert(MY_BITMAP *map)
{
-  uint32 *to= map->bitmap, *end;
+  my_bitmap_map *to= map->bitmap, *end;

  DBUG_ASSERT(map->bitmap);
  end= map->last_word_ptr;

  while (to <= end)
    *to++ ^= 0xFFFFFFFF;
-  *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}

@@ -418,21 +465,35 @@ uint bitmap_bits_set(const MY_BITMAP *map)
  uint res= 0;

  DBUG_ASSERT(map->bitmap);
-  *map->last_word_ptr^=map->last_word_mask; /*Reset last bits to zero*/
+  *map->last_word_ptr&= ~map->last_word_mask; /*Reset last bits to zero*/
  while (m < end)
    res+= my_count_bits_ushort(*m++);
-  *map->last_word_ptr^=map->last_word_mask; /*Set last bits to one again*/
  return res;
}

+
+void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2)
+{
+  my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end;
+
+  DBUG_ASSERT(map->bitmap && map2->bitmap &&
+              map->n_bits==map2->n_bits);
+  end= map->last_word_ptr;
+  while (to <= end)
+    *to++ = *from++;
+}
+
+
uint bitmap_get_first_set(const MY_BITMAP *map)
{
  uchar *byte_ptr;
-  uint bit_found,i,j,k;
-  uint32 *data_ptr, *end= map->last_word_ptr;
+  uint i,j,k;
+  my_bitmap_map *data_ptr, *end= map->last_word_ptr;

  DBUG_ASSERT(map->bitmap);
  data_ptr= map->bitmap;
+  *map->last_word_ptr &= ~map->last_word_mask;

  for (i=0; data_ptr <= end; data_ptr++, i++)
  {
    if (*data_ptr)

@@ -445,12 +506,7 @@ uint bitmap_get_first_set(const MY_BITMAP *map)
      for (k=0; ; k++)
      {
        if (*byte_ptr & (1 << k))
-        {
-          bit_found= (i*32) + (j*8) + k;
-          if (bit_found == map->n_bits)
-            return MY_BIT_NONE;
-          return bit_found;
-        }
+          return (i*32) + (j*8) + k;
      }
      DBUG_ASSERT(0);
    }

@@ -465,11 +521,13 @@ uint bitmap_get_first_set(const MY_BITMAP *map)
uint bitmap_get_first(const MY_BITMAP *map)
{
  uchar *byte_ptr;
-  uint bit_found= MY_BIT_NONE, i,j,k;
-  uint32 *data_ptr, *end= map->last_word_ptr;
+  uint i,j,k;
+  my_bitmap_map *data_ptr, *end= map->last_word_ptr;

  DBUG_ASSERT(map->bitmap);
  data_ptr= map->bitmap;
+  *map->last_word_ptr|= map->last_word_mask;

  for (i=0; data_ptr <= end; data_ptr++, i++)
  {
    if (*data_ptr != 0xFFFFFFFF)

@@ -482,12 +540,7 @@ uint bitmap_get_first(const MY_BITMAP *map)
      for (k=0; ; k++)
      {
        if (!(*byte_ptr & (1 << k)))
-        {
-          bit_found= (i*32) + (j*8) + k;
-          if (bit_found == map->n_bits)
-            return MY_BIT_NONE;
-          return bit_found;
-        }
+          return (i*32) + (j*8) + k;
      }
      DBUG_ASSERT(0);
    }

@@ -705,16 +758,6 @@ void bitmap_lock_flip_bit(MY_BITMAP *map, uint bitmap_bit)
#endif
#ifdef MAIN

-static void bitmap_print(MY_BITMAP *map)
-{
-  uint32 *to= map->bitmap, *end= map->last_word_ptr;
-  while (to <= end)
-  {
-    fprintf(stderr,"0x%x ", *to++);
-  }
-  fprintf(stderr,"\n");
-}
-
uint get_rand_bit(uint bitsize)
{
  return (rand() % bitsize);

@@ -766,7 +809,8 @@ error2:
  return TRUE;
}

-bool test_operators(MY_BITMAP *map, uint bitsize)
+bool test_operators(MY_BITMAP *map __attribute__((unused)),
+                    uint bitsize __attribute__((unused)))
{
  return FALSE;
}

@@ -819,8 +863,8 @@ bool test_compare_operators(MY_BITMAP *map, uint bitsize)
  uint no_loops= bitsize > 128 ? 128 : bitsize;
  MY_BITMAP map2_obj, map3_obj;
  MY_BITMAP *map2= &map2_obj, *map3= &map3_obj;
-  uint32 map2buf[1024];
-  uint32 map3buf[1024];
+  my_bitmap_map map2buf[1024];
+  my_bitmap_map map3buf[1024];
  bitmap_init(&map2_obj, map2buf, bitsize, FALSE);
  bitmap_init(&map3_obj, map3buf, bitsize, FALSE);
  bitmap_clear_all(map2);

@@ -947,7 +991,7 @@ error2:

bool test_get_first_bit(MY_BITMAP *map, uint bitsize)
{
-  uint i, j, test_bit;
+  uint i, test_bit;
  uint no_loops= bitsize > 128 ? 128 : bitsize;
  for (i=0; i < no_loops; i++)
  {

@@ -1027,7 +1071,7 @@ error3:
bool do_test(uint bitsize)
{
  MY_BITMAP map;
-  uint32 buf[1024];
+  my_bitmap_map buf[1024];
  if (bitmap_init(&map, buf, bitsize, FALSE))
  {
    printf("init error for bitsize %d", bitsize);

@@ -480,7 +480,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
  data->type=lock_type;
  data->owner= owner;                           /* Must be reset ! */
  VOID(pthread_mutex_lock(&lock->mutex));
-  DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx type: %d",
+  DBUG_PRINT("lock",("data: 0x%lx thread: 0x%lx lock: 0x%lx type: %d",
                     data, data->owner->info->thread_id,
                     lock, (int) lock_type));
  check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ?
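
This and the following thr_lock.c hunks change the thread id format from %ld to 0x%lx. A tiny sketch of the difference, assuming (as the call sites do) that the id has already been cast to unsigned long:

#include <stdio.h>

void print_id(unsigned long id)
{
  printf("thread: %ld\n", id);    /* old: signed decimal, large ids print negative */
  printf("thread: 0x%lx\n", id);  /* new: hex, consistent with the 0x%lx pointers */
}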

@@ -499,7 +499,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
        and the read lock is not TL_READ_NO_INSERT
      */

-      DBUG_PRINT("lock",("write locked by thread: %ld",
+      DBUG_PRINT("lock",("write locked by thread: 0x%lx",
                         lock->write.data->owner->info->thread_id));
      if (thr_lock_owner_equal(data->owner, lock->write.data->owner) ||
          (lock->write.data->type <= TL_WRITE_DELAYED &&

@@ -621,7 +621,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
        statistic_increment(locks_immediate,&THR_LOCK_lock);
        goto end;
      }
-      DBUG_PRINT("lock",("write locked by thread: %ld",
+      DBUG_PRINT("lock",("write locked by thread: 0x%lx",
                         lock->write.data->owner->info->thread_id));
    }
    else

@@ -657,7 +657,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner,
          goto end;
        }
      }
-      DBUG_PRINT("lock",("write locked by thread: %ld, type: %ld",
+      DBUG_PRINT("lock",("write locked by thread: 0x%lx, type: %ld",
                         lock->read.data->owner->info->thread_id, data->type));
    }
    wait_queue= &lock->write_wait;

@@ -719,7 +719,7 @@ static inline void free_all_read_locks(THR_LOCK *lock,
      }
      lock->read_no_write_count++;
    }
-    DBUG_PRINT("lock",("giving read lock to thread: %ld",
+    DBUG_PRINT("lock",("giving read lock to thread: 0x%lx",
                       data->owner->info->thread_id));
    data->cond=0;                                /* Mark thread free */
    VOID(pthread_cond_signal(cond));

@@ -737,7 +737,7 @@ void thr_unlock(THR_LOCK_DATA *data)
  THR_LOCK *lock=data->lock;
  enum thr_lock_type lock_type=data->type;
  DBUG_ENTER("thr_unlock");
-  DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx",
+  DBUG_PRINT("lock",("data: 0x%lx thread: 0x%lx lock: 0x%lx",
                     data, data->owner->info->thread_id, lock));
  pthread_mutex_lock(&lock->mutex);
  check_locks(lock,"start of release lock",0);

@@ -797,7 +797,7 @@ void thr_unlock(THR_LOCK_DATA *data)
    if (data->type == TL_WRITE_CONCURRENT_INSERT &&
        (*lock->check_status)(data->status_param))
      data->type=TL_WRITE;                       /* Upgrade lock */
-    DBUG_PRINT("lock",("giving write lock of type %d to thread: %ld",
+    DBUG_PRINT("lock",("giving write lock of type %d to thread: 0x%lx",
                       data->type, data->owner->info->thread_id));
    {
      pthread_cond_t *cond=data->cond;

35 sql/event.cc

@@ -31,8 +31,8 @@
  should be replicated as disabled. If an event is ALTERed as DISABLED the
  query should go untouched into the binary log, when ALTERed as enable then
  it should go as SLAVESIDE_DISABLED. This is regarding the SQL interface.
-  TT routines however modify mysql.event internally and this does not go the log
-  so in this case queries has to be injected into the log...somehow... or
+  TT routines however modify mysql.event internally and this does not go the
+  log so in this case queries has to be injected into the log...somehow... or
  maybe a solution is RBR for this case, because the event may go only from
  ENABLED to DISABLED status change and this is safe for replicating. As well
  an event may be deleted which is also safe for RBR.

@@ -40,9 +40,9 @@
  - Add logging to file

  Warning:
-  - For now parallel execution is not possible because the same sp_head cannot be
-    executed few times!!! There is still no lock attached to particular event.
-
+  - For now parallel execution is not possible because the same sp_head cannot
+    be executed few times!!! There is still no lock attached to particular
+    event.
*/

@@ -411,9 +411,9 @@ common_1_lev_code:

  SYNOPSIS
    Events::open_event_table()
-      thd         Thread context
-      lock_type   How to lock the table
-      table       The table pointer
+      thd         Thread context
+      lock_type   How to lock the table
+      table       We will store the open table here

  RETURN VALUE
    1   Cannot lock table

@@ -426,7 +426,7 @@ Events::open_event_table(THD *thd, enum thr_lock_type lock_type,
                         TABLE **table)
{
  TABLE_LIST tables;
-  DBUG_ENTER("open_proc_table");
+  DBUG_ENTER("open_events_table");

  bzero((char*) &tables, sizeof(tables));
  tables.db= (char*) "mysql";

@@ -445,7 +445,7 @@ Events::open_event_table(THD *thd, enum thr_lock_type lock_type,
    DBUG_RETURN(2);
  }
  *table= tables.table;
-
+  tables.table->use_all_columns();
  DBUG_RETURN(0);
}

@@ -512,12 +512,14 @@ evex_db_find_event_by_name(THD *thd, const LEX_STRING dbname,
  table->field[Events::FIELD_NAME]->store(ev_name.str, ev_name.length,
                                          &my_charset_bin);

-  key_copy(key, table->record[0], table->key_info, table->key_info->key_length);
+  key_copy(key, table->record[0], table->key_info,
+           table->key_info->key_length);

  if (table->file->index_read_idx(table->record[0], 0, key,
-                                 table->key_info->key_length,HA_READ_KEY_EXACT))
+                                 table->key_info->key_length,
+                                 HA_READ_KEY_EXACT))
  {
-    DBUG_PRINT("info", ("Row not fonud"));
+    DBUG_PRINT("info", ("Row not found"));
    DBUG_RETURN(EVEX_KEY_NOT_FOUND);
  }

@@ -708,7 +710,8 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not,
  }

  DBUG_PRINT("info", ("non-existant, go forward"));
-  if ((ret= sp_use_new_db(thd, et->dbname.str,olddb, sizeof(olddb),0, &dbchanged)))
+  if ((ret= sp_use_new_db(thd, et->dbname.str,olddb, sizeof(olddb),0,
+                          &dbchanged)))
  {
    my_error(ER_BAD_DB_ERROR, MYF(0));
    goto err;

@@ -1280,7 +1283,9 @@ db_drop_events_from_table(THD *thd, LEX_STRING *db)

  if ((ret= Events::open_event_table(thd, TL_WRITE, &table)))
  {
-    sql_print_error("Table mysql.event is damaged.");
+    if (my_errno != ENOENT)
+      sql_print_error("Table mysql.event is damaged. Got error %d on open",
+                      my_errno);
    DBUG_RETURN(ret);
  }
  /* only enabled events are in memory, so we go now and delete the rest */
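
The use_all_columns() call added above marks every column of mysql.event as readable and writable before the event code touches arbitrary fields, which keeps the new column-set asserts in field.cc (below) from firing. A rough model of the effect, with simplified stand-ins for the TABLE bitmaps:

#include <string.h>

/* Simplified stand-ins for TABLE's read_set/write_set (assumption: the real
   API works on MY_BITMAP structures, not raw byte arrays). */
typedef struct { unsigned char read_set[32], write_set[32]; } col_sets;

static void use_all_columns_model(col_sets *t)
{
  memset(t->read_set,  0xff, sizeof(t->read_set));   /* every column readable */
  memset(t->write_set, 0xff, sizeof(t->write_set));  /* every column writable */
}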

203 sql/field.cc

@@ -50,6 +50,9 @@ const char field_separator=',';
#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
  ((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))

+#define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table->read_set || bitmap_is_set(table->read_set, field_index))
+#define ASSERT_COLUMN_MARKED_FOR_WRITE DBUG_ASSERT(!table->write_set || bitmap_is_set(table->write_set, field_index))
+
/*
  Rules for merging different types of fields in UNION
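
The two macros added above are the enforcement point for the column read/write sets: nearly every Field::store() and val_*() method in the hunks below now asserts that its column's bit is set in table->read_set or table->write_set. A compact model of the check, with a scratch bit_set in place of MY_BITMAP:

#include <assert.h>

/* Scratch stand-ins for MY_BITMAP and TABLE (assumption: simplified). */
typedef struct { unsigned char bits[32]; } bit_set;

static int bit_is_set(const bit_set *s, unsigned idx)
{
  return (s->bits[idx / 8] >> (idx & 7)) & 1;
}

typedef struct {
  bit_set *read_set;    /* columns this statement may read; NULL = no check */
  bit_set *write_set;   /* columns this statement may write */
} table_model;

#define ASSERT_MARKED_FOR_READ(t, field_index) \
  assert(!(t)->read_set || bit_is_set((t)->read_set, (field_index)))
#define ASSERT_MARKED_FOR_WRITE(t, field_index) \
  assert(!(t)->write_set || bit_is_set((t)->write_set, (field_index)))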
|
||||
|
||||
|
@ -1201,9 +1204,11 @@ static bool test_if_real(const char *str,int length, CHARSET_INFO *cs)
|
|||
|
||||
String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
CHARSET_INFO *cs= &my_charset_bin;
|
||||
uint length= 21;
|
||||
longlong value= val_int();
|
||||
|
||||
if (val_buffer->alloc(length))
|
||||
return 0;
|
||||
length= (uint) (*cs->cset->longlong10_to_str)(cs, (char*) val_buffer->ptr(),
|
||||
|
@ -1221,16 +1226,17 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
|
|||
:ptr(ptr_arg), null_ptr(null_ptr_arg),
|
||||
table(0), orig_table(0), table_name(0),
|
||||
field_name(field_name_arg),
|
||||
query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0),
|
||||
unireg_check(unireg_check_arg),
|
||||
key_start(0), part_of_key(0), part_of_key_not_clustered(0),
|
||||
part_of_sortkey(0), unireg_check(unireg_check_arg),
|
||||
field_length(length_arg), null_bit(null_bit_arg)
|
||||
{
|
||||
flags=null_ptr ? 0: NOT_NULL_FLAG;
|
||||
comment.str= (char*) "";
|
||||
comment.length=0;
|
||||
fieldnr= 0;
|
||||
field_index= 0;
|
||||
}
|
||||
|
||||
|
||||
uint Field::offset()
|
||||
{
|
||||
return (uint) (ptr - (char*) table->record[0]);
|
||||
|
@ -1354,6 +1360,7 @@ longlong Field::convert_decimal2longlong(const my_decimal *val,
|
|||
|
||||
int Field_num::store_decimal(const my_decimal *val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int err= 0;
|
||||
longlong i= convert_decimal2longlong(val, unsigned_flag, &err);
|
||||
return test(err | store(i, unsigned_flag));
|
||||
|
@ -1378,6 +1385,7 @@ int Field_num::store_decimal(const my_decimal *val)
|
|||
|
||||
my_decimal* Field_num::val_decimal(my_decimal *decimal_value)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
DBUG_ASSERT(result_type() == INT_RESULT);
|
||||
longlong nr= val_int();
|
||||
int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value);
|
||||
|
@ -1423,6 +1431,7 @@ void Field_num::make_field(Send_field *field)
|
|||
|
||||
int Field_str::store_decimal(const my_decimal *d)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
double val;
|
||||
/* TODO: use decimal2string? */
|
||||
int err= warn_if_overflow(my_decimal2double(E_DEC_FATAL_ERROR &
|
||||
|
@ -1433,6 +1442,7 @@ int Field_str::store_decimal(const my_decimal *d)
|
|||
|
||||
my_decimal *Field_str::val_decimal(my_decimal *decimal_value)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
longlong nr= val_int();
|
||||
int2my_decimal(E_DEC_FATAL_ERROR, nr, 0, decimal_value);
|
||||
return decimal_value;
|
||||
|
@ -1498,6 +1508,7 @@ bool Field::get_time(TIME *ltime)
|
|||
|
||||
int Field::store_time(TIME *ltime, timestamp_type type)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
char buff[MAX_DATE_STRING_REP_LENGTH];
|
||||
uint length= (uint) my_TIME_to_str(ltime, buff);
|
||||
return store(buff, length, &my_charset_bin);
|
||||
|
@ -1720,6 +1731,7 @@ void Field_decimal::overflow(bool negative)
|
|||
|
||||
int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
char buff[STRING_BUFFER_USUAL_SIZE];
|
||||
String tmp(buff,sizeof(buff), &my_charset_bin);
|
||||
|
||||
|
@ -2089,6 +2101,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs)
|
|||
|
||||
int Field_decimal::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
if (unsigned_flag && nr < 0)
|
||||
{
|
||||
overflow(1);
|
||||
|
@ -2134,6 +2147,7 @@ int Field_decimal::store(double nr)
|
|||
|
||||
int Field_decimal::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
char buff[22];
|
||||
uint length, int_part;
|
||||
char fyllchar, *to;
|
||||
|
@ -2168,6 +2182,7 @@ int Field_decimal::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_decimal::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int not_used;
|
||||
char *end_not_used;
|
||||
return my_strntod(&my_charset_bin, ptr, field_length, &end_not_used,
|
||||
|
@ -2176,6 +2191,7 @@ double Field_decimal::val_real(void)
|
|||
|
||||
longlong Field_decimal::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int not_used;
|
||||
if (unsigned_flag)
|
||||
return my_strntoull(&my_charset_bin, ptr, field_length, 10, NULL,
|
||||
|
@ -2189,6 +2205,7 @@ longlong Field_decimal::val_int(void)
|
|||
String *Field_decimal::val_str(String *val_buffer __attribute__((unused)),
|
||||
String *val_ptr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
char *str;
|
||||
for (str=ptr ; *str == ' ' ; str++) ;
|
||||
uint tmp_length=(uint) (str-ptr);
|
||||
|
@ -2365,6 +2382,7 @@ void Field_new_decimal::set_value_on_overflow(my_decimal *decimal_value,
|
|||
|
||||
bool Field_new_decimal::store_value(const my_decimal *decimal_value)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
DBUG_ENTER("Field_new_decimal::store_value");
|
||||
#ifndef DBUG_OFF
|
||||
|
@ -2409,6 +2427,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
|
|||
int Field_new_decimal::store(const char *from, uint length,
|
||||
CHARSET_INFO *charset)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int err;
|
||||
my_decimal decimal_value;
|
||||
DBUG_ENTER("Field_new_decimal::store(char*)");
|
||||
|
@ -2456,6 +2475,7 @@ int Field_new_decimal::store(const char *from, uint length,
|
|||
|
||||
int Field_new_decimal::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
my_decimal decimal_value;
|
||||
int err;
|
||||
DBUG_ENTER("Field_new_decimal::store(double)");
|
||||
|
@ -2490,6 +2510,7 @@ int Field_new_decimal::store(double nr)
|
|||
|
||||
int Field_new_decimal::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
my_decimal decimal_value;
|
||||
int err;
|
||||
|
||||
|
@ -2511,12 +2532,14 @@ int Field_new_decimal::store(longlong nr, bool unsigned_val)
|
|||
|
||||
int Field_new_decimal::store_decimal(const my_decimal *decimal_value)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
return store_value(decimal_value);
|
||||
}
|
||||
|
||||
|
||||
double Field_new_decimal::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
double dbl;
|
||||
my_decimal decimal_value;
|
||||
my_decimal2double(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), &dbl);
|
||||
|
@ -2526,6 +2549,7 @@ double Field_new_decimal::val_real(void)
|
|||
|
||||
longlong Field_new_decimal::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
longlong i;
|
||||
my_decimal decimal_value;
|
||||
my_decimal2int(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
|
||||
|
@ -2536,6 +2560,7 @@ longlong Field_new_decimal::val_int(void)
|
|||
|
||||
my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
DBUG_ENTER("Field_new_decimal::val_decimal");
|
||||
binary2my_decimal(E_DEC_FATAL_ERROR, ptr, decimal_value,
|
||||
precision, dec);
|
||||
|
@ -2548,6 +2573,7 @@ my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
|
|||
String *Field_new_decimal::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
my_decimal decimal_value;
|
||||
uint fixed_precision= zerofill ? precision : 0;
|
||||
my_decimal2string(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
|
||||
|
@ -2584,6 +2610,7 @@ void Field_new_decimal::sql_type(String &str) const
|
|||
|
||||
int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int not_used; // We can ignore result from str2int
|
||||
char *end;
|
||||
long tmp= my_strntol(cs, from, len, 10, &end, ¬_used);
|
||||
|
@ -2630,6 +2657,7 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
|
|||
|
||||
int Field_tiny::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
nr=rint(nr);
|
||||
if (unsigned_flag)
|
||||
|
@ -2672,6 +2700,7 @@ int Field_tiny::store(double nr)
|
|||
|
||||
int Field_tiny::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
|
||||
if (unsigned_flag)
|
||||
|
@ -2716,6 +2745,7 @@ int Field_tiny::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_tiny::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int tmp= unsigned_flag ? (int) ((uchar*) ptr)[0] :
|
||||
(int) ((signed char*) ptr)[0];
|
||||
return (double) tmp;
|
||||
|
@ -2724,6 +2754,7 @@ double Field_tiny::val_real(void)
|
|||
|
||||
longlong Field_tiny::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int tmp= unsigned_flag ? (int) ((uchar*) ptr)[0] :
|
||||
(int) ((signed char*) ptr)[0];
|
||||
return (longlong) tmp;
|
||||
|
@ -2733,6 +2764,7 @@ longlong Field_tiny::val_int(void)
|
|||
String *Field_tiny::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
CHARSET_INFO *cs= &my_charset_bin;
|
||||
uint length;
|
||||
uint mlength=max(field_length+1,5*cs->mbmaxlen);
|
||||
|
@ -2788,6 +2820,7 @@ void Field_tiny::sql_type(String &res) const
|
|||
|
||||
int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int not_used; // We can ignore result from str2int
|
||||
char *end;
|
||||
long tmp= my_strntol(cs, from, len, 10, &end, ¬_used);
|
||||
|
@ -2841,6 +2874,7 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
|
|||
|
||||
int Field_short::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
int16 res;
|
||||
nr=rint(nr);
|
||||
|
@ -2892,6 +2926,7 @@ int Field_short::store(double nr)
|
|||
|
||||
int Field_short::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
int16 res;
|
||||
|
||||
|
@ -2946,6 +2981,7 @@ int Field_short::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_short::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
short j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -2958,6 +2994,7 @@ double Field_short::val_real(void)
|
|||
|
||||
longlong Field_short::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
short j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -2972,6 +3009,7 @@ longlong Field_short::val_int(void)
|
|||
String *Field_short::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
CHARSET_INFO *cs= &my_charset_bin;
|
||||
uint length;
|
||||
uint mlength=max(field_length+1,7*cs->mbmaxlen);
|
||||
|
@ -3062,6 +3100,7 @@ void Field_short::sql_type(String &res) const
|
|||
|
||||
int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int not_used; // We can ignore result from str2int
|
||||
char *end;
|
||||
long tmp= my_strntol(cs, from, len, 10, &end, ¬_used);
|
||||
|
@ -3109,6 +3148,7 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
|
|||
|
||||
int Field_medium::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
nr=rint(nr);
|
||||
if (unsigned_flag)
|
||||
|
@ -3154,6 +3194,7 @@ int Field_medium::store(double nr)
|
|||
|
||||
int Field_medium::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
|
||||
if (unsigned_flag)
|
||||
|
@ -3202,6 +3243,7 @@ int Field_medium::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_medium::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
|
||||
return (double) j;
|
||||
}
|
||||
|
@ -3209,6 +3251,7 @@ double Field_medium::val_real(void)
|
|||
|
||||
longlong Field_medium::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
|
||||
return (longlong) j;
|
||||
}
|
||||
|
@ -3217,6 +3260,7 @@ longlong Field_medium::val_int(void)
|
|||
String *Field_medium::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
CHARSET_INFO *cs= &my_charset_bin;
|
||||
uint length;
|
||||
uint mlength=max(field_length+1,10*cs->mbmaxlen);
|
||||
|
@ -3234,6 +3278,7 @@ String *Field_medium::val_str(String *val_buffer,
|
|||
|
||||
bool Field_medium::send_binary(Protocol *protocol)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
return protocol->store_long(Field_medium::val_int());
|
||||
}
|
||||
|
||||
|
@ -3298,6 +3343,7 @@ static bool test_if_minus(CHARSET_INFO *cs,
|
|||
|
||||
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
ulong tmp_scan;
|
||||
longlong tmp;
|
||||
long store_tmp;
|
||||
|
@ -3370,6 +3416,7 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
|
|||
|
||||
int Field_long::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
int32 res;
|
||||
nr=rint(nr);
|
||||
|
@ -3421,6 +3468,7 @@ int Field_long::store(double nr)
|
|||
|
||||
int Field_long::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
int32 res;
|
||||
DBUG_ASSERT(table->in_use == current_thd); // General safety
|
||||
|
@ -3474,6 +3522,7 @@ int Field_long::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_long::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int32 j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -3486,6 +3535,7 @@ double Field_long::val_real(void)
|
|||
|
||||
longlong Field_long::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int32 j;
|
||||
/* See the comment in Field_long::store(long long) */
|
||||
DBUG_ASSERT(table->in_use == current_thd);
|
||||
|
@ -3501,6 +3551,7 @@ longlong Field_long::val_int(void)
|
|||
String *Field_long::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
CHARSET_INFO *cs= &my_charset_bin;
|
||||
uint length;
|
||||
uint mlength=max(field_length+1,12*cs->mbmaxlen);
|
||||
|
@ -3527,6 +3578,7 @@ String *Field_long::val_str(String *val_buffer,
|
|||
|
||||
bool Field_long::send_binary(Protocol *protocol)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
return protocol->store_long(Field_long::val_int());
|
||||
}
|
||||
|
||||
|
@ -3591,6 +3643,7 @@ void Field_long::sql_type(String &res) const
|
|||
|
||||
int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
longlong tmp;
|
||||
int error= 0;
|
||||
char *end;
|
||||
|
@ -3632,6 +3685,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
|
|||
|
||||
int Field_longlong::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
longlong res;
|
||||
|
||||
|
@ -3683,6 +3737,7 @@ int Field_longlong::store(double nr)
|
|||
|
||||
int Field_longlong::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
|
||||
if (nr < 0) // Only possible error
|
||||
|
@ -3713,6 +3768,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_longlong::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
longlong j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -3734,6 +3790,7 @@ double Field_longlong::val_real(void)
|
|||
|
||||
longlong Field_longlong::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
longlong j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -3772,6 +3829,7 @@ String *Field_longlong::val_str(String *val_buffer,
|
|||
|
||||
bool Field_longlong::send_binary(Protocol *protocol)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
return protocol->store_longlong(Field_longlong::val_int(), unsigned_flag);
|
||||
}
|
||||
|
||||
|
@ -3864,6 +3922,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs)
|
|||
|
||||
int Field_float::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
float j;
|
||||
int error= 0;
|
||||
|
||||
|
@ -3928,12 +3987,14 @@ int Field_float::store(double nr)
|
|||
|
||||
int Field_float::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr);
|
||||
return Field_float::store(unsigned_val ? ulonglong2double((ulonglong) nr) :
|
||||
(double) nr);
|
||||
}
|
||||
|
||||
|
||||
double Field_float::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
float j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -3964,6 +4025,7 @@ longlong Field_float::val_int(void)
|
|||
String *Field_float::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
float nr;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -4109,6 +4171,7 @@ void Field_float::sort_string(char *to,uint length __attribute__((unused)))
|
|||
|
||||
bool Field_float::send_binary(Protocol *protocol)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
return protocol->store((float) Field_float::val_real(), dec, (String*) 0);
|
||||
}
|
||||
|
||||
|
@ -4152,6 +4215,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs)
|
|||
|
||||
int Field_double::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
int error= 0;
|
||||
|
||||
if (isnan(nr))
|
||||
|
@ -4209,7 +4273,8 @@ int Field_double::store(double nr)
|
|||
|
||||
int Field_double::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr);
|
||||
return Field_double::store(unsigned_val ? ulonglong2double((ulonglong) nr) :
|
||||
(double) nr);
|
||||
}
|
||||
|
||||
|
||||
|
@ -4222,6 +4287,7 @@ int Field_real::store_decimal(const my_decimal *dm)
|
|||
|
||||
double Field_double::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
double j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -4236,6 +4302,7 @@ double Field_double::val_real(void)
|
|||
|
||||
longlong Field_double::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
double j;
|
||||
longlong res;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
|
@ -4275,6 +4342,7 @@ warn:
|
|||
|
||||
my_decimal *Field_real::val_decimal(my_decimal *decimal_value)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
double2my_decimal(E_DEC_FATAL_ERROR, val_real(), decimal_value);
|
||||
return decimal_value;
|
||||
}
|
||||
|
@ -4283,6 +4351,7 @@ my_decimal *Field_real::val_decimal(my_decimal *decimal_value)
|
|||
String *Field_double::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
double nr;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -4371,6 +4440,7 @@ bool Field_double::send_binary(Protocol *protocol)
|
|||
|
||||
int Field_double::cmp(const char *a_ptr, const char *b_ptr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
double a,b;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -4529,6 +4599,7 @@ timestamp_auto_set_type Field_timestamp::get_auto_set_type() const
|
|||
|
||||
int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
TIME l_time;
|
||||
my_time_t tmp= 0;
|
||||
int error;
|
||||
|
@ -4599,6 +4670,7 @@ int Field_timestamp::store(double nr)
|
|||
|
||||
int Field_timestamp::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
TIME l_time;
|
||||
my_time_t timestamp= 0;
|
||||
int error;
|
||||
|
@ -4650,11 +4722,13 @@ int Field_timestamp::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_timestamp::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
return (double) Field_timestamp::val_int();
|
||||
}
|
||||
|
||||
longlong Field_timestamp::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
uint32 temp;
|
||||
TIME time_tmp;
|
||||
THD *thd= table->in_use;
|
||||
|
@ -4680,6 +4754,7 @@ longlong Field_timestamp::val_int(void)
|
|||
|
||||
String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
uint32 temp, temp2;
|
||||
TIME time_tmp;
|
||||
THD *thd= table->in_use;
|
||||
|
@ -4909,6 +4984,7 @@ int Field_time::store_time(TIME *ltime, timestamp_type type)
|
|||
|
||||
int Field_time::store(double nr)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
long tmp;
|
||||
int error= 0;
|
||||
if (nr > 8385959.0)
|
||||
|
@ -4946,6 +5022,7 @@ int Field_time::store(double nr)
|
|||
|
||||
int Field_time::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
long tmp;
|
||||
int error= 0;
|
||||
if (nr < (longlong) -8385959L && !unsigned_val)
|
||||
|
@ -4983,12 +5060,14 @@ int Field_time::store(longlong nr, bool unsigned_val)
|
|||
|
||||
double Field_time::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
uint32 j= (uint32) uint3korr(ptr);
|
||||
return (double) j;
|
||||
}
|
||||
|
||||
longlong Field_time::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
return (longlong) sint3korr(ptr);
|
||||
}
|
||||
|
||||
|
@ -5001,6 +5080,7 @@ longlong Field_time::val_int(void)
|
|||
String *Field_time::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
TIME ltime;
|
||||
val_buffer->alloc(19);
|
||||
long tmp=(long) sint3korr(ptr);
|
||||
|
@ -5110,6 +5190,7 @@ void Field_time::sql_type(String &res) const
|
|||
|
||||
int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
char *end;
|
||||
int error;
|
||||
long nr= my_strntol(cs, from, len, 10, &end, &error);
|
||||
|
@ -5148,6 +5229,7 @@ int Field_year::store(double nr)
|
|||
|
||||
int Field_year::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155)
|
||||
{
|
||||
*ptr= 0;
|
||||
|
@ -5168,6 +5250,7 @@ int Field_year::store(longlong nr, bool unsigned_val)
|
|||
|
||||
bool Field_year::send_binary(Protocol *protocol)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
ulonglong tmp= Field_year::val_int();
|
||||
return protocol->store_short(tmp);
|
||||
}
|
||||
|
@ -5181,6 +5264,7 @@ double Field_year::val_real(void)
|
|||
|
||||
longlong Field_year::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int tmp= (int) ((uchar*) ptr)[0];
|
||||
if (field_length != 4)
|
||||
tmp%=100; // Return last 2 char
|
||||
|
@ -5218,6 +5302,7 @@ void Field_year::sql_type(String &res) const
|
|||
|
||||
int Field_date::store(const char *from, uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
TIME l_time;
|
||||
uint32 tmp;
|
||||
int error;
|
||||
|
@ -5273,6 +5358,7 @@ int Field_date::store(double nr)
|
|||
|
||||
int Field_date::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
TIME not_used;
|
||||
int error;
|
||||
longlong initial_nr= nr;
|
||||
|
@ -5323,6 +5409,7 @@ bool Field_date::send_binary(Protocol *protocol)
|
|||
|
||||
double Field_date::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int32 j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -5336,6 +5423,7 @@ double Field_date::val_real(void)
|
|||
|
||||
longlong Field_date::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
int32 j;
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->s->db_low_byte_first)
|
||||
|
@ -5350,6 +5438,7 @@ longlong Field_date::val_int(void)
|
|||
String *Field_date::val_str(String *val_buffer,
|
||||
String *val_ptr __attribute__((unused)))
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
TIME ltime;
|
||||
val_buffer->alloc(field_length);
|
||||
int32 tmp;
|
||||
|
@ -5421,6 +5510,7 @@ void Field_date::sql_type(String &res) const
|
|||
|
||||
int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
TIME l_time;
|
||||
long tmp;
|
||||
int error;
|
||||
|
@ -5461,6 +5551,7 @@ int Field_newdate::store(double nr)
|
|||
|
||||
int Field_newdate::store(longlong nr, bool unsigned_val)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
TIME l_time;
|
||||
longlong tmp;
|
||||
int error;
|
||||
|
@ -5489,6 +5580,7 @@ int Field_newdate::store(longlong nr, bool unsigned_val)
|
|||
|
||||
int Field_newdate::store_time(TIME *ltime,timestamp_type type)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_WRITE;
|
||||
long tmp;
|
||||
int error= 0;
|
||||
if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
|
||||
|
@ -5514,12 +5606,14 @@ bool Field_newdate::send_binary(Protocol *protocol)
|
|||
|
||||
double Field_newdate::val_real(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
return (double) Field_newdate::val_int();
|
||||
}
|
||||
|
||||
|
||||
longlong Field_newdate::val_int(void)
|
||||
{
|
||||
ASSERT_COLUMN_MARKED_FOR_READ;
|
||||
ulong j= uint3korr(ptr);
|
||||
j= (j % 32L)+(j / 32L % 16L)*100L + (j/(16L*32L))*10000L;
|
||||
return (longlong) j;
|
||||
|
@@ -5529,6 +5623,7 @@ longlong Field_newdate::val_int(void)
 String *Field_newdate::val_str(String *val_buffer,
                                String *val_ptr __attribute__((unused)))
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   val_buffer->alloc(field_length);
   val_buffer->length(field_length);
   uint32 tmp=(uint32) uint3korr(ptr);
@@ -5605,6 +5700,7 @@ void Field_newdate::sql_type(String &res) const
 
 int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   TIME time_tmp;
   int error;
   ulonglong tmp= 0;
@@ -5656,6 +5752,7 @@ int Field_datetime::store(double nr)
 
 int Field_datetime::store(longlong nr, bool unsigned_val)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   TIME not_used;
   int error;
   longlong initial_nr= nr;
@@ -5692,6 +5789,7 @@ int Field_datetime::store(longlong nr, bool unsigned_val)
 
 int Field_datetime::store_time(TIME *ltime,timestamp_type type)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   longlong tmp;
   int error= 0;
   /*
@@ -5733,6 +5831,7 @@ double Field_datetime::val_real(void)
 
 longlong Field_datetime::val_int(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   longlong j;
 #ifdef WORDS_BIGENDIAN
   if (table->s->db_low_byte_first)
@@ -5747,6 +5846,7 @@ longlong Field_datetime::val_int(void)
 String *Field_datetime::val_str(String *val_buffer,
                                 String *val_ptr __attribute__((unused)))
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   val_buffer->alloc(field_length);
   val_buffer->length(field_length);
   ulonglong tmp;
@@ -5878,6 +5978,7 @@ void Field_datetime::sql_type(String &res) const
 
 int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   int error= 0, well_formed_error;
   uint32 not_used;
   char buff[STRING_BUFFER_USUAL_SIZE];
@@ -5954,6 +6055,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
 
 int Field_str::store(double nr)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
   uint length;
   bool use_scientific_notation= TRUE;
@@ -6029,6 +6131,7 @@ int Field_longstr::store_decimal(const my_decimal *d)
 
 double Field_string::val_real(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   int not_used;
   char *end_not_used;
   CHARSET_INFO *cs= charset();
@@ -6038,6 +6141,7 @@ double Field_string::val_real(void)
 
 longlong Field_string::val_int(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   int not_used;
   char *end_not_used;
   CHARSET_INFO *cs=charset();
@@ -6048,6 +6152,7 @@ longlong Field_string::val_int(void)
 String *Field_string::val_str(String *val_buffer __attribute__((unused)),
                               String *val_ptr)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   uint length= field_charset->cset->lengthsp(field_charset, ptr, field_length);
   /* See the comment for Field_long::store(long long) */
   DBUG_ASSERT(table->in_use == current_thd);
@@ -6058,6 +6163,7 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
 
 my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   str2my_decimal(E_DEC_FATAL_ERROR, ptr, field_length, charset(),
                  decimal_value);
   return decimal_value;
@@ -6295,6 +6401,7 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
 
 int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   uint32 not_used, copy_length;
   char buff[STRING_BUFFER_USUAL_SIZE];
   String tmpstr(buff,sizeof(buff), &my_charset_bin);
@@ -6369,6 +6476,7 @@ int Field_varstring::store(longlong nr, bool unsigned_val)
 
 double Field_varstring::val_real(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   int not_used;
   char *end_not_used;
   uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
@@ -6379,6 +6487,7 @@ double Field_varstring::val_real(void)
 
 longlong Field_varstring::val_int(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   int not_used;
   char *end_not_used;
   uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
@@ -6389,6 +6498,7 @@ longlong Field_varstring::val_int(void)
 String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
                                  String *val_ptr)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
   val_ptr->set((const char*) ptr+length_bytes, length, field_charset);
   return val_ptr;
@@ -6397,6 +6507,7 @@ String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
 
 my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
   str2my_decimal(E_DEC_FATAL_ERROR, ptr+length_bytes, length, charset(),
                  decimal_value);
@@ -6937,6 +7048,7 @@ void Field_blob::put_length(char *pos, uint32 length)
 
 int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   int error= 0, well_formed_error;
   if (!length)
   {
@@ -7023,6 +7135,7 @@ int Field_blob::store(longlong nr, bool unsigned_val)
 
 double Field_blob::val_real(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   int not_used;
   char *end_not_used, *blob;
   uint32 length;
@@ -7039,6 +7152,7 @@ double Field_blob::val_real(void)
 
 longlong Field_blob::val_int(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   int not_used;
   char *blob;
   memcpy_fixed(&blob,ptr+packlength,sizeof(char*));
@@ -7051,6 +7165,7 @@ longlong Field_blob::val_int(void)
 String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
                             String *val_ptr)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   char *blob;
   memcpy_fixed(&blob,ptr+packlength,sizeof(char*));
   if (!blob)
@@ -7063,6 +7178,7 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
 
 my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   const char *blob;
   memcpy_fixed(&blob, ptr+packlength, sizeof(const char*));
   if (!blob)
@@ -7631,6 +7747,7 @@ void Field_enum::store_type(ulonglong value)
 
 int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   int err= 0;
   uint32 not_used;
   char buff[STRING_BUFFER_USUAL_SIZE];
@@ -7677,6 +7794,7 @@ int Field_enum::store(double nr)
 
 int Field_enum::store(longlong nr, bool unsigned_val)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   int error= 0;
   if ((ulonglong) nr > typelib->count || nr == 0)
   {
@@ -7697,44 +7815,45 @@ double Field_enum::val_real(void)
 
 longlong Field_enum::val_int(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   switch (packlength) {
   case 1:
     return (longlong) (uchar) ptr[0];
   case 2:
-    {
-      uint16 tmp;
+  {
+    uint16 tmp;
 #ifdef WORDS_BIGENDIAN
-      if (table->s->db_low_byte_first)
-        tmp=sint2korr(ptr);
-      else
+    if (table->s->db_low_byte_first)
+      tmp=sint2korr(ptr);
+    else
 #endif
-        shortget(tmp,ptr);
-      return (longlong) tmp;
-    }
+      shortget(tmp,ptr);
+    return (longlong) tmp;
+  }
   case 3:
     return (longlong) uint3korr(ptr);
   case 4:
-    {
-      uint32 tmp;
+  {
+    uint32 tmp;
 #ifdef WORDS_BIGENDIAN
-      if (table->s->db_low_byte_first)
-        tmp=uint4korr(ptr);
-      else
+    if (table->s->db_low_byte_first)
+      tmp=uint4korr(ptr);
+    else
 #endif
-        longget(tmp,ptr);
-      return (longlong) tmp;
-    }
+      longget(tmp,ptr);
+    return (longlong) tmp;
+  }
   case 8:
-    {
-      longlong tmp;
+  {
+    longlong tmp;
 #ifdef WORDS_BIGENDIAN
-      if (table->s->db_low_byte_first)
-        tmp=sint8korr(ptr);
-      else
+    if (table->s->db_low_byte_first)
+      tmp=sint8korr(ptr);
+    else
 #endif
-        longlongget(tmp,ptr);
-      return tmp;
-    }
+      longlongget(tmp,ptr);
+    return tmp;
+  }
   }
   return 0;                                     // impossible
 }
@@ -7812,6 +7931,7 @@ void Field_enum::sql_type(String &res) const
 
 int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   bool got_warning= 0;
   int err= 0;
   char *not_used;
@@ -7851,6 +7971,7 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
 
 int Field_set::store(longlong nr, bool unsigned_val)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   int error= 0;
   if ((ulonglong) nr > (ulonglong) (((longlong) 1 << typelib->count) -
                                     (longlong) 1))
@@ -8034,6 +8155,7 @@ Field *Field_bit::new_key_field(MEM_ROOT *root,
 
 int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   int delta;
 
   for (; length && !*from; from++, length--);   // skip left 0's
@@ -8080,7 +8202,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
 
 int Field_bit::store(double nr)
 {
-  return store((longlong) nr, FALSE);
+  return Field_bit::store((longlong) nr, FALSE);
 }
 
 
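A note on the store(double) hunk just above: the explicitly qualified call presumably pins the Field_bit implementation instead of letting the call dispatch virtually to a derived override such as Field_bit_as_char (that motivation is an inference, not stated in the changeset). A self-contained C++ sketch of the difference, with stand-in class names:

#include <cstdio>

// Stand-ins, not the real Field classes.
struct FieldBit
{
  virtual ~FieldBit() {}
  virtual int store_longlong(long long)
  { printf("FieldBit::store_longlong\n"); return 0; }

  int store_double(double nr)
  { return FieldBit::store_longlong((long long) nr); } // qualified: no virtual dispatch
};

struct FieldBitAsChar : FieldBit
{
  int store_longlong(long long) override
  { printf("FieldBitAsChar::store_longlong\n"); return 0; }
};

int main()
{
  FieldBitAsChar f;
  f.store_double(3.0);   // prints FieldBit::store_longlong, not the override
  return 0;
}
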
@@ -8109,6 +8231,7 @@ double Field_bit::val_real(void)
 
 longlong Field_bit::val_int(void)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   ulonglong bits= 0;
   if (bit_len)
   {
@@ -8133,6 +8256,7 @@ longlong Field_bit::val_int(void)
 String *Field_bit::val_str(String *val_buffer,
                            String *val_ptr __attribute__((unused)))
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   char buff[sizeof(longlong)];
   uint length= min(pack_length(), sizeof(longlong));
   ulonglong bits= val_int();
@@ -8148,6 +8272,7 @@ String *Field_bit::val_str(String *val_buffer,
 
 my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
 {
+  ASSERT_COLUMN_MARKED_FOR_READ;
   int2my_decimal(E_DEC_FATAL_ERROR, val_int(), 1, deciaml_value);
   return deciaml_value;
 }
@@ -8277,6 +8402,7 @@ Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
 
 int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
 {
+  ASSERT_COLUMN_MARKED_FOR_WRITE;
   int delta;
   uchar bits= field_length & 7;
 
@@ -8839,15 +8965,14 @@ Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length,
     null_bit= ((uchar) 1) << null_bit;
   }
 
-  switch (field_type)
-  {
-  case FIELD_TYPE_DATE:
-  case FIELD_TYPE_NEWDATE:
-  case FIELD_TYPE_TIME:
-  case FIELD_TYPE_DATETIME:
-  case FIELD_TYPE_TIMESTAMP:
-    field_charset= &my_charset_bin;
-  default: break;
+  switch (field_type) {
+  case FIELD_TYPE_DATE:
+  case FIELD_TYPE_NEWDATE:
+  case FIELD_TYPE_TIME:
+  case FIELD_TYPE_DATETIME:
+  case FIELD_TYPE_TIMESTAMP:
+    field_charset= &my_charset_bin;
+  default: break;
   }
 
   if (f_is_alpha(pack_flag))
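The field.cc hunks above insert ASSERT_COLUMN_MARKED_FOR_READ / ASSERT_COLUMN_MARKED_FOR_WRITE into every val_* and store method. The macro definitions are not part of this excerpt; as a hedged, self-contained mock of the idea (stand-in types, a plain uint32_t instead of MY_BITMAP, assert instead of DBUG_ASSERT):

#include <cassert>
#include <cstdint>
#include <cstdio>

struct MockTable { uint32_t read_set; uint32_t write_set; };

struct MockField
{
  MockTable *table;
  unsigned field_index;

  void assert_marked_for_read() const
  { assert((table->read_set >> field_index) & 1); }

  long val_int() const
  {
    assert_marked_for_read();   // debug check: column must be in the read set
    return 42;                  // stand-in for decoding ptr
  }
};

int main()
{
  MockTable t= { 0x1 /* read_set: column 0 */, 0 };
  MockField f= { &t, 0 };
  printf("%ld\n", f.val_int());   // OK: column 0 is registered
  // MockField g= { &t, 1 }; g.val_int();  // would trip the assert
  return 0;
}
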
sql/field.h
@@ -62,10 +62,9 @@ public:
   struct st_table *orig_table;          // Pointer to original table
   const char **table_name, *field_name;
   LEX_STRING comment;
-  query_id_t query_id;                  // For quick test of used fields
-  bool add_index;                       // For check if field will be indexed
   /* Field is part of the following keys */
-  key_map key_start,part_of_key,part_of_sortkey;
+  key_map key_start, part_of_key, part_of_key_not_clustered;
+  key_map part_of_sortkey;
   /*
     We use three additional unireg types for TIMESTAMP to overcome limitation
     of current binary format of .frm file. We'd like to be able to support
@@ -88,12 +87,8 @@ public:
 
   utype unireg_check;
   uint32 field_length;                  // Length of field
-  uint field_index;                     // field number in fields array
   uint32 flags;
-  /* fieldnr is the id of the field (first field = 1) as is also
-     used in key_part.
-  */
-  uint16 fieldnr;
+  uint16 field_index;                   // field number in fields array
   uchar null_bit;                       // Bit used to test null bit
 
   Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
sql/filesort.cc
@@ -44,6 +44,7 @@ static ha_rows find_all_keys(SORTPARAM *param,SQL_SELECT *select,
 static int write_keys(SORTPARAM *param,uchar * *sort_keys,
                       uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
 static void make_sortkey(SORTPARAM *param,uchar *to, byte *ref_pos);
+static void register_used_fields(SORTPARAM *param);
 static int merge_index(SORTPARAM *param,uchar *sort_buffer,
                        BUFFPEK *buffpek,
                        uint maxbuffer,IO_CACHE *tempfile,
@@ -66,11 +67,11 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
     table              Table to sort
     sortorder          How to sort the table
     s_length           Number of elements in sortorder
-    select             condition to apply to the rows
-    special            Not used.
-                       (This could be used to sort the rows pointed on by
-                       select->file)
-    examined_rows      Store number of examined rows here
+    select             Condition to apply to the rows
+    ha_maxrows         Return only this many rows
+    sort_positions     Set to 1 if we want to force sorting by position
+                       (Needed by UPDATE/INSERT or ALTER TABLE)
+    examined_rows      Store number of examined rows here
 
   IMPLEMENTATION
     Creates a set of pointers that can be used to read the rows
@@ -81,6 +82,10 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
     Before calling filesort, one must have done
     table->file->info(HA_STATUS_VARIABLE)
 
+  NOTES
+    If we sort by position (like if sort_positions is 1) filesort() will
+    call table->prepare_for_position().
+
   RETURN
     HA_POS_ERROR       Error
     #                  Number of rows
@@ -92,7 +97,8 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
 */
 
 ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
-                 SQL_SELECT *select, ha_rows max_rows, ha_rows *examined_rows)
+                 SQL_SELECT *select, ha_rows max_rows,
+                 bool sort_positions, ha_rows *examined_rows)
 {
   int error;
   ulong memavl, min_sort_memory;
@@ -128,8 +134,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
   param.ref_length= table->file->ref_length;
   param.addon_field= 0;
   param.addon_length= 0;
-  if (!(table->file->table_flags() & HA_FAST_KEY_READ) &&
-      !table->fulltext_searched)
+  if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
+      !table->fulltext_searched && !sort_positions)
   {
     /*
       Get the descriptors of all fields whose values are appended
@@ -175,7 +181,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
   if (select && select->quick && select->quick->records > 0L)
   {
     records=min((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2),
-                table->file->records)+EXTRA_RECORDS;
+                table->file->stats.records)+EXTRA_RECORDS;
     selected_records_file=0;
   }
   else
@@ -404,8 +410,11 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
   TABLE *sort_form;
   volatile THD::killed_state *killed= &current_thd->killed;
   handler *file;
+  MY_BITMAP *save_read_set, *save_write_set;
   DBUG_ENTER("find_all_keys");
-  DBUG_PRINT("info",("using: %s",(select?select->quick?"ranges":"where":"every row")));
+  DBUG_PRINT("info",("using: %s",
+                     (select ? select->quick ? "ranges" : "where":
+                      "every row")));
 
   idx=indexpos=0;
   error=quick_select=0;
@@ -415,7 +424,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
   ref_pos= ref_buff;
   quick_select=select && select->quick;
   record=0;
-  flag= ((!indexfile && file->table_flags() & HA_REC_NOT_IN_SEQ)
+  flag= ((!indexfile && file->ha_table_flags() & HA_REC_NOT_IN_SEQ)
          || quick_select);
   if (indexfile || flag)
     ref_pos= &file->ref[0];
@@ -437,6 +446,19 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
                                 select, 1, 1);
   }
 
+  /* Remember original bitmaps */
+  save_read_set=  sort_form->read_set;
+  save_write_set= sort_form->write_set;
+  /* Set up temporary column read map for columns used by sort */
+  bitmap_clear_all(&sort_form->tmp_set);
+  /* Temporary set for register_used_fields and register_field_in_read_map */
+  sort_form->read_set= &sort_form->tmp_set;
+  register_used_fields(param);
+  if (select && select->cond)
+    select->cond->walk(&Item::register_field_in_read_map, 1,
+                       (byte*) sort_form);
+  sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set);
+
   for (;;)
   {
     if (quick_select)
@@ -515,6 +537,9 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
     file->ha_rnd_end();
   }
 
+  /* Signal we should use orignal column read and write maps */
+  sort_form->column_bitmaps_set(save_read_set, save_write_set);
+
   DBUG_PRINT("test",("error: %d  indexpos: %d",error,indexpos));
   if (error != HA_ERR_END_OF_FILE)
   {
@@ -845,6 +870,50 @@ static void make_sortkey(register SORTPARAM *param,
   return;
 }
 
+
+/*
+  Register fields used by sorting in the sorted table's read set
+*/
+
+static void register_used_fields(SORTPARAM *param)
+{
+  reg1 SORT_FIELD *sort_field;
+  reg5 uint length;
+  TABLE *table=param->sort_form;
+  MY_BITMAP *bitmap= table->read_set;
+
+  for (sort_field= param->local_sortorder ;
+       sort_field != param->end ;
+       sort_field++)
+  {
+    Field *field;
+    if ((field= sort_field->field))
+    {
+      if (field->table == table)
+        bitmap_set_bit(bitmap, field->field_index);
+    }
+    else
+    {                                           // Item
+      sort_field->item->walk(&Item::register_field_in_read_map, 1,
+                             (byte *) table);
+    }
+  }
+
+  if (param->addon_field)
+  {
+    SORT_ADDON_FIELD *addonf= param->addon_field;
+    Field *field;
+    for ( ; (field= addonf->field) ; addonf++)
+      bitmap_set_bit(bitmap, field->field_index);
+  }
+  else
+  {
+    /* Save filepos last */
+    table->prepare_for_position();
+  }
+}
+
+
+static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count,
                        FILESORT_INFO *table_sort)
 {
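The register_used_fields() hunk above replaces the old query_id tagging with column bitmaps: the sort marks every column it touches in a temporary read set, and later code skips unmarked columns. A self-contained C++ sketch of that technique, with a plain uint32_t standing in for MY_BITMAP:

#include <cstdint>
#include <cstdio>

static inline void bitmap_set_bit(uint32_t *map, unsigned bit)
{ *map|= uint32_t(1) << bit; }

static inline bool bitmap_is_set(uint32_t map, unsigned bit)
{ return (map >> bit) & 1; }

int main()
{
  uint32_t read_set= 0;
  const unsigned sort_columns[]= { 0, 3, 7 };   // field_index values used by the sort
  for (unsigned idx : sort_columns)
    bitmap_set_bit(&read_set, idx);

  // What get_addon_fields-style consumers see afterwards:
  for (unsigned f= 0; f < 8; f++)
    printf("field %u: %s\n", f, bitmap_is_set(read_set, f) ? "read" : "skipped");
  return 0;
}
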
@@ -1353,7 +1422,8 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
   uint length= 0;
   uint fields= 0;
   uint null_fields= 0;
-  query_id_t query_id= thd->query_id;
+  MY_BITMAP *read_set= (*ptabfield)->table->read_set;
 
   /*
     If there is a reference to a field in the query add it
     to the the set of appended fields.
@@ -1365,17 +1435,9 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
   */
   *plength= 0;
 
-  /*
-    The following statement is added to avoid sorting in alter_table.
-    The fact is the filter 'field->query_id != thd->query_id'
-    doesn't work for alter table
-  */
-  if (thd->lex->sql_command != SQLCOM_SELECT &&
-      thd->lex->sql_command != SQLCOM_INSERT_SELECT)
-    return 0;
   for (pfield= ptabfield; (field= *pfield) ; pfield++)
   {
-    if (field->query_id != query_id)
+    if (!bitmap_is_set(read_set, field->field_index))
       continue;
     if (field->flags & BLOB_FLAG)
       return 0;
@@ -1398,7 +1460,7 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
   null_fields= 0;
   for (pfield= ptabfield; (field= *pfield) ; pfield++)
   {
-    if (field->query_id != thd->query_id)
+    if (!bitmap_is_set(read_set, field->field_index))
       continue;
     addonf->field= field;
     addonf->offset= length;
sql/ha_berkeley.cc
@@ -25,7 +25,8 @@
     We will need an updated Berkeley DB version for this.
   - Killing threads that has got a 'deadlock'
   - SHOW TABLE STATUS should give more information about the table.
-  - Get a more accurate count of the number of rows (estimate_rows_upper_bound()).
+  - Get a more accurate count of the number of rows
+    (estimate_rows_upper_bound()).
     We could store the found number of rows when the table is scanned and
     then increment the counter for each attempted write.
   - We will need to extend the manager thread to makes checkpoints at
@@ -126,13 +127,14 @@ static int berkeley_rollback(THD *thd, bool all);
 static int berkeley_rollback_to_savepoint(THD* thd, void *savepoint);
 static int berkeley_savepoint(THD* thd, void *savepoint);
 static int berkeley_release_savepoint(THD* thd, void *savepoint);
-static handler *berkeley_create_handler(TABLE_SHARE *table);
+static handler *berkeley_create_handler(TABLE_SHARE *table,
+                                        MEM_ROOT *mem_root);
 
 handlerton berkeley_hton;
 
-handler *berkeley_create_handler(TABLE_SHARE *table)
+static handler *berkeley_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
 {
-  return new ha_berkeley(table);
+  return new (mem_root) ha_berkeley(table);
 }
 
 typedef struct st_berkeley_trx_data {
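The create_handler factories in this changeset switch from plain new to placement new on a caller-supplied MEM_ROOT, so handler memory is owned by the root and released with it in bulk. A self-contained C++ sketch with simplified stand-ins (this is not the real MEM_ROOT or handler API):

#include <cstddef>
#include <cstdio>
#include <new>

struct MemRoot { char buf[4096]; size_t used= 0; };   // toy arena

static void *alloc_root(MemRoot *root, size_t size)
{
  void *p= root->buf + root->used;
  root->used+= (size + 7) & ~size_t(7);   // crude 8-byte alignment
  return p;
}

struct Handler { int id; explicit Handler(int i) : id(i) {} };

// Mirrors the shape of the new factory functions in the diff.
static Handler *create_handler(MemRoot *mem_root)
{
  return new (alloc_root(mem_root, sizeof(Handler))) Handler(42);
}

int main()
{
  MemRoot root;
  Handler *h= create_handler(&root);
  printf("%d\n", h->id);   // 42; storage vanishes with 'root', no delete needed
  return 0;
}
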
@@ -456,7 +458,7 @@ void berkeley_cleanup_log_files(void)
 ha_berkeley::ha_berkeley(TABLE_SHARE *table_arg)
   :handler(&berkeley_hton, table_arg), alloc_ptr(0), rec_buff(0), file(0),
   int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
-                  HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
+                  HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS |
                   HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
                   HA_CAN_GEOMETRY |
                   HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
@@ -760,7 +762,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
   transaction=0;
   cursor=0;
   key_read=0;
-  block_size=8192;                              // Berkeley DB block size
+  stats.block_size=8192;                        // Berkeley DB block size
   share->fixed_length_row= !(table_share->db_create_options &
                              HA_OPTION_PACK_RECORD);
 
@@ -776,7 +778,7 @@ int ha_berkeley::close(void)
 
   my_free((char*) rec_buff,MYF(MY_ALLOW_ZERO_PTR));
   my_free(alloc_ptr,MYF(MY_ALLOW_ZERO_PTR));
-  ha_berkeley::extra(HA_EXTRA_RESET);           // current_row buffer
+  ha_berkeley::reset();                         // current_row buffer
   DBUG_RETURN(free_share(share,table, hidden_primary_key,0));
 }
 
@@ -877,11 +879,13 @@ void ha_berkeley::unpack_row(char *record, DBT *row)
   else
   {
     /* Copy null bits */
+    my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
    const char *ptr= (const char*) row->data;
     memcpy(record, ptr, table_share->null_bytes);
     ptr+= table_share->null_bytes;
     for (Field **field=table->field ; *field ; field++)
       ptr= (*field)->unpack(record + (*field)->offset(), ptr);
+    dbug_tmp_restore_column_map(table->write_set, old_map);
   }
 }
 
@@ -939,6 +943,7 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff,
   KEY *key_info=table->key_info+keynr;
   KEY_PART_INFO *key_part=key_info->key_part;
   KEY_PART_INFO *end=key_part+key_info->key_parts;
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
   DBUG_ENTER("create_key");
 
   key->data=buff;
@@ -962,6 +967,7 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff,
   }
   key->size= (buff  - (char*) key->data);
   DBUG_DUMP("key",(char*) key->data, key->size);
+  dbug_tmp_restore_column_map(table->write_set, old_map);
   DBUG_RETURN(key);
 }
 
@@ -979,6 +985,7 @@ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff,
   KEY *key_info=table->key_info+keynr;
   KEY_PART_INFO *key_part=key_info->key_part;
   KEY_PART_INFO *end=key_part+key_info->key_parts;
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
   DBUG_ENTER("bdb:pack_key");
 
   bzero((char*) key,sizeof(*key));
@@ -1006,6 +1013,7 @@ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff,
   }
   key->size= (buff - (char*) key->data);
   DBUG_DUMP("key",(char*) key->data, key->size);
+  dbug_tmp_restore_column_map(table->write_set, old_map);
   DBUG_RETURN(key);
 }
 
@@ -1244,8 +1252,8 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
   DB_TXN *sub_trans;
   bool primary_key_changed;
   DBUG_ENTER("update_row");
-  LINT_INIT(error);
 
+  LINT_INIT(error);
   statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
     table->timestamp_field->set_time();
@@ -1826,8 +1834,9 @@ void ha_berkeley::info(uint flag)
   DBUG_ENTER("ha_berkeley::info");
   if (flag & HA_STATUS_VARIABLE)
   {
-    records = share->rows + changed_rows; // Just to get optimisations right
-    deleted = 0;
+    // Just to get optimizations right
+    stats.records = share->rows + changed_rows;
+    stats.deleted = 0;
   }
   if ((flag & HA_STATUS_CONST) || version != share->version)
   {
@@ -1848,19 +1857,8 @@ void ha_berkeley::info(uint flag)
 int ha_berkeley::extra(enum ha_extra_function operation)
 {
   switch (operation) {
-  case HA_EXTRA_RESET:
   case HA_EXTRA_RESET_STATE:
-    key_read=0;
-    using_ignore=0;
-    if (current_row.flags & (DB_DBT_MALLOC | DB_DBT_REALLOC))
-    {
-      current_row.flags=0;
-      if (current_row.data)
-      {
-        free(current_row.data);
-        current_row.data=0;
-      }
-    }
+    reset();
     break;
   case HA_EXTRA_KEYREAD:
     key_read=1;                                 // Query satisfied with key
@@ -1883,8 +1881,17 @@ int ha_berkeley::extra(enum ha_extra_function operation)
 
 int ha_berkeley::reset(void)
 {
-  ha_berkeley::extra(HA_EXTRA_RESET);
-  key_read=0;                                   // Reset to state after open
+  key_read= 0;
+  using_ignore= 0;
+  if (current_row.flags & (DB_DBT_MALLOC | DB_DBT_REALLOC))
+  {
+    current_row.flags= 0;
+    if (current_row.data)
+    {
+      free(current_row.data);
+      current_row.data= 0;
+    }
+  }
   return 0;
 }
 
@@ -2173,7 +2180,7 @@ int ha_berkeley::rename_table(const char * from, const char * to)
 
 double ha_berkeley::scan_time()
 {
-  return rows2double(records/3);
+  return rows2double(stats.records/3);
 }
 
 ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
@@ -2226,7 +2233,7 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
     end_pos=end_range.less;
   else
     end_pos=end_range.less+end_range.equal;
-  rows=(end_pos-start_pos)*records;
+  rows=(end_pos-start_pos)*stats.records;
   DBUG_PRINT("exit",("rows: %g",rows));
   DBUG_RETURN((ha_rows)(rows <= 1.0 ? 1 : rows));
 }
sql/ha_berkeley.h
@@ -90,7 +90,7 @@ class ha_berkeley: public handler
   ulong index_flags(uint idx, uint part, bool all_parts) const;
   const char *index_type(uint key_number) { return "BTREE"; }
   const char **bas_ext() const;
-  ulong table_flags(void) const { return int_table_flags; }
+  ulonglong table_flags(void) const { return int_table_flags; }
   uint max_supported_keys() const { return MAX_KEY-1; }
   uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
   ha_rows estimate_rows_upper_bound();
@@ -98,7 +98,6 @@ class ha_berkeley: public handler
   uint max_supported_key_part_length() const { return UINT_MAX32; }
 
   const key_map *keys_to_use_for_scanning() { return &key_map_full; }
-  bool has_transactions()  { return 1;}
 
   int open(const char *name, int mode, uint test_if_locked);
   int close(void);
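Several handler classes in this changeset widen table_flags() from ulong to ulonglong. A self-contained C++ sketch of why: once more than 32 HA_* capability bits exist, a 32-bit flag word silently drops the high ones (bit positions below are made up for the demo):

#include <cstdio>

typedef unsigned long long ulonglong;

static const ulonglong HA_FAST_KEY_READ= 1ULL << 5;
static const ulonglong HA_HYPOTHETICAL_NEW_CAPABILITY= 1ULL << 40; // beyond 32 bits

int main()
{
  ulonglong flags= HA_FAST_KEY_READ | HA_HYPOTHETICAL_NEW_CAPABILITY;
  // What a 32-bit ulong API would have kept of these flags:
  unsigned long truncated= (unsigned long) (flags & 0xffffffffULL);
  printf("full=%llx truncated-to-32-bits=%lx\n", flags, truncated);
  return 0;
}
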
sql/ha_federated.cc
@@ -364,7 +364,8 @@ pthread_mutex_t federated_mutex;                // To init the hash
 static int federated_init= FALSE;               // Checking the state of hash
 
 /* Static declaration for handerton */
-static handler *federated_create_handler(TABLE_SHARE *table);
+static handler *federated_create_handler(TABLE_SHARE *table,
+                                         MEM_ROOT *mem_root);
 static int federated_commit(THD *thd, bool all);
 static int federated_rollback(THD *thd, bool all);
 
@@ -372,9 +373,10 @@ static int federated_rollback(THD *thd, bool all);
 
 handlerton federated_hton;
 
-static handler *federated_create_handler(TABLE_SHARE *table)
+static handler *federated_create_handler(TABLE_SHARE *table,
+                                         MEM_ROOT *mem_root)
 {
-  return new ha_federated(table);
+  return new (mem_root) ha_federated(table);
 }
 
 
@@ -765,6 +767,7 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
 {
   ulong *lengths;
   Field **field;
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
   DBUG_ENTER("ha_federated::convert_row_to_internal_format");
 
   lengths= mysql_fetch_lengths(stored_result);
@@ -783,12 +786,15 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
       (*field)->set_null();
     else
     {
-      (*field)->set_notnull();
-      (*field)->store(*row, *lengths, &my_charset_bin);
+      if (bitmap_is_set(table->read_set, (*field)->field_index))
+      {
+        (*field)->set_notnull();
+        (*field)->store(*row, *lengths, &my_charset_bin);
+      }
     }
     (*field)->move_field_offset(-old_ptr);
   }
+  dbug_tmp_restore_column_map(table->write_set, old_map);
   DBUG_RETURN(0);
 }
 
@@ -1107,22 +1113,25 @@ bool ha_federated::create_where_from_key(String *to,
                                          KEY *key_info,
                                          const key_range *start_key,
                                          const key_range *end_key,
-                                         bool records_in_range)
+                                         bool records_in_range,
+                                         bool eq_range)
 {
   bool both_not_null=
     (start_key != NULL && end_key != NULL) ? TRUE : FALSE;
   const byte *ptr;
   uint remainder, length;
   char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE];
   String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
   const key_range *ranges[2]= { start_key, end_key };
+  my_bitmap_map *old_map;
   DBUG_ENTER("ha_federated::create_where_from_key");
 
   tmp.length(0);
   if (start_key == NULL && end_key == NULL)
     DBUG_RETURN(1);
 
-  for (int i= 0; i <= 1; i++)
+  old_map= dbug_tmp_use_all_columns(table, table->write_set);
+  for (uint i= 0; i <= 1; i++)
   {
     bool needs_quotes;
     KEY_PART_INFO *key_part;
@@ -1156,16 +1165,16 @@ bool ha_federated::create_where_from_key(String *to,
       {
         if (emit_key_part_name(&tmp, key_part) ||
             tmp.append(FEDERATED_ISNULL))
-          DBUG_RETURN(1);
+          goto err;
         continue;
       }
     }
 
     if (tmp.append(FEDERATED_OPENPAREN))
-      DBUG_RETURN(1);
+      goto err;
 
-    switch(ranges[i]->flag) {
-    case(HA_READ_KEY_EXACT):
+    switch (ranges[i]->flag) {
+    case HA_READ_KEY_EXACT:
       DBUG_PRINT("info", ("federated HA_READ_KEY_EXACT %d", i));
       if (store_length >= length ||
           !needs_quotes ||
@@ -1173,22 +1182,22 @@ bool ha_federated::create_where_from_key(String *to,
           field->result_type() != STRING_RESULT)
       {
         if (emit_key_part_name(&tmp, key_part))
-          DBUG_RETURN(1);
+          goto err;
 
         if (records_in_range)
         {
           if (tmp.append(FEDERATED_GE))
-            DBUG_RETURN(1);
+            goto err;
         }
         else
        {
           if (tmp.append(FEDERATED_EQ))
-            DBUG_RETURN(1);
+            goto err;
         }
 
         if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
                                   part_length))
-          DBUG_RETURN(1);
+          goto err;
       }
       else
       {
@@ -1197,43 +1206,49 @@ bool ha_federated::create_where_from_key(String *to,
             tmp.append(FEDERATED_LIKE) ||
             emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr,
                                   part_length))
-          DBUG_RETURN(1);
+          goto err;
       }
       break;
-    case(HA_READ_AFTER_KEY):
+    case HA_READ_AFTER_KEY:
+      if (eq_range)
+      {
+        if (tmp.append("1=1"))                  // Dummy
+          goto err;
+        break;
+      }
       DBUG_PRINT("info", ("federated HA_READ_AFTER_KEY %d", i));
       if (store_length >= length) /* end key */
      {
         if (emit_key_part_name(&tmp, key_part))
-          DBUG_RETURN(1);
+          goto err;
 
         if (i > 0) /* end key */
         {
          if (tmp.append(FEDERATED_LE))
-            DBUG_RETURN(1);
+            goto err;
        }
        else /* start key */
        {
          if (tmp.append(FEDERATED_GT))
-            DBUG_RETURN(1);
+            goto err;
        }
 
         if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
                                   part_length))
         {
-          DBUG_RETURN(1);
+          goto err;
         }
         break;
       }
-    case(HA_READ_KEY_OR_NEXT):
+    case HA_READ_KEY_OR_NEXT:
       DBUG_PRINT("info", ("federated HA_READ_KEY_OR_NEXT %d", i));
       if (emit_key_part_name(&tmp, key_part) ||
           tmp.append(FEDERATED_GE) ||
          emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
                                 part_length))
-        DBUG_RETURN(1);
+        goto err;
       break;
-    case(HA_READ_BEFORE_KEY):
+    case HA_READ_BEFORE_KEY:
      DBUG_PRINT("info", ("federated HA_READ_BEFORE_KEY %d", i));
       if (store_length >= length)
       {
@@ -1241,23 +1256,23 @@ bool ha_federated::create_where_from_key(String *to,
             tmp.append(FEDERATED_LT) ||
             emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
                                   part_length))
-          DBUG_RETURN(1);
+          goto err;
         break;
       }
-    case(HA_READ_KEY_OR_PREV):
+    case HA_READ_KEY_OR_PREV:
      DBUG_PRINT("info", ("federated HA_READ_KEY_OR_PREV %d", i));
       if (emit_key_part_name(&tmp, key_part) ||
           tmp.append(FEDERATED_LE) ||
          emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
                                 part_length))
-        DBUG_RETURN(1);
+        goto err;
       break;
     default:
       DBUG_PRINT("info",("cannot handle flag %d", ranges[i]->flag));
-      DBUG_RETURN(1);
+      goto err;
     }
     if (tmp.append(FEDERATED_CLOSEPAREN))
-      DBUG_RETURN(1);
+      goto err;
 
 next_loop:
     if (store_length >= length)
@@ -1267,13 +1282,15 @@ next_loop:
     length-= store_length;
     ptr+= store_length;
     if (tmp.append(FEDERATED_AND))
-      DBUG_RETURN(1);
+      goto err;
 
     DBUG_PRINT("info",
                ("create_where_from_key WHERE clause: %s",
                 tmp.c_ptr_quick()));
   }
+  dbug_tmp_restore_column_map(table->write_set, old_map);
 
   if (both_not_null)
     if (tmp.append(FEDERATED_CLOSEPAREN))
       DBUG_RETURN(1);
@@ -1285,6 +1302,10 @@ next_loop:
     DBUG_RETURN(1);
 
   DBUG_RETURN(0);
+
+err:
+  dbug_tmp_restore_column_map(table->write_set, old_map);
+  DBUG_RETURN(1);
 }
 
 /*
@@ -1324,7 +1345,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
     query.append(FEDERATED_BTICK);
     query.append(FEDERATED_COMMA);
   }
-  query.length(query.length()- (FEDERATED_COMMA_LEN - 1));
+  query.length(query.length()- FEDERATED_COMMA_LEN);
   query.append(FEDERATED_FROM);
   query.append(FEDERATED_BTICK);
 
@@ -1575,15 +1596,16 @@ int ha_federated::write_row(byte *buf)
   String insert_field_value_string(insert_field_value_buffer,
                                    sizeof(insert_field_value_buffer),
                                    &my_charset_bin);
-  values_string.length(0);
-  insert_string.length(0);
-  insert_field_value_string.length(0);
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
   DBUG_ENTER("ha_federated::write_row");
   DBUG_PRINT("info",
             ("table charset name %s csname %s",
             table->s->table_charset->name,
              table->s->table_charset->csname));
 
+  values_string.length(0);
+  insert_string.length(0);
+  insert_field_value_string.length(0);
   statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
@@ -1610,7 +1632,7 @@ int ha_federated::write_row(byte *buf)
   for (field= table->field; *field; field++)
   {
     /* if there is a query id and if it's equal to the current query id */
-    if (ha_get_bit_in_write_set((*field)->fieldnr))
+    if (bitmap_is_set(table->write_set, (*field)->field_index))
     {
       /*
         There are some fields. This will be used later to determine
@@ -1635,21 +1657,16 @@ int ha_federated::write_row(byte *buf)
 
       /* append commas between both fields and fieldnames */
       /*
-        unfortunately, we can't use the logic
-        if *(fields + 1) to make the following
-        appends conditional because we may not append
-        if the next field doesn't match the condition:
-        (((*field)->query_id && (*field)->query_id == current_query_id)
+        unfortunately, we can't use the logic if *(fields + 1) to
+        make the following appends conditional as we don't know if the
+        next field is in the write set
       */
       insert_string.append(FEDERATED_COMMA);
       values_string.append(FEDERATED_COMMA);
     }
   }
+  dbug_tmp_restore_column_map(table->read_set, old_map);
 
-  /*
-    remove trailing comma
-  */
-  insert_string.length(insert_string.length() - strlen(FEDERATED_COMMA));
   /*
     if there were no fields, we don't want to add a closing paren
     AND, we don't want to chop off the last char '('
@@ -1658,9 +1675,13 @@ int ha_federated::write_row(byte *buf)
   if (has_fields)
   {
     /* chops off leading commas */
-    values_string.length(values_string.length() - strlen(FEDERATED_COMMA));
+    insert_string.length(insert_string.length() - FEDERATED_COMMA_LEN);
+    values_string.length(values_string.length() - FEDERATED_COMMA_LEN);
     insert_string.append(FEDERATED_CLOSEPAREN);
   }
+  else
+    insert_string.length(insert_string.length() - FEDERATED_CLOSEPAREN_LEN);
+
   /* we always want to append this, even if there aren't any fields */
   values_string.append(FEDERATED_CLOSEPAREN);
 
@@ -1674,8 +1695,8 @@ int ha_federated::write_row(byte *buf)
     DBUG_RETURN(stash_remote_error());
   }
   /*
-    If the table we've just written a record to contains an auto_increment field,
-    then store the last_insert_id() value from the foreign server
+    If the table we've just written a record to contains an auto_increment
+    field, then store the last_insert_id() value from the foreign server
   */
   if (table->next_number_field)
     update_auto_increment();
@@ -1697,7 +1718,7 @@ void ha_federated::update_auto_increment(void)
   DBUG_ENTER("ha_federated::update_auto_increment");
 
   thd->insert_id(mysql->last_used_con->insert_id);
-  DBUG_PRINT("info",("last_insert_id %d", auto_increment_value));
+  DBUG_PRINT("info",("last_insert_id %d", stats.auto_increment_value));
 
   DBUG_VOID_RETURN;
 }
@@ -1785,7 +1806,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
     this? Because we only are updating one record, and LIMIT enforces
     this.
   */
-  bool has_a_primary_key= (table->s->primary_key == 0 ? TRUE : FALSE);
+  bool has_a_primary_key= test(table->s->primary_key != MAX_KEY);
   /*
     buffers for following strings
   */
@@ -1837,48 +1858,52 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
 
   for (Field **field= table->field; *field; field++)
  {
-    where_string.append((*field)->field_name);
-    update_string.append((*field)->field_name);
-    update_string.append(FEDERATED_EQ);
-
-    if ((*field)->is_null())
-      new_field_value.append(FEDERATED_NULL);
-    else
-    {
-      /* otherwise = */
-      (*field)->val_str(&new_field_value);
-      (*field)->quote_data(&new_field_value);
-
-      if (!field_in_record_is_null(table, *field, (char*) old_data))
-        where_string.append(FEDERATED_EQ);
-    }
-
-    if (field_in_record_is_null(table, *field, (char*) old_data))
-      where_string.append(FEDERATED_ISNULL);
-    else
-    {
-      (*field)->val_str(&old_field_value,
-                        (char*) (old_data + (*field)->offset()));
-      (*field)->quote_data(&old_field_value);
-      where_string.append(old_field_value);
-    }
-
-    update_string.append(new_field_value);
-    new_field_value.length(0);
-
-    /*
-      Only append conjunctions if we have another field in which
-      to iterate
-    */
-    if (*(field + 1))
+    if (bitmap_is_set(table->write_set, (*field)->field_index))
     {
+      if ((*field)->is_null())
+        new_field_value.append(FEDERATED_NULL);
+      else
+      {
+        my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+        /* otherwise = */
+        (*field)->val_str(&new_field_value);
+        (*field)->quote_data(&new_field_value);
+        tmp_restore_column_map(table->read_set, old_map);
+      }
+      update_string.append((*field)->field_name);
+      update_string.append(FEDERATED_EQ);
+      update_string.append(new_field_value);
       update_string.append(FEDERATED_COMMA);
+      new_field_value.length(0);
     }
+
+    if (bitmap_is_set(table->read_set, (*field)->field_index))
+    {
+      where_string.append((*field)->field_name);
+      if (field_in_record_is_null(table, *field, (char*) old_data))
+        where_string.append(FEDERATED_ISNULL);
+      else
+      {
+        where_string.append(FEDERATED_EQ);
+        (*field)->val_str(&old_field_value,
+                          (char*) (old_data + (*field)->offset()));
+        (*field)->quote_data(&old_field_value);
+        where_string.append(old_field_value);
+        old_field_value.length(0);
+      }
+      where_string.append(FEDERATED_AND);
+    }
   }
-  update_string.append(FEDERATED_WHERE);
-  update_string.append(where_string);
 
+  /* Remove last ', '. This works as there must be at least on updated field */
+  update_string.length(update_string.length() - FEDERATED_COMMA_LEN);
+  if (where_string.length())
+  {
+    where_string.length(where_string.length() - FEDERATED_AND_LEN);
+    update_string.append(FEDERATED_WHERE);
+    update_string.append(where_string);
+  }
 
   /*
     If this table has not a primary key, then we could possibly
     update multiple rows. We want to make sure to only update one!
@@ -1912,9 +1937,9 @@ int ha_federated::delete_row(const byte *buf)
 {
   char delete_buffer[FEDERATED_QUERY_BUFFER_SIZE];
   char data_buffer[FEDERATED_QUERY_BUFFER_SIZE];
-
   String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin);
   String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin);
+  uint found= 0;
   DBUG_ENTER("ha_federated::delete_row");
 
   delete_string.length(0);
@@ -1928,25 +1953,31 @@ int ha_federated::delete_row(const byte *buf)
   for (Field **field= table->field; *field; field++)
   {
     Field *cur_field= *field;
-    data_string.length(0);
-    delete_string.append(cur_field->field_name);
-
-    if (cur_field->is_null())
+    if (bitmap_is_set(table->read_set, cur_field->field_index))
     {
-      delete_string.append(FEDERATED_IS);
-      data_string.append(FEDERATED_NULL);
+      found++;
+      data_string.length(0);
+      delete_string.append(cur_field->field_name);
+      if (cur_field->is_null())
+      {
+        delete_string.append(FEDERATED_IS);
+        delete_string.append(FEDERATED_NULL);
+      }
+      else
+      {
+        delete_string.append(FEDERATED_EQ);
+        cur_field->val_str(&data_string);
+        cur_field->quote_data(&data_string);
+        delete_string.append(data_string);
+      }
+      delete_string.append(FEDERATED_AND);
     }
-    else
-    {
-      delete_string.append(FEDERATED_EQ);
-      cur_field->val_str(&data_string);
-      cur_field->quote_data(&data_string);
-    }
-
-    delete_string.append(data_string);
-    delete_string.append(FEDERATED_AND);
   }
-  delete_string.length(delete_string.length()-5);        // Remove trailing AND
 
+  // Remove trailing AND
+  delete_string.length(delete_string.length() - FEDERATED_AND_LEN);
+  if (!found)
+    delete_string.length(delete_string.length() - FEDERATED_WHERE_LEN);
+
   delete_string.append(FEDERATED_LIMIT1);
   DBUG_PRINT("info",
@@ -1955,10 +1986,10 @@ int ha_federated::delete_row(const byte *buf)
   {
     DBUG_RETURN(stash_remote_error());
   }
-  deleted+= mysql->affected_rows;
+  stats.deleted+= mysql->affected_rows;
   DBUG_PRINT("info",
              ("rows deleted %d  rows deleted for all time %d",
-              int(mysql->affected_rows), deleted));
+              int(mysql->affected_rows), stats.deleted));
 
   DBUG_RETURN(0);
 }
@@ -2019,7 +2050,7 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
   create_where_from_key(&index_string,
                         &table->key_info[index],
                         &range,
-                        NULL, 0);
+                        NULL, 0, 0);
   sql_query.append(index_string);
 
   DBUG_PRINT("info",
@@ -2081,15 +2112,10 @@ int ha_federated::index_init(uint keynr, bool sorted)
   DBUG_RETURN(0);
 }
 
-/*
-
-  int read_range_first(const key_range *start_key,
-                       const key_range *end_key,
-                       bool eq_range, bool sorted);
-*/
 int ha_federated::read_range_first(const key_range *start_key,
-                                   const key_range *end_key,
-                                   bool eq_range, bool sorted)
+                                   const key_range *end_key,
+                                   bool eq_range, bool sorted)
 {
   char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
   int retval;
@@ -2105,7 +2131,7 @@ int ha_federated::read_range_first(const key_range *start_key,
   sql_query.append(share->select_query);
   create_where_from_key(&sql_query,
                         &table->key_info[active_index],
-                        start_key, end_key, 0);
+                        start_key, end_key, 0, eq_range);
 
   if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
   {
@@ -2450,18 +2476,20 @@ void ha_federated::info(uint flag)
         delete_length = ?
       */
       if (row[4] != NULL)
-        records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error);
+        stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0,
+                                              &error);
       if (row[5] != NULL)
-        mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error);
+        stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0,
+                                                      &error);
      if (row[12] != NULL)
-        update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error);
+        stats.update_time= (ha_rows) my_strtoll10(row[12], (char**) 0,
+                                                  &error);
      if (row[13] != NULL)
-        check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error);
+        stats.check_time= (ha_rows) my_strtoll10(row[13], (char**) 0,
+                                                 &error);
     }
     if (flag & HA_STATUS_CONST)
-    {
-      block_size= 4096;
-    }
+      stats.block_size= 4096;
   }
 
   if (result)
@@ -2512,8 +2540,8 @@ int ha_federated::delete_all_rows()
   {
     DBUG_RETURN(stash_remote_error());
   }
-  deleted+= records;
-  records= 0;
+  stats.deleted+= stats.records;
+  stats.records= 0;
   DBUG_RETURN(0);
 }
 
@@ -2793,6 +2821,7 @@ mysql_declare_plugin(federated)
   federated_db_init, /* Plugin Init */
   NULL, /* Plugin Deinit */
   0x0100 /* 1.0 */,
+  0
 }
 mysql_declare_plugin_end;
 
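The create_where_from_key rewrite above converts every early DBUG_RETURN(1) into goto err so the column map saved at entry is restored on every failure path. A self-contained C++ sketch of that cleanup idiom (save/restore helpers below are stand-ins for dbug_tmp_use_all_columns and its restore):

#include <cstdio>

static unsigned g_column_map;

static unsigned save_and_use_all_columns()
{ unsigned old= g_column_map; g_column_map= ~0u; return old; }

static void restore_column_map(unsigned old) { g_column_map= old; }

static bool build_where(bool fail_midway)
{
  unsigned old_map= save_and_use_all_columns();

  if (fail_midway)
    goto err;                      // single exit point for all failures
  restore_column_map(old_map);
  return false;                    // success

err:
  restore_column_map(old_map);     // map is restored on the error path too
  return true;
}

int main()
{
  g_column_map= 0x5;
  build_where(true);
  printf("map after failure: 0x%x\n", g_column_map);  // 0x5, not ~0
  return 0;
}
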
sql/ha_federated.h
@@ -39,83 +39,83 @@
 #define FEDERATED_RECORDS_IN_RANGE 2
 
 #define FEDERATED_INFO " SHOW TABLE STATUS LIKE "
-#define FEDERATED_INFO_LEN sizeof(FEDERATED_INFO)
+#define FEDERATED_INFO_LEN (sizeof(FEDERATED_INFO) -1)
 #define FEDERATED_SELECT "SELECT "
-#define FEDERATED_SELECT_LEN sizeof(FEDERATED_SELECT)
+#define FEDERATED_SELECT_LEN (sizeof(FEDERATED_SELECT) -1)
 #define FEDERATED_WHERE " WHERE "
-#define FEDERATED_WHERE_LEN sizeof(FEDERATED_WHERE)
+#define FEDERATED_WHERE_LEN (sizeof(FEDERATED_WHERE) -1)
 #define FEDERATED_FROM " FROM "
-#define FEDERATED_FROM_LEN sizeof(FEDERATED_FROM)
+#define FEDERATED_FROM_LEN (sizeof(FEDERATED_FROM) -1)
 #define FEDERATED_PERCENT "%"
-#define FEDERATED_PERCENT_LEN sizeof(FEDERATED_PERCENT)
+#define FEDERATED_PERCENT_LEN (sizeof(FEDERATED_PERCENT) -1)
 #define FEDERATED_IS " IS "
-#define FEDERATED_IS_LEN sizeof(FEDERATED_IS)
+#define FEDERATED_IS_LEN (sizeof(FEDERATED_IS) -1)
 #define FEDERATED_NULL " NULL "
-#define FEDERATED_NULL_LEN sizeof(FEDERATED_NULL)
+#define FEDERATED_NULL_LEN (sizeof(FEDERATED_NULL) -1)
 #define FEDERATED_ISNULL " IS NULL "
-#define FEDERATED_ISNULL_LEN sizeof(FEDERATED_ISNULL)
+#define FEDERATED_ISNULL_LEN (sizeof(FEDERATED_ISNULL) -1)
 #define FEDERATED_LIKE " LIKE "
-#define FEDERATED_LIKE_LEN sizeof(FEDERATED_LIKE)
+#define FEDERATED_LIKE_LEN (sizeof(FEDERATED_LIKE) -1)
 #define FEDERATED_TRUNCATE "TRUNCATE "
-#define FEDERATED_TRUNCATE_LEN sizeof(FEDERATED_TRUNCATE)
+#define FEDERATED_TRUNCATE_LEN (sizeof(FEDERATED_TRUNCATE) -1)
 #define FEDERATED_DELETE "DELETE "
-#define FEDERATED_DELETE_LEN sizeof(FEDERATED_DELETE)
+#define FEDERATED_DELETE_LEN (sizeof(FEDERATED_DELETE) -1)
 #define FEDERATED_INSERT "INSERT INTO "
-#define FEDERATED_INSERT_LEN sizeof(FEDERATED_INSERT)
+#define FEDERATED_INSERT_LEN (sizeof(FEDERATED_INSERT) -1)
 #define FEDERATED_OPTIMIZE "OPTIMIZE TABLE "
-#define FEDERATED_OPTIMIZE_LEN sizeof(FEDERATED_OPTIMIZE)
+#define FEDERATED_OPTIMIZE_LEN (sizeof(FEDERATED_OPTIMIZE) -1)
 #define FEDERATED_REPAIR "REPAIR TABLE "
-#define FEDERATED_REPAIR_LEN sizeof(FEDERATED_REPAIR)
+#define FEDERATED_REPAIR_LEN (sizeof(FEDERATED_REPAIR) -1)
 #define FEDERATED_QUICK " QUICK"
-#define FEDERATED_QUICK_LEN sizeof(FEDERATED_QUICK)
+#define FEDERATED_QUICK_LEN (sizeof(FEDERATED_QUICK) -1)
 #define FEDERATED_EXTENDED " EXTENDED"
-#define FEDERATED_EXTENDED_LEN sizeof(FEDERATED_EXTENDED)
+#define FEDERATED_EXTENDED_LEN (sizeof(FEDERATED_EXTENDED) -1)
 #define FEDERATED_USE_FRM " USE_FRM"
-#define FEDERATED_USE_FRM_LEN sizeof(FEDERATED_USE_FRM)
+#define FEDERATED_USE_FRM_LEN (sizeof(FEDERATED_USE_FRM) -1)
 #define FEDERATED_LIMIT1 " LIMIT 1"
-#define FEDERATED_LIMIT1_LEN sizeof(FEDERATED_LIMIT1)
+#define FEDERATED_LIMIT1_LEN (sizeof(FEDERATED_LIMIT1) -1)
 #define FEDERATED_VALUES "VALUES "
-#define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES)
+#define FEDERATED_VALUES_LEN (sizeof(FEDERATED_VALUES) -1)
 #define FEDERATED_UPDATE "UPDATE "
-#define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE)
-#define FEDERATED_SET "SET "
-#define FEDERATED_SET_LEN sizeof(FEDERATED_SET)
+#define FEDERATED_UPDATE_LEN (sizeof(FEDERATED_UPDATE) -1)
+#define FEDERATED_SET " SET "
+#define FEDERATED_SET_LEN (sizeof(FEDERATED_SET) -1)
 #define FEDERATED_AND " AND "
-#define FEDERATED_AND_LEN sizeof(FEDERATED_AND)
+#define FEDERATED_AND_LEN (sizeof(FEDERATED_AND) -1)
 #define FEDERATED_CONJUNCTION ") AND ("
-#define FEDERATED_CONJUNCTION_LEN sizeof(FEDERATED_CONJUNCTION)
+#define FEDERATED_CONJUNCTION_LEN (sizeof(FEDERATED_CONJUNCTION) -1)
 #define FEDERATED_OR " OR "
-#define FEDERATED_OR_LEN sizeof(FEDERATED_OR)
+#define FEDERATED_OR_LEN (sizeof(FEDERATED_OR) -1)
 #define FEDERATED_NOT " NOT "
-#define FEDERATED_NOT_LEN sizeof(FEDERATED_NOT)
+#define FEDERATED_NOT_LEN (sizeof(FEDERATED_NOT) -1)
 #define FEDERATED_STAR "* "
-#define FEDERATED_STAR_LEN sizeof(FEDERATED_STAR)
+#define FEDERATED_STAR_LEN (sizeof(FEDERATED_STAR) -1)
 #define FEDERATED_SPACE " "
-#define FEDERATED_SPACE_LEN sizeof(FEDERATED_SPACE)
+#define FEDERATED_SPACE_LEN (sizeof(FEDERATED_SPACE) -1)
 #define FEDERATED_SQUOTE "'"
-#define FEDERATED_SQUOTE_LEN sizeof(FEDERATED_SQUOTE)
+#define FEDERATED_SQUOTE_LEN (sizeof(FEDERATED_SQUOTE) -1)
 #define FEDERATED_COMMA ", "
-#define FEDERATED_COMMA_LEN sizeof(FEDERATED_COMMA)
+#define FEDERATED_COMMA_LEN (sizeof(FEDERATED_COMMA) -1)
 #define FEDERATED_BTICK "`"
-#define FEDERATED_BTICK_LEN sizeof(FEDERATED_BTICK)
+#define FEDERATED_BTICK_LEN (sizeof(FEDERATED_BTICK) -1)
 #define FEDERATED_OPENPAREN " ("
-#define FEDERATED_OPENPAREN_LEN sizeof(FEDERATED_OPENPAREN)
+#define FEDERATED_OPENPAREN_LEN (sizeof(FEDERATED_OPENPAREN) -1)
 #define FEDERATED_CLOSEPAREN ") "
-#define FEDERATED_CLOSEPAREN_LEN sizeof(FEDERATED_CLOSEPAREN)
+#define FEDERATED_CLOSEPAREN_LEN (sizeof(FEDERATED_CLOSEPAREN) -1)
 #define FEDERATED_NE " != "
-#define FEDERATED_NE_LEN sizeof(FEDERATED_NE)
+#define FEDERATED_NE_LEN (sizeof(FEDERATED_NE) -1)
 #define FEDERATED_GT " > "
-#define FEDERATED_GT_LEN sizeof(FEDERATED_GT)
+#define FEDERATED_GT_LEN (sizeof(FEDERATED_GT) -1)
 #define FEDERATED_LT " < "
-#define FEDERATED_LT_LEN sizeof(FEDERATED_LT)
+#define FEDERATED_LT_LEN (sizeof(FEDERATED_LT) -1)
 #define FEDERATED_LE " <= "
-#define FEDERATED_LE_LEN sizeof(FEDERATED_LE)
+#define FEDERATED_LE_LEN (sizeof(FEDERATED_LE) -1)
 #define FEDERATED_GE " >= "
-#define FEDERATED_GE_LEN sizeof(FEDERATED_GE)
+#define FEDERATED_GE_LEN (sizeof(FEDERATED_GE) -1)
 #define FEDERATED_EQ " = "
-#define FEDERATED_EQ_LEN sizeof(FEDERATED_EQ)
+#define FEDERATED_EQ_LEN (sizeof(FEDERATED_EQ) -1)
 #define FEDERATED_FALSE " 1=0"
-#define FEDERATED_FALSE_LEN sizeof(FEDERATED_FALSE)
+#define FEDERATED_FALSE_LEN (sizeof(FEDERATED_FALSE) -1)
 
 /*
   FEDERATED_SHARE is a structure that will be shared amoung all open handlers
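The defines hunk above subtracts one from every _LEN macro because sizeof on a string literal counts the trailing NUL, which is why call sites previously needed compensation like "- (FEDERATED_COMMA_LEN - 1)". A self-contained C++ demonstration:

#include <cstdio>

#define FEDERATED_COMMA ", "
#define OLD_COMMA_LEN sizeof(FEDERATED_COMMA)        // 3: ',', ' ', '\0'
#define NEW_COMMA_LEN (sizeof(FEDERATED_COMMA) - 1)  // 2: only the visible chars

int main()
{
  printf("old=%zu new=%zu\n", OLD_COMMA_LEN, NEW_COMMA_LEN);   // old=3 new=2
  return 0;
}
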
@@ -168,7 +168,7 @@ private:
   bool create_where_from_key(String *to, KEY *key_info,
                              const key_range *start_key,
                              const key_range *end_key,
-                             bool records_in_range);
+                             bool records_in_range, bool eq_range);
   int stash_remote_error();
 
 public:
@@ -192,12 +192,12 @@ public:
     implements. The current table flags are documented in
     handler.h
   */
-  ulong table_flags() const
+  ulonglong table_flags() const
   {
     /* fix server to be able to get remote server table flags */
-    return (HA_NOT_EXACT_COUNT |
-            HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ |
-            HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS| HA_NO_PREFIX_CHAR_KEYS);
+    return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ |
+            HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS| HA_NO_PREFIX_CHAR_KEYS |
+            HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | HA_PARTIAL_COLUMN_READ);
   }
   /*
     This is a bitmap of flags that says how the storage engine
@@ -231,8 +231,8 @@ public:
   */
   double scan_time()
   {
-    DBUG_PRINT("info", ("records %lu", (ulong) records));
-    return (double)(records*1000);
+    DBUG_PRINT("info", ("records %lu", (ulong) stats.records));
+    return (double)(stats.records*1000);
   }
   /*
     The next method will never be called if you do not implement indexes.
@@ -302,7 +302,6 @@ public:
   int external_lock(THD *thd, int lock_type);
   int connection_commit();
   int connection_rollback();
-  bool has_transactions() { return 1; }
   int connection_autocommit(bool state);
   int execute_simple_query(const char *query, int len);
 };
sql/ha_heap.cc
@@ -24,7 +24,7 @@
 #include "ha_heap.h"
 
 
-static handler *heap_create_handler(TABLE_SHARE *table);
+static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root);
 
 handlerton heap_hton;
 
@@ -38,9 +38,9 @@ int heap_init()
   return 0;
 }
 
-static handler *heap_create_handler(TABLE_SHARE *table)
+static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
 {
-  return new ha_heap(table);
+  return new (mem_root) ha_heap(table);
 }
 
 
@@ -331,16 +331,16 @@ void ha_heap::info(uint flag)
   HEAPINFO info;
   (void) heap_info(file,&info,flag);
 
-  records = info.records;
-  deleted = info.deleted;
-  errkey  = info.errkey;
-  mean_rec_length=info.reclength;
-  data_file_length=info.data_length;
-  index_file_length=info.index_length;
-  max_data_file_length= info.max_records* info.reclength;
-  delete_length= info.deleted * info.reclength;
+  errkey= info.errkey;
+  stats.records = info.records;
+  stats.deleted = info.deleted;
+  stats.mean_rec_length=info.reclength;
+  stats.data_file_length=info.data_length;
+  stats.index_file_length=info.index_length;
+  stats.max_data_file_length= info.max_records* info.reclength;
+  stats.delete_length= info.deleted * info.reclength;
   if (flag & HA_STATUS_AUTO)
-    auto_increment_value= info.auto_increment;
+    stats.auto_increment_value= info.auto_increment;
   /*
     If info() is called for the first time after open(), we will still
     have to update the key statistics. Hoping that a table lock is now
@@ -350,11 +350,19 @@ void ha_heap::info(uint flag)
     update_key_stats();
 }
 
+
 int ha_heap::extra(enum ha_extra_function operation)
 {
   return heap_extra(file,operation);
 }
 
+
+int ha_heap::reset()
+{
+  return heap_reset(file);
+}
+
+
 int ha_heap::delete_all_rows()
 {
   heap_clear(file);
@@ -531,8 +539,8 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
       max_key->flag != HA_READ_AFTER_KEY)
     return HA_POS_ERROR;                        // Can only use exact keys
 
-  if (records <= 1)
-    return records;
+  if (stats.records <= 1)
+    return stats.records;
 
   /* Assert that info() did run. We need current statistics here. */
   DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
@@ -660,7 +668,7 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
 {
   table->file->info(HA_STATUS_AUTO);
   if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
-    create_info->auto_increment_value= auto_increment_value;
+    create_info->auto_increment_value= stats.auto_increment_value;
 }
 
 void ha_heap::get_auto_increment(ulonglong offset, ulonglong increment,
@@ -669,7 +677,7 @@ void ha_heap::get_auto_increment(ulonglong offset, ulonglong increment,
                                  ulonglong *nb_reserved_values)
 {
   ha_heap::info(HA_STATUS_AUTO);
-  *first_value= auto_increment_value;
+  *first_value= stats.auto_increment_value;
   /* such table has only table-level locking so reserves up to +inf */
   *nb_reserved_values= ULONGLONG_MAX;
 }

sql/ha_heap.h
@@ -46,11 +46,11 @@ public:
   /* Rows also use a fixed-size format */
   enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
   const char **bas_ext() const;
-  ulong table_flags() const
+  ulonglong table_flags() const
  {
     return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
-            HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME |
-            HA_CAN_INSERT_DELAYED);
+            HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS |
+            HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT);
   }
   ulong index_flags(uint inx, uint part, bool all_parts) const
   {
@@ -61,7 +61,8 @@ public:
   const key_map *keys_to_use_for_scanning() { return &btree_keys; }
   uint max_supported_keys() const { return MAX_KEY; }
   uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
-  double scan_time() { return (double) (records+deleted) / 20.0+10; }
+  double scan_time()
+  { return (double) (stats.records+stats.deleted) / 20.0+10; }
   double read_time(uint index, uint ranges, ha_rows rows)
   { return (double) rows / 20.0+1; }
 
@@ -90,6 +91,7 @@ public:
   void position(const byte *record);
   void info(uint);
   int extra(enum ha_extra_function operation);
+  int reset();
   int external_lock(THD *thd, int lock_type);
   int delete_all_rows(void);
   int disable_indexes(uint mode);
@@ -204,14 +204,15 @@ static int innobase_rollback(THD* thd, bool all);
 static int innobase_rollback_to_savepoint(THD* thd, void *savepoint);
 static int innobase_savepoint(THD* thd, void *savepoint);
 static int innobase_release_savepoint(THD* thd, void *savepoint);
-static handler *innobase_create_handler(TABLE_SHARE *table);
+static handler *innobase_create_handler(TABLE_SHARE *table,
+                                        MEM_ROOT *mem_root);

 static const char innobase_hton_name[]= "InnoDB";
 handlerton innobase_hton;

-static handler *innobase_create_handler(TABLE_SHARE *table)
+static handler *innobase_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
 {
-  return new ha_innobase(table);
+  return new (mem_root) ha_innobase(table);
 }


@@ -804,10 +805,9 @@ ha_innobase::ha_innobase(TABLE_SHARE *table_arg)
                 HA_NULL_IN_KEY |
                 HA_CAN_INDEX_BLOBS |
                 HA_CAN_SQL_HANDLER |
-                HA_NOT_EXACT_COUNT |
-                HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS |
+                HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
                 HA_PRIMARY_KEY_IN_READ_INDEX |
-                HA_CAN_GEOMETRY |
+                HA_CAN_GEOMETRY | HA_PARTIAL_COLUMN_READ |
                 HA_TABLE_SCAN_ON_INDEX),
         start_of_scan(0),
         num_write_row(0)

@@ -2307,7 +2307,7 @@ ha_innobase::open(
     }
   }

-  block_size = 16 * 1024;  /* Index block size in InnoDB: used by MySQL
+  stats.block_size = 16 * 1024;  /* Index block size in InnoDB: used by MySQL
                            in query optimization */

   /* Init table lock structure */

@@ -2902,16 +2902,15 @@ ha_innobase::store_key_val_for_row(
 /******************************************************************
 Builds a 'template' to the prebuilt struct. The template is used in fast
 retrieval of just those column values MySQL needs in its processing. */
-static
 void
-build_template(
+ha_innobase::build_template(
 /*===========*/
         row_prebuilt_t* prebuilt,       /* in: prebuilt struct */
         THD*            thd,            /* in: current user thread, used
                                         only if templ_type is
                                         ROW_MYSQL_REC_FIELDS */
         TABLE*          table,          /* in: MySQL table */
-        ulint           templ_type)     /* in: ROW_MYSQL_WHOLE_ROW or
+        uint            templ_type)     /* in: ROW_MYSQL_WHOLE_ROW or
                                         ROW_MYSQL_REC_FIELDS */
 {
         dict_index_t*   index;

@@ -3020,8 +3019,8 @@ build_template(
                         goto include_field;
                 }

-                if (table->file->ha_get_bit_in_read_set(i+1) ||
-                    table->file->ha_get_bit_in_write_set(i+1)) {
+                if (bitmap_is_set(table->read_set, i) ||
+                    bitmap_is_set(table->write_set, i)) {
                         /* This field is needed in the query */

                         goto include_field;
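Note: the mechanical replacement of ha_get_bit_in_read_set(i+1)/ha_get_bit_in_write_set(i+1) with bitmap_is_set(table->read_set, i)/bitmap_is_set(table->write_set, i) swaps per-handler column bookkeeping for the TABLE's shared column bitmaps, and with it moves from 1-based field numbers to 0-based field indexes. A simplified sketch of the membership test (illustrative type; MY_BITMAP plays this role in the server):

    #include <cstdint>
    #include <vector>

    // Simplified column bitmap standing in for MY_BITMAP.
    struct column_bitmap {
      std::vector<uint32_t> words;
      explicit column_bitmap(unsigned nbits) : words((nbits + 31) / 32, 0) {}
      void set(unsigned i) { words[i / 32] |= 1U << (i % 32); }
      bool is_set(unsigned i) const { return (words[i / 32] >> (i % 32)) & 1U; }
    };

    // Field i is fetched when either map marks it, mirroring the new test
    // in build_template() above.
    bool field_needed(const column_bitmap &rs, const column_bitmap &ws, unsigned i)
    {
      return rs.is_set(i) || ws.is_set(i);
    }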
@@ -5405,7 +5404,7 @@ ha_innobase::info(
         nor the CHECK TABLE time, nor the UPDATE or INSERT time. */

         if (os_file_get_status(path,&stat_info)) {
-                create_time = stat_info.ctime;
+                stats.create_time = stat_info.ctime;
         }
   }

@@ -5433,21 +5432,21 @@ ha_innobase::info(
                 n_rows++;
         }

-        records = (ha_rows)n_rows;
-        deleted = 0;
-        data_file_length = ((ulonglong)
+        stats.records = (ha_rows)n_rows;
+        stats.deleted = 0;
+        stats.data_file_length = ((ulonglong)
                 ib_table->stat_clustered_index_size)
                         * UNIV_PAGE_SIZE;
-        index_file_length = ((ulonglong)
+        stats.index_file_length = ((ulonglong)
                 ib_table->stat_sum_of_other_index_sizes)
                         * UNIV_PAGE_SIZE;
-        delete_length = 0;
-        check_time = 0;
+        stats.delete_length = 0;
+        stats.check_time = 0;

-        if (records == 0) {
-                mean_rec_length = 0;
+        if (stats.records == 0) {
+                stats.mean_rec_length = 0;
         } else {
-                mean_rec_length = (ulong) (data_file_length / records);
+                stats.mean_rec_length = (ulong) (stats.data_file_length / stats.records);
         }
 }

@@ -5496,9 +5495,9 @@ ha_innobase::info(

         if (index->stat_n_diff_key_vals[j + 1] == 0) {

-                rec_per_key = records;
+                rec_per_key = stats.records;
         } else {
-                rec_per_key = (ha_rows)(records /
+                rec_per_key = (ha_rows)(stats.records /
                         index->stat_n_diff_key_vals[j + 1]);
         }

@@ -5553,7 +5552,7 @@ ha_innobase::info(
                 }
         }

-        auto_increment_value = auto_inc;
+        stats.auto_increment_value = auto_inc;
 }

         prebuilt->trx->op_info = (char*)"";

@@ -5948,8 +5947,7 @@ ha_innobase::extra(
 /*===============*/
                         /* out: 0 or error number */
         enum ha_extra_function operation)
-                        /* in: HA_EXTRA_RETRIEVE_ALL_COLS or some
-                        other flag */
+                        /* in: HA_EXTRA_FLUSH or some other flag */
 {
         row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;

@@ -5963,13 +5961,6 @@ ha_innobase::extra(
                         row_mysql_prebuilt_free_blob_heap(prebuilt);
                 }
                 break;
-        case HA_EXTRA_RESET:
-                if (prebuilt->blob_heap) {
-                        row_mysql_prebuilt_free_blob_heap(prebuilt);
-                }
-                prebuilt->keep_other_fields_on_keyread = 0;
-                prebuilt->read_just_key = 0;
-                break;
         case HA_EXTRA_RESET_STATE:
                 prebuilt->keep_other_fields_on_keyread = 0;
                 prebuilt->read_just_key = 0;

@@ -5977,16 +5968,6 @@ ha_innobase::extra(
         case HA_EXTRA_NO_KEYREAD:
                 prebuilt->read_just_key = 0;
                 break;
-        case HA_EXTRA_RETRIEVE_ALL_COLS:
-                prebuilt->hint_need_to_fetch_extra_cols
-                        = ROW_RETRIEVE_ALL_COLS;
-                break;
-        case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
-                if (prebuilt->hint_need_to_fetch_extra_cols == 0) {
-                        prebuilt->hint_need_to_fetch_extra_cols
-                                = ROW_RETRIEVE_PRIMARY_KEY;
-                }
-                break;
         case HA_EXTRA_KEYREAD:
                 prebuilt->read_just_key = 1;
                 break;

@@ -6000,6 +5981,18 @@ ha_innobase::extra(
         return(0);
 }

+int ha_innobase::reset()
+{
+        row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+        if (prebuilt->blob_heap) {
+                row_mysql_prebuilt_free_blob_heap(prebuilt);
+        }
+        prebuilt->keep_other_fields_on_keyread = 0;
+        prebuilt->read_just_key = 0;
+        return 0;
+}
+
+
 /**********************************************************************
 MySQL calls this function at the start of each SQL statement inside LOCK
 TABLES. Inside LOCK TABLES the ::external_lock method does not work to

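Note: a recurring theme across these engine files is the retirement of the HA_EXTRA_RESET (and RETRIEVE_*) cases inside ::extra() in favour of a first-class ::reset() method the server calls between statements; ha_innobase::reset() above carries over exactly the cleanup the removed case used to do. A sketch of the before/after shape of the API (names from the patch, body illustrative):

    // Before: per-statement cleanup hidden behind a catch-all hint call:
    //   handler->extra(HA_EXTRA_RESET);
    // After: an explicit method with a narrow, documented job.
    struct sketch_handler {
      bool read_just_key;
      bool keep_other_fields_on_keyread;

      // Called by the server when a statement is done with the handler.
      int reset()
      {
        // free per-statement buffers, then drop keyread hints, as
        // ha_innobase::reset() does in the hunk above
        read_just_key= false;
        keep_other_fields_on_keyread= false;
        return 0;
      }
    };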
@@ -33,6 +33,8 @@ typedef struct st_innobase_share {
 } INNOBASE_SHARE;


+struct row_prebuilt_struct;
+
 my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
                                                   uint full_name_len,
                                                   ulonglong *unused);

@@ -89,7 +91,7 @@ class ha_innobase: public handler
         const char* table_type() const { return("InnoDB");}
         const char *index_type(uint key_number) { return "BTREE"; }
         const char** bas_ext() const;
-        ulong table_flags() const { return int_table_flags; }
+        ulonglong table_flags() const { return int_table_flags; }
         ulong index_flags(uint idx, uint part, bool all_parts) const
         {
                 return (HA_READ_NEXT |

@@ -109,7 +111,6 @@ class ha_innobase: public handler
         uint max_supported_key_length() const { return 3500; }
         uint max_supported_key_part_length() const;
         const key_map *keys_to_use_for_scanning() { return &key_map_full; }
-        bool has_transactions() { return 1;}

         int open(const char *name, int mode, uint test_if_locked);
         int close(void);

@@ -147,20 +148,10 @@ class ha_innobase: public handler
         int optimize(THD* thd,HA_CHECK_OPT* check_opt);
         int discard_or_import_tablespace(my_bool discard);
         int extra(enum ha_extra_function operation);
+        int reset();
         int external_lock(THD *thd, int lock_type);
         int transactional_table_lock(THD *thd, int lock_type);
         int start_stmt(THD *thd, thr_lock_type lock_type);

-        int ha_retrieve_all_cols()
-        {
-                ha_set_all_bits_in_read_set();
-                return extra(HA_EXTRA_RETRIEVE_ALL_COLS);
-        }
-        int ha_retrieve_all_pk()
-        {
-                ha_set_primary_key_in_read_set();
-                return extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
-        }
         void position(byte *record);
         ha_rows records_in_range(uint inx, key_range *min_key, key_range
                 *max_key);

@@ -210,6 +201,8 @@ class ha_innobase: public handler
         int cmp_ref(const byte *ref1, const byte *ref2);
         bool check_if_incompatible_data(HA_CREATE_INFO *info,
                                         uint table_changes);
+        void build_template(struct row_prebuilt_struct *prebuilt, THD *thd,
+                            TABLE *table, uint templ_type);
 };

 extern SHOW_VAR innodb_status_variables[];

@@ -52,9 +52,9 @@ TYPELIB myisam_stats_method_typelib= {
 ** MyISAM tables
 *****************************************************************************/

-static handler *myisam_create_handler(TABLE_SHARE *table)
+static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
 {
-  return new ha_myisam(table);
+  return new (mem_root) ha_myisam(table);
 }

 // collect errors printed by mi_check routines

@@ -140,10 +140,11 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
 ha_myisam::ha_myisam(TABLE_SHARE *table_arg)
   :handler(&myisam_hton, table_arg), file(0),
   int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
-                  HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
-                  HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
-                  HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS),
-   can_enable_indexes(1)
+                  HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
+                  HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS |
+                  HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS |
+                  HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT),
+   can_enable_indexes(1)
 {}


@@ -1266,7 +1267,7 @@ int ha_myisam::rnd_init(bool scan)
 {
   if (scan)
     return mi_scan_init(file);
-  return mi_extra(file, HA_EXTRA_RESET, 0);
+  return mi_reset(file);                        // Free buffers
 }

 int ha_myisam::rnd_next(byte *buf)

@@ -1306,24 +1307,23 @@ void ha_myisam::info(uint flag)
   (void) mi_status(file,&info,flag);
   if (flag & HA_STATUS_VARIABLE)
   {
-    records = info.records;
-    deleted = info.deleted;
-    data_file_length=info.data_file_length;
-    index_file_length=info.index_file_length;
-    delete_length = info.delete_length;
-    check_time = info.check_time;
-    mean_rec_length=info.mean_reclength;
+    stats.records = info.records;
+    stats.deleted = info.deleted;
+    stats.data_file_length=info.data_file_length;
+    stats.index_file_length=info.index_file_length;
+    stats.delete_length = info.delete_length;
+    stats.check_time = info.check_time;
+    stats.mean_rec_length=info.mean_reclength;
   }
   if (flag & HA_STATUS_CONST)
   {
     TABLE_SHARE *share= table->s;
-    max_data_file_length= info.max_data_file_length;
-    max_index_file_length= info.max_index_file_length;
-    create_time= info.create_time;
-    sortkey= info.sortkey;
+    stats.max_data_file_length=  info.max_data_file_length;
+    stats.max_index_file_length= info.max_index_file_length;
+    stats.create_time= info.create_time;
     ref_length= info.reflength;
     share->db_options_in_use= info.options;
-    block_size= myisam_block_size;              /* record block size */
+    stats.block_size= myisam_block_size;        /* record block size */

     /* Update share */
     if (share->tmp_table == NO_TMP_TABLE)

@@ -1354,12 +1354,12 @@ void ha_myisam::info(uint flag)
   if (flag & HA_STATUS_ERRKEY)
   {
     errkey = info.errkey;
-    my_store_ptr(dupp_ref, ref_length, info.dupp_key_pos);
+    my_store_ptr(dup_ref, ref_length, info.dupp_key_pos);
   }
   if (flag & HA_STATUS_TIME)
-    update_time = info.update_time;
+    stats.update_time = info.update_time;
   if (flag & HA_STATUS_AUTO)
-    auto_increment_value= info.auto_increment;
+    stats.auto_increment_value= info.auto_increment;
 }


@@ -1370,6 +1370,10 @@ int ha_myisam::extra(enum ha_extra_function operation)
   return mi_extra(file, operation, 0);
 }

+int ha_myisam::reset(void)
+{
+  return mi_reset(file);
+}
+
 /* To be used with WRITE_CACHE and EXTRA_CACHE */

@@ -1413,7 +1417,7 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
   ha_myisam::info(HA_STATUS_AUTO | HA_STATUS_CONST);
   if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
   {
-    create_info->auto_increment_value=auto_increment_value;
+    create_info->auto_increment_value= stats.auto_increment_value;
   }
   create_info->data_file_name=data_file_name;
   create_info->index_file_name=index_file_name;

@@ -1650,7 +1654,7 @@ void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment,
   if (!table->s->next_number_key_offset)
   {                                             // Autoincrement at key-start
     ha_myisam::info(HA_STATUS_AUTO);
-    *first_value= auto_increment_value;
+    *first_value= stats.auto_increment_value;
     /* MyISAM has only table-level lock, so reserves to +inf */
     *nb_reserved_values= ULONGLONG_MAX;
     return;

@@ -1744,7 +1748,7 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info,
 {
   uint options= table->s->db_options_in_use;

-  if (info->auto_increment_value != auto_increment_value ||
+  if (info->auto_increment_value != stats.auto_increment_value ||
       info->data_file_name != data_file_name ||
       info->index_file_name != index_file_name ||
       table_changes == IS_EQUAL_NO ||

@@ -48,7 +48,7 @@ class ha_myisam: public handler
   const char *table_type() const { return "MyISAM"; }
   const char *index_type(uint key_number);
   const char **bas_ext() const;
-  ulong table_flags() const { return int_table_flags; }
+  ulonglong table_flags() const { return int_table_flags; }
   ulong index_flags(uint inx, uint part, bool all_parts) const
   {
     return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?

@@ -101,6 +101,7 @@ class ha_myisam: public handler
   void info(uint);
   int extra(enum ha_extra_function operation);
   int extra_opt(enum ha_extra_function operation, ulong cache_size);
+  int reset(void);
   int external_lock(THD *thd, int lock_type);
   int delete_all_rows(void);
   int disable_indexes(uint mode);

@@ -34,15 +34,17 @@
 ** MyISAM MERGE tables
 *****************************************************************************/

-static handler *myisammrg_create_handler(TABLE_SHARE *table);
+static handler *myisammrg_create_handler(TABLE_SHARE *table,
+                                         MEM_ROOT *mem_root);

 /* MyISAM MERGE handlerton */

 handlerton myisammrg_hton;

-static handler *myisammrg_create_handler(TABLE_SHARE *table)
+static handler *myisammrg_create_handler(TABLE_SHARE *table,
+                                         MEM_ROOT *mem_root)
 {
-  return new ha_myisammrg(table);
+  return new (mem_root) ha_myisammrg(table);
 }


@@ -94,10 +96,10 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
   if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
     myrg_extra(file,HA_EXTRA_WAIT_LOCK,0);

-  if (table->s->reclength != mean_rec_length && mean_rec_length)
+  if (table->s->reclength != stats.mean_rec_length && stats.mean_rec_length)
   {
     DBUG_PRINT("error",("reclength: %d  mean_rec_length: %d",
-                        table->s->reclength, mean_rec_length));
+                        table->s->reclength, stats.mean_rec_length));
     goto err;
   }
 #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4

@@ -218,11 +220,13 @@ int ha_myisammrg::index_next_same(byte * buf,
   return error;
 }


 int ha_myisammrg::rnd_init(bool scan)
 {
-  return myrg_extra(file,HA_EXTRA_RESET,0);
+  return myrg_reset(file);
 }


 int ha_myisammrg::rnd_next(byte *buf)
 {
   statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,

@@ -232,6 +236,7 @@ int ha_myisammrg::rnd_next(byte *buf)
   return error;
 }

+
 int ha_myisammrg::rnd_pos(byte * buf, byte *pos)
 {
   statistic_increment(table->in_use->status_var.ha_read_rnd_count,

@@ -263,18 +268,18 @@ void ha_myisammrg::info(uint flag)
     The following fails if one has not compiled MySQL with -DBIG_TABLES
     and one has more than 2^32 rows in the merge tables.
   */
-  records = (ha_rows) info.records;
-  deleted = (ha_rows) info.deleted;
+  stats.records = (ha_rows) info.records;
+  stats.deleted = (ha_rows) info.deleted;
 #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4
   if ((info.records >= (ulonglong) 1 << 32) ||
       (info.deleted >= (ulonglong) 1 << 32))
     table->s->crashed= 1;
 #endif
-  data_file_length=info.data_file_length;
+  stats.data_file_length=info.data_file_length;
   errkey = info.errkey;
   table->s->keys_in_use.set_prefix(table->s->keys);
   table->s->db_options_in_use= info.options;
-  mean_rec_length= info.reclength;
+  stats.mean_rec_length= info.reclength;

   /*
     The handler::block_size is used all over the code in index scan cost

@@ -292,11 +297,11 @@ void ha_myisammrg::info(uint flag)
     TODO: In 5.2 index scan cost calculation will be factored out into a
     virtual function in class handler and we'll be able to remove this hack.
   */
-  block_size= 0;
+  stats.block_size= 0;
   if (file->tables)
-    block_size= myisam_block_size / file->tables;
+    stats.block_size= myisam_block_size / file->tables;

-  update_time=0;
+  stats.update_time= 0;
 #if SIZEOF_OFF_T > 4
   ref_length=6;                                 // Should be big enough
 #else

@@ -322,6 +327,10 @@ int ha_myisammrg::extra(enum ha_extra_function operation)
   return myrg_extra(file,operation,0);
 }

+int ha_myisammrg::reset(void)
+{
+  return myrg_reset(file);
+}
+
 /* To be used with WRITE_CACHE, EXTRA_CACHE and BULK_INSERT_BEGIN */

@@ -33,9 +33,9 @@ class ha_myisammrg: public handler
   const char *table_type() const { return "MRG_MyISAM"; }
   const char **bas_ext() const;
   const char *index_type(uint key_number);
-  ulong table_flags() const
+  ulonglong table_flags() const
   {
-    return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
+    return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_NO_TRANSACTIONS |
             HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
             HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE |
             HA_NO_COPY_ON_ALTER);

@@ -50,7 +50,7 @@ class ha_myisammrg: public handler
   uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
   uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
   double scan_time()
-  { return ulonglong2double(data_file_length) / IO_SIZE + file->tables; }
+  { return ulonglong2double(stats.data_file_length) / IO_SIZE + file->tables; }

   int open(const char *name, int mode, uint test_if_locked);
   int close(void);

@@ -73,6 +73,7 @@ class ha_myisammrg: public handler
   void position(const byte *record);
   ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
   void info(uint);
+  int reset(void);
   int extra(enum ha_extra_function operation);
   int extra_opt(enum ha_extra_function operation, ulong cache_size);
   int external_lock(THD *thd, int lock_type);

@@ -71,9 +71,10 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)

 handlerton ndbcluster_hton;

-static handler *ndbcluster_create_handler(TABLE_SHARE *table)
+static handler *ndbcluster_create_handler(TABLE_SHARE *table,
+                                          MEM_ROOT *mem_root)
 {
-  return new ha_ndbcluster(table);
+  return new (mem_root) ha_ndbcluster(table);
 }

 static uint ndbcluster_partition_flags()

@@ -427,9 +428,10 @@ void ha_ndbcluster::records_update()
     Ndb *ndb= get_ndb();
     ndb->setDatabaseName(m_dbname);
     struct Ndb_statistics stat;
-    if (ndb_get_table_statistics(ndb, m_table, &stat) == 0){
-      mean_rec_length= stat.row_size;
-      data_file_length= stat.fragment_memory;
+    if (ndb_get_table_statistics(ndb, m_table, &stat) == 0)
+    {
+      stats.mean_rec_length= stat.row_size;
+      stats.data_file_length= stat.fragment_memory;
       info->records= stat.row_count;
     }
   }

@@ -438,7 +440,7 @@ void ha_ndbcluster::records_update()
     if (get_thd_ndb(thd)->error)
       info->no_uncommitted_rows_count= 0;
   }
-  records= info->records+ info->no_uncommitted_rows_count;
+  stats.records= info->records+ info->no_uncommitted_rows_count;
   DBUG_VOID_RETURN;
 }

@@ -886,23 +888,24 @@ int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op)
 /*
   Check if any set or get of blob value in current query.
 */

 bool ha_ndbcluster::uses_blob_value()
 {
-  uint blob_fields;
+  MY_BITMAP *bitmap;
+  uint *blob_index, *blob_index_end;
   if (table_share->blob_fields == 0)
     return FALSE;

+  bitmap= m_write_op ? table->write_set : table->read_set;
+  blob_index= table_share->blob_field;
+  blob_index_end= blob_index + table_share->blob_fields;
+  do
   {
-    uint no_fields= table_share->fields;
-    int i;
-    // They always put blobs at the end..
-    for (i= no_fields - 1; i >= 0; i--)
-    {
-      if ((m_write_op && ha_get_bit_in_write_set(i+1)) ||
-          (!m_write_op && ha_get_bit_in_read_set(i+1)))
-      {
-        return TRUE;
-      }
-    }
-  }
+    if (bitmap_is_set(table->write_set,
+                      table->field[*blob_index]->field_index))
+      return TRUE;
+  } while (++blob_index != blob_index_end);
   return FALSE;
 }

@@ -1399,10 +1402,9 @@ int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
 {
   if (type >= TL_WRITE_ALLOW_WRITE)
     return NdbOperation::LM_Exclusive;
-  else if (uses_blob_value())
+  if (uses_blob_value())
     return NdbOperation::LM_Read;
-  else
-    return NdbOperation::LM_CommittedRead;
+  return NdbOperation::LM_CommittedRead;
 }

 static const ulong index_type_flags[]=

@@ -1577,13 +1579,13 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
   for (i= 0; i < table_share->fields; i++)
   {
     Field *field= table->field[i];
-    if (ha_get_bit_in_read_set(i+1) ||
+    if (bitmap_is_set(table->read_set, i) ||
         ((field->flags & PRI_KEY_FLAG)))
     {
       if (get_ndb_value(op, field, i, buf))
         ERR_RETURN(op->getNdbError());
     }
-    else
+    else
     {
       m_value[i].ptr= NULL;
     }

@@ -1687,7 +1689,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
   DBUG_ENTER("complemented_read");
   m_write_op= FALSE;

-  if (ha_get_all_bit_in_read_set())
+  if (bitmap_is_set_all(table->read_set))
   {
     // We have allready retrieved all fields, nothing to complement
     DBUG_RETURN(0);

@@ -1718,7 +1720,8 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
     {
       Field *field= table->field[i];
       if (!((field->flags & PRI_KEY_FLAG) ||
-            (ha_get_bit_in_read_set(i+1))))
+            bitmap_is_set(table->read_set, i)) &&
+          !bitmap_is_set(table->write_set, i))
       {
         if (get_ndb_value(op, field, i, new_data))
           ERR_RETURN(trans->getNdbError());

@@ -1742,7 +1745,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
     {
       Field *field= table->field[i];
       if (!((field->flags & PRI_KEY_FLAG) ||
-            (ha_get_bit_in_read_set(i+1))))
+            bitmap_is_set(table->read_set, i)))
       {
         m_value[i].ptr= NULL;
       }

@@ -1843,11 +1846,11 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record)
       uint32 part_id;
       int error;
       longlong func_value;
-      if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
-                                                &func_value)))
-      {
+      my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+      error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
+      dbug_tmp_restore_column_map(table->read_set, old_map);
+      if (error)
         DBUG_RETURN(error);
-      }
       op->setPartitionId(part_id);
     }
   }

@@ -2439,11 +2442,11 @@ int ha_ndbcluster::write_row(byte *record)
   {
     uint32 part_id;
     int error;
-    if ((error= m_part_info->get_partition_id(m_part_info, &part_id,
-                                              &func_value)))
-    {
+    my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+    error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
+    dbug_tmp_restore_column_map(table->read_set, old_map);
+    if (error)
       DBUG_RETURN(error);
-    }
     op->setPartitionId(part_id);
   }

@@ -2467,25 +2470,27 @@ int ha_ndbcluster::write_row(byte *record)
   }
   else
   {
-    int res;
-
-    if ((res= set_primary_key_from_record(op, record)))
-      return res;
+    int error;
+    if ((error= set_primary_key_from_record(op, record)))
+      DBUG_RETURN(error);
   }

   // Set non-key attribute(s)
   bool set_blob_value= FALSE;
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
   for (i= 0; i < table_share->fields; i++)
   {
     Field *field= table->field[i];
     if (!(field->flags & PRI_KEY_FLAG) &&
-        (ha_get_bit_in_write_set(i + 1) || !m_use_write) &&
+        (bitmap_is_set(table->write_set, i) || !m_use_write) &&
         set_ndb_value(op, field, i, record-table->record[0], &set_blob_value))
     {
       m_skip_auto_increment= TRUE;
+      dbug_tmp_restore_column_map(table->read_set, old_map);
      ERR_RETURN(op->getNdbError());
     }
   }
+  dbug_tmp_restore_column_map(table->read_set, old_map);

   if (m_use_partition_function)
   {
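Note: the dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map() pairs added around partition-id computation and field writes all follow one pattern: in debug builds, field accessors assert that the column is marked in the active bitmap, so code that legitimately touches unmarked columns temporarily swaps in an all-set map and restores the original afterwards. A stripped-down sketch of that save/swap/restore idiom (types simplified; the real helpers operate on MY_BITMAP):

    #include <cstdint>

    // Minimal stand-in: the real code swaps the MY_BITMAP buffer pointer.
    struct col_map { uint32_t *bits; };

    // Mirrors dbug_tmp_use_all_columns(): point the map at an all-ones buffer
    // and hand back the previous buffer so the caller can restore it.
    inline uint32_t *tmp_use_all_columns(col_map *map, uint32_t *all_set_buf)
    {
      uint32_t *old= map->bits;
      map->bits= all_set_buf;
      return old;
    }

    // Mirrors dbug_tmp_restore_column_map(): reinstate the saved buffer.
    inline void tmp_restore_column_map(col_map *map, uint32_t *old)
    {
      map->bits= old;
    }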
@@ -2564,6 +2569,7 @@ int ha_ndbcluster::write_row(byte *record)
   }
   m_skip_auto_increment= TRUE;

+  DBUG_PRINT("exit",("ok"));
   DBUG_RETURN(0);
 }


@@ -2623,7 +2629,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
   {
     table->timestamp_field->set_time();
-    ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
+    bitmap_set_bit(table->write_set, table->timestamp_field->field_index);
   }

   if (m_use_partition_function &&

@@ -2737,14 +2743,19 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
   m_rows_changed++;

   // Set non-key attribute(s)
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
   for (i= 0; i < table_share->fields; i++)
   {
     Field *field= table->field[i];
-    if (ha_get_bit_in_write_set(i+1) &&
+    if (bitmap_is_set(table->write_set, i) &&
         (!(field->flags & PRI_KEY_FLAG)) &&
         set_ndb_value(op, field, i, new_data - table->record[0]))
+    {
+      dbug_tmp_restore_column_map(table->read_set, old_map);
       ERR_RETURN(op->getNdbError());
+    }
   }
+  dbug_tmp_restore_column_map(table->read_set, old_map);

   if (m_use_partition_function)
   {

@@ -2836,9 +2847,8 @@ int ha_ndbcluster::delete_row(const byte *record)
     }
     else
     {
-      int res;
-      if ((res= set_primary_key_from_record(op, record)))
-        return res;
+      if ((error= set_primary_key_from_record(op, record)))
+        DBUG_RETURN(error);
     }
   }

@@ -2868,7 +2878,8 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
                        MY_BITMAP *defined, byte *buf)
 {
   Field **p_field= table->field, *field= *p_field;
-  my_ptrdiff_t row_offset= buf - table->record[0];
+  my_ptrdiff_t row_offset= (my_ptrdiff_t) (buf - table->record[0]);
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
   DBUG_ENTER("ndb_unpack_record");

   // Set null flag(s)

@@ -2929,13 +2940,13 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
           field_bit->Field_bit::move_field_offset(-row_offset);
           DBUG_PRINT("info",("[%u] SET",
                              (*value).rec->getColumn()->getColumnNo()));
-          DBUG_DUMP("info", (const char*) field->ptr, field->field_length);
+          DBUG_DUMP("info", (const char*) field->ptr, field->pack_length());
         }
         else
         {
           DBUG_PRINT("info",("[%u] SET",
                              (*value).rec->getColumn()->getColumnNo()));
-          DBUG_DUMP("info", (const char*) field->ptr, field->field_length);
+          DBUG_DUMP("info", (const char*) field->ptr, field->pack_length());
         }
       }
       else

@@ -2968,6 +2979,7 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
       }
     }
   }
+  dbug_tmp_restore_column_map(table->write_set, old_map);
   DBUG_VOID_RETURN;
 }


@@ -3490,7 +3502,7 @@ void ha_ndbcluster::info(uint flag)
     if (m_table_info)
     {
       if (m_ha_not_exact_count)
-        records= 100;
+        stats.records= 100;
       else
         records_update();
     }

@@ -3504,14 +3516,14 @@ void ha_ndbcluster::info(uint flag)
       if (current_thd->variables.ndb_use_exact_count &&
           ndb_get_table_statistics(ndb, m_table, &stat) == 0)
       {
-        mean_rec_length= stat.row_size;
-        data_file_length= stat.fragment_memory;
-        records= stat.row_count;
+        stats.mean_rec_length= stat.row_size;
+        stats.data_file_length= stat.fragment_memory;
+        stats.records= stat.row_count;
       }
       else
       {
-        mean_rec_length= 0;
-        records= 100;
+        stats.mean_rec_length= 0;
+        stats.records= 100;
       }
     }
   }

@@ -3540,10 +3552,10 @@ void ha_ndbcluster::info(uint flag)
         const NdbError err= ndb->getNdbError();
         sql_print_error("Error %lu in readAutoIncrementValue(): %s",
                         (ulong) err.code, err.message);
-        auto_increment_value= ~(Uint64)0;
+        stats.auto_increment_value= ~(ulonglong)0;
       }
       else
-        auto_increment_value= (ulonglong)auto_increment_value64;
+        stats.auto_increment_value= (ulonglong)auto_increment_value64;
     }
   }
   DBUG_VOID_RETURN;

@@ -3567,18 +3579,6 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
 {
   DBUG_ENTER("extra");
   switch (operation) {
-  case HA_EXTRA_RESET:                 /* Reset database to after open */
-    DBUG_PRINT("info", ("HA_EXTRA_RESET"));
-    DBUG_PRINT("info", ("Clearing condition stack"));
-    cond_clear();
-    /*
-     * Regular partition pruning will set the bitmap appropriately.
-     * Some queries like ALTER TABLE doesn't use partition pruning and
-     * thus the 'used_partitions' bitmap needs to be initialized
-     */
-    if (m_part_info)
-      bitmap_set_all(&m_part_info->used_partitions);
-    break;
   case HA_EXTRA_IGNORE_DUP_KEY:       /* Dup keys don't rollback everything*/
     DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
     if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)

@@ -3614,6 +3614,22 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
   DBUG_RETURN(0);
 }


+int ha_ndbcluster::reset()
+{
+  DBUG_ENTER("ha_ndbcluster::reset");
+  cond_clear();
+  /*
+    Regular partition pruning will set the bitmap appropriately.
+    Some queries like ALTER TABLE doesn't use partition pruning and
+    thus the 'used_partitions' bitmap needs to be initialized
+  */
+  if (m_part_info)
+    bitmap_set_all(&m_part_info->used_partitions);
+  DBUG_RETURN(0);
+}
+
+
 /*
   Start of an insert, remember number of rows to be inserted, it will
   be used in write_row and get_autoincrement to send an optimal number
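Note: for NDB the reset() migration carries extra weight because the old HA_EXTRA_RESET case also cleared the pushed-condition stack and reinitialised the used_partitions bitmap; both duties move verbatim into ha_ndbcluster::reset() above. A sketch of the resulting per-statement contract (member names from the hunk, bodies illustrative):

    // Illustrative shape of the new contract; the real method is in the hunk above.
    struct sketch_ndb_handler {
      void cond_clear() { /* free the pushed-condition stack */ }
      void mark_all_partitions_used() { /* bitmap_set_all(&used_partitions) */ }

      int reset()
      {
        cond_clear();                 // pushed conditions never outlive a statement
        mark_all_partitions_used();   // ALTER TABLE and friends skip pruning
        return 0;
      }
    };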
@@ -3730,7 +3746,7 @@ const char** ha_ndbcluster::bas_ext() const
 double ha_ndbcluster::scan_time()
 {
   DBUG_ENTER("ha_ndbcluster::scan_time()");
-  double res= rows2double(records*1000);
+  double res= rows2double(stats.records*1000);
   DBUG_PRINT("exit", ("table: %s value: %f",
                       m_tabname, res));
   DBUG_RETURN(res);

@@ -3814,8 +3830,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
 {
   int error=0;
   NdbTransaction* trans= NULL;
+
   DBUG_ENTER("external_lock");
-
   /*
     Check that this handler instance has a connection
     set up to the Ndb object of thd

@@ -3826,9 +3842,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   Ndb *ndb= thd_ndb->ndb;

-  DBUG_PRINT("enter", ("this: %x  thd: %lx  thd_ndb: %lx  "
+  DBUG_PRINT("enter", ("this: 0x%lx  thd: 0x%lx  thd_ndb: %lx  "
                        "thd_ndb->lock_count: %d",
-                       this, thd, thd_ndb, thd_ndb->lock_count));
+                       (long) this, (long) thd, (long) thd_ndb,
+                       thd_ndb->lock_count));

   if (lock_type != F_UNLCK)
   {

@@ -5108,7 +5125,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,

   /* Drop the table from NDB */

-  int res;
+  int res= 0;
   if (h && h->m_table)
   {
     if (dict->dropTableGlobal(*h->m_table))

@@ -5136,7 +5153,6 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
       ndb_table_id= ndbtab_g.get_table()->getObjectId();
       ndb_table_version= ndbtab_g.get_table()->getObjectVersion();
 #endif
-      res= 0;
     }
     else if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT)
     {

@@ -5312,7 +5328,9 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
                 HA_NEED_READ_RANGE_BUFFER | \
                 HA_CAN_GEOMETRY | \
                 HA_CAN_BIT_FIELD | \
-                HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
+                HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | \
+                HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | \
+                HA_PARTIAL_COLUMN_READ

 ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
   handler(&ndbcluster_hton, table_arg),

@@ -5355,8 +5373,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
   m_tabname[0]= '\0';
   m_dbname[0]= '\0';

-  records= ~(ha_rows)0; // uninitialized
-  block_size= 1024;
+  stats.records= ~(ha_rows)0; // uninitialized
+  stats.block_size= 1024;

   for (i= 0; i < MAX_KEY; i++)
     ndb_init_index(m_index[i]);

@@ -5680,7 +5698,7 @@ int ndbcluster_table_exists_in_engine(THD* thd, const char *db,
 {
   Ndb* ndb;
   DBUG_ENTER("ndbcluster_table_exists_in_engine");
-  DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
+  DBUG_PRINT("enter", ("db: %s  name: %s", db, name));

   if (!(ndb= check_ndb_in_thd(thd)))
     DBUG_RETURN(HA_ERR_NO_CONNECTION);

@@ -5689,14 +5707,13 @@ int ndbcluster_table_exists_in_engine(THD* thd, const char *db,
   NdbDictionary::Dictionary::List list;
   if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0)
     ERR_RETURN(dict->getNdbError());
-  for (int i= 0 ; i < list.count ; i++)
+  for (uint i= 0 ; i < list.count ; i++)
   {
     NdbDictionary::Dictionary::List::Element& elmt= list.elements[i];
     if (my_strcasecmp(system_charset_info, elmt.database, db))
       continue;
     if (my_strcasecmp(system_charset_info, elmt.name, name))
       continue;
-    // table found
     DBUG_PRINT("info", ("Found table"));
     DBUG_RETURN(1);
   }

@@ -5827,6 +5844,8 @@ int ndbcluster_find_all_files(THD *thd)
   NDBDICT *dict= ndb->getDictionary();

   int unhandled, retries= 5, skipped;
+  LINT_INIT(unhandled);
+  LINT_INIT(skipped);
   do
   {
     NdbDictionary::Dictionary::List list;

@@ -6333,9 +6352,11 @@ void ha_ndbcluster::print_error(int error, myf errflag)
   if (error == HA_ERR_NO_PARTITION_FOUND)
   {
     char buf[100];
+    my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
     my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
              m_part_info->part_expr->null_value ? "NULL" :
              llstr(m_part_info->part_expr->val_int(), buf));
+    dbug_tmp_restore_column_map(table->read_set, old_map);
   }
   else
     handler::print_error(error, errflag);

@@ -6553,12 +6574,11 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
   DBUG_RETURN(10); /* Good guess when you don't know anything */
 }

-ulong ha_ndbcluster::table_flags(void) const
+ulonglong ha_ndbcluster::table_flags(void) const
 {
   if (m_ha_not_exact_count)
-    return m_table_flags | HA_NOT_EXACT_COUNT;
-  else
-    return m_table_flags;
+    return m_table_flags & ~HA_STATS_RECORDS_IS_EXACT;
+  return m_table_flags;
 }
 const char * ha_ndbcluster::table_type() const
 {

@@ -6592,10 +6612,6 @@ bool ha_ndbcluster::low_byte_first() const
   return TRUE;
 #endif
 }
-bool ha_ndbcluster::has_transactions()
-{
-  return TRUE;
-}
 const char* ha_ndbcluster::index_type(uint key_number)
 {
   switch (get_index_type(key_number)) {

@@ -9650,7 +9666,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,

     for (i= 0; i < part_info->part_field_list.elements; i++)
     {
-      NDBCOL *col= tab->getColumn(fields[i]->fieldnr - 1);
+      NDBCOL *col= tab->getColumn(fields[i]->field_index);
       DBUG_PRINT("info",("setting dist key on %s", col->getName()));
       col->setPartitionKey(TRUE);
     }

@@ -9740,7 +9756,7 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
   {
     Field *field= table->field[i];
     const NDBCOL *col= tab->getColumn(field->field_name);
-    if (field->add_index &&
+    if ((field->flags & FIELD_IN_ADD_INDEX) &&
         col->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
     {
       DBUG_PRINT("info", ("add/drop index not supported for disk stored column"));

@@ -10016,10 +10032,11 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
   const NDBTAB *tab;
   int err;
   DBUG_ENTER("ha_ndbcluster::get_no_parts");
+  LINT_INIT(err);

   set_dbname(name);
   set_tabname(name);
-  do
+  for (;;)
   {
     if (check_ndb_connection())
     {

@@ -10033,22 +10050,21 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
       ERR_BREAK(dict->getNdbError(), err);
     *no_parts= ndbtab_g.get_table()->getFragmentCount();
     DBUG_RETURN(FALSE);
-  } while (1);
+  }

 end:
   print_error(err, MYF(0));
   DBUG_RETURN(TRUE);
 }

-static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
+static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables,
+                                       COND *cond)
 {
   TABLE* table= tables->table;
   Ndb *ndb= check_ndb_in_thd(thd);
   NdbDictionary::Dictionary* dict= ndb->getDictionary();
   NdbDictionary::Dictionary::List dflist;
   NdbError ndberr;
-  unsigned i;
-
+  uint i;
   DBUG_ENTER("ndbcluster_fill_files_table");

   dict->listObjects(dflist, NdbDictionary::Object::Datafile);

@@ -10060,12 +10076,13 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
   {
     NdbDictionary::Dictionary::List::Element& elt = dflist.elements[i];
     Ndb_cluster_connection_node_iter iter;
-    unsigned id;
+    uint id;

     g_ndb_cluster_connection->init_get_next_node(iter);

     while ((id= g_ndb_cluster_connection->get_next_node(iter)))
     {
+      uint c= 0;
       NdbDictionary::Datafile df= dict->getDatafile(id, elt.name);
       ndberr= dict->getNdbError();
       if(ndberr.classification != NdbError::NoError)

@@ -10083,7 +10100,6 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
         ERR_RETURN(ndberr);
       }

-      int c= 0;
       table->field[c++]->set_null(); // FILE_ID
       table->field[c++]->store(elt.name, strlen(elt.name),
                                system_charset_info);

@@ -350,7 +350,12 @@ class Ndb_item {
     const Item *item= value.item;

     if (item && field)
-      ((Item *)item)->save_in_field(field, false);
+    {
+      my_bitmap_map *old_map=
+        dbug_tmp_use_all_columns(field->table, field->table->write_set);
+      ((Item *)item)->save_in_field(field, FALSE);
+      dbug_tmp_restore_column_map(field->table->write_set, old_map);
+    }
   };

 static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun)

@@ -621,12 +626,13 @@ class ha_ndbcluster: public handler
   void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
   int extra(enum ha_extra_function operation);
   int extra_opt(enum ha_extra_function operation, ulong cache_size);
+  int reset();
   int external_lock(THD *thd, int lock_type);
   int start_stmt(THD *thd, thr_lock_type lock_type);
   void print_error(int error, myf errflag);
   const char * table_type() const;
   const char ** bas_ext() const;
-  ulong table_flags(void) const;
+  ulonglong table_flags(void) const;
   void prepare_for_alter();
   int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys);
   int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys);

@@ -653,7 +659,6 @@ class ha_ndbcluster: public handler
                              enum thr_lock_type lock_type);

   bool low_byte_first() const;
-  bool has_transactions();

   virtual bool is_injective() const { return true; }

@@ -691,7 +696,7 @@ static void set_tabname(const char *pathname, char *tabname);
     AND ... AND pushed_condN)
   or less restrictive condition, depending on handler's capabilities.

-  handler->extra(HA_EXTRA_RESET) call empties the condition stack.
+  handler->reset() call empties the condition stack.
   Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the
   condition stack.
   The current implementation supports arbitrary AND/OR nested conditions

@@ -270,6 +270,14 @@ ndbcluster_binlog_close_table(THD *thd, NDB_SHARE *share)
   DBUG_VOID_RETURN;
 }


+/*
+  Creates a TABLE object for the ndb cluster table
+
+  NOTES
+    This does not open the underlying table
+*/
+
 static int
 ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
                              TABLE_SHARE *table_share, TABLE *table,

@@ -325,6 +333,8 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
   share->table_share= table_share;
   DBUG_ASSERT(share->table == 0);
   share->table= table;
+  /* We can't use 'use_all_columns()' as the file object is not setup yet */
+  table->column_bitmaps_set_no_signal(&table->s->all_set, &table->s->all_set);
 #ifndef DBUG_OFF
   dbug_print_table("table", table);
 #endif

@@ -358,7 +368,7 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
   {
     bitmap_init(&share->subscriber_bitmap[i],
                 (Uint32*)alloc_root(mem_root, max_ndb_nodes/8),
-                max_ndb_nodes, false);
+                max_ndb_nodes, FALSE);
     bitmap_clear_all(&share->subscriber_bitmap[i]);
   }
 }

@@ -886,6 +896,7 @@ static void ndbcluster_get_schema(NDB_SHARE *share,
   /* unpack blob values */
   byte* blobs_buffer= 0;
   uint blobs_buffer_size= 0;
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
   {
     ptrdiff_t ptrdiff= 0;
     int ret= get_ndb_blobs_value(table, share->ndb_value[0],

@@ -895,7 +906,7 @@ static void ndbcluster_get_schema(NDB_SHARE *share,
     {
       my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
       DBUG_PRINT("info", ("blob read error"));
-      DBUG_ASSERT(false);
+      DBUG_ASSERT(FALSE);
     }
   }
   /* db varchar 1 length byte */

@@ -947,6 +958,7 @@ static void ndbcluster_get_schema(NDB_SHARE *share,
   s->type= ((Field_long *)*field)->val_int();
   /* free blobs buffer */
   my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+  dbug_tmp_restore_column_map(table->read_set, old_map);
 }

 /*

@@ -1241,7 +1253,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
   {
     int i, updated= 0;
     int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
-    bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, false);
+    bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, FALSE);
     bitmap_set_all(&schema_subscribers);
     (void) pthread_mutex_lock(&schema_share->mutex);
     for (i= 0; i < no_storage_nodes; i++)

@@ -1717,7 +1729,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
         Cluster_schema *schema= (Cluster_schema *)
           sql_alloc(sizeof(Cluster_schema));
         MY_BITMAP slock;
-        bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, false);
+        bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, FALSE);
         uint node_id= g_ndb_cluster_connection->node_id();
         ndbcluster_get_schema(tmp_share, schema);
         enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type;

@@ -1956,7 +1968,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
         pthread_mutex_unlock(&ndbcluster_mutex);
         continue;
       }
-      NDB_SHARE *share= get_share(key, 0, false, false);
+      NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
       switch (schema_type)
       {
       case SOT_DROP_DB:

@@ -2020,7 +2032,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
         }
         break;
       default:
-        DBUG_ASSERT(false);
+        DBUG_ASSERT(FALSE);
       }
       if (share)
       {

@@ -2097,18 +2109,20 @@ static int open_binlog_index(THD *thd, TABLE_LIST *tables,
   }
   *binlog_index= tables->table;
   thd->proc_info= save_proc_info;
+  (*binlog_index)->use_all_columns();
   return 0;
 }


 /*
   Insert one row in the binlog_index
 */

 int ndb_add_binlog_index(THD *thd, void *_row)
 {
   Binlog_index_row &row= *(Binlog_index_row *) _row;
+  int error= 0;
+  bool need_reopen;

   /*
     Turn of binlogging to prevent the table changes to be written to
     the binary log.

@@ -2150,10 +2164,9 @@ int ndb_add_binlog_index(THD *thd, void *_row)
   binlog_index->field[5]->store(row.n_deletes);
   binlog_index->field[6]->store(row.n_schemaops);

-  int r;
-  if ((r= binlog_index->file->ha_write_row(binlog_index->record[0])))
+  if ((error= binlog_index->file->ha_write_row(binlog_index->record[0])))
   {
-    sql_print_error("NDB Binlog: Writing row to binlog_index: %d", r);
+    sql_print_error("NDB Binlog: Writing row to binlog_index: %d", error);
+    error= -1;
     goto add_binlog_index_err;
   }

@@ -2287,7 +2300,7 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
   }

   /* Create share which is needed to hold replication information */
-  if (!(share= get_share(key, 0, true, true)))
+  if (!(share= get_share(key, 0, TRUE, TRUE)))
   {
     sql_print_error("NDB Binlog: "
                     "allocating table share for %s failed", key);

@@ -2438,7 +2451,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
     }
   }
   if (share->flags & NSF_BLOB_FLAG)
-    my_event.mergeEvents(true);
+    my_event.mergeEvents(TRUE);

   /* add all columns to the event */
   int n_cols= ndbtab->getNoOfColumns();

@@ -2626,7 +2639,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
   }

   if (share->flags & NSF_BLOB_FLAG)
-    op->mergeEvents(true); // currently not inherited from event
+    op->mergeEvents(TRUE); // currently not inherited from event

   DBUG_PRINT("info", ("share->ndb_value[0]: 0x%x",
                       share->ndb_value[0]));

@@ -2774,7 +2787,7 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
         share->op->getState() == NdbEventOperation::EO_EXECUTING &&
         dict->getNdbError().code != 4009)
     {
-      DBUG_ASSERT(false);
+      DBUG_ASSERT(FALSE);
       DBUG_RETURN(-1);
     }
   }

@@ -2902,7 +2915,7 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
   /* make sure to flush any pending events as they can be dependent
      on one of the tables being changed below
   */
-  thd->binlog_flush_pending_rows_event(true);
+  thd->binlog_flush_pending_rows_event(TRUE);

   switch (type)
   {

@@ -2986,7 +2999,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
   /* Potential buffer for the bitmap */
   uint32 bitbuf[128 / (sizeof(uint32) * 8)];
   bitmap_init(&b, n_fields <= sizeof(bitbuf) * 8 ? bitbuf : NULL,
-              n_fields, false);
+              n_fields, FALSE);
   bitmap_set_all(&b);

   /*

@@ -3019,7 +3032,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
     }
     ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
     int ret= trans.write_row(::server_id,
-                             injector::transaction::table(table, true),
+                             injector::transaction::table(table, TRUE),
                              &b, n_fields, table->record[0]);
     DBUG_ASSERT(ret == 0);
   }

@@ -3057,7 +3070,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
     ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
     DBUG_EXECUTE("info", print_records(table, table->record[n]););
     int ret= trans.delete_row(::server_id,
-                              injector::transaction::table(table, true),
+                              injector::transaction::table(table, TRUE),
                               &b, n_fields, table->record[n]);
     DBUG_ASSERT(ret == 0);
   }

@@ -3084,7 +3097,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
         since table has a primary key, we can do a write
         using only after values
       */
-      trans.write_row(::server_id, injector::transaction::table(table, true),
+      trans.write_row(::server_id, injector::transaction::table(table, TRUE),
                       &b, n_fields, table->record[0]);// after values
     }
     else

@@ -3104,7 +3117,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
       ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
       DBUG_EXECUTE("info", print_records(table, table->record[1]););
       int ret= trans.update_row(::server_id,
-                                injector::transaction::table(table, true),
+                                injector::transaction::table(table, TRUE),
                                 &b, n_fields,
                                 table->record[1], // before values
                                 table->record[0]);// after values

@@ -3196,7 +3209,7 @@ static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key,
     }
     pthread_mutex_init(&ndb_schema_object->mutex, MY_MUTEX_INIT_FAST);
     bitmap_init(&ndb_schema_object->slock_bitmap, ndb_schema_object->slock,
-                sizeof(ndb_schema_object->slock)*8, false);
+                sizeof(ndb_schema_object->slock)*8, FALSE);
     bitmap_clear_all(&ndb_schema_object->slock_bitmap);
     break;
   }

@@ -3607,7 +3620,7 @@ restart:
         inj->new_trans(thd, &trans);
       }
       DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
-      injector::transaction::table tbl(table, true);
+      injector::transaction::table tbl(table, TRUE);
       int ret= trans.use_table(::server_id, tbl);
       DBUG_ASSERT(ret == 0);
     }

@@ -3620,20 +3633,14 @@ restart:

       const LEX_STRING& name=table->s->table_name;
       DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
-      injector::transaction::table tbl(table, true);
+      injector::transaction::table tbl(table, TRUE);
       int ret= trans.use_table(::server_id, tbl);
       DBUG_ASSERT(ret == 0);
-
-      MY_BITMAP b;
-      uint32 bitbuf;
-      DBUG_ASSERT(table->s->fields <= sizeof(bitbuf) * 8);
-      bitmap_init(&b, &bitbuf, table->s->fields, false);
-      bitmap_set_all(&b);
       table->field[0]->store((longlong)::server_id);
       table->field[1]->store((longlong)gci);
       trans.write_row(::server_id,
-                      injector::transaction::table(table, true),
-                      &b, table->s->fields,
+                      injector::transaction::table(table, TRUE),
+                      &table->s->all_set, table->s->fields,
                       table->record[0]);
     }
     else

@@ -69,7 +69,8 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
                 MODULE create/delete handler object
 ****************************************************************************/

-static handler *partition_create_handler(TABLE_SHARE *share);
+static handler *partition_create_handler(TABLE_SHARE *share,
+                                         MEM_ROOT *mem_root);
 static uint partition_flags();
 static uint alter_table_flags(uint flags);


@@ -97,9 +98,16 @@ static int partition_initialize()
     New partition object
 */

-static handler *partition_create_handler(TABLE_SHARE *share)
+static handler *partition_create_handler(TABLE_SHARE *share,
+                                         MEM_ROOT *mem_root)
 {
-  return new ha_partition(share);
+  ha_partition *file= new (mem_root) ha_partition(share);
+  if (file && file->initialise_partition(mem_root))
+  {
+    delete file;
+    file= 0;
+  }
+  return file;
 }

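The hunk above switches handler creation from the thread's implicit allocator to an explicit MEM_ROOT passed by the caller, using placement new, and tears the object down again if second-phase initialisation fails. A minimal standalone sketch of that arena-owned construct-then-initialise pattern (plain C++; `Arena`/`Widget` are illustrative names, not MySQL's MEM_ROOT API — note the server can use plain `delete` because its handlers override operator delete as a no-op, while the sketch calls the destructor explicitly):

```cpp
#include <cstddef>
#include <new>
#include <vector>

// Toy arena standing in for a MEM_ROOT: objects are carved out of
// arena-owned storage and freed wholesale when the arena dies.
class Arena {
  std::vector<char*> blocks_;
public:
  void *alloc(std::size_t n) {
    char *p= new char[n];
    blocks_.push_back(p);
    return p;
  }
  ~Arena() { for (char *p : blocks_) delete[] p; }
};

// Placement new drawing from the arena; the matching placement delete
// is only invoked automatically if the constructor throws.
void *operator new(std::size_t n, Arena &a) { return a.alloc(n); }
void operator delete(void*, Arena&) {}

struct Widget {
  bool broken;
  explicit Widget(bool b) : broken(b) {}
  // Returns true on error, matching the server's bool convention.
  bool initialise(Arena&) { return broken; }
};

// Mirrors partition_create_handler(): construct in the arena, then
// destroy (destructor only; storage stays with the arena) on failure.
Widget *create_widget(Arena &a, bool broken)
{
  Widget *w= new (a) Widget(broken);
  if (w && w->initialise(a))
  {
    w->~Widget();
    w= nullptr;
  }
  return w;
}

int main()
{
  Arena a;
  return create_widget(a, false) ? 0 : 1;
}
```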
 /*

@@ -201,7 +209,6 @@ void ha_partition::init_handler_variables()
   m_reorged_parts= 0;
   m_added_file= NULL;
   m_tot_parts= 0;
-  m_has_transactions= 0;
   m_pkey_is_clustered= 0;
   m_lock_type= F_UNLCK;
   m_part_spec.start_part= NO_CURRENT_PART_ID;

@@ -273,7 +280,8 @@ ha_partition::~ha_partition()
   Initialise partition handler object

   SYNOPSIS
-    ha_initialise()
+    initialise_partition()
+    mem_root                    Allocate memory through this

   RETURN VALUE
     1                           Error

@@ -313,16 +321,16 @@ ha_partition::~ha_partition()

 */

-int ha_partition::ha_initialise()
+bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
 {
   handler **file_array, *file;
-  DBUG_ENTER("ha_partition::ha_initialise");
+  DBUG_ENTER("ha_partition::initialise_partition");

   if (m_create_handler)
   {
     m_tot_parts= m_part_info->get_tot_partitions();
     DBUG_ASSERT(m_tot_parts > 0);
-    if (new_handlers_from_part_info())
+    if (new_handlers_from_part_info(mem_root))
       DBUG_RETURN(1);
   }
   else if (!table_share || !table_share->normalized_path.str)

@@ -335,7 +343,7 @@ int ha_partition::ha_initialise()
     m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
     DBUG_RETURN(0);
   }
-  else if (get_from_handler_file(table_share->normalized_path.str))
+  else if (get_from_handler_file(table_share->normalized_path.str, mem_root))
   {
     mem_alloc_error(2);
     DBUG_RETURN(1);

@@ -349,12 +357,11 @@ int ha_partition::ha_initialise()
     other parameters are calculated on demand.
     HA_FILE_BASED is always set for partition handler since we use a
     special file for handling names of partitions, engine types.
-    HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPP_POS,
+    HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPLICATE_POS,
     HA_CAN_INSERT_DELAYED is disabled until further investigated.
   */
   m_table_flags= m_file[0]->table_flags();
   m_low_byte_first= m_file[0]->low_byte_first();
-  m_has_transactions= TRUE;
   m_pkey_is_clustered= TRUE;
   file_array= m_file;
   do

@@ -366,13 +373,11 @@ int ha_partition::ha_initialise()
       my_error(ER_MIX_HANDLER_ERROR, MYF(0));
       DBUG_RETURN(1);
     }
-    if (!file->has_transactions())
-      m_has_transactions= FALSE;
     if (!file->primary_key_is_clustered())
       m_pkey_is_clustered= FALSE;
     m_table_flags&= file->table_flags();
   } while (*(++file_array));
-  m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPP_POS |
+  m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
                     HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
   m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
   DBUG_RETURN(0);
@@ -1358,9 +1363,10 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
       uint j= 0;
       do
       {
-        if (!(new_file_array[part_count++]= get_new_handler(table->s,
-                                                            thd->mem_root,
-                                                            part_elem->engine_type)))
+        if (!(new_file_array[part_count++]=
+              get_new_handler(table->s,
+                              thd->mem_root,
+                              part_elem->engine_type)))
         {
           mem_alloc_error(sizeof(handler));
           DBUG_RETURN(ER_OUTOFMEMORY);

@@ -1614,7 +1620,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
   handler **file;
   DBUG_ENTER("del_ren_cre_table()");

-  if (get_from_handler_file(from))
+  if (get_from_handler_file(from, current_thd->mem_root))
     DBUG_RETURN(TRUE);
   DBUG_ASSERT(m_file_buffer);
   name_buffer_ptr= m_name_buffer_ptr;

@@ -1923,7 +1929,6 @@ void ha_partition::clear_handler_file()
   my_free((char*) m_file_buffer, MYF(MY_ALLOW_ZERO_PTR));
   my_free((char*) m_engine_array, MYF(MY_ALLOW_ZERO_PTR));
   m_file_buffer= NULL;
-  m_name_buffer_ptr= NULL;
   m_engine_array= NULL;
 }


@@ -1932,29 +1937,29 @@ void ha_partition::clear_handler_file()

   SYNOPSIS
     create_handlers()
+    mem_root                    Allocate memory through this

   RETURN VALUE
     TRUE                        Error
     FALSE                       Success
 */

-bool ha_partition::create_handlers()
+bool ha_partition::create_handlers(MEM_ROOT *mem_root)
 {
   uint i;
   uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
   DBUG_ENTER("create_handlers");

-  if (!(m_file= (handler **) sql_alloc(alloc_len)))
+  if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
     DBUG_RETURN(TRUE);
-  bzero(m_file, alloc_len);
+  bzero((char*) m_file, alloc_len);
   for (i= 0; i < m_tot_parts; i++)
   {
-    if (!(m_file[i]= get_new_handler(table_share, current_thd->mem_root,
+    if (!(m_file[i]= get_new_handler(table_share, mem_root,
                                      m_engine_array[i])))
       DBUG_RETURN(TRUE);
     DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]));
   }
   m_file[m_tot_parts]= 0;
   /* For the moment we only support partition over the same table engine */
   if (m_engine_array[0] == &myisam_hton)
   {

@@ -1975,13 +1980,14 @@ bool ha_partition::create_handlers()

   SYNOPSIS
     new_handlers_from_part_info()
+    mem_root                    Allocate memory through this

   RETURN VALUE
     TRUE                        Error
     FALSE                       Success
 */

-bool ha_partition::new_handlers_from_part_info()
+bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
 {
   uint i, j, part_count;
   partition_element *part_elem;

@@ -1990,12 +1996,12 @@ bool ha_partition::new_handlers_from_part_info()
   THD *thd= current_thd;
   DBUG_ENTER("ha_partition::new_handlers_from_part_info");

-  if (!(m_file= (handler **) sql_alloc(alloc_len)))
+  if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
   {
     mem_alloc_error(alloc_len);
     goto error_end;
   }
-  bzero(m_file, alloc_len);
+  bzero((char*) m_file, alloc_len);
   DBUG_ASSERT(m_part_info->no_parts > 0);

   i= 0;

@@ -2011,8 +2017,8 @@ bool ha_partition::new_handlers_from_part_info()
     {
       for (j= 0; j < m_part_info->no_subparts; j++)
       {
-        if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
-                                         part_elem->engine_type)))
+        if (!(m_file[part_count++]= get_new_handler(table_share, mem_root,
+                                                    part_elem->engine_type)))
           goto error;
         DBUG_PRINT("info", ("engine_type: %u",
                             (uint) ha_legacy_type(part_elem->engine_type)));

@@ -2020,7 +2026,7 @@ bool ha_partition::new_handlers_from_part_info()
     }
     else
     {
-      if (!(m_file[part_count++]= get_new_handler(table_share, thd->mem_root,
+      if (!(m_file[part_count++]= get_new_handler(table_share, mem_root,
                                                   part_elem->engine_type)))
         goto error;
       DBUG_PRINT("info", ("engine_type: %u",
@@ -2046,6 +2052,7 @@ error_end:
   SYNOPSIS
     get_from_handler_file()
     name                        Full path of table name
+    mem_root                    Allocate memory through this

   RETURN VALUE
     TRUE                        Error

@@ -2056,7 +2063,7 @@ error_end:
     partitions.
 */

-bool ha_partition::get_from_handler_file(const char *name)
+bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
 {
   char buff[FN_REFLEN], *address_tot_name_len;
   File file;

@@ -2095,7 +2102,8 @@ bool ha_partition::get_from_handler_file(const char *name)
     goto err2;
   for (i= 0; i < m_tot_parts; i++)
     engine_array[i]= ha_resolve_by_legacy_type(current_thd,
-              (enum legacy_db_type) *(uchar *) ((file_buffer) + 12 + i));
+                                               (enum legacy_db_type)
+                                               *(uchar *) ((file_buffer) + 12 + i));
   address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
   tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
   if (len_words != (tot_partition_words + tot_name_words + 4))

@@ -2105,7 +2113,7 @@ bool ha_partition::get_from_handler_file(const char *name)
   m_file_buffer= file_buffer;          // Will be freed in clear_handler_file()
   m_name_buffer_ptr= name_buffer_ptr;
   m_engine_array= engine_array;
-  if (!m_file && create_handlers())
+  if (!m_file && create_handlers(mem_root))
   {
     clear_handler_file();
     DBUG_RETURN(TRUE);

@@ -2159,7 +2167,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
   m_mode= mode;
   m_open_test_lock= test_if_locked;
   m_part_field_array= m_part_info->full_part_field_array;
-  if (get_from_handler_file(name))
+  if (get_from_handler_file(name, &table->mem_root))
     DBUG_RETURN(1);
   m_start_key.length= 0;
   m_rec0= table->record[0];

@@ -2196,6 +2204,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
       DBUG_RETURN(1);
   bitmap_set_all(&(m_part_info->used_partitions));

+  /* Recalculate table flags as they may change after open */
+  m_table_flags= m_file[0]->table_flags();
   file= m_file;
   do
   {

@@ -2207,7 +2217,11 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     m_no_locks+= (*file)->lock_count();
     name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
     set_if_bigger(ref_length, ((*file)->ref_length));
+    m_table_flags&= (*file)->table_flags();
   } while (*(++file));
+  m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
+                    HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
+  m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;

   /*
     Add 2 bytes for partition id in position ref length.

@@ -2570,6 +2584,7 @@ int ha_partition::write_row(byte * buf)
   if (table->next_number_field && buf == table->record[0])
     update_auto_increment();

+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
 #ifdef NOT_NEEDED
   if (likely(buf == rec0))
 #endif

@@ -2584,6 +2599,7 @@ int ha_partition::write_row(byte * buf)
     set_field_ptr(m_part_field_array, rec0, buf);
   }
 #endif
+  dbug_tmp_restore_column_map(table->read_set, old_map);
   if (unlikely(error))
     DBUG_RETURN(error);
   m_last_part= part_id;
@@ -4040,7 +4056,7 @@ void ha_partition::include_partition_fields_in_used_fields()

   do
   {
-    ha_set_bit_in_read_set((*ptr)->fieldnr);
+    bitmap_set_bit(table->read_set, (*ptr)->field_index);
   } while (*(++ptr));
   DBUG_VOID_RETURN;
 }

@@ -4130,7 +4146,7 @@ void ha_partition::info(uint flag)
     ulonglong nb_reserved_values;
     DBUG_PRINT("info", ("HA_STATUS_AUTO"));
     /* we don't want to reserve any values, it's pure information */
-    get_auto_increment(0, 0, 0, &auto_increment_value, &nb_reserved_values);
+    get_auto_increment(0, 0, 0, &stats.auto_increment_value, &nb_reserved_values);
     release_auto_increment();
   }
   if (flag & HA_STATUS_VARIABLE)

@@ -4155,12 +4171,12 @@ void ha_partition::info(uint flag)
       check_time:        Time of last check (only applicable to MyISAM)
       We report last time of all underlying handlers
     */
-    records= 0;
-    deleted= 0;
-    data_file_length= 0;
-    index_file_length= 0;
-    delete_length= 0;
-    check_time= 0;
+    stats.records= 0;
+    stats.deleted= 0;
+    stats.data_file_length= 0;
+    stats.index_file_length= 0;
+    stats.check_time= 0;
+    stats.delete_length= 0;
     file_array= m_file;
     do
     {

@@ -4168,22 +4184,22 @@ void ha_partition::info(uint flag)
       {
        file= *file_array;
        file->info(HA_STATUS_VARIABLE);
-        records+= file->records;
-        deleted+= file->deleted;
-        data_file_length+= file->data_file_length;
-        index_file_length+= file->index_file_length;
-        delete_length+= file->delete_length;
-        if (file->check_time > check_time)
-          check_time= file->check_time;
+        stats.records+= file->stats.records;
+        stats.deleted+= file->stats.deleted;
+        stats.data_file_length+= file->stats.data_file_length;
+        stats.index_file_length+= file->stats.index_file_length;
+        stats.delete_length+= file->delete_length;
+        if (file->stats.check_time > stats.check_time)
+          stats.check_time= file->stats.check_time;
      }
    } while (*(++file_array));
-    if (records < 2 &&
-        m_table_flags & HA_NOT_EXACT_COUNT)
-      records= 2;
-    if (records > 0)
-      mean_rec_length= (ulong) (data_file_length / records);
+    if (stats.records < 2 &&
+        !(m_table_flags & HA_STATS_RECORDS_IS_EXACT))
+      stats.records= 2;
+    if (stats.records > 0)
+      stats.mean_rec_length= (ulong) (stats.data_file_length / stats.records);
     else
-      mean_rec_length= 1; //? What should we set here
+      stats.mean_rec_length= 1; //? What should we set here
   }
   if (flag & HA_STATUS_CONST)
   {

@@ -4228,7 +4244,6 @@ void ha_partition::info(uint flag)
       We ignore it since it is never used
       block_size:  Block size used
       We set it to the value of the first handler
-      sortkey: Never used at any place so ignored
       ref_length: We set this to the value calculated
       and stored in local object
       create_time: Creation time of table

@@ -4240,7 +4255,7 @@ void ha_partition::info(uint flag)

     file= m_file[0];
     file->info(HA_STATUS_CONST);
-    create_time= file->create_time;
+    stats.create_time= file->stats.create_time;
     ref_length= m_ref_length;
   }
   if (flag & HA_STATUS_ERRKEY)

@@ -4264,14 +4279,14 @@ void ha_partition::info(uint flag)
       Used by SHOW commands
       We will report the maximum of these times
     */
-    update_time= 0;
+    stats.update_time= 0;
     file_array= m_file;
     do
     {
       file= *file_array;
       file->info(HA_STATUS_TIME);
-      if (file->update_time > update_time)
-        update_time= file->update_time;
+      if (file->stats.update_time > stats.update_time)
+        stats.update_time= file->stats.update_time;
     } while (*(++file_array));
   }
   DBUG_VOID_RETURN;

@@ -4285,17 +4300,17 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
   file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
              HA_STATUS_NO_LOCK);

-  stat_info->records= file->records;
-  stat_info->mean_rec_length= file->mean_rec_length;
-  stat_info->data_file_length= file->data_file_length;
-  stat_info->max_data_file_length= file->max_data_file_length;
-  stat_info->index_file_length= file->index_file_length;
-  stat_info->delete_length= file->delete_length;
-  stat_info->create_time= file->create_time;
-  stat_info->update_time= file->update_time;
-  stat_info->check_time= file->check_time;
+  stat_info->records= file->stats.records;
+  stat_info->mean_rec_length= file->stats.mean_rec_length;
+  stat_info->data_file_length= file->stats.data_file_length;
+  stat_info->max_data_file_length= file->stats.max_data_file_length;
+  stat_info->index_file_length= file->stats.index_file_length;
+  stat_info->delete_length= file->stats.delete_length;
+  stat_info->create_time= file->stats.create_time;
+  stat_info->update_time= file->stats.update_time;
+  stat_info->check_time= file->stats.check_time;
   stat_info->check_sum= 0;
-  if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
+  if (file->ha_table_flags() & HA_HAS_CHECKSUM)
     stat_info->check_sum= file->checksum();
   return;
 }
@@ -4366,22 +4381,6 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,

   2) Parameters used by some non-MyISAM handlers
   ----------------------------------------------
-  HA_EXTRA_RETRIEVE_ALL_COLS:
-    Many handlers have implemented optimisations to avoid fetching all
-    fields when retrieving data. In certain situations all fields need
-    to be retrieved even though the query_id is not set on all field
-    objects.
-
-    It is called from copy_data_between_tables where all fields are
-    copied without setting query_id before calling the handlers.
-    It is called from UPDATE statements when the fields of the index
-    used is updated or ORDER BY is used with UPDATE.
-    And finally when calculating checksum of a table using the CHECKSUM
-    command.
-  HA_EXTRA_RETRIEVE_PRIMARY_KEY:
-    In some situations it is mandatory to retrieve primary key fields
-    independent of the query id's. This extra flag specifies that fetch
-    of primary key fields is mandatory.
   HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
     This is a strictly InnoDB feature that is more or less undocumented.
     When it is activated InnoDB copies field by field from its fetch

@@ -4530,7 +4529,7 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
   4) Parameters only used by temporary tables for query processing
   ----------------------------------------------------------------
   HA_EXTRA_RESET_STATE:
-    Same as HA_EXTRA_RESET except that buffers are not released. If there is
+    Same as reset() except that buffers are not released. If there is
     a READ CACHE it is reinit'ed. A cache is reinit'ed to restart reading
     or to change type of cache between READ CACHE and WRITE CACHE.

@@ -4569,8 +4568,9 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
   HA_EXTRA_FLUSH_CACHE:
     Flush WRITE CACHE in MyISAM. It is only from one place in the code.
     This is in sql_insert.cc where it is called if the table_flags doesn't
-    contain HA_DUPP_POS. The only handler having the HA_DUPP_POS set is the
-    MyISAM handler and so the only handler not receiving this call is MyISAM.
+    contain HA_DUPLICATE_POS. The only handler having the HA_DUPLICATE_POS
+    set is the MyISAM handler and so the only handler not receiving this
+    call is MyISAM.
     Thus in effect this call is called but never used. Could be removed
     from sql_insert.cc
   HA_EXTRA_NO_USER_CHANGE:

@@ -4614,8 +4614,6 @@ int ha_partition::extra(enum ha_extra_function operation)
   /* Category 2), used by non-MyISAM handlers */
   case HA_EXTRA_IGNORE_DUP_KEY:
   case HA_EXTRA_NO_IGNORE_DUP_KEY:
-  case HA_EXTRA_RETRIEVE_ALL_COLS:
-  case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
   case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
   {
     if (!m_myisam)

@@ -4681,8 +4679,7 @@ int ha_partition::extra(enum ha_extra_function operation)
     0                   Success

   DESCRIPTION
-    This will in the future be called instead of extra(HA_EXTRA_RESET) as this
-    is such a common call
+    Called at end of each statement to reset buffers
 */

 int ha_partition::reset(void)

@@ -5090,14 +5087,16 @@ void ha_partition::print_error(int error, myf errflag)

   /* Should probably look for my own errors first */
   /* monty: needs to be called for the last used partition ! */
-  DBUG_PRINT("enter", ("error = %d", error));
+  DBUG_PRINT("enter", ("error: %d", error));

   if (error == HA_ERR_NO_PARTITION_FOUND)
   {
     char buf[100];
+    my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
     my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
              m_part_info->part_expr->null_value ? "NULL" :
              llstr(m_part_info->part_expr->val_int(), buf));
+    dbug_tmp_restore_column_map(table->read_set, old_map);
   }
   else
     m_file[0]->print_error(error, errflag);

@@ -5284,14 +5283,13 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
 {
   ulonglong first_value_part, last_value_part, nb_reserved_values_part,
     last_value;
+  handler **pos, **end;
   DBUG_ENTER("ha_partition::get_auto_increment");

   *first_value= 0;
   last_value= ULONGLONG_MAX;
-  for (uint i= 0; i < m_tot_parts; i++)
+  for (pos= m_file, end= m_file + m_tot_parts; pos != end; pos++)
   {
-    m_file[i]->get_auto_increment(offset, increment, nb_desired_values,
-                                  &first_value_part, &nb_reserved_values_part);
+    (*pos)->get_auto_increment(offset, increment, nb_desired_values,
+                               &first_value_part, &nb_reserved_values_part);
     if (first_value_part == ~(ulonglong)(0)) // error in one partition
     {
       *first_value= first_value_part;

sql/ha_partition.h
@@ -95,7 +95,6 @@ private:
   uint m_rec_length;                     // Local copy of record length

   bool m_ordered;                        // Ordered/Unordered index scan
-  bool m_has_transactions;               // Can we support transactions
   bool m_pkey_is_clustered;              // Is primary key clustered
   bool m_create_handler;                 // Handler used to create table
   bool m_is_sub_partitioned;             // Is subpartitioned

@@ -157,7 +156,7 @@ public:
     enable later calls of the methods to retrieve constants from the under-
     lying handlers. Returns false if not successful.
   */
-  int ha_initialise();
+  bool initialise_partition(MEM_ROOT *mem_root);

   /*
     -------------------------------------------------------------------------

@@ -208,25 +207,24 @@ private:
     delete_table, rename_table and create uses very similar logic which
     is packed into this routine.
   */
-  uint del_ren_cre_table(const char *from,
-                         const char *to= NULL,
-                         TABLE *table_arg= NULL,
-                         HA_CREATE_INFO *create_info= NULL);
+  uint del_ren_cre_table(const char *from, const char *to,
+                         TABLE *table_arg, HA_CREATE_INFO *create_info);
   /*
     One method to create the table_name.par file containing the names of the
     underlying partitions, their engine and the number of partitions.
     And one method to read it in.
   */
   bool create_handler_file(const char *name);
-  bool get_from_handler_file(const char *name);
-  bool new_handlers_from_part_info();
-  bool create_handlers();
+  bool get_from_handler_file(const char *name, MEM_ROOT *mem_root);
+  bool new_handlers_from_part_info(MEM_ROOT *mem_root);
+  bool create_handlers(MEM_ROOT *mem_root);
   void clear_handler_file();
   void set_up_table_before_create(TABLE *table_arg,
                                   const char *partition_name_with_path,
                                   HA_CREATE_INFO *info,
                                   uint part_id);
   partition_element *find_partition_element(uint part_id);

 public:

   /*

@@ -588,7 +586,7 @@ public:
     NULLable field.
     (BDB, HEAP, MyISAM, NDB, InnoDB)

-  HA_DUPP_POS:
+  HA_DUPLICATE_POS:
     Tells that the position of the conflicting duplicate-key record is
     stored in table->file->dupp_ref. (insert uses rnd_pos() on this to
     find the duplicated row)

@@ -609,11 +607,10 @@ public:
     with hidden primary key)
     (No handler has this limitation currently)

-  HA_NOT_EXACT_COUNT:
+  HA_STATS_RECORDS_IS_EXACT:
     Does the counter of records after the info call specify an exact
-    value or not. If it doesn't this flag is set.
+    value or not. If it does this flag is set.
     Only MyISAM and HEAP uses exact count.
-    (MyISAM, HEAP, BDB, InnoDB, NDB, Federated)

   HA_CAN_INSERT_DELAYED:
     Can the storage engine support delayed inserts.

@@ -676,7 +673,7 @@ public:
     index scan module.
     (NDB)
   */
-  virtual ulong table_flags() const
+  virtual ulonglong table_flags() const
   { return m_table_flags; }

   /*

@@ -770,13 +767,6 @@ public:
   virtual uint extra_rec_buf_length() const;
   virtual uint min_record_length(uint options) const;

-  /*
-    Transactions on the table is supported if all handlers below support
-    transactions.
-  */
-  virtual bool has_transactions()
-  { return m_has_transactions; }
-
   /*
     Primary key is clustered can only be true if all underlying handlers have
     this feature.

sql/handler.cc
@@ -44,19 +44,20 @@
 #include "ha_innodb.h"
 #endif

-/* While we have legacy_db_type, we have this array to
-   check for dups and to find handlerton from legacy_db_type.
-   Remove when legacy_db_type is finally gone */
-static handlerton *installed_htons[128];
+/*
+  While we have legacy_db_type, we have this array to
+  check for dups and to find handlerton from legacy_db_type.
+  Remove when legacy_db_type is finally gone
+*/
 st_plugin_int *hton2plugin[MAX_HA];

+#define BITMAP_STACKBUF_SIZE (128/8)
+static handlerton *installed_htons[128];

 KEY_CREATE_INFO default_key_create_info= { HA_KEY_ALG_UNDEF, 0, {NullS,0} };

 /* static functions defined in this file */

-static handler *create_default(TABLE_SHARE *table);
+static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root);

 static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES;


@@ -126,8 +127,7 @@ handlerton *ha_resolve_by_name(THD *thd, LEX_STRING *name)

 const char *ha_get_storage_engine(enum legacy_db_type db_type)
 {
-  switch (db_type)
-  {
+  switch (db_type) {
   case DB_TYPE_DEFAULT:
     return "DEFAULT";
   case DB_TYPE_UNKNOWN:

@@ -141,17 +141,15 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type)
 }


-static handler *create_default(TABLE_SHARE *table)
+static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root)
 {
   handlerton *hton= ha_resolve_by_legacy_type(current_thd, DB_TYPE_DEFAULT);
-  return (hton && hton->create) ? hton->create(table) : NULL;
+  return (hton && hton->create) ? hton->create(table, mem_root) : NULL;
 }


 handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
 {
-  switch (db_type)
-  {
+  switch (db_type) {
   case DB_TYPE_DEFAULT:
     return (thd->variables.table_type != NULL) ?
             thd->variables.table_type :

@@ -204,36 +202,23 @@ handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
 handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
                          handlerton *db_type)
 {
-  handler *file= NULL;
+  handler *file;
   DBUG_ENTER("get_new_handler");
   DBUG_PRINT("enter", ("alloc: 0x%lx", (long) alloc));

+  if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create)
+  {
+    if ((file= db_type->create(share, alloc)))
+      file->init();
+    DBUG_RETURN(file);
+  }
   /*
-    handlers are allocated with new in the handlerton create() function
-    we need to set the thd mem_root for these to be allocated correctly
+    Try the default table type
+    Here the call to current_thd() is ok as we call this function a lot of
+    times but we enter this branch very seldom.
   */
-  THD *thd= current_thd;
-  MEM_ROOT *thd_save_mem_root= thd->mem_root;
-  thd->mem_root= alloc;
-
-  if (db_type != NULL && db_type->state == SHOW_OPTION_YES && db_type->create)
-    file= db_type->create(share);
-
-  thd->mem_root= thd_save_mem_root;
-
-  if (!file)
-  {
-    handlerton *def= current_thd->variables.table_type;
-    /* Try first with 'default table type' */
-    if (db_type != def)
-      return get_new_handler(share, alloc, def);
-  }
-  if (file)
-  {
-    if (file->ha_initialise())
-    {
-      delete file;
-      file=0;
-    }
-  }
-  return file;
+  DBUG_RETURN(get_new_handler(share, alloc,
                              current_thd->variables.table_type));
 }

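The rewritten get_new_handler() above drops the old save-and-restore of thd->mem_root in favour of a direct create(share, alloc) call, runs init() on the fresh handler to cache its table flags, and recurses once to the session's default engine when the requested one is unavailable. A standalone sketch of that create-then-init-with-fallback shape (plain C++; the names are illustrative, not the server's types):

```cpp
#include <iostream>

// Illustrative engine descriptor: a factory function plus an
// availability switch, loosely mirroring handlerton::create/state.
struct Engine {
  bool available;
  int  (*create)();   // returns a handle id, 0 on failure
};

static int create_heap() { return 42; }
static Engine heap_engine    = { true,  create_heap };
static Engine missing_engine = { false, nullptr };
static Engine *default_engine= &heap_engine;   // assumed always available

// Mirrors get_new_handler(): try the requested engine, initialise the
// result, otherwise fall back to the default engine exactly once.
int get_new_handle(Engine *e)
{
  if (e && e->available && e->create)
  {
    int h= e->create();
    // an init step would cache per-handle state here (cf. handler::init())
    return h;
  }
  return get_new_handle(default_engine);  // seldom-taken fallback branch
}

int main()
{
  std::cout << get_new_handle(&missing_engine) << '\n';  // prints 42
  return 0;
}
```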
@@ -244,11 +229,13 @@ handler *get_ha_partition(partition_info *part_info)
   DBUG_ENTER("get_ha_partition");
   if ((partition= new ha_partition(part_info)))
   {
-    if (partition->ha_initialise())
+    if (partition->initialise_partition(current_thd->mem_root))
     {
       delete partition;
       partition= 0;
     }
+    else
+      partition->init();
   }
   else
   {

@@ -1335,7 +1322,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
       ! (file= get_new_handler(&dummy_share, thd->mem_root, table_type)))
     DBUG_RETURN(ENOENT);

-  if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED))
+  if (lower_case_table_names == 2 && !(file->ha_table_flags() & HA_FILE_BASED))
   {
     /* Ensure that table handler get path in lower case */
     strmov(tmp_path, path);

@@ -1418,6 +1405,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,

   table= table_arg;
   DBUG_ASSERT(table->s == table_share);
+  DBUG_ASSERT(alloc_root_inited(&table->mem_root));

   if ((error= open(name, mode, test_if_locked)))
   {

@@ -1439,106 +1427,23 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
       table->db_stat|= HA_READ_ONLY;
     (void) extra(HA_EXTRA_NO_READCHECK);	// Not needed in SQL

-    DBUG_ASSERT(alloc_root_inited(&table->mem_root));
-
     if (!(ref= (byte*) alloc_root(&table->mem_root, ALIGN_SIZE(ref_length)*2)))
     {
       close();
       error= HA_ERR_OUT_OF_MEM;
     }
     else
-      dupp_ref= ref+ALIGN_SIZE(ref_length);
-
-    if (ha_allocate_read_write_set(table->s->fields))
-      error= 1;
+      dup_ref= ref+ALIGN_SIZE(ref_length);
+    cached_table_flags= table_flags();
   }
   DBUG_RETURN(error);
 }


-int handler::ha_initialise()
-{
-  DBUG_ENTER("ha_initialise");
-  DBUG_RETURN(FALSE);
-}
-
-
-/*
-  Initalize bit maps for used fields
-
-  Called from open_table_from_share()
-*/
-
-int handler::ha_allocate_read_write_set(ulong no_fields)
-{
-  uint bitmap_size= bitmap_buffer_size(no_fields+1);
-  uint32 *read_buf, *write_buf;
-  DBUG_ENTER("ha_allocate_read_write_set");
-  DBUG_PRINT("enter", ("no_fields = %d", no_fields));
-
-  if (!multi_alloc_root(&table->mem_root,
-                        &read_set, sizeof(MY_BITMAP),
-                        &write_set, sizeof(MY_BITMAP),
-                        &read_buf, bitmap_size,
-                        &write_buf, bitmap_size,
-                        NullS))
-  {
-    DBUG_RETURN(TRUE);
-  }
-  bitmap_init(read_set, read_buf, no_fields+1, FALSE);
-  bitmap_init(write_set, write_buf, no_fields+1, FALSE);
-  table->read_set= read_set;
-  table->write_set= write_set;
-  ha_clear_all_set();
-  DBUG_RETURN(FALSE);
-}
-
-void handler::ha_clear_all_set()
-{
-  DBUG_ENTER("ha_clear_all_set");
-  bitmap_clear_all(read_set);
-  bitmap_clear_all(write_set);
-  bitmap_set_bit(read_set, 0);
-  bitmap_set_bit(write_set, 0);
-  DBUG_VOID_RETURN;
-}
-
-int handler::ha_retrieve_all_cols()
-{
-  DBUG_ENTER("handler::ha_retrieve_all_cols");
-  bitmap_set_all(read_set);
-  DBUG_RETURN(0);
-}
-
-int handler::ha_retrieve_all_pk()
-{
-  DBUG_ENTER("ha_retrieve_all_pk");
-  ha_set_primary_key_in_read_set();
-  DBUG_RETURN(0);
-}
-
-void handler::ha_set_primary_key_in_read_set()
-{
-  ulong prim_key= table->s->primary_key;
-  DBUG_ENTER("handler::ha_set_primary_key_in_read_set");
-  DBUG_PRINT("info", ("Primary key = %d", prim_key));
-  if (prim_key != MAX_KEY)
-  {
-    KEY_PART_INFO *key_part= table->key_info[prim_key].key_part;
-    KEY_PART_INFO *key_part_end= key_part +
-      table->key_info[prim_key].key_parts;
-    for (; key_part != key_part_end; ++key_part)
-      ha_set_bit_in_read_set(key_part->fieldnr);
-  }
-  DBUG_VOID_RETURN;
-}
-
-
 /*
   Read first row (only) from a table
   This is never called for InnoDB or BDB tables, as these table types
-  has the HA_NOT_EXACT_COUNT set.
+  has the HA_STATS_RECORDS_IS_EXACT set.
 */

 int handler::read_first_row(byte * buf, uint primary_key)

@@ -1554,7 +1459,7 @@ int handler::read_first_row(byte * buf, uint primary_key)
     scanning the table.
     TODO remove the test for HA_READ_ORDER
   */
-  if (deleted < 10 || primary_key >= MAX_KEY ||
+  if (stats.deleted < 10 || primary_key >= MAX_KEY ||
       !(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
   {
     (void) ha_rnd_init(1);

@@ -1806,6 +1711,29 @@ void handler::restore_auto_increment()
 }


+/*
+  MySQL signal that it changed the column bitmap
+
+  USAGE
+    This is for handlers that needs to setup their own column bitmaps.
+    Normally the handler should set up their own column bitmaps in
+    index_init() or rnd_init() and in any column_bitmaps_signal() call after
+    this.
+
+    The handler is allowed to do changes to the bitmap after an index_init or
+    rnd_init() call is made as after this, MySQL will not use the bitmap
+    for any program logic checking.
+*/
+
+void handler::column_bitmaps_signal()
+{
+  DBUG_ENTER("column_bitmaps_signal");
+  DBUG_PRINT("info", ("read_set: 0x%lx  write_set: 0x%lx", table->read_set,
+                      table->write_set));
+  DBUG_VOID_RETURN;
+}
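The new column_bitmaps_signal() hook is easiest to read as an observer callback: the server mutates the shared read/write column sets and then pings the handler, which may widen the sets for its own needs. A standalone sketch of that shape (plain C++, illustrative names only, not the server's MY_BITMAP machinery):

```cpp
#include <bitset>
#include <iostream>

// Illustrative stand-ins for table->read_set / table->write_set.
struct ColumnSets {
  std::bitset<64> read_set, write_set;
};

struct ToyHandler {
  ColumnSets *cols;
  unsigned pk_column= 0;

  // Called whenever the server changed the bitmaps. A handler that,
  // say, always needs the primary key can widen the read set here.
  void column_bitmaps_signal() {
    cols->read_set.set(pk_column);
  }
};

int main() {
  ColumnSets cols;
  ToyHandler h{&cols};

  cols.read_set.set(3);        // server marks column 3 as read
  h.column_bitmaps_signal();   // handler adds its own requirement

  std::cout << cols.read_set.count() << '\n';  // 2 columns now set
  return 0;
}
```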


 /*
   Reserves an interval of auto_increment values from the handler.


@@ -1822,7 +1750,6 @@ void handler::restore_auto_increment()
     If the function sets *first_value to ~(ulonglong)0 it means an error.
     If the function sets *nb_reserved_values to ULONGLONG_MAX it means it has
     reserved to "positive infinite".
-
 */

 void handler::get_auto_increment(ulonglong offset, ulonglong increment,

@@ -1834,6 +1761,9 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
   int error;

   (void) extra(HA_EXTRA_KEYREAD);
+  table->mark_columns_used_by_index_no_reset(table->s->next_number_index,
+                                             table->read_set);
+  column_bitmaps_signal();
   index_init(table->s->next_number_index, 1);
   if (!table->s->next_number_key_offset)
   {                                     // Autoincrement at key-start

@@ -1873,7 +1803,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
 }


-void handler::print_keydupp_error(uint key_nr, const char *msg)
+void handler::print_keydup_error(uint key_nr, const char *msg)
 {
   /* Write the duplicated key in the error message */
   char key[MAX_KEY_LENGTH];

@@ -1930,7 +1860,7 @@ void handler::print_error(int error, myf errflag)
     uint key_nr= get_dup_key(error);
     if ((int) key_nr >= 0)
     {
-      print_keydupp_error(key_nr, ER(ER_DUP_ENTRY));
+      print_keydup_error(key_nr, ER(ER_DUP_ENTRY));
       DBUG_VOID_RETURN;
     }
     textno= ER_DUP_KEY;

@@ -1941,12 +1871,14 @@ void handler::print_error(int error, myf errflag)
     uint key_nr= get_dup_key(error);
     if ((int) key_nr >= 0)
     {
+      uint max_length;
       /* Write the key in the error message */
       char key[MAX_KEY_LENGTH];
       String str(key,sizeof(key),system_charset_info);
       /* Table is opened and defined at this point */
       key_unpack(&str,table,(uint) key_nr);
-      uint max_length= MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_FOREIGN_DUPLICATE_KEY));
+      max_length= (MYSQL_ERRMSG_SIZE-
+                   (uint) strlen(ER(ER_FOREIGN_DUPLICATE_KEY)));
       if (str.length() >= max_length)
       {
         str.length(max_length-4);

@@ -2355,22 +2287,23 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
 }


-void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id)
+void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+                                         uint part_id)
 {
   info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
        HA_STATUS_NO_LOCK);
-  stat_info->records= records;
-  stat_info->mean_rec_length= mean_rec_length;
-  stat_info->data_file_length= data_file_length;
-  stat_info->max_data_file_length= max_data_file_length;
-  stat_info->index_file_length= index_file_length;
-  stat_info->delete_length= delete_length;
-  stat_info->create_time= create_time;
-  stat_info->update_time= update_time;
-  stat_info->check_time= check_time;
-  stat_info->check_sum= 0;
+  stat_info->records= stats.records;
+  stat_info->mean_rec_length= stats.mean_rec_length;
+  stat_info->data_file_length= stats.data_file_length;
+  stat_info->max_data_file_length= stats.max_data_file_length;
+  stat_info->index_file_length= stats.index_file_length;
+  stat_info->delete_length= stats.delete_length;
+  stat_info->create_time= stats.create_time;
+  stat_info->update_time= stats.update_time;
+  stat_info->check_time= stats.check_time;
+  stat_info->check_sum= 0;
   if (table_flags() & (ulong) HA_HAS_CHECKSUM)
     stat_info->check_sum= checksum();
   return;
 }

@@ -2414,7 +2347,7 @@ int ha_create_table(THD *thd, const char *path,

   name= share.path.str;
   if (lower_case_table_names == 2 &&
-      !(table.file->table_flags() & HA_FILE_BASED))
+      !(table.file->ha_table_flags() & HA_FILE_BASED))
   {
     /* Ensure that handler gets name in lower case */
     strmov(name_buff, name);

@@ -2493,7 +2426,7 @@ int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
   create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE;

   if (lower_case_table_names == 2 &&
-      !(table.file->table_flags() & HA_FILE_BASED))
+      !(table.file->ha_table_flags() & HA_FILE_BASED))
   {
     /* Ensure that handler gets name in lower case */
     my_casedn_str(files_charset_info, path);

@@ -2843,6 +2776,9 @@ int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
   multi_range_sorted= sorted;
   multi_range_buffer= buffer;

+  table->mark_columns_used_by_index_no_reset(active_index, table->read_set);
+  table->column_bitmaps_set(table->read_set, table->write_set);
+
   for (multi_range_curr= ranges, multi_range_end= ranges + range_count;
        multi_range_curr < multi_range_end;
        multi_range_curr++)

@@ -3085,7 +3021,7 @@ static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin,
   handlerton *hton= (handlerton *)plugin->data;
   handler *file;
   if (hton->state == SHOW_OPTION_YES && hton->create &&
-      (file= hton->create((TABLE_SHARE*) 0)))
+      (file= hton->create((TABLE_SHARE*) 0, current_thd->mem_root)))
   {
     List_iterator_fast<char> it(*found_exts);
     const char **ext, *old_ext;

@@ -3225,7 +3161,7 @@ namespace {
     char const *name;
   };

-  int table_name_compare(void const *a, void const *b)
+  static int table_name_compare(void const *a, void const *b)
   {
     st_table_data const *x = (st_table_data const*) a;
     st_table_data const *y = (st_table_data const*) b;

@@ -3235,7 +3171,7 @@ namespace {
     return res != 0 ? res : strcmp(x->name, y->name);
   }

-  bool check_table_binlog_row_based(THD *thd, TABLE *table)
+  static bool check_table_binlog_row_based(THD *thd, TABLE *table)
   {
     static st_table_data const ignore[] = {
       { "mysql", "event" },

@@ -3256,44 +3192,29 @@ namespace {
     DBUG_ASSERT(table->s->cached_row_logging_check == 0 ||
                 table->s->cached_row_logging_check == 1);

-    return
-      thd->current_stmt_binlog_row_based &&
-      thd && (thd->options & OPTION_BIN_LOG) &&
-      mysql_bin_log.is_open() &&
-      table->s->cached_row_logging_check;
+    return (thd->current_stmt_binlog_row_based &&
+            (thd->options & OPTION_BIN_LOG) &&
+            mysql_bin_log.is_open() &&
+            table->s->cached_row_logging_check);
   }
 }

-template<class RowsEventT> int binlog_log_row(TABLE* table,
+template<class RowsEventT> int binlog_log_row(TABLE *table,
                                               const byte *before_record,
                                               const byte *after_record)
 {
   if (table->file->is_injective())
     return 0;
   bool error= 0;
-  THD *const thd= current_thd;

-  if (check_table_binlog_row_based(thd, table))
+  if (check_table_binlog_row_based(table->in_use, table))
   {
-    MY_BITMAP cols;
-    /* Potential buffer on the stack for the bitmap */
-    uint32 bitbuf[BITMAP_STACKBUF_SIZE/sizeof(uint32)];
-    uint n_fields= table->s->fields;
-    my_bool use_bitbuf= n_fields <= sizeof(bitbuf)*8;
-    if (likely(!(error= bitmap_init(&cols,
-                                    use_bitbuf ? bitbuf : NULL,
-                                    (n_fields + 7) & ~7UL,
-                                    false))))
-    {
-      bitmap_set_all(&cols);
-      error=
-        RowsEventT::binlog_row_logging_function(thd, table,
-                                                table->file->has_transactions(),
-                                                &cols, table->s->fields,
-                                                before_record, after_record);
-      if (!use_bitbuf)
-        bitmap_free(&cols);
-    }
+    error=
+      RowsEventT::binlog_row_logging_function(table->in_use, table,
                                              table->file->has_transactions(),
+                                              &table->s->all_set,
+                                              table->s->fields,
+                                              before_record, after_record);
   }
   return error ? HA_ERR_RBR_LOGGING_FAILED : 0;
 }
@@ -3354,6 +3275,28 @@ int handler::ha_external_lock(THD *thd, int lock_type)
   DBUG_RETURN(0);
 }


+/*
+  Check handler usage and reset state of file to after 'open'
+*/
+
+int handler::ha_reset()
+{
+  DBUG_ENTER("ha_reset");
+  /* Check that we have called all proper deallocation functions */
+  DBUG_ASSERT((byte*) table->def_read_set.bitmap +
+              table->s->column_bitmap_size ==
+              (char*) table->def_write_set.bitmap);
+  DBUG_ASSERT(bitmap_is_set_all(&table->s->all_set));
+  DBUG_ASSERT(table->key_read == 0);
+  /* ensure that ha_index_end / ha_rnd_end has been called */
+  DBUG_ASSERT(inited == NONE);
+  /* Free cache used by filesort */
+  free_io_cache(table);
+  DBUG_RETURN(reset());
+}
+
+
 int handler::ha_write_row(byte *buf)
 {
   int error;

@@ -3397,10 +3340,26 @@ int handler::ha_delete_row(const byte *buf)
   return 0;
 }


+/*
+  use_hidden_primary_key() is called in case of an update/delete when
+  (table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
+  but we don't have a primary key
+*/
+
+void handler::use_hidden_primary_key()
+{
+  /* fallback to use all columns in the table to identify row */
+  table->use_all_columns();
+}
+
+
 /*
   Dummy function which accepts information about log files which is not needed
   by handlers
 */

 void signal_log_not_needed(struct handlerton, char *log_file)
 {

@@ -3408,6 +3367,7 @@ void signal_log_not_needed(struct handlerton, char *log_file)
   DBUG_VOID_RETURN;
 }


 #ifdef TRANS_LOG_MGM_EXAMPLE_CODE
 /*
   Example of transaction log management functions based on assumption that logs

@@ -3460,6 +3420,7 @@ err:
     really detect the log status and check that the file is a log of this
     handler.
 */

 enum log_status fl_get_log_status(char *log)
 {
   MY_STAT stat_buff;

@@ -3567,7 +3528,7 @@ enum handler_create_iterator_result
 fl_create_iterator(enum handler_iterator_type type,
                    struct handler_iterator *iterator)
 {
-  switch(type){
+  switch(type) {
   case HA_TRANSACTLOG_ITERATOR:
     return fl_log_iterator_buffer_init(iterator);
   default:

sql/handler.h
@@ -49,14 +49,18 @@

 /* Bits in table_flags() to show what database can do */

-/*
-  Can switch index during the scan with ::rnd_same() - not used yet.
-  see mi_rsame/heap_rsame/myrg_rsame
-*/
-#define HA_READ_RND_SAME       (1 << 0)
+#define HA_NO_TRANSACTIONS     (1 << 0) /* Doesn't support transactions */
+#define HA_PARTIAL_COLUMN_READ (1 << 1) /* read may not return all columns */
+#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
-#define HA_REC_NOT_IN_SEQ      (1 << 3) /* ha_info don't return recnumber;
-                                           It returns a position to ha_r_rnd */
+/*
+  The following should be set if the following is not true when scanning
+  a table with rnd_next()
+   - We will see all rows (including deleted ones)
+   - Row positions are 'table->s->db_record_offset' apart
+  If this flag is not set, filesort will do a position() call for each matched
+  row to be able to find the row later.
+*/
+#define HA_REC_NOT_IN_SEQ      (1 << 3)
 #define HA_CAN_GEOMETRY        (1 << 4)
 /*
   Reading keys in random order is as fast as reading keys in sort order

@@ -64,28 +68,41 @@
   filesort to decide if we should sort key + data or key + pointer-to-row
 */
 #define HA_FAST_KEY_READ       (1 << 5)
+/*
+  Set the following flag if we on delete should force all key to be read
+  and on update read all keys that changes
+*/
+#define HA_REQUIRES_KEY_COLUMNS_FOR_DELETE (1 << 6)
 #define HA_NULL_IN_KEY         (1 << 7) /* One can have keys with NULL */
-#define HA_DUPP_POS            (1 << 8) /* ha_position() gives dup row */
+#define HA_DUPLICATE_POS       (1 << 8) /* ha_position() gives dup row */
 #define HA_NO_BLOBS            (1 << 9) /* Doesn't support blobs */
 #define HA_CAN_INDEX_BLOBS     (1 << 10)
 #define HA_AUTO_PART_KEY       (1 << 11) /* auto-increment in multi-part key */
 #define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */
-#define HA_NOT_EXACT_COUNT     (1 << 13)
+#define HA_STATS_RECORDS_IS_EXACT (1 << 13) /* stats.records is exact */
 /*
   INSERT_DELAYED only works with handlers that uses MySQL internal table
   level locks
 */
 #define HA_CAN_INSERT_DELAYED  (1 << 14)
+/*
+  If we get the primary key columns for free when we do an index read.
+  It also implies that we have to retrieve the primary key when using
+  position() and rnd_pos().
+*/
 #define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15)
 /*
-  If HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS is set, it means that the engine can
-  do this: the position of an arbitrary record can be retrieved using
-  position() when the table has a primary key, effectively allowing random
-  access on the table based on a given record.
+  If HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is set, it means that position()
+  uses a primary key. Without a primary key, we can't call position().
 */
-#define HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS (1 << 16)
+#define HA_PRIMARY_KEY_REQUIRED_FOR_POSITION (1 << 16)
 #define HA_CAN_RTREEKEYS       (1 << 17)
 #define HA_NOT_DELETE_WITH_CACHE (1 << 18)
+/*
+  The following is set if we need a primary key to delete (and update) a row.
+  If there is no primary key, all columns need to be read on update and delete.
+*/
 #define HA_PRIMARY_KEY_REQUIRED_FOR_DELETE (1 << 19)
 #define HA_NO_PREFIX_CHAR_KEYS (1 << 20)
 #define HA_CAN_FULLTEXT        (1 << 21)
 #define HA_CAN_SQL_HANDLER     (1 << 22)
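With HA_HAS_RECORDS landing at bit 32 in the next hunk, the flag word no longer fits in 32 bits, which is why HA_NO_COPY_ON_ALTER switches to the LL(1) 64-bit literal and table_flags() widens to ulonglong throughout this change. A standalone illustration of why the width matters (illustrative flag names, not the server's macros):

```cpp
#include <cstdint>
#include <iostream>

// Once a flag set grows past bit 31, both the constants and the word
// holding them must be 64-bit; (1 << 32) on a 32-bit int is undefined.
const uint64_t FLAG_NO_COPY_ON_ALTER = 1ULL << 31;
const uint64_t FLAG_HAS_RECORDS      = 1ULL << 32;  // needs the 64-bit literal

int main() {
  uint64_t table_flags = FLAG_NO_COPY_ON_ALTER | FLAG_HAS_RECORDS;

  // Tests read naturally once everything is 64 bits wide.
  if (table_flags & FLAG_HAS_RECORDS)
    std::cout << "engine can report an exact row count\n";
  return 0;
}
```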
@@ -97,7 +114,8 @@
 #define HA_CAN_BIT_FIELD       (1 << 28) /* supports bit fields */
 #define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
 #define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
-#define HA_NO_COPY_ON_ALTER    (1 << 31)
+#define HA_NO_COPY_ON_ALTER    (LL(1) << 31)
+#define HA_HAS_RECORDS         (LL(1) << 32) /* records() gives exact count */

 /* bits in index_flags(index_number) for what you can do with index */
 #define HA_READ_NEXT           1 /* TODO really use this flag */

@@ -241,7 +259,7 @@ enum legacy_db_type

 enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
                 ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
-                ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT };
+                ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_PAGES };

 enum enum_binlog_func {
   BFN_RESET_LOGS= 1,

@@ -608,7 +626,7 @@ struct handlerton
   void *(*create_cursor_read_view)();
   void (*set_cursor_read_view)(void *);
   void (*close_cursor_read_view)(void *);
-  handler *(*create)(TABLE_SHARE *table);
+  handler *(*create)(TABLE_SHARE *table, MEM_ROOT *mem_root);
   void (*drop_database)(char* path);
   int (*panic)(enum ha_panic_function flag);
   int (*start_consistent_snapshot)(THD *thd);

@@ -807,11 +825,37 @@ typedef struct st_handler_buffer

 typedef struct system_status_var SSV;

+class ha_statistics
+{
+public:
+  ulonglong data_file_length;     /* Length of data file */
+  ulonglong max_data_file_length; /* Max length of data file */
+  ulonglong index_file_length;
+  ulonglong max_index_file_length;
+  ulonglong delete_length;        /* Free bytes */
+  ulonglong auto_increment_value;
+  ha_rows records;                /* Estimated records in table */
+  ha_rows deleted;                /* Deleted records */
+  ulong mean_rec_length;          /* physical reclength */
+  time_t create_time;             /* When table was created */
+  time_t check_time;
+  time_t update_time;
+  uint block_size;                /* index block size */
+
+  ha_statistics():
+    data_file_length(0), max_data_file_length(0),
+    index_file_length(0), delete_length(0), auto_increment_value(0),
+    records(0), deleted(0), mean_rec_length(0), create_time(0),
+    check_time(0), update_time(0), block_size(0)
+  {}
+};
+
 /*
   The handler class is the interface for dynamically loadable
   storage engines. Do not add ifdefs and take care when adding or
   changing virtual functions to avoid vtable confusion
 */
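Grouping the per-table statistics into one value-type object is what lets ha_partition::info() earlier in this diff sum child statistics member by member instead of juggling a dozen loose handler fields. A standalone sketch of that aggregation pattern (illustrative struct, not the server's ha_statistics):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Value-type statistics block, one per table/partition, default-zeroed
// in the spirit of ha_statistics' constructor.
struct Stats {
  uint64_t records= 0;
  uint64_t deleted= 0;
  uint64_t data_file_length= 0;
  int64_t  check_time= 0;
};

// Mirrors ha_partition::info(HA_STATUS_VARIABLE): sum the additive
// members and take the max of the time members across partitions.
Stats aggregate(const std::vector<Stats> &parts)
{
  Stats total;
  for (const Stats &p : parts)
  {
    total.records          += p.records;
    total.deleted          += p.deleted;
    total.data_file_length += p.data_file_length;
    total.check_time        = std::max(total.check_time, p.check_time);
  }
  return total;
}

int main()
{
  std::vector<Stats> parts= { {100, 1, 4096, 10}, {50, 0, 2048, 20} };
  Stats t= aggregate(parts);
  std::cout << t.records << ' ' << t.check_time << '\n';  // 150 20
  return 0;
}
```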

 class handler :public Sql_alloc
 {
   friend class ha_partition;

@@ -819,6 +863,7 @@ class handler :public Sql_alloc
 protected:
   struct st_table_share *table_share;   /* The table definition */
   struct st_table *table;               /* The current open table */
+  ulonglong cached_table_flags;         /* Set on init() and open() */

   virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
   virtual int index_end() { active_index=MAX_KEY; return 0; }

@@ -831,31 +876,18 @@ class handler :public Sql_alloc
   */
   virtual int rnd_init(bool scan) =0;
   virtual int rnd_end() { return 0; }

+  virtual ulonglong table_flags(void) const =0;
   void ha_statistic_increment(ulong SSV::*offset) const;

 private:
   virtual int reset() { return extra(HA_EXTRA_RESET); }
   ha_rows estimation_rows_to_insert;
   virtual void start_bulk_insert(ha_rows rows) {}
   virtual int end_bulk_insert() {return 0; }
 public:
   const handlerton *ht;                 /* storage engine of this handler */
   byte *ref;                            /* Pointer to current row */
-  byte *dupp_ref;                       /* Pointer to dupp row */
-  ulonglong data_file_length;           /* Length off data file */
-  ulonglong max_data_file_length;       /* Length off data file */
-  ulonglong index_file_length;
-  ulonglong max_index_file_length;
-  ulonglong delete_length;              /* Free bytes */
-  ulonglong auto_increment_value;
-  ha_rows records;                      /* Records in table */
-  ha_rows deleted;                      /* Deleted records */
-  ulong mean_rec_length;                /* physical reclength */
-  time_t create_time;                   /* When table was created */
-  time_t check_time;
-  time_t update_time;
+  byte *dup_ref;                        /* Pointer to duplicate row */
+
+  ha_statistics stats;

   /* The following are for read_multi_range */
   bool multi_range_sorted;

@@ -870,27 +902,20 @@ public:
   bool eq_range;

   uint errkey;                          /* Last dup key */
-  uint sortkey, key_used_on_scan;
+  uint key_used_on_scan;
   uint active_index;
   /* Length of ref (1-8 or the clustered key length) */
   uint ref_length;
-  uint block_size;                      /* index block size */
   FT_INFO *ft_handler;
   enum {NONE=0, INDEX, RND} inited;
   bool auto_increment_column_changed;
   bool implicit_emptied;                /* Can be !=0 only if HEAP */
   const COND *pushed_cond;
-  MY_BITMAP *read_set;
-  MY_BITMAP *write_set;

   handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
-    :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg),
-    ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
-    delete_length(0), auto_increment_value(0),
-    records(0), deleted(0), mean_rec_length(0),
-    create_time(0), check_time(0), update_time(0),
-    key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
-    ref_length(sizeof(my_off_t)), block_size(0),
+    :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg),
+    ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
+    ref_length(sizeof(my_off_t)),
     ft_handler(0), inited(NONE), implicit_emptied(0),
     pushed_cond(NULL)
   {}

@@ -898,6 +923,11 @@ public:
   {
     /* TODO: DBUG_ASSERT(inited == NONE); */
   }
+  /* This is called after create to allow us to set up cached variables */
+  void init()
+  {
+    cached_table_flags= table_flags();
+  }
   /*
     Check whether a handler allows to lock the table.


@@ -925,10 +955,9 @@ public:
   {
     return TRUE;
   }
-  virtual int ha_initialise();
   int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
   bool update_auto_increment();
-  void print_keydupp_error(uint key_nr, const char *msg);
+  void print_keydup_error(uint key_nr, const char *msg);
   virtual void print_error(int error, myf errflag);
   virtual bool get_error_message(int error, String *buf);
   uint get_dup_key(int error);

@@ -938,13 +967,19 @@ public:
     table_share= share;
   }
   virtual double scan_time()
-  { return ulonglong2double(data_file_length) / IO_SIZE + 2; }
+  { return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
   virtual double read_time(uint index, uint ranges, ha_rows rows)
   { return rows2double(ranges+rows); }
   virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
-  virtual bool has_transactions(){ return 0;}
+  bool has_transactions()
+  { return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
   virtual uint extra_rec_buf_length() const { return 0; }

+  /*
+    Number of rows in table. It will only be called if
+    (table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
+  */
+  virtual ha_rows records() { return stats.records; }
   /*
     Return upper bound of current number of records in the table
     (max. of how many records one will retrieve when doing a full table scan)

@@ -952,7 +987,7 @@ public:
     possible upper bound.
   */
   virtual ha_rows estimate_rows_upper_bound()
-  { return records+EXTRA_RECORDS; }
+  { return stats.records+EXTRA_RECORDS; }

   /*
     Get the row type from the storage engine. If this method returns

@@ -990,139 +1025,23 @@ public:
     inited=NONE;
     DBUG_RETURN(rnd_end());
   }
-  int ha_reset()
-  {
-    DBUG_ENTER("ha_reset");
-    ha_clear_all_set();
-    DBUG_RETURN(reset());
-  }
+  int ha_reset();

   /* this is necessary in many places, e.g. in HANDLER command */
   int ha_index_or_rnd_end()
   {
     return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
   }
-  /*
-    These are a set of routines used to enable handlers to only read/write
-    partial lists of the fields in the table. The bit vector is maintained
-    by the server part and is used by the handler at calls to read/write
-    data in the table.
-    It replaces the use of query id's for this purpose. The benefit is that
-    the handler can also set bits in the read/write set if it has special
-    needs and it is also easy for other parts of the server to interact
-    with the handler (e.g. the replication part for row-level logging).
-    The routines are all part of the general handler and are not possible
-    to override by a handler. A handler can however set/reset bits by
-    calling these routines.
-
-    The methods ha_retrieve_all_cols and ha_retrieve_all_pk are made
-    virtual to handle InnoDB specifics. If InnoDB doesn't need the
-    extra parameters HA_EXTRA_RETRIEVE_ALL_COLS and
-    HA_EXTRA_RETRIEVE_PRIMARY_KEY anymore then these methods need not be
-    virtual anymore.
-  */
-  virtual int ha_retrieve_all_cols();
-  virtual int ha_retrieve_all_pk();
-  void ha_set_all_bits_in_read_set()
-  {
-    DBUG_ENTER("ha_set_all_bits_in_read_set");
-    bitmap_set_all(read_set);
-    DBUG_VOID_RETURN;
-  }
-  void ha_set_all_bits_in_write_set()
-  {
-    DBUG_ENTER("ha_set_all_bits_in_write_set");
-    bitmap_set_all(write_set);
-    DBUG_VOID_RETURN;
-  }
+  longlong ha_table_flags() { return cached_table_flags; }
+
+  /*
+    Signal that the table->read_set and table->write_set table maps changed
+    The handler is allowed to set additional bits in the above map in this
+    call. Normally the handler should ignore all calls until we have done
+    a ha_rnd_init() or ha_index_init(), write_row(), update_row or delete_row()
+    as there may be several calls to this routine.
+  */
-  void ha_set_bit_in_read_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_set_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_set_bit(read_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_clear_bit_in_read_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_clear_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_clear_bit(read_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_set_bit_in_write_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_set_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_set_bit(write_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_clear_bit_in_write_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_clear_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_clear_bit(write_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_set_bit_in_rw_set(uint fieldnr, bool write_op)
|
||||
{
|
||||
DBUG_ENTER("ha_set_bit_in_rw_set");
|
||||
DBUG_PRINT("info", ("Set bit %u in read set", fieldnr));
|
||||
bitmap_set_bit(read_set, fieldnr);
|
||||
if (!write_op) {
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
else
|
||||
{
|
||||
DBUG_PRINT("info", ("Set bit %u in read and write set", fieldnr));
|
||||
bitmap_set_bit(write_set, fieldnr);
|
||||
}
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
bool ha_get_bit_in_read_set(uint fieldnr)
|
||||
{
|
||||
bool bit_set=bitmap_is_set(read_set,fieldnr);
|
||||
DBUG_ENTER("ha_get_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
|
||||
DBUG_RETURN(bit_set);
|
||||
}
|
||||
bool ha_get_bit_in_write_set(uint fieldnr)
|
||||
{
|
||||
bool bit_set=bitmap_is_set(write_set,fieldnr);
|
||||
DBUG_ENTER("ha_get_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
|
||||
DBUG_RETURN(bit_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_read_set()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_set_all(read_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_read_clear()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_clear_all(read_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_read_clear");
|
||||
DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_write_set()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_set_all(write_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_write_clear()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_clear_all(write_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_write_clear");
|
||||
DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
void ha_set_primary_key_in_read_set();
|
||||
int ha_allocate_read_write_set(ulong no_fields);
|
||||
void ha_clear_all_set();
|
||||
virtual void column_bitmaps_signal();
|
||||
uint get_index(void) const { return active_index; }
|
||||
virtual int open(const char *name, int mode, uint test_if_locked)=0;
|
||||
virtual int close(void)=0;
|
||||
|
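The ha_set_bit_in_read_set()/ha_get_bit_in_read_set() family removed above gives way to plain MY_BITMAP operations on table->read_set and table->write_set. A minimal standalone sketch of the underlying one-bit-per-column idea; ColumnSet and its members are illustrative stand-ins, not the server's MY_BITMAP:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for MY_BITMAP: one bit per column.
    struct ColumnSet {
      std::vector<uint32_t> words;
      explicit ColumnSet(unsigned n_fields) : words((n_fields + 31) / 32, 0) {}
      void set_bit(unsigned f)      { words[f / 32] |=  (1u << (f % 32)); }
      void clear_bit(unsigned f)    { words[f / 32] &= ~(1u << (f % 32)); }
      bool is_set(unsigned f) const { return (words[f / 32] >> (f % 32)) & 1u; }
    };

    int main() {
      ColumnSet read_set(10), write_set(10);
      read_set.set_bit(2);    // a SELECT reads column 2
      write_set.set_bit(5);   // an UPDATE writes column 5
      printf("col 2 read: %d, col 5 written: %d\n",
             (int) read_set.is_set(2), (int) write_set.is_set(5));
      return 0;
    }

The gain is that any server component can mark columns directly in the TABLE's maps, instead of going through per-handler wrapper methods.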
@@ -1275,6 +1194,13 @@ public:
{ return 0; }
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
{ return extra(operation); }

/*
Reset state of file to after 'open'
This function is called after every statement for all tables used
by that statement.
*/
virtual int reset() { return 0; }
/*
In an UPDATE or DELETE, if the row under the cursor was locked by another
transaction, and the engine used an optimistic read of the last

@@ -1416,7 +1342,6 @@ public:
/* The following can be called without an open handler */
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
virtual ulong table_flags(void) const =0;

virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
virtual void set_auto_partitions(partition_info *part_info) { return; }

@@ -1525,7 +1450,6 @@ public:
false otherwise
*/
virtual bool primary_key_is_clustered() { return FALSE; }

virtual int cmp_ref(const byte *ref1, const byte *ref2)
{
return memcmp(ref1, ref2, ref_length);

@@ -1541,10 +1465,12 @@ public:
cond_push()
cond Condition to be pushed. The condition tree must not be
modified by the by the caller.

RETURN
The 'remainder' condition that caller must use to filter out records.
NULL means the handler will not return rows that do not match the
passed condition.

NOTES
The pushed conditions form a stack (from which one can remove the
last pushed condition using cond_pop).

@@ -1552,7 +1478,7 @@ public:
AND ... AND pushed_condN)
or less restrictive condition, depending on handler's capabilities.

handler->extra(HA_EXTRA_RESET) call empties the condition stack.
handler->ha_reset() call empties the condition stack.
Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the
condition stack.
*/
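The NOTES above describe a per-handler stack of pushed conditions. A hedged sketch of that contract with illustrative names (the real behaviour lives behind each engine's cond_push()/cond_pop() implementations):

    #include <cstddef>
    #include <vector>

    struct Item;   // opaque condition node, as in the server

    // Engine-side view: cond_push() may keep the whole condition and return
    // NULL (no remainder for the server to evaluate), cond_pop() removes the
    // last pushed condition, and ha_reset() empties the stack.
    class PushdownStack {
      std::vector<const Item*> conds;
    public:
      const Item *cond_push(const Item *cond) {
        conds.push_back(cond);   // this sketch accepts everything...
        return NULL;             // ...so there is never a remainder
      }
      void cond_pop() { if (!conds.empty()) conds.pop_back(); }
      void reset()    { conds.clear(); }   // what ha_reset() does to the stack
    };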
@@ -1568,18 +1494,7 @@ public:
uint table_changes)
{ return COMPATIBLE_DATA_NO; }

private:
/*
Row-level primitives for storage engines. These should be
overridden by the storage engine class. To call these methods, use
the corresponding 'ha_*' method above.
*/
virtual int external_lock(THD *thd __attribute__((unused)),
int lock_type __attribute__((unused)))
{
return 0;
}

/* These are only called from sql_select for internal temporary tables */
virtual int write_row(byte *buf __attribute__((unused)))
{
return HA_ERR_WRONG_COMMAND;

@@ -1595,6 +1510,24 @@ private:
{
return HA_ERR_WRONG_COMMAND;
}
/*
use_hidden_primary_key() is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key
*/
virtual void use_hidden_primary_key();

private:
/*
Row-level primitives for storage engines. These should be
overridden by the storage engine class. To call these methods, use
the corresponding 'ha_*' method above.
*/
virtual int external_lock(THD *thd __attribute__((unused)),
int lock_type __attribute__((unused)))
{
return 0;
}
};

/* Some extern variables used with handlers */

77 sql/item.cc

@@ -551,6 +551,23 @@ bool Item_field::find_item_in_field_list_processor(byte *arg)
}

/*
Mark field in read_map

NOTES
This is used by filesort to register used fields in a a temporary
column read set or to register used fields in a view
*/

bool Item_field::register_field_in_read_map(byte *arg)
{
TABLE *table= (TABLE *) arg;
if (field->table == table || !table)
bitmap_set_bit(field->table->read_set, field->field_index);
return 0;
}

bool Item::check_cols(uint c)
{
if (c != 1)
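register_field_in_read_map() is written as an Item "processor": Item::walk() applies it to every node of an expression tree. A reduced, self-contained model of that traversal (Node and count_processor are invented for the example; the server's version is the Item hierarchy):

    #include <cstdio>
    #include <vector>

    // Every node runs 'processor' on itself; composite nodes recurse into
    // their arguments first, exactly like Item::walk().
    struct Node {
      typedef bool (Node::*Processor)(void *arg);
      std::vector<Node*> args;
      virtual ~Node() {}
      virtual bool count_processor(void *arg) {
        ++*static_cast<int*>(arg);
        return false;                 // false: keep walking
      }
      bool walk(Processor processor, void *arg) {
        for (size_t i= 0; i < args.size(); i++)
          if (args[i]->walk(processor, arg))
            return true;              // a processor aborted the walk
        return (this->*processor)(arg);
      }
    };

    int main() {
      Node leaf1, leaf2, root;
      root.args.push_back(&leaf1);
      root.args.push_back(&leaf2);
      int n= 0;
      root.walk(&Node::count_processor, &n);
      printf("visited %d nodes\n", n);   // prints 3
      return 0;
    }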
@@ -790,14 +807,25 @@ CHARSET_INFO *Item::default_charset()
}

/*
Save value in field, but don't give any warnings

NOTES
This is used to temporary store and retrieve a value in a column,
for example in opt_range to adjust the key value to fit the column.
*/

int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
{
int res;
THD *thd= field->table->in_use;
TABLE *table= field->table;
THD *thd= table->in_use;
enum_check_fields tmp= thd->count_cuted_fields;
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
res= save_in_field(field, no_conversions);
thd->count_cuted_fields= tmp;
dbug_tmp_restore_column_map(table->write_set, old_map);
return res;
}
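The dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map() pair above temporarily widens a column map and then puts it back. A sketch of that save/override/restore shape with an illustrative ColumnMap type; the server's versions operate on MY_BITMAP inside TABLE and the dbug_ variants are active only in debug builds:

    #include <cstdint>

    struct ColumnMap { uint32_t *bitmap; };   // current map, one bit per column

    static uint32_t all_columns[4]= { ~0u, ~0u, ~0u, ~0u };

    uint32_t *tmp_use_all_columns(ColumnMap *map) {
      uint32_t *old= map->bitmap;
      map->bitmap= all_columns;     // every column temporarily usable
      return old;                   // caller keeps this for the restore
    }

    void tmp_restore_column_map(ColumnMap *map, uint32_t *old) {
      map->bitmap= old;             // back to the statement's real map
    }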
@@ -2366,7 +2394,8 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry)
CHARSET_INFO *tocs= thd->variables.collation_connection;
uint32 dummy_offset;

value.cs_info.character_set_of_placeholder= fromcs;
value.cs_info.character_set_of_placeholder=
value.cs_info.character_set_client= fromcs;
/*
Setup source and destination character sets so that they
are different only if conversion is necessary: this will

@@ -3569,7 +3598,8 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
Item** res= find_item_in_list(this, thd->lex->current_select->item_list,
&counter, REPORT_EXCEPT_NOT_FOUND,
&not_used);
if (res != (Item **)not_found_item && (*res)->type() == Item::FIELD_ITEM)
if (res != (Item **)not_found_item &&
(*res)->type() == Item::FIELD_ITEM)
{
set_field((*((Item_field**)res))->field);
return 0;

@@ -3588,7 +3618,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
if it is not expression from merged VIEW we will set this field.

We can leave expression substituted from view for next PS/SP rexecution
(i.e. do not register this substitution for reverting on cleupup()
(i.e. do not register this substitution for reverting on cleanup()
(register_item_tree_changing())), because this subtree will be
fix_field'ed during setup_tables()->setup_underlying() (i.e. before
all other expressions of query, and references on tables which do

@@ -3600,13 +3630,13 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
return FALSE;

if (!outer_fixed && cached_table && cached_table->select_lex &&
context->select_lex &&
cached_table->select_lex != context->select_lex)
{
int ret;
if ((ret= fix_outer_field(thd, &from_field, reference)) < 0)
goto error;
else if (!ret)
if (!ret)
return FALSE;
}

@@ -3617,17 +3647,28 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
thd->lex->current_select->nest_level);
}
else if (thd->set_query_id)
else if (thd->mark_used_columns != MARK_COLUMNS_NONE)
{
TABLE *table= field->table;
table->file->ha_set_bit_in_rw_set(field->fieldnr,
(bool)(thd->set_query_id-1));
if (field->query_id != thd->query_id)
MY_BITMAP *current_bitmap, *other_bitmap;
if (thd->mark_used_columns == MARK_COLUMNS_READ)
{
/* We only come here in unions */
field->query_id=thd->query_id;
table->used_fields++;
table->used_keys.intersect(field->part_of_key);
current_bitmap= table->read_set;
other_bitmap= table->write_set;
}
else
{
current_bitmap= table->write_set;
other_bitmap= table->read_set;
}
if (!bitmap_fast_test_and_set(current_bitmap, field->field_index))
{
if (!bitmap_is_set(other_bitmap, field->field_index))
{
/* First usage of column */
table->used_fields++; // Used to optimize loops
table->used_keys.intersect(field->part_of_key);
}
}
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS

@@ -5398,17 +5439,17 @@ void Item_insert_value::print(String *str)
void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
bool save_set_query_id= thd->set_query_id;
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;

/* TODO: Think more about consequences of this step. */
thd->set_query_id= 0;
thd->mark_used_columns= MARK_COLUMNS_NONE;
/*
Try to find field by its name and if it will be found
set field_idx properly.
*/
(void)find_field_in_table(thd, table, field_name, (uint) strlen(field_name),
0, &field_idx);
thd->set_query_id= save_set_query_id;
thd->mark_used_columns= save_mark_used_columns;
triggers= table->triggers;
table_grants= table_grant_info;
}

24 sql/item.h
@@ -762,7 +762,7 @@ public:
static CHARSET_INFO *default_charset();
virtual CHARSET_INFO *compare_collation() { return NULL; }

virtual bool walk(Item_processor processor, byte *arg)
virtual bool walk(Item_processor processor, bool walk_subquery, byte *arg)
{
return (this->*processor)(arg);
}

@@ -784,7 +784,7 @@ public:
virtual bool collect_item_field_processor(byte * arg) { return 0; }
virtual bool find_item_in_field_list_processor(byte *arg) { return 0; }
virtual bool change_context_processor(byte *context) { return 0; }
virtual bool reset_query_id_processor(byte *query_id) { return 0; }
virtual bool register_field_in_read_map(byte *arg) { return 0; }
/*
Check if a partition function is allowed
SYNOPSIS

@@ -1280,13 +1280,7 @@ public:
Item *get_tmp_table_item(THD *thd);
bool collect_item_field_processor(byte * arg);
bool find_item_in_field_list_processor(byte *arg);
bool reset_query_id_processor(byte *arg)
{
field->query_id= *((query_id_t *) arg);
if (result_field)
result_field->query_id= field->query_id;
return 0;
}
bool register_field_in_read_map(byte *arg);
bool check_partition_func_processor(byte *bool_arg) { return 0; }
void cleanup();
Item_equal *find_item_equal(COND_EQUAL *cond_equal);

@@ -1907,8 +1901,8 @@ public:
{
return ref ? (*ref)->real_item() : this;
}
bool walk(Item_processor processor, byte *arg)
{ return (*ref)->walk(processor, arg); }
bool walk(Item_processor processor, bool walk_subquery, byte *arg)
{ return (*ref)->walk(processor, walk_subquery, arg); }
void print(String *str);
void cleanup();
Item_field *filed_for_view_update()

@@ -2159,9 +2153,9 @@ public:
int save_in_field(Field *field_arg, bool no_conversions);
table_map used_tables() const { return (table_map)0L; }

bool walk(Item_processor processor, byte *args)
bool walk(Item_processor processor, bool walk_subquery, byte *args)
{
return arg->walk(processor, args) ||
return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}

@@ -2206,9 +2200,9 @@ public:
}
table_map used_tables() const { return (table_map)0L; }

bool walk(Item_processor processor, byte *args)
bool walk(Item_processor processor, bool walk_subquery, byte *args)
{
return arg->walk(processor, args) ||
return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}
};
sql/item_cmpfunc.cc

@@ -234,23 +234,31 @@ longlong Item_func_nop_all::val_int()

static bool convert_constant_item(THD *thd, Field *field, Item **item)
{
int result= 0;
if (!(*item)->with_subselect && (*item)->const_item())
{
/* For comparison purposes allow invalid dates like 2000-01-32 */
ulong orig_sql_mode= field->table->in_use->variables.sql_mode;
field->table->in_use->variables.sql_mode|= MODE_INVALID_DATES;
TABLE *table= field->table;
ulong orig_sql_mode= table->in_use->variables.sql_mode;
my_bitmap_map *old_write_map=
dbug_tmp_use_all_columns(table, table->write_set);
my_bitmap_map *old_read_map=
dbug_tmp_use_all_columns(table, table->read_set);

table->in_use->variables.sql_mode|= MODE_INVALID_DATES;
if (!(*item)->save_in_field(field, 1) && !((*item)->null_value))
{
Item *tmp=new Item_int_with_ref(field->val_int(), *item,
test(field->flags & UNSIGNED_FLAG));
field->table->in_use->variables.sql_mode= orig_sql_mode;
Item *tmp= new Item_int_with_ref(field->val_int(), *item,
test(field->flags & UNSIGNED_FLAG));
if (tmp)
thd->change_item_tree(item, tmp);
return 1; // Item was replaced
result= 1; // Item was replaced
}
field->table->in_use->variables.sql_mode= orig_sql_mode;
table->in_use->variables.sql_mode= orig_sql_mode;
dbug_tmp_restore_column_map(table->write_set, old_write_map);
dbug_tmp_restore_column_map(table->read_set, old_read_map);
}
return 0;
return result;
}

@@ -2608,14 +2616,14 @@ Item_cond::fix_fields(THD *thd, Item **ref)
return FALSE;
}

bool Item_cond::walk(Item_processor processor, byte *arg)
bool Item_cond::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
List_iterator_fast<Item> li(list);
Item *item;
while ((item= li++))
if (item->walk(processor, arg))
if (item->walk(processor, walk_subquery, arg))
return 1;
return Item_func::walk(processor, arg);
return Item_func::walk(processor, walk_subquery, arg);
}

@@ -3861,14 +3869,16 @@ void Item_equal::fix_length_and_dec()
eval_item->cmp_charset= cmp_collation.collation;
}

bool Item_equal::walk(Item_processor processor, byte *arg)
bool Item_equal::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
List_iterator_fast<Item_field> it(fields);
Item *item;
while ((item= it++))
if (item->walk(processor, arg))
{
if (item->walk(processor, walk_subquery, arg))
return 1;
return Item_func::walk(processor, arg);
}
return Item_func::walk(processor, walk_subquery, arg);
}

Item *Item_equal::transform(Item_transformer transformer, byte *arg)

sql/item_cmpfunc.h

@@ -1178,7 +1178,7 @@ public:
COND **conds);
void top_level_item() { abort_on_null=1; }
void copy_andor_arguments(THD *thd, Item_cond *item);
bool walk(Item_processor processor, byte *arg);
bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void traverse_cond(Cond_traverser, void *arg, traverse_order order);
void neg_arguments(THD *thd);

@@ -1292,7 +1292,7 @@ public:
void fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
void update_used_tables();
bool walk(Item_processor processor, byte *arg);
bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void print(String *str);
CHARSET_INFO *compare_collation()

sql/item_func.cc

@@ -194,14 +194,16 @@ Item_func::fix_fields(THD *thd, Item **ref)
return FALSE;
}

bool Item_func::walk (Item_processor processor, byte *argument)

bool Item_func::walk(Item_processor processor, bool walk_subquery,
byte *argument)
{
if (arg_count)
{
Item **arg,**arg_end;
for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
{
if ((*arg)->walk(processor, argument))
if ((*arg)->walk(processor, walk_subquery, argument))
return 1;
}
}

@@ -4397,7 +4399,7 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref)
return TRUE;
}
table=((Item_field *)item)->field->table;
if (!(table->file->table_flags() & HA_CAN_FULLTEXT))
if (!(table->file->ha_table_flags() & HA_CAN_FULLTEXT))
{
my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0));
return 1;

sql/item_func.h

@@ -182,7 +182,7 @@ public:
{
return agg_item_charsets(c, func_name(), items, nitems, flags);
}
bool walk(Item_processor processor, byte *arg);
bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void traverse_cond(Cond_traverser traverser,
void * arg, traverse_order order);

sql/item_row.cc

@@ -142,16 +142,18 @@ void Item_row::print(String *str)
str->append(')');
}

bool Item_row::walk(Item_processor processor, byte *arg)

bool Item_row::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
for (uint i= 0; i < arg_count; i++)
{
if (items[i]->walk(processor, arg))
if (items[i]->walk(processor, walk_subquery, arg))
return 1;
}
return (this->*processor)(arg);
}

Item *Item_row::transform(Item_transformer transformer, byte *arg)
{
for (uint i= 0; i < arg_count; i++)

sql/item_row.h

@@ -68,7 +68,7 @@ public:
void update_used_tables();
void print(String *str);

bool walk(Item_processor processor, byte *arg);
bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);

uint cols() { return arg_count; }

sql/item_strfunc.h

@@ -455,10 +455,10 @@ public:
void update_used_tables();
const char *func_name() const { return "make_set"; }

bool walk(Item_processor processor, byte *arg)
bool walk(Item_processor processor, bool walk_subquery, byte *arg)
{
return item->walk(processor, arg) ||
Item_str_func::walk(processor, arg);
return item->walk(processor, walk_subquery, arg) ||
Item_str_func::walk(processor, walk_subquery, arg);
}
Item *transform(Item_transformer transformer, byte *arg)
{

sql/item_subselect.cc

@@ -195,6 +195,46 @@ err:
return res;
}

bool Item_subselect::walk(Item_processor processor, bool walk_subquery,
byte *argument)
{

if (walk_subquery)
{
for (SELECT_LEX *lex= unit->first_select(); lex; lex= lex->next_select())
{
List_iterator<Item> li(lex->item_list);
Item *item;
ORDER *order;

if (lex->where && (lex->where)->walk(processor, walk_subquery, argument))
return 1;
if (lex->having && (lex->having)->walk(processor, walk_subquery,
argument))
return 1;

while ((item=li++))
{
if (item->walk(processor, walk_subquery, argument))
return 1;
}
for (order= (ORDER*) lex->order_list.first ; order; order= order->next)
{
if ((*order->item)->walk(processor, walk_subquery, argument))
return 1;
}
for (order= (ORDER*) lex->group_list.first ; order; order= order->next)
{
if ((*order->item)->walk(processor, walk_subquery, argument))
return 1;
}
}
}
return (this->*processor)(argument);
}

bool Item_subselect::exec()
{
int res;
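Item_subselect::walk() above only descends into the inner SELECTs (WHERE, HAVING, select list, ORDER BY and GROUP BY in the real code) when walk_subquery is set. A compressed model of that control flow; Expr and Subselect are illustrative types, not the server's:

    #include <vector>

    struct Expr {
      typedef bool (Expr::*Processor)(void *);
      virtual ~Expr() {}
      virtual bool walk(Processor p, bool walk_subquery, void *arg) {
        return (this->*p)(arg);     // plain node: just visit itself
      }
    };

    struct Subselect : Expr {
      std::vector<Expr*> inner;     // expressions of the inner SELECT
      bool walk(Processor p, bool walk_subquery, void *arg) {
        if (walk_subquery)          // descend only when asked to
          for (size_t i= 0; i < inner.size(); i++)
            if (inner[i]->walk(p, walk_subquery, arg))
              return true;
        return (this->*p)(arg);     // the subselect node itself is always visited
      }
    };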
@@ -374,7 +414,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
as far as we moved content to upper level, field which depend of
'upper' select is not really dependent => we remove this dependence
*/
substitution->walk(&Item::remove_dependence_processor,
substitution->walk(&Item::remove_dependence_processor, 0,
(byte *) select_lex->outer_select());
/* SELECT without FROM clause can't have WHERE or HAVING clause */
DBUG_ASSERT(join->conds == 0 && join->having == 0);

sql/item_subselect.h

@@ -121,6 +121,7 @@ public:
*/
virtual void reset_value_registration() {}
enum_parsing_place place() { return parsing_place; }
bool walk(Item_processor processor, bool walk_subquery, byte *arg);

friend class select_subselect;
friend class Item_in_optimizer;

sql/item_sum.cc

@@ -353,14 +353,15 @@ Item *Item_sum::get_tmp_table_item(THD *thd)
}

bool Item_sum::walk (Item_processor processor, byte *argument)
bool Item_sum::walk (Item_processor processor, bool walk_subquery,
byte *argument)
{
if (arg_count)
{
Item **arg,**arg_end;
for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
{
if ((*arg)->walk(processor, argument))
if ((*arg)->walk(processor, walk_subquery, argument))
return 1;
}
}

@@ -734,7 +735,7 @@ static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
static int item_sum_distinct_walk(void *element, element_count num_of_dups,
void *item)
{
return ((Item_sum_distinct*) (item))->unique_walk_function(element);
}

C_MODE_END

@@ -2688,7 +2689,7 @@ longlong Item_sum_count_distinct::val_int()
return (longlong) count;
}
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
return table->file->records;
return table->file->stats.records;
}

sql/item_sum.h

@@ -312,7 +312,7 @@ public:
Item *get_tmp_table_item(THD *thd);
virtual Field *create_tmp_field(bool group, TABLE *table,
uint convert_blob_length);
bool walk (Item_processor processor, byte *argument);
bool walk(Item_processor processor, bool walk_subquery, byte *argument);
bool init_sum_func_check(THD *thd);
bool check_sum_func(THD *thd, Item **ref);
bool register_sum_func(THD *thd, Item **ref);

19 sql/key.cc

@@ -301,14 +301,26 @@ bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length)
return 0;
}

/* unpack key-fields from record to some buffer */
/* This is used to get a good error message */
/*
unpack key-fields from record to some buffer

SYNOPSIS
key_unpack()
to Store value here in an easy to read form
table Table to use
idx Key number

NOTES
This is used mainly to get a good error message
We temporary change the column bitmap so that all columns are readable.
*/

void key_unpack(String *to,TABLE *table,uint idx)
{
KEY_PART_INFO *key_part,*key_part_end;
Field *field;
String tmp;
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
DBUG_ENTER("key_unpack");

to->length(0);

@@ -337,6 +349,7 @@ void key_unpack(String *to,TABLE *table,uint idx)
else
to->append(STRING_WITH_LEN("???"));
}
dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_VOID_RETURN;
}
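key_unpack() renders the key columns of the current row into a readable String, switching to an all-columns read map while it does so. A hedged usage sketch; the caller shape is an assumption for illustration, not code from this changeset:

    // Format the key value of the current row for a duplicate-key error
    // message; key_unpack() is the function shown above, String is the
    // server's string class.
    void append_dup_key_info(TABLE *table, uint key_nr, String *msg) {
      String key_value;
      key_unpack(&key_value, table, key_nr);  // temporarily reads all columns
      msg->append(key_value);                 // e.g. "Duplicate entry '...'"
    }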
@@ -373,7 +386,7 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields)
key is not updated
*/
if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY &&
(table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
(table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
return check_if_key_used(table, table->s->primary_key, fields);
return 0;
}

21 sql/log.cc

@@ -167,8 +167,10 @@ bool Log_to_csv_event_handler::open_log_table(uint log_type)
table->table->file->ha_rnd_init(0))
error= TRUE;
else
{
table->table->use_all_columns();
table->table->locked_by_logger= TRUE;
}
/* restore thread settings */
if (curr)
curr->store_globals();

@@ -1153,7 +1155,8 @@ static int binlog_rollback(THD *thd, bool all)
table. Such cases should be rare (updating a
non-transactional table inside a transaction...)
*/
if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE))
if (unlikely(thd->options & (OPTION_STATUS_NO_TRANS_UPDATE |
OPTION_KEEP_LOG)))
{
Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, FALSE);
qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)

@@ -1214,7 +1217,8 @@ static int binlog_savepoint_rollback(THD *thd, void *sv)
non-transactional table. Otherwise, truncate the binlog cache starting
from the SAVEPOINT command.
*/
if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE))
if (unlikely(thd->options &
(OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG)))
{
int const error=
thd->binlog_query(THD::STMT_QUERY_TYPE,

@@ -2641,8 +2645,9 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
{
int error;
DBUG_ENTER("THD::binlog_write_table_map");
DBUG_PRINT("enter", ("table: %p (%s: #%u)",
table, table->s->table_name, table->s->table_map_id));
DBUG_PRINT("enter", ("table: %0xlx (%s: #%u)",
(long) table, table->s->table_name,
table->s->table_map_id));

/* Pre-conditions */
DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());

@@ -2655,7 +2660,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
the_event(this, table, table->s->table_map_id, is_trans, flags);

if (is_trans)
trans_register_ha(this, options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN),
trans_register_ha(this,
(options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0,
&binlog_hton);

if ((error= mysql_bin_log.write(&the_event)))

@@ -2871,7 +2877,7 @@ bool MYSQL_LOG::write(Log_event *event_info)
if (event_info->get_cache_stmt() && !trans_log_in_use)
trans_register_ha(thd,
(thd->options &
(OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)),
(OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0,
&binlog_hton);
if (event_info->get_cache_stmt() || trans_log_in_use)
{

@@ -4359,5 +4365,6 @@ mysql_declare_plugin(binlog)
binlog_init, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100 /* 1.0 */,
0
}
mysql_declare_plugin_end;

sql/log_event.cc

@@ -1744,21 +1744,22 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query
{
if (flags2_inited)
/*
all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG must
take their value from flags2.
all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG
must take their value from flags2.
*/
thd->options= flags2|(thd->options & ~(ulong)OPTIONS_WRITTEN_TO_BIN_LOG);
thd->options= flags2|(thd->options & ~OPTIONS_WRITTEN_TO_BIN_LOG);
/*
else, we are in a 3.23/4.0 binlog; we previously received a
Rotate_log_event which reset thd->options and sql_mode etc, so nothing to do.
Rotate_log_event which reset thd->options and sql_mode etc, so
nothing to do.
*/
/*
We do not replicate IGNORE_DIR_IN_CREATE. That is, if the master is a
slave which runs with SQL_MODE=IGNORE_DIR_IN_CREATE, this should not
force us to ignore the dir too. Imagine you are a ring of machines, and
one has a disk problem so that you temporarily need IGNORE_DIR_IN_CREATE
on this machine; you don't want it to propagate elsewhere (you don't want
all slaves to start ignoring the dirs).
one has a disk problem so that you temporarily need
IGNORE_DIR_IN_CREATE on this machine; you don't want it to propagate
elsewhere (you don't want all slaves to start ignoring the dirs).
*/
if (sql_mode_inited)
thd->variables.sql_mode=
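The assignment above merges binlogged bits into the session options: every bit covered by OPTIONS_WRITTEN_TO_BIN_LOG is taken from flags2, every other bit of thd->options is preserved. The idiom isolated, with an illustrative mask value:

    #include <cassert>
    typedef unsigned long long ulonglong;

    int main() {
      const ulonglong MASK = 0x0F;   // stands in for OPTIONS_WRITTEN_TO_BIN_LOG
      ulonglong options   = 0xF3;    // current session bits
      ulonglong flags2    = 0x05;    // bits recorded in the binlog event
      options= flags2 | (options & ~MASK);
      assert(options == 0xF5);       // low 4 bits replaced, high bits kept
      return 0;
    }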
@@ -3264,8 +3265,8 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli)
rli->notify_group_master_log_name_update();
rli->group_master_log_pos= pos;
rli->group_relay_log_pos= rli->event_relay_log_pos;
DBUG_PRINT("info", ("group_master_log_name: '%s' group_master_log_pos:\
%lu",
DBUG_PRINT("info", ("group_master_log_name: '%s' "
"group_master_log_pos: %lu",
rli->group_master_log_name,
(ulong) rli->group_master_log_pos));
/*

@@ -5200,8 +5201,9 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
log only the primary key value instead of the entire "before image". This
would save binlog space. TODO
*/
DBUG_ENTER("Rows_log_event::do_add_row_data(byte *data, my_size_t length)");
DBUG_PRINT("enter", ("row_data= %p, length= %lu", row_data, length));
DBUG_ENTER("Rows_log_event::do_add_row_data");
DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
length));
DBUG_DUMP("row_data", (const char*)row_data, min(length, 32));

DBUG_ASSERT(m_rows_buf <= m_rows_cur);

@@ -5256,7 +5258,7 @@ static char const *unpack_row(TABLE *table,
{
DBUG_ASSERT(record && row);

MY_BITMAP *write_set= table->file->write_set;
MY_BITMAP *write_set= table->write_set;
my_size_t const n_null_bytes= table->s->null_bytes;
my_ptrdiff_t const offset= record - (byte*) table->record[0];

@@ -5269,13 +5271,13 @@ static char const *unpack_row(TABLE *table,
{
Field *const f= *field_ptr;

if (bitmap_is_set(cols, field_ptr - begin_ptr))
if (bitmap_is_set(cols, (uint) (field_ptr - begin_ptr)))
{
/* Field...::unpack() cannot return 0 */
ptr= f->unpack(f->ptr + offset, ptr);
}
else
bitmap_clear_bit(write_set, (field_ptr - begin_ptr) + 1);
bitmap_clear_bit(write_set, (uint) (field_ptr - begin_ptr));
}
return ptr;
}
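The bitmap_clear_bit() fix above makes the column index 0-based ((field_ptr - begin_ptr)) instead of the old 1-based (+ 1). A standalone sketch of the loop's logic, with plain arrays standing in for MY_BITMAP:

    #include <cstdio>

    int main() {
      const unsigned n_fields= 4;
      bool cols[n_fields]=      { true, false, true, true }; // sent by master
      bool write_set[n_fields]= { true, true,  true, true };

      for (unsigned i= 0; i < n_fields; i++) {  // 0-based, as after the fix
        if (cols[i])
          printf("unpack field %u from row image\n", i);
        else
          write_set[i]= false;   // field absent: do not overwrite it
      }
      return 0;
    }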
@@ -5431,7 +5433,8 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));

error= do_before_row_operations(table);
while (error == 0 && row_start < (const char*)m_rows_end) {
while (error == 0 && row_start < (const char*) m_rows_end)
{
char const *row_end= do_prepare_row(thd, table, row_start);
DBUG_ASSERT(row_end != NULL); // cannot happen
DBUG_ASSERT(row_end <= (const char*)m_rows_end);

@@ -5466,8 +5469,10 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
rli->abort_slave=1;);
error= do_after_row_operations(table, error);
if (!cache_stmt)
thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
{
DBUG_PRINT("info", ("Marked that we need to keep log"));
thd->options|= OPTION_KEEP_LOG;
}
}

if (error)

@@ -6255,9 +6260,9 @@ replace_record(THD *thd, TABLE *table)
- use index_read_idx() with the key that is duplicated, to
retrieve the offending row.
*/
if (table->file->table_flags() & HA_DUPP_POS)
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
error= table->file->rnd_pos(table->record[1], table->file->dupp_ref);
error= table->file->rnd_pos(table->record[1], table->file->dup_ref);
if (error)
return error;
}

@@ -6374,16 +6379,17 @@ static bool record_compare(TABLE *table)
to find (and fetch) the row. If the engine allows random access of the
records, a combination of position() and rnd_pos() will be used.
*/

static int find_and_fetch_row(TABLE *table, byte *key)
{
DBUG_ENTER("find_and_fetch_row(TABLE *table, byte *key, byte *record)");
DBUG_PRINT("enter", ("table=%p, key=%p, record=%p",
table, key, table->record[1]));
DBUG_PRINT("enter", ("table: 0x%lx, key: 0x%lx record: 0x%lx",
(long) table, (long) key, (long) table->record[1]));

DBUG_ASSERT(table->in_use != NULL);

if ((table->file->table_flags() & HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS)
&& table->s->primary_key < MAX_KEY)
if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
table->s->primary_key < MAX_KEY)
{
/*
Use a more efficient method to fetch the record given by

@@ -6399,15 +6405,15 @@ static int find_and_fetch_row(TABLE *table, byte *key)
DBUG_ASSERT(table->record[1]);

/* We need to retrieve all fields */
table->file->ha_set_all_bits_in_read_set();
/* TODO: Move this out from this function to main loop */
table->use_all_columns();

if (table->s->keys > 0)
{
int error;
/* We have a key: search the table using the index */
if (!table->file->inited)
if ((error= table->file->ha_index_init(0, FALSE)))
return error;
if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE)))
return error;

/*
We need to set the null bytes to ensure that the filler bit are

@@ -6440,7 +6446,7 @@ static int find_and_fetch_row(TABLE *table, byte *key)
comparison of non-PK columns to decide if the correct record is
found. I can see no scenario where it would be incorrect to
chose the row to change only using a PK or an UNNI.
*/
if (table->key_info->flags & HA_NOSAME)
{
table->file->ha_index_end();

@@ -6564,7 +6570,7 @@ int Delete_rows_log_event::do_before_row_operations(TABLE *table)
{
DBUG_ASSERT(m_memory == NULL);

if ((table->file->table_flags() & HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS) &&
if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
table->s->primary_key < MAX_KEY)
{
/*

@@ -6638,19 +6644,18 @@ char const *Delete_rows_log_event::do_prepare_row(THD *thd, TABLE *table,

int Delete_rows_log_event::do_exec_row(TABLE *table)
{
int error;
DBUG_ASSERT(table != NULL);

int error= find_and_fetch_row(table, m_key);
if (error)
return error;

/*
Now we should have the right row to delete. We are using
record[0] since it is guaranteed to point to a record with the
correct value.
*/
error= table->file->ha_delete_row(table->record[0]);

if (!(error= find_and_fetch_row(table, m_key)))
{
/*
Now we should have the right row to delete. We are using
record[0] since it is guaranteed to point to a record with the
correct value.
*/
error= table->file->ha_delete_row(table->record[0]);
}
return error;
}

sql/mysql_priv.h

@@ -279,9 +279,9 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
#define OPTION_BEGIN            (LL(1) << 20) // THD, intern
#define OPTION_TABLE_LOCK       (LL(1) << 21) // THD, intern
#define OPTION_QUICK            (LL(1) << 22) // SELECT (for DELETE)
#define OPTION_KEEP_LOG         (LL(1) << 23) // Keep binlog on rollback

/* Thr following is used to detect a conflict with DISTINCT
in the user query has requested */
/* The following is used to detect a conflict with DISTINCT */
#define SELECT_ALL              (LL(1) << 24) // SELECT, user, parser

/* Set if we are updating a non-transaction safe table */
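OPTION_KEEP_LOG above is one bit in the thd->options word. The standard set/test/clear idiom for such flags, in isolation (OPT_KEEP_LOG mirrors the (LL(1) << 23) definition; the surrounding program is illustrative):

    #include <cstdio>
    typedef unsigned long long ulonglong;

    static const ulonglong OPT_KEEP_LOG= 1ULL << 23;

    int main() {
      ulonglong options= 0;
      options|= OPT_KEEP_LOG;             // mark: keep binlog on rollback
      if (options & OPT_KEEP_LOG)
        printf("must write ROLLBACK to the binlog\n");
      options&= ~OPT_KEEP_LOG;            // clear when no longer needed
      return 0;
    }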
@@ -1106,27 +1106,28 @@ bool insert_fields(THD *thd, Name_resolution_context *context,
List_iterator<Item> *it, bool any_privileges);
bool setup_tables(THD *thd, Name_resolution_context *context,
List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
Item **conds, TABLE_LIST **leaves, bool select_insert);
bool setup_tables_and_check_access (THD *thd,
Name_resolution_context *context,
List<TABLE_LIST> *from_clause,
TABLE_LIST *tables, Item **conds,
TABLE_LIST **leaves,
bool select_insert,
ulong want_access);
TABLE_LIST **leaves, bool select_insert);
bool setup_tables_and_check_access(THD *thd,
Name_resolution_context *context,
List<TABLE_LIST> *from_clause,
TABLE_LIST *tables,
TABLE_LIST **leaves,
bool select_insert,
ulong want_access);
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list, uint wild_num);
bool setup_fields(THD *thd, Item** ref_pointer_array,
List<Item> &item, ulong set_query_id,
List<Item> &item, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, bool allow_sum_func);
inline bool setup_fields_with_no_wrap(THD *thd, Item **ref_pointer_array,
List<Item> &item, ulong set_query_id,
List<Item> *sum_func_list,
bool allow_sum_func)
List<Item> &item,
enum_mark_columns mark_used_columns,
List<Item> *sum_func_list,
bool allow_sum_func)
{
bool res;
thd->lex->select_lex.no_wrap_view_item= TRUE;
res= setup_fields(thd, ref_pointer_array, item, set_query_id, sum_func_list,
res= setup_fields(thd, ref_pointer_array, item, mark_used_columns, sum_func_list,
allow_sum_func);
thd->lex->select_lex.no_wrap_view_item= FALSE;
return res;

@@ -1774,7 +1775,8 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
void end_read_record(READ_RECORD *info);
ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder,
uint s_length, SQL_SELECT *select,
ha_rows max_rows, ha_rows *examined_rows);
ha_rows max_rows, bool sort_positions,
ha_rows *examined_rows);
void filesort_free_buffers(TABLE *table);
void change_double_for_sort(double nr,byte *to);
double my_double_round(double value, int dec, bool truncate);

sql/mysqld.cc

@@ -2468,10 +2468,12 @@ static int my_message_sql(uint error, const char *str, myf MyFlags)
if (thd->lex->current_select &&
thd->lex->current_select->no_error && !thd->is_fatal_error)
{
DBUG_PRINT("error", ("Error converted to warning: current_select: no_error %d fatal_error: %d",
(thd->lex->current_select ?
thd->lex->current_select->no_error : 0),
(int) thd->is_fatal_error));
DBUG_PRINT("error",
("Error converted to warning: current_select: no_error %d "
"fatal_error: %d",
(thd->lex->current_select ?
thd->lex->current_select->no_error : 0),
(int) thd->is_fatal_error));
}
else
{

@@ -5691,7 +5693,6 @@ log and this option does nothing anymore.",
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULL,
REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (ulonglong) ~0, 0, IO_SIZE, 0},
/* QQ: The following should be removed soon! (bdb_max_lock preferred) */
{"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},

348 sql/opt_range.cc

@@ -822,6 +822,10 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bool no_alloc, MEM_ROOT *parent_alloc)
:dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),range(0)
{
my_bitmap_map *bitmap;
DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");

in_ror_merged_scan= 0;
sorted= 0;
index= key_nr;
head= table;

@@ -845,6 +849,19 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bzero((char*) &alloc,sizeof(alloc));
file= head->file;
record= head->record[0];
save_read_set= head->read_set;
save_write_set= head->write_set;

/* Allocate a bitmap for used columns */
if (!(bitmap= (my_bitmap_map*) my_malloc(head->s->column_bitmap_size,
MYF(MY_WME))))
{
column_bitmap.bitmap= 0;
error= 1;
}
else
bitmap_init(&column_bitmap, bitmap, head->s->fields, FALSE);
DBUG_VOID_RETURN;
}

@@ -874,24 +891,26 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
if (file)
{
range_end();
file->extra(HA_EXTRA_NO_KEYREAD);
if (free_file)
{
DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file,
free_file));
file->ha_reset();
file->ha_external_lock(current_thd, F_UNLCK);
file->close();
delete file;
}
else
{
file->extra(HA_EXTRA_NO_KEYREAD);
}
}
delete_dynamic(&ranges); /* ranges are allocated in alloc */
free_root(&alloc,MYF(0));
my_free((char*) column_bitmap.bitmap, MYF(MY_ALLOW_ZERO_PTR));
}
if (multi_range)
my_free((char*) multi_range, MYF(0));
if (multi_range_buff)
my_free((char*) multi_range_buff, MYF(0));
head->column_bitmaps_set(save_read_set, save_write_set);
x_free(multi_range);
x_free(multi_range_buff);
DBUG_VOID_RETURN;
}

@@ -1011,20 +1030,21 @@ int QUICK_ROR_INTERSECT_SELECT::init()

int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
handler *save_file= file;
handler *save_file= file, *org_file;
THD *thd;
MY_BITMAP *bitmap;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");

in_ror_merged_scan= 1;
if (reuse_handler)
{
DBUG_PRINT("info", ("Reusing handler %p", file));
if (file->extra(HA_EXTRA_KEYREAD) ||
file->ha_retrieve_all_pk() ||
init() || reset())
DBUG_PRINT("info", ("Reusing handler 0x%lx", (long) file));
if (init() || reset())
{
DBUG_RETURN(1);
}
DBUG_RETURN(0);
head->column_bitmaps_set(&column_bitmap, &column_bitmap);
goto end;
}

/* Create a separate handler object for this quick select */

@@ -1037,19 +1057,20 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
thd= head->in_use;
if (!(file= get_new_handler(head->s, thd->mem_root, head->s->db_type)))
goto failure;
DBUG_PRINT("info", ("Allocated new handler %p", file));
DBUG_PRINT("info", ("Allocated new handler 0x%lx", (long) file));
if (file->ha_open(head, head->s->normalized_path.str, head->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
{
/* Caller will free the memory */
goto failure;
}

head->column_bitmaps_set(&column_bitmap, &column_bitmap);

if (file->ha_external_lock(thd, F_RDLCK))
goto failure;

if (file->extra(HA_EXTRA_KEYREAD) ||
file->ha_retrieve_all_pk() ||
init() || reset())
if (init() || reset())
{
file->ha_external_lock(thd, F_UNLCK);
file->close();

@@ -1057,11 +1078,28 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
}
free_file= TRUE;
last_rowid= file->ref;

end:
/*
We are only going to read key fields and call position() on 'file'
The following sets head->tmp_set to only use this key and then updates
head->read_set and head->write_set to use this bitmap.
The now bitmap is stored in 'column_bitmap' which is used in ::get_next()
*/
org_file= head->file;
head->file= file;
/* We don't have to set 'head->keyread' here as the 'file' is unique */
head->mark_columns_used_by_index(index);
head->prepare_for_position();
head->file= org_file;
bitmap_copy(&column_bitmap, head->read_set);
head->column_bitmaps_set(&column_bitmap, &column_bitmap);

DBUG_RETURN(0);

failure:
if (file)
delete file;
head->column_bitmaps_set(save_read_set, save_write_set);
delete file;
file= save_file;
DBUG_RETURN(1);
}
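init_ror_merged_scan() saves head->read_set/head->write_set on construction and restores them through column_bitmaps_set() on every exit path. The same discipline expressed as an RAII guard; types here are illustrative, the server does this manually:

    struct BitmapSet;   // stands in for MY_BITMAP

    struct Table {
      BitmapSet *read_set, *write_set;
      void column_bitmaps_set(BitmapSet *r, BitmapSet *w)
      { read_set= r; write_set= w; }
    };

    class ScopedColumnBitmaps {
      Table *t;
      BitmapSet *saved_read, *saved_write;
    public:
      ScopedColumnBitmaps(Table *table, BitmapSet *r, BitmapSet *w)
        : t(table), saved_read(table->read_set), saved_write(table->write_set)
      { t->column_bitmaps_set(r, w); }
      ~ScopedColumnBitmaps()            // restore on every exit path
      { t->column_bitmaps_set(saved_read, saved_write); }
    };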
@@ -1766,32 +1804,26 @@ public:
static int fill_used_fields_bitmap(PARAM *param)
{
TABLE *table= param->table;
param->fields_bitmap_size= bitmap_buffer_size(table->s->fields+1);
uint32 *tmp;
my_bitmap_map *tmp;
uint pk;
if (!(tmp= (uint32*) alloc_root(param->mem_root,param->fields_bitmap_size)) ||
bitmap_init(&param->needed_fields, tmp, param->fields_bitmap_size*8,
FALSE))
param->fields_bitmap_size= table->s->column_bitmap_size;
if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
param->fields_bitmap_size)) ||
bitmap_init(&param->needed_fields, tmp, table->s->fields, FALSE))
return 1;

bitmap_clear_all(&param->needed_fields);
for (uint i= 0; i < table->s->fields; i++)
{
if (param->thd->query_id == table->field[i]->query_id)
bitmap_set_bit(&param->needed_fields, i+1);
}
bitmap_copy(&param->needed_fields, table->read_set);
bitmap_union(&param->needed_fields, table->write_set);

pk= param->table->s->primary_key;
if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY)
if (pk != MAX_KEY && param->table->file->primary_key_is_clustered())
{
/* The table uses clustered PK and it is not internally generated */
KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
KEY_PART_INFO *key_part_end= key_part +
param->table->key_info[pk].key_parts;
for (;key_part != key_part_end; ++key_part)
{
bitmap_clear_bit(&param->needed_fields, key_part->fieldnr);
}
bitmap_clear_bit(&param->needed_fields, key_part->fieldnr-1);
}
return 0;
}
@ -1843,7 +1875,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
|
|||
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
|
||||
keys_to_use.to_ulonglong(), (ulong) prev_tables,
|
||||
(ulong) const_tables));
|
||||
DBUG_PRINT("info", ("records=%lu", (ulong)head->file->records));
|
||||
DBUG_PRINT("info", ("records: %lu", (ulong) head->file->stats.records));
|
||||
delete quick;
|
||||
quick=0;
|
||||
needed_reg.clear_all();
|
||||
|
@ -1853,7 +1885,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
|
|||
DBUG_RETURN(0); /* purecov: inspected */
|
||||
if (keys_to_use.is_clear_all())
|
||||
DBUG_RETURN(0);
|
||||
records= head->file->records;
|
||||
records= head->file->stats.records;
|
||||
if (!records)
|
||||
records++; /* purecov: inspected */
|
||||
scan_time= (double) records / TIME_FOR_COMPARE + 1;
|
||||
|
@ -1878,7 +1910,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
|
|||
|
||||
/* set up parameter that is passed to all functions */
|
||||
param.thd= thd;
|
||||
param.baseflag=head->file->table_flags();
|
||||
param.baseflag=head->file->ha_table_flags();
|
||||
param.prev_tables=prev_tables | const_tables;
|
||||
param.read_tables=read_tables;
|
||||
param.current_table= head->map;
|
||||
|
@ -2296,6 +2328,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
|
|||
PART_PRUNE_PARAM prune_param;
|
||||
MEM_ROOT alloc;
|
||||
RANGE_OPT_PARAM *range_par= &prune_param.range_param;
|
||||
my_bitmap_map *old_read_set, *old_write_set;
|
||||
|
||||
prune_param.part_info= part_info;
|
||||
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
|
||||
|
@@ -2309,6 +2342,8 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
     DBUG_RETURN(FALSE);
   }
 
+  old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+  old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
   range_par->thd= thd;
   range_par->table= table;
   /* range_par->cond doesn't need initialization */
@@ -2398,6 +2433,8 @@ all_used:
   retval= FALSE; // some partitions are used
   mark_all_partitions_as_used(prune_param.part_info);
 end:
+  dbug_tmp_restore_column_map(table->write_set, old_write_set);
+  dbug_tmp_restore_column_map(table->read_set, old_read_set);
   thd->no_errors=0;
   thd->mem_root= range_par->old_root;
   free_root(&alloc,MYF(0));   // Return memory & allocator
@@ -2424,6 +2461,8 @@ end:
 void store_key_image_to_rec(Field *field, char *ptr, uint len)
 {
   /* Do the same as print_key() does */
+  my_bitmap_map *old_map;
+
   if (field->real_maybe_null())
   {
     if (*ptr)
@@ -2434,7 +2473,10 @@ void store_key_image_to_rec(Field *field, char *ptr, uint len)
     field->set_notnull();
     ptr++;
   }
+  old_map= dbug_tmp_use_all_columns(field->table,
+                                    field->table->write_set);
   field->set_key_image(ptr, len);
+  dbug_tmp_restore_column_map(field->table->write_set, old_map);
 }
 
 
@@ -2514,11 +2556,11 @@ static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
 {
   MY_BITMAP all_merges;
   uint bitmap_bytes;
-  uint32 *bitmap_buf;
+  my_bitmap_map *bitmap_buf;
   uint n_bits= ppar->part_info->used_partitions.n_bits;
   bitmap_bytes= bitmap_buffer_size(n_bits);
-  if (!(bitmap_buf= (uint32*)alloc_root(ppar->range_param.mem_root,
-                                        bitmap_bytes)))
+  if (!(bitmap_buf= (my_bitmap_map*) alloc_root(ppar->range_param.mem_root,
+                                                bitmap_bytes)))
   {
     /*
       Fallback, process just the first SEL_IMERGE. This can leave us with more
@@ -2764,7 +2806,8 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
 
         uint32 subpart_id;
         bitmap_clear_all(&ppar->subparts_bitmap);
-        while ((subpart_id= subpart_iter.get_next(&subpart_iter)) != NOT_A_PARTITION_ID)
+        while ((subpart_id= subpart_iter.get_next(&subpart_iter)) !=
+               NOT_A_PARTITION_ID)
           bitmap_set_bit(&ppar->subparts_bitmap, subpart_id);
 
         /* Mark each partition as used in each subpartition. */
@@ -2870,7 +2913,8 @@ process_next_key_part:
       /* Got "full range" for subpartitioning fields */
       uint32 part_id;
       bool found= FALSE;
-      while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) != NOT_A_PARTITION_ID)
+      while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+             NOT_A_PARTITION_ID)
       {
         ppar->mark_full_partition_used(ppar->part_info, part_id);
         found= TRUE;
@@ -3017,11 +3061,12 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
 
   if (ppar->subpart_fields)
   {
-    uint32 *buf;
+    my_bitmap_map *buf;
     uint32 bufsize= bitmap_buffer_size(ppar->part_info->no_subparts);
-    if (!(buf= (uint32*)alloc_root(alloc, bufsize)))
+    if (!(buf= (my_bitmap_map*) alloc_root(alloc, bufsize)))
       return TRUE;
-    bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts, FALSE);
+    bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts,
+                FALSE);
   }
   range_par->key_parts= key_part;
   Field **field= (ppar->part_fields)? part_info->part_field_array :
@@ -3188,7 +3233,8 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
   else
   {
     double n_blocks=
-      ceil(ulonglong2double(param->table->file->data_file_length) / IO_SIZE);
+      ceil(ulonglong2double(param->table->file->stats.data_file_length) /
+           IO_SIZE);
     double busy_blocks=
       n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records)));
     if (busy_blocks < 1.0)
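The busy_blocks expression above is the standard balls-into-bins estimate (often attributed to Cardenas): fetching `records` rows at random from a table spread over `n_blocks` blocks touches about n_blocks * (1 - (1 - 1/n_blocks)^records) distinct blocks. A quick sanity check of the formula, independent of the server code:

    #include <cmath>
    #include <cstdio>

    int main()
    {
      double n_blocks= 1000.0;   // table occupies 1000 IO_SIZE blocks
      double records=  100.0;    // rows fetched in random order
      double busy_blocks=
        n_blocks * (1.0 - std::pow(1.0 - 1.0 / n_blocks, records));
      // Prints ~95.2: a few of the 100 fetches are expected to land
      // on a block that was already read.
      std::printf("busy_blocks = %.1f\n", busy_blocks);
      return 0;
    }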
@@ -3357,7 +3403,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
 
   DBUG_PRINT("info", ("index_merge scans cost=%g", imerge_cost));
   if (imerge_too_expensive || (imerge_cost > read_time) ||
-      (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) &&
+      (non_cpk_scan_records+cpk_scan_records >= param->table->file->stats.records) &&
       read_time != DBL_MAX)
   {
     /*
@@ -3415,7 +3461,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
     imerge_trp->read_cost= imerge_cost;
     imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
     imerge_trp->records= min(imerge_trp->records,
-                             param->table->file->records);
+                             param->table->file->stats.records);
     imerge_trp->range_scans= range_scans;
     imerge_trp->range_scans_end= range_scans + n_child_scans;
     read_time= imerge_cost;
@@ -3476,7 +3522,7 @@ skip_to_ror_scan:
       ((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs;
     roru_total_records += (*cur_roru_plan)->records;
     roru_intersect_part *= (*cur_roru_plan)->records /
-                           param->table->file->records;
+                           param->table->file->stats.records;
   }
 
   /*
@@ -3486,7 +3532,7 @@ skip_to_ror_scan:
     in disjunction do not share key parts.
   */
   roru_total_records -= (ha_rows)(roru_intersect_part*
-                                  param->table->file->records);
+                                  param->table->file->stats.records);
   /* ok, got a ROR read plan for each of the disjuncts
     Calculate cost:
     cost(index_union_scan(scan_1, ... scan_n)) =
@@ -3547,7 +3593,7 @@ static double get_index_only_read_time(const PARAM* param, ha_rows records,
                                        int keynr)
 {
   double read_time;
-  uint keys_per_block= (param->table->file->block_size/2/
+  uint keys_per_block= (param->table->file->stats.block_size/2/
                         (param->table->key_info[keynr].key_length+
                          param->table->file->ref_length) + 1);
   read_time=((double) (records+keys_per_block-1)/
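get_index_only_read_time() assumes index blocks are on average half full, so one block of stats.block_size bytes holds roughly block_size/2 / (key_length + ref_length) entries. Worked numbers (all values here are illustrative, not taken from the patch):

    #include <cstdio>

    int main()
    {
      unsigned block_size= 16384;  // assume a 16KB index block
      unsigned key_length= 12;     // key bytes per entry
      unsigned ref_length= 6;      // row-pointer bytes per entry
      unsigned keys_per_block= block_size / 2 / (key_length + ref_length) + 1;
      unsigned records= 10000;
      // Blocks to read = ceil(records / keys_per_block), written the
      // integer way, as in the server code:
      unsigned read_blocks= (records + keys_per_block - 1) / keys_per_block;
      std::printf("keys_per_block=%u blocks=%u\n",
                  keys_per_block, read_blocks);   // 456 and 22
      return 0;
    }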
@@ -3599,7 +3645,7 @@ static
 ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
 {
   ROR_SCAN_INFO *ror_scan;
-  uint32 *bitmap_buf;
+  my_bitmap_map *bitmap_buf;
   uint keynr;
   DBUG_ENTER("make_ror_scan");
 
@@ -3614,12 +3660,12 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
   ror_scan->sel_arg= sel_arg;
   ror_scan->records= param->table->quick_rows[keynr];
 
-  if (!(bitmap_buf= (uint32*)alloc_root(param->mem_root,
-                                        param->fields_bitmap_size)))
+  if (!(bitmap_buf= (my_bitmap_map*) alloc_root(param->mem_root,
+                                                param->fields_bitmap_size)))
     DBUG_RETURN(NULL);
 
   if (bitmap_init(&ror_scan->covered_fields, bitmap_buf,
-                  param->fields_bitmap_size*8, FALSE))
+                  param->table->s->fields, FALSE))
     DBUG_RETURN(NULL);
   bitmap_clear_all(&ror_scan->covered_fields);
 
@@ -3628,8 +3674,8 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
                  param->table->key_info[keynr].key_parts;
   for (;key_part != key_part_end; ++key_part)
   {
-    if (bitmap_is_set(&param->needed_fields, key_part->fieldnr))
-      bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr);
+    if (bitmap_is_set(&param->needed_fields, key_part->fieldnr-1))
+      bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr-1);
   }
   ror_scan->index_read_cost=
     get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr],
@@ -3729,21 +3775,21 @@ static
 ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
 {
   ROR_INTERSECT_INFO *info;
-  uint32* buf;
+  my_bitmap_map* buf;
   if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
                                               sizeof(ROR_INTERSECT_INFO))))
     return NULL;
   info->param= param;
-  if (!(buf= (uint32*)alloc_root(param->mem_root,
-                                 param->fields_bitmap_size)))
+  if (!(buf= (my_bitmap_map*) alloc_root(param->mem_root,
+                                         param->fields_bitmap_size)))
     return NULL;
-  if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8,
+  if (bitmap_init(&info->covered_fields, buf, param->table->s->fields,
                   FALSE))
     return NULL;
   info->is_covering= FALSE;
   info->index_scan_costs= 0.0;
   info->index_records= 0;
-  info->out_rows= param->table->file->records;
+  info->out_rows= param->table->file->stats.records;
   bitmap_clear_all(&info->covered_fields);
   return info;
 }
@@ -3862,14 +3908,14 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
   SEL_ARG *sel_arg, *tuple_arg= NULL;
   bool cur_covered;
   bool prev_covered= test(bitmap_is_set(&info->covered_fields,
-                                        key_part->fieldnr));
+                                        key_part->fieldnr-1));
   key_range min_range;
   key_range max_range;
   min_range.key= (byte*) key_val;
   min_range.flag= HA_READ_KEY_EXACT;
   max_range.key= (byte*) key_val;
   max_range.flag= HA_READ_AFTER_KEY;
-  ha_rows prev_records= info->param->table->file->records;
+  ha_rows prev_records= info->param->table->file->stats.records;
   DBUG_ENTER("ror_intersect_selectivity");
 
   for (sel_arg= scan->sel_arg; sel_arg;
@@ -3877,7 +3923,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
   {
     DBUG_PRINT("info",("sel_arg step"));
     cur_covered= test(bitmap_is_set(&info->covered_fields,
-                                    key_part[sel_arg->part].fieldnr));
+                                    key_part[sel_arg->part].fieldnr-1));
     if (cur_covered != prev_covered)
     {
       /* create (part1val, ..., part{n-1}val) tuple. */
@@ -4006,15 +4052,15 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
   }
 
   info->total_cost= info->index_scan_costs;
-  DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
+  DBUG_PRINT("info", ("info->total_cost: %g", info->total_cost));
   if (!info->is_covering)
   {
     info->total_cost +=
       get_sweep_read_cost(info->param, double2rows(info->out_rows));
     DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
   }
-  DBUG_PRINT("info", ("New out_rows= %g", info->out_rows));
-  DBUG_PRINT("info", ("New cost= %g, %scovering", info->total_cost,
+  DBUG_PRINT("info", ("New out_rows: %g", info->out_rows));
+  DBUG_PRINT("info", ("New cost: %g, %scovering", info->total_cost,
                       info->is_covering?"" : "non-"));
   DBUG_RETURN(TRUE);
 }
@@ -4093,7 +4139,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
   double min_cost= DBL_MAX;
   DBUG_ENTER("get_best_ror_intersect");
 
-  if ((tree->n_ror_scans < 2) || !param->table->file->records)
+  if ((tree->n_ror_scans < 2) || !param->table->file->stats.records)
     DBUG_RETURN(NULL);
 
   /*
@@ -4262,7 +4308,8 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
     F=set of all fields to cover
     S={}
 
-    do {
+    do
+    {
       Order I by (#covered fields in F desc,
                   #components asc,
                   number of first not covered component asc);
@@ -4280,7 +4327,6 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
   ROR_SCAN_INFO **ror_scan_mark;
   ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
   DBUG_ENTER("get_best_covering_ror_intersect");
-  uint nbits= param->fields_bitmap_size*8;
 
   for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
     (*scan)->key_components=
@@ -4294,9 +4340,9 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
   /*I=set of all covering indexes */
   ror_scan_mark= tree->ror_scans;
 
-  uint32 int_buf[MAX_KEY/32+1];
+  my_bitmap_map int_buf[MAX_KEY/(sizeof(my_bitmap_map)*8)+1];
   MY_BITMAP covered_fields;
-  if (bitmap_init(&covered_fields, int_buf, nbits, FALSE))
+  if (bitmap_init(&covered_fields, int_buf, param->table->s->fields, FALSE))
     DBUG_RETURN(0);
   bitmap_clear_all(&covered_fields);
 
@@ -4545,7 +4591,8 @@ QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param,
 
   if ((quick_intrsect=
          new QUICK_ROR_INTERSECT_SELECT(param->thd, param->table,
-                                        retrieve_full_rows? (!is_covering):FALSE,
+                                        (retrieve_full_rows? (!is_covering) :
+                                         FALSE),
                                         parent_alloc)))
   {
     DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
@@ -7218,7 +7265,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
     goto err;
   quick->records= records;
 
-  if (cp_buffer_from_ref(thd,ref) && thd->is_fatal_error ||
+  if (cp_buffer_from_ref(thd, table, ref) && thd->is_fatal_error ||
       !(range= new(alloc) QUICK_RANGE()))
     goto err;                                   // out of memory
 
@@ -7281,10 +7328,9 @@ err:
     rowids into Unique, get the sorted sequence and destroy the Unique.
 
     If table has a clustered primary key that covers all rows (TRUE for bdb
-    and innodb currently) and one of the index_merge scans is a scan on PK,
-    then
-    rows that will be retrieved by PK scan are not put into Unique and
-    primary key scan is not performed here, it is performed later separately.
+    and innodb currently) and one of the index_merge scans is a scan on PK,
+    then rows that will be retrieved by PK scan are not put into Unique and
+    primary key scan is not performed here, it is performed later separately.
 
   RETURN
     0     OK
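The algorithm the comment above describes, reduced to its core: each range scan emits row ids, the ids are deduplicated in a sort-based structure, and the merged result is fetched in rowid order. A language-level sketch using std::set in place of the server's Unique class (which, unlike this sketch, spills to disk when the sort buffer fills):

    #include <set>
    #include <vector>
    #include <cstdint>

    using rowid_t= std::uint64_t;

    // Collect row ids from several "scans" (plain vectors here), dropping
    // duplicates the way index_merge feeds rowids into Unique.
    std::vector<rowid_t>
    merge_rowid_scans(const std::vector<std::vector<rowid_t>> &scans)
    {
      std::set<rowid_t> unique;              // stand-in for class Unique
      for (const auto &scan : scans)
        unique.insert(scan.begin(), scan.end());
      // Reading the set in order corresponds to unique->get(head): rows are
      // then fetched in rowid order, which keeps the table I/O sequential.
      return {unique.begin(), unique.end()};
    }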
@@ -7297,21 +7343,17 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
   QUICK_RANGE_SELECT* cur_quick;
   int result;
   Unique *unique;
-  DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique");
+  MY_BITMAP *save_read_set, *save_write_set;
+  handler *file= head->file;
+  DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
 
   /* We're going to just read rowids. */
-  if (head->file->extra(HA_EXTRA_KEYREAD))
-    DBUG_RETURN(1);
-
-  /*
-    Make innodb retrieve all PK member fields, so
-     * ha_innobase::position (which uses them) call works.
-     * We can filter out rows that will be retrieved by clustered PK.
-    (This also creates a deficiency - it is possible that we will retrieve
-     parts of key that are not used by current query at all.)
-  */
-  if (head->file->ha_retrieve_all_pk())
-    DBUG_RETURN(1);
+  save_read_set= head->read_set;
+  save_write_set= head->write_set;
+  file->extra(HA_EXTRA_KEYREAD);
+  bitmap_clear_all(&head->tmp_set);
+  head->column_bitmaps_set(&head->tmp_set, &head->tmp_set);
+  head->prepare_for_position();
 
   cur_quick_it.rewind();
   cur_quick= cur_quick_it++;
@@ -7324,8 +7366,8 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
   if (cur_quick->init() || cur_quick->reset())
     DBUG_RETURN(1);
 
-  unique= new Unique(refpos_order_cmp, (void *)head->file,
-                     head->file->ref_length,
+  unique= new Unique(refpos_order_cmp, (void *)file,
+                     file->ref_length,
                      thd->variables.sortbuff_size);
   if (!unique)
     DBUG_RETURN(1);
@@ -7368,15 +7410,16 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
 
   }
 
   DBUG_PRINT("info", ("ok"));
   /* ok, all row ids are in Unique */
   result= unique->get(head);
   delete unique;
   doing_pk_scan= FALSE;
+  /* index_merge currently doesn't support "using index" at all */
+  file->extra(HA_EXTRA_NO_KEYREAD);
+  head->column_bitmaps_set(save_read_set, save_write_set);
   /* start table scan */
   init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
-  /* index_merge currently doesn't support "using index" at all */
-  head->file->extra(HA_EXTRA_NO_KEYREAD);
 
   DBUG_RETURN(result);
 }
@@ -7398,9 +7441,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
   if (doing_pk_scan)
     DBUG_RETURN(pk_quick_select->get_next());
 
-  result= read_record.read_record(&read_record);
-
-  if (result == -1)
+  if ((result= read_record.read_record(&read_record)) == -1)
   {
     result= HA_ERR_END_OF_FILE;
     end_read_record(&read_record);
@@ -7408,7 +7449,8 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
     if (pk_quick_select)
     {
       doing_pk_scan= TRUE;
-      if ((result= pk_quick_select->init()) || (result= pk_quick_select->reset()))
+      if ((result= pk_quick_select->init()) ||
+          (result= pk_quick_select->reset()))
         DBUG_RETURN(result);
       DBUG_RETURN(pk_quick_select->get_next());
     }
@@ -7450,16 +7492,12 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
   {
     /* Get a rowid for first quick and save it as a 'candidate' */
     quick= quick_it++;
+    error= quick->get_next();
    if (cpk_quick)
    {
-      do
-      {
+      while (!error && !cpk_quick->row_in_ranges())
        error= quick->get_next();
-      }while (!error && !cpk_quick->row_in_ranges());
    }
-    else
-      error= quick->get_next();
 
    if (error)
      DBUG_RETURN(error);
@@ -7505,7 +7543,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
       }
     }
 
-    /* We get here iff we got the same row ref in all scans. */
+    /* We get here if we got the same row ref in all scans. */
     if (need_to_fetch_row)
       error= head->file->rnd_pos(head->record[0], last_rowid);
   } while (error == HA_ERR_RECORD_DELETED);
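The loop this hunk sits in implements rowid-ordered-retrieval (ROR) intersection: every merged scan returns rowids in ascending order, so a row belongs to the result exactly when all scans produce the same rowid; any scan that is behind the current candidate is advanced until it catches up or overshoots. A self-contained sketch of that merge, under the same sortedness assumption:

    #include <vector>
    #include <cstdint>
    #include <cstddef>

    using rowid_t= std::uint64_t;

    // Intersect k ascending rowid streams, in the spirit of
    // QUICK_ROR_INTERSECT_SELECT::get_next() (simplified, no handler I/O).
    std::vector<rowid_t>
    ror_intersect(const std::vector<std::vector<rowid_t>> &scans)
    {
      std::vector<rowid_t> out;
      if (scans.empty() || scans[0].empty())
        return out;
      std::vector<std::size_t> pos(scans.size(), 0);
      rowid_t candidate= scans[0][0];
      std::size_t agreed= 0;     // scans known to contain 'candidate'
      std::size_t i= 0;
      while (true)
      {
        // advance scan i to the first rowid >= candidate
        while (pos[i] < scans[i].size() && scans[i][pos[i]] < candidate)
          pos[i]++;
        if (pos[i] == scans[i].size())
          break;                 // one scan ran out: no more matches
        if (scans[i][pos[i]] == candidate)
        {
          if (++agreed == scans.size())   // same rowid in every scan
          {
            out.push_back(candidate);
            candidate++;                  // hunt for the next common rowid
            agreed= 0;
          }
        }
        else
        {
          candidate= scans[i][pos[i]];    // jump ahead, restart the count
          agreed= 1;
        }
        i= (i + 1) % scans.size();
      }
      return out;
    }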
@@ -7578,6 +7616,7 @@ int QUICK_ROR_UNION_SELECT::get_next()
   DBUG_RETURN(error);
 }
 
+
 int QUICK_RANGE_SELECT::reset()
 {
   uint mrange_bufsiz;
@@ -7617,7 +7656,7 @@ int QUICK_RANGE_SELECT::reset()
   }
 
   /* Allocate the handler buffer if necessary. */
-  if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
+  if (file->ha_table_flags() & HA_NEED_READ_RANGE_BUFFER)
   {
     mrange_bufsiz= min(multi_range_bufsiz,
                        (QUICK_SELECT_I::records + 1)* head->s->reclength);
@@ -7682,6 +7721,15 @@ int QUICK_RANGE_SELECT::get_next()
               (cur_range >= (QUICK_RANGE**) ranges.buffer) &&
               (cur_range <= (QUICK_RANGE**) ranges.buffer + ranges.elements));
 
+  if (in_ror_merged_scan)
+  {
+    /*
+      We don't need to signal the bitmap change as the bitmap is always the
+      same for this head->file
+    */
+    head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
+  }
+
   for (;;)
   {
     if (in_range)
@@ -7689,10 +7737,7 @@ int QUICK_RANGE_SELECT::get_next()
       /* We did already start to read this key. */
       result= file->read_multi_range_next(&mrange);
       if (result != HA_ERR_END_OF_FILE)
-      {
-        in_range= ! result;
-        DBUG_RETURN(result);
-      }
+        goto end;
     }
 
     uint count= min(multi_range_length, ranges.elements -
@@ -7701,6 +7746,8 @@ int QUICK_RANGE_SELECT::get_next()
     {
       /* Ranges have already been used up before. None is left for read. */
       in_range= FALSE;
+      if (in_ror_merged_scan)
+        head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
       DBUG_RETURN(HA_ERR_END_OF_FILE);
     }
     KEY_MULTI_RANGE *mrange_slot, *mrange_end;
@@ -7732,12 +7779,18 @@ int QUICK_RANGE_SELECT::get_next()
     result= file->read_multi_range_first(&mrange, multi_range, count,
                                          sorted, multi_range_buff);
     if (result != HA_ERR_END_OF_FILE)
-    {
-      in_range= ! result;
-      DBUG_RETURN(result);
-    }
+      goto end;
     in_range= FALSE; /* No matching rows; go to next set of ranges. */
   }
 
+end:
+  in_range= ! result;
+  if (in_ror_merged_scan)
+  {
+    /* Restore bitmaps set on entry */
+    head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
+  }
   DBUG_RETURN(result);
 }
 
 
@@ -7914,7 +7967,7 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
 
 QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
                                      uint used_key_parts)
- : QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
+ :QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
 {
   QUICK_RANGE *r;
 
@@ -8390,9 +8443,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
         groups, and thus can be applied after the grouping.
     GA4. There are no expressions among G_i, just direct column references.
     NGA1.If in the index I there is a gap between the last GROUP attribute G_k,
-         and the MIN/MAX attribute C, then NGA must consist of exactly the index
-         attributes that constitute the gap. As a result there is a permutation
-         of NGA that coincides with the gap in the index <B_1, ..., B_m>.
+         and the MIN/MAX attribute C, then NGA must consist of exactly the
+         index attributes that constitute the gap. As a result there is a
+         permutation of NGA that coincides with the gap in the index
+         <B_1, ..., B_m>.
     NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of
          equality conditions for all NG_i of the form (NG_i = const) or
          (const = NG_i), such that each NG_i is referenced in exactly one
@@ -8400,9 +8454,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
          gap in the index.
     WA1. There are no other attributes in the WHERE clause except the ones
          referenced in predicates RNG, PA, PC, EQ defined above. Therefore
-         WA is subset of (GA union NGA union C) for GA,NGA,C that pass the above
-         tests. By transitivity then it also follows that each WA_i participates
-         in the index I (if this was already tested for GA, NGA and C).
+         WA is subset of (GA union NGA union C) for GA,NGA,C that pass the
+         above tests. By transitivity then it also follows that each WA_i
+         participates in the index I (if this was already tested for GA, NGA
+         and C).
 
   C) Overall query form:
        SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)])
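A concrete query shape satisfying these conditions may help; the schema below is hypothetical and only meant to illustrate how GA, NGA and C map onto an index (this is the classic loose-index-scan pattern, not text from the patch):

    /*
      Given
        CREATE TABLE t (a INT, b INT, c INT, KEY idx(a, b, c));
      the query
        SELECT a, MIN(c) FROM t WHERE b = 2 GROUP BY a;
      fits the conditions above: GA = {a} is a prefix of idx, the gap
      between the last GROUP attribute 'a' and the MIN/MAX attribute 'c'
      is exactly NGA = {b}, and the WHERE clause supplies the required
      equality b = 2 (NGA1/NGA2). The optimizer can then jump from group
      to group in the index instead of reading every row of each group.
    */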
@@ -8464,12 +8519,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
   TABLE *table= param->table;
   bool have_min= FALSE;              /* TRUE if there is a MIN function. */
   bool have_max= FALSE;              /* TRUE if there is a MAX function. */
-  Item_field *min_max_arg_item= NULL;/* The argument of all MIN/MAX functions.*/
+  Item_field *min_max_arg_item= NULL; // The argument of all MIN/MAX functions
   KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */
   uint group_prefix_len= 0; /* Length (in bytes) of the key prefix. */
   KEY *index_info= NULL;    /* The index chosen for data access. */
   uint index= 0;            /* The id of the chosen index. */
-  uint group_key_parts= 0;  /* Number of index key parts in the group prefix. */
+  uint group_key_parts= 0;  // Number of index key parts in the group prefix.
   uint used_key_parts= 0;   /* Number of index key parts used for access. */
   byte key_infix[MAX_KEY_LENGTH]; /* Constants from equality predicates.*/
   uint key_infix_len= 0;          /* Length of key_infix. */
@@ -8587,28 +8642,19 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
       we check that all query fields are indeed covered by 'cur_index'.
     */
     if (pk < MAX_KEY && cur_index != pk &&
-        (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+        (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
     {
       /* For each table field */
       for (uint i= 0; i < table->s->fields; i++)
       {
         Field *cur_field= table->field[i];
         /*
-          If the field is used in the current query, check that the
-          field is covered by some keypart of the current index.
+          If the field is used in the current query ensure that it's
+          part of 'cur_index'
         */
-        if (thd->query_id == cur_field->query_id)
-        {
-          KEY_PART_INFO *key_part= cur_index_info->key_part;
-          KEY_PART_INFO *key_part_end= key_part + cur_index_info->key_parts;
-          for (;;)
-          {
-            if (key_part->field == cur_field)
-              break;
-            if (++key_part == key_part_end)
-              goto next_index;                  // Field was not part of key
-          }
-        }
+        if (bitmap_is_set(table->read_set, cur_field->field_index) &&
+            !cur_field->part_of_key_not_clustered.is_set(cur_index))
+          goto next_index;                      // Field was not part of key
       }
     }
 
@@ -8762,7 +8808,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
         key_part_range[1]= last_part;
 
         /* Check if cur_part is referenced in the WHERE clause. */
-        if (join->conds->walk(&Item::find_item_in_field_list_processor,
+        if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
                               (byte*) key_part_range))
           goto next_index;
       }
@@ -8776,7 +8822,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
       {
         for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++)
         {
-          if (cur_part->field->query_id == thd->query_id)
+          if (bitmap_is_set(table->read_set, cur_part->field->field_index))
             goto next_index;
         }
       }
@@ -9240,8 +9286,8 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
   double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */
   DBUG_ENTER("cost_group_min_max");
 
-  table_records= table->file->records;
-  keys_per_block= (table->file->block_size / 2 /
+  table_records= table->file->stats.records;
+  keys_per_block= (table->file->stats.block_size / 2 /
                    (index_info->key_length + table->file->ref_length)
                    + 1);
   num_blocks= (table_records / keys_per_block) + 1;
@@ -10414,6 +10460,10 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
   const char *key_end= key+used_length;
   String tmp(buff,sizeof(buff),&my_charset_bin);
   uint store_length;
+  TABLE *table= key_part->field->table;
+  my_bitmap_map *old_write_set, *old_read_set;
+  old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+  old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
 
   for (; key < key_end; key+=store_length, key_part++)
   {
@@ -10439,18 +10489,28 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
     if (key+store_length < key_end)
       fputc('/',DBUG_FILE);
   }
+  dbug_tmp_restore_column_map(table->write_set, old_write_set);
+  dbug_tmp_restore_column_map(table->read_set, old_read_set);
 }
 
 
 static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
 {
   char buf[MAX_KEY/8+1];
+  TABLE *table;
+  my_bitmap_map *old_read_map, *old_write_map;
   DBUG_ENTER("print_quick");
   if (!quick)
     DBUG_VOID_RETURN;
   DBUG_LOCK_FILE;
 
+  table= quick->head;
+  old_read_map= dbug_tmp_use_all_columns(table, table->read_set);
+  old_write_map= dbug_tmp_use_all_columns(table, table->write_set);
   quick->dbug_dump(0, TRUE);
+  dbug_tmp_restore_column_map(table->read_set, old_read_map);
+  dbug_tmp_restore_column_map(table->write_set, old_write_map);
+
   fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf));
 
   DBUG_UNLOCK_FILE;
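These debug printers now lift the column maps before touching arbitrary fields: with the new bitmap enforcement, reading a Field that is not in table->read_set trips debug-build assertions, so a printer temporarily marks every column readable and restores the saved map afterwards. The shape of the pattern, as a hedged sketch (std::bitset instead of MY_BITMAP, plus an RAII wrapper the server code does not use):

    #include <bitset>
    #include <cstddef>

    constexpr std::size_t MAX_FIELDS= 64;     // illustrative bound
    using column_map= std::bitset<MAX_FIELDS>;

    // In the spirit of dbug_tmp_use_all_columns() /
    // dbug_tmp_restore_column_map(): save the map, allow everything,
    // put the original back when the printer is done.
    class TmpUseAllColumns
    {
      column_map &map_;
      column_map saved_;
    public:
      explicit TmpUseAllColumns(column_map &map) : map_(map), saved_(map)
      { map_.set(); }                        // every column accessible
      ~TmpUseAllColumns() { map_= saved_; }  // restore on scope exit
    };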
sql/opt_range.h

@@ -192,8 +192,9 @@ public:
     function is called.
   SYNOPSIS
     init_ror_merged_scan()
-      reuse_handler If true, the quick select may use table->handler, otherwise
-                    it must create and use a separate handler object.
+      reuse_handler If true, the quick select may use table->handler,
+                    otherwise it must create and use a separate handler
+                    object.
   RETURN
     0     Ok
     other Error
@@ -259,7 +260,7 @@ class SEL_ARG;
 class QUICK_RANGE_SELECT : public QUICK_SELECT_I
 {
 protected:
-  bool next,dont_free;
+  bool next,dont_free,in_ror_merged_scan;
 public:
   int error;
 protected:
@@ -277,8 +278,8 @@ protected:
                                        freed by QUICK_RANGE_SELECT) */
   HANDLER_BUFFER *multi_range_buff; /* the handler buffer (allocated and
                                        freed by QUICK_RANGE_SELECT) */
+  MY_BITMAP column_bitmap, *save_read_set, *save_write_set;
 
-protected:
   friend class TRP_ROR_INTERSECT;
   friend
   QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,

sql/opt_sum.cc
@@ -54,6 +54,36 @@ static int reckey_in_range(bool max_fl, TABLE_REF *ref, Field* field,
 static int maxmin_in_range(bool max_fl, Field* field, COND *cond);
 
 
+/*
+  Get exact count of rows in all tables
+
+  SYNOPSIS
+    get_exact_record_count()
+    tables  List of tables
+
+  NOTES
+    When this is called, we know all table handlers support HA_HAS_RECORDS
+    or HA_STATS_RECORDS_IS_EXACT
+
+  RETURN
+    ULONGLONG_MAX Error: Could not calculate number of rows
+    #             Multiplication of number of rows in all tables
+*/
+
+static ulonglong get_exact_record_count(TABLE_LIST *tables)
+{
+  ulonglong count= 1;
+  for (TABLE_LIST *tl= tables; tl; tl= tl->next_leaf)
+  {
+    ha_rows tmp= tl->table->file->records();
+    if (tmp == HA_POS_ERROR)
+      return ULONGLONG_MAX;
+    count*= tmp;
+  }
+  return count;
+}
+
+
 /*
   Substitutes constants for some COUNT(), MIN() and MAX() functions.
 
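The helper works because COUNT(*) over a FROM list with no join condition and no WHERE clause is the cross product of the leaf tables, so the optimizer can multiply exact per-handler counts instead of scanning. A trivial illustration with made-up row counts:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      // With no join condition, SELECT COUNT(*) FROM t1, t2 is |t1| * |t2|.
      std::uint64_t t1_rows= 7, t2_rows= 11;
      std::printf("COUNT(*) = %llu\n",
                  (unsigned long long) (t1_rows * t2_rows));   // 77
      return 0;
    }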
@@ -80,8 +110,8 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
   List_iterator_fast<Item> it(all_fields);
   int const_result= 1;
   bool recalc_const_item= 0;
-  longlong count= 1;
-  bool is_exact_count= TRUE;
+  ulonglong count= 1;
+  bool is_exact_count= TRUE, maybe_exact_count= TRUE;
   table_map removed_tables= 0, outer_tables= 0, used_tables= 0;
   table_map where_tables= 0;
   Item *item;
@@ -120,22 +150,25 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
     used_tables|= tl->table->map;
 
     /*
-      If the storage manager of 'tl' gives exact row count, compute the total
-      number of rows. If there are no outer table dependencies, this count
-      may be used as the real count.
+      If the storage manager of 'tl' gives exact row count as part of
+      statistics (cheap), compute the total number of rows. If there are
+      no outer table dependencies, this count may be used as the real count.
+      Schema tables are filled after this function is invoked, so we can't
+      get row count
     */
-    if ((tl->table->file->table_flags() & HA_NOT_EXACT_COUNT) ||
+    if (!(tl->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) ||
         tl->schema_table)
     {
+      maybe_exact_count&= test(!tl->schema_table &&
+                               (tl->table->file->ha_table_flags() &
+                                HA_HAS_RECORDS));
      is_exact_count= FALSE;
      count= 1;                                  // ensure count != 0
    }
    else
    {
      tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
-      count*= tl->table->file->records;
+      count*= tl->table->file->stats.records;
    }
  }
@@ -157,9 +190,19 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
         there are no outer joins.
       */
       if (!conds && !((Item_sum_count*) item)->args[0]->maybe_null &&
-          !outer_tables && is_exact_count)
+          !outer_tables && maybe_exact_count)
       {
-        ((Item_sum_count*) item)->make_const(count);
+        if (!is_exact_count)
+        {
+          if ((count= get_exact_record_count(tables)) == ULONGLONG_MAX)
+          {
+            /* Error from handler in counting rows. Don't optimize count() */
+            const_result= 0;
+            continue;
+          }
+          is_exact_count= 1;                    // count is now exact
+        }
+        ((Item_sum_count*) item)->make_const((longlong) count);
         recalc_const_item= 1;
       }
       else

sql/protocol.cc
@@ -924,8 +924,19 @@ bool Protocol_simple::store(Field *field)
   char buff[MAX_FIELD_WIDTH];
   String str(buff,sizeof(buff), &my_charset_bin);
   CHARSET_INFO *tocs= this->thd->variables.character_set_results;
+  TABLE *table= field->table;
+#ifndef DBUG_OFF
+  my_bitmap_map *old_map= 0;
+  if (table->file)
+    old_map= dbug_tmp_use_all_columns(table, table->read_set);
+#endif
 
   field->val_str(&str);
+#ifndef DBUG_OFF
+  if (old_map)
+    dbug_tmp_restore_column_map(table->read_set, old_map);
+#endif
 
   return store_string_aux(str.ptr(), str.length(), str.charset(), tocs);
 }
 

sql/records.cc
@@ -64,10 +64,7 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
 
   table->status=0;                      /* And it's always found */
   if (!table->file->inited)
-  {
     table->file->ha_index_init(idx, 1);
-    table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
-  }
   /* read_record will be changed to rr_index in rr_index_first */
   info->read_record= rr_index_first;
 }
@@ -195,11 +192,11 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
   if (!table->sort.addon_field &&
       ! (specialflag & SPECIAL_SAFE_MODE) &&
       thd->variables.read_rnd_buff_size &&
-      !(table->file->table_flags() & HA_FAST_KEY_READ) &&
+      !(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
      (table->db_stat & HA_READ_ONLY ||
       table->reginfo.lock_type <= TL_READ_NO_INSERT) &&
-      (ulonglong) table->s->reclength* (table->file->records+
-                                        table->file->deleted) >
+      (ulonglong) table->s->reclength* (table->file->stats.records+
+                                        table->file->stats.deleted) >
      (ulonglong) MIN_FILE_LENGTH_TO_USE_ROW_CACHE &&
      info->io_cache->end_of_file/info->ref_length * table->s->reclength >
      (my_off_t) MIN_ROWS_TO_USE_TABLE_CACHE &&
@@ -239,7 +236,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
          (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY ||
          !(table->s->db_options_in_use & HA_OPTION_PACK_RECORD) ||
          (use_record_cache < 0 &&
-           !(table->file->table_flags() & HA_NOT_DELETE_WITH_CACHE))))
+           !(table->file->ha_table_flags() & HA_NOT_DELETE_WITH_CACHE))))
     VOID(table->file->extra_opt(HA_EXTRA_CACHE,
                                 thd->variables.read_buff_size));
 }
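The size test in that condition enables the read-ahead row cache only when the table is big enough to pay for it: reclength * (stats.records + stats.deleted) approximates the data file size and is compared against MIN_FILE_LENGTH_TO_USE_ROW_CACHE. Worked numbers, with an assumed threshold (the real constant lives in the server headers and may differ):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      std::uint64_t reclength= 100;                  // bytes per row
      std::uint64_t records= 200000, deleted= 5000;
      std::uint64_t approx_file_size= reclength * (records + deleted);
      // Assumed value for illustration only:
      const std::uint64_t MIN_FILE_LENGTH_TO_USE_ROW_CACHE= 16ULL << 20;
      std::printf("%s\n",
                  approx_file_size > MIN_FILE_LENGTH_TO_USE_ROW_CACHE
                  ? "use row cache" : "skip row cache");  // ~20MB > 16MB
      return 0;
    }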
sql/set_var.cc

@@ -2728,7 +2728,7 @@ bool sys_var_max_user_conn::check(THD *thd, set_var *var)
   {
     /*
       Per-session values of max_user_connections can't be set directly.
-      QQ: May be we should have a separate error message for this?
+      Maybe we should have a separate error message for this?
     */
     my_error(ER_GLOBAL_VARIABLE, MYF(0), name);
     return TRUE;
@@ -2795,7 +2795,8 @@ static bool set_option_autocommit(THD *thd, set_var *var)
   if ((org_options & OPTION_NOT_AUTOCOMMIT))
   {
     /* We changed to auto_commit mode */
-    thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+    thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+                             OPTION_KEEP_LOG);
     thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
     if (ha_commit(thd))
       return 1;

sql/share/errmsg.txt
@@ -2865,30 +2865,8 @@ ER_WRONG_OUTER_JOIN 42000
         swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket"
         ukr "ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON"
 ER_NULL_COLUMN_IN_INDEX 42000
-        cze "Sloupec '%-.32s' je pou-B¾it s UNIQUE nebo INDEX, ale není definován jako NOT NULL"
-        dan "Kolonne '%-.32s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL"
-        nla "Kolom '%-.64s' wordt gebruikt met UNIQUE of INDEX maar is niet gedefinieerd als NOT NULL"
-        eng "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
-        jps "Column '%-.64s' ‚ª UNIQUE ‚© INDEX ‚ÅŽg—p‚³‚ê‚Ü‚µ‚½. ‚±‚̃Jƒ‰ƒ€‚Í NOT NULL ‚Æ’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.",
-        est "Tulp '%-.64s' on kasutusel indeksina, kuid ei ole määratletud kui NOT NULL"
-        fre "La colonne '%-.32s' fait partie d'un index UNIQUE ou INDEX mais n'est pas définie comme NOT NULL"
-        ger "Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert"
-        greek "Ôï ðåäßï '%-.64s' ÷ñçóéìïðïéåßôáé óáí UNIQUE Þ INDEX áëëÜ äåí Ý÷åé ïñéóèåß óáí NOT NULL"
-        hun "A(z) '%-.64s' oszlop INDEX vagy UNIQUE (egyedi), de a definicioja szerint nem NOT NULL"
-        ita "La colonna '%-.64s' e` usata con UNIQUE o INDEX ma non e` definita come NOT NULL"
-        jpn "Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. ¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó."
-        kor "'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä..."
-        nor "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
-        norwegian-ny "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
-        pol "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
-        por "Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)"
-        rum "Coloana '%-.64s' e folosita cu UNIQUE sau INDEX dar fara sa fie definita ca NOT NULL"
-        rus "óÔÏÌÂÅÃ '%-.64s' ÉÓÐÏÌØÚÕÅÔÓÑ × UNIQUE ÉÌÉ × INDEX, ÎÏ ÎÅ ÏÐÒÅÄÅÌÅÎ ËÁË NOT NULL"
-        serbian "Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'"
-        slo "Pole '%-.64s' je pou¾ité s UNIQUE alebo INDEX, ale nie je zadefinované ako NOT NULL"
-        spa "Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL"
-        swe "Kolumn '%-.32s' är använd med UNIQUE eller INDEX men är inte definerad med NOT NULL"
-        ukr "óÔÏ×ÂÅÃØ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ Ú UNIQUE ÁÂÏ INDEX, ÁÌÅ ÎÅ ×ÉÚÎÁÞÅÎÉÊ ÑË NOT NULL"
+        eng "Table handler doesn't support NULL in given index. Please change column '%-.64s' to be NOT NULL or use another handler"
+        swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.64s' till NOT NULL eller använd en annan hanterare"
 ER_CANT_FIND_UDF
         cze "Nemohu na-Bèíst funkci '%-.64s'"
         dan "Kan ikke læse funktionen '%-.64s'"

sql/sp.cc
@@ -137,6 +137,7 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup)
     mysql_proc_table_exists= 0;
     DBUG_RETURN(0);
   }
+  table->use_all_columns();
 
   DBUG_ASSERT(table->s->system_table);
 
@@ -182,6 +183,8 @@ static TABLE *open_proc_table_for_update(THD *thd)
   tables.lock_type= TL_WRITE;
 
   table= open_ltable(thd, &tables, TL_WRITE);
+  if (table)
+    table->use_all_columns();
 
   /*
     Under explicit LOCK TABLES or in prelocked mode we should not
@@ -801,6 +804,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
     TABLE_LIST *leaves= 0;
     st_used_field used_fields[array_elements(init_fields)];
 
+    table->use_all_columns();
     memcpy((char*) used_fields, (char*) init_fields, sizeof(used_fields));
     /* Init header */
     for (used_field= &used_fields[0];
@@ -834,7 +838,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
     thd->lex->select_lex.context.resolve_in_table_list_only(&tables);
     setup_tables(thd, &thd->lex->select_lex.context,
                  &thd->lex->select_lex.top_join_list,
-                 &tables, 0, &leaves, FALSE);
+                 &tables, &leaves, FALSE);
     for (used_field= &used_fields[0];
          used_field->field_name;
         used_field++)

sql/sp_head.cc
@@ -3442,7 +3442,7 @@ sp_add_to_query_tables(THD *thd, LEX *lex,
   table->table_name= thd->strmake(name, table->table_name_length);
   table->alias= thd->strdup(name);
   table->lock_type= locktype;
-  table->select_lex= lex->current_select; // QQ?
+  table->select_lex= lex->current_select;
   table->cacheable_table= 1;
 
   lex->add_to_query_tables(table);

sql/spatial.cc
@@ -829,7 +829,6 @@ int Gis_polygon::area(double *ar, const char **end_of_data) const
       double x, y;
       get_point(&x, &y, data);
       data+= (SIZEOF_STORED_DOUBLE*2);
-      /* QQ: Is the following prev_x+x right ? */
       lr_area+= (prev_x + x)* (prev_y - y);
       prev_x= x;
       prev_y= y;
@@ -952,7 +951,6 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
       double x, y;
       get_point(&x, &y, data);
       data+= (SIZEOF_STORED_DOUBLE*2);
-      /* QQ: Is the following prev_x+x right ? */
       cur_area+= (prev_x + x) * (prev_y - y);
       cur_cx+= x;
       cur_cy+= y;

sql/sql_acl.cc
@@ -323,6 +323,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
 
   init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
   init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0);
+  table->use_all_columns();
   VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50));
   while (!(read_record_info.read_record(&read_record_info)))
   {
@@ -369,6 +370,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
   freeze_size(&acl_hosts);
 
   init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0);
+  table->use_all_columns();
   VOID(my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100));
   password_length= table->field[2]->field_length /
     table->field[2]->charset()->mbmaxlen;
@@ -555,6 +557,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
   freeze_size(&acl_users);
 
   init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0);
+  table->use_all_columns();
   VOID(my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB),50,100));
   while (!(read_record_info.read_record(&read_record_info)))
   {
@@ -1797,14 +1800,15 @@ static bool update_user_table(THD *thd, TABLE *table,
   DBUG_ENTER("update_user_table");
   DBUG_PRINT("enter",("user: %s host: %s",user,host));
 
+  table->use_all_columns();
   table->field[0]->store(host,(uint) strlen(host), system_charset_info);
   table->field[1]->store(user,(uint) strlen(user), system_charset_info);
   key_copy((byte *) user_key, table->record[0], table->key_info,
            table->key_info->key_length);
 
-  table->file->ha_retrieve_all_cols();
   if (table->file->index_read_idx(table->record[0], 0,
-                                  (byte *) user_key, table->key_info->key_length,
+                                  (byte *) user_key,
+                                  table->key_info->key_length,
                                   HA_READ_KEY_EXACT))
   {
     my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH),
@@ -1887,12 +1891,14 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
     password=combo.password.str;
   }
 
-  table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
-  table->field[1]->store(combo.user.str,combo.user.length, system_charset_info);
+  table->use_all_columns();
+  table->field[0]->store(combo.host.str,combo.host.length,
+                         system_charset_info);
+  table->field[1]->store(combo.user.str,combo.user.length,
+                         system_charset_info);
   key_copy(user_key, table->record[0], table->key_info,
            table->key_info->key_length);
 
-  table->file->ha_retrieve_all_cols();
   if (table->file->index_read_idx(table->record[0], 0,
                                   user_key, table->key_info->key_length,
                                   HA_READ_KEY_EXACT))
@@ -2028,7 +2034,6 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
     We should NEVER delete from the user table, as a user can still
     use mysqld even if he doesn't have any privileges in the user table!
   */
-  table->file->ha_retrieve_all_cols();
   if (cmp_record(table,record[1]) &&
       (error=table->file->ha_update_row(table->record[1],table->record[0])))
   {                                             // This should never happen
@@ -2104,13 +2109,15 @@ static int replace_db_table(TABLE *table, const char *db,
     DBUG_RETURN(-1);
   }
 
-  table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+  table->use_all_columns();
+  table->field[0]->store(combo.host.str,combo.host.length,
+                         system_charset_info);
   table->field[1]->store(db,(uint) strlen(db), system_charset_info);
-  table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
+  table->field[2]->store(combo.user.str,combo.user.length,
+                         system_charset_info);
   key_copy(user_key, table->record[0], table->key_info,
            table->key_info->key_length);
 
-  table->file->ha_retrieve_all_cols();
   if (table->file->index_read_idx(table->record[0],0,
                                   user_key, table->key_info->key_length,
                                   HA_READ_KEY_EXACT))
@@ -2122,9 +2129,11 @@ static int replace_db_table(TABLE *table, const char *db,
   }
   old_row_exists = 0;
   restore_record(table, s->default_values);
-  table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+  table->field[0]->store(combo.host.str,combo.host.length,
+                         system_charset_info);
   table->field[1]->store(db,(uint) strlen(db), system_charset_info);
-  table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
+  table->field[2]->store(combo.user.str,combo.user.length,
+                         system_charset_info);
   }
   else
   {
@@ -2146,18 +2155,17 @@ static int replace_db_table(TABLE *table, const char *db,
     /* update old existing row */
     if (rights)
     {
-      table->file->ha_retrieve_all_cols();
-      if ((error=table->file->ha_update_row(table->record[1],
-                                            table->record[0])))
+      if ((error= table->file->ha_update_row(table->record[1],
+                                             table->record[0])))
        goto table_error;                        /* purecov: deadcode */
     }
     else        /* must have been a revoke of all privileges */
     {
-      if ((error = table->file->ha_delete_row(table->record[1])))
+      if ((error= table->file->ha_delete_row(table->record[1])))
        goto table_error;                        /* purecov: deadcode */
     }
   }
-  else if (rights && (error=table->file->ha_write_row(table->record[0])))
+  else if (rights && (error= table->file->ha_write_row(table->record[0])))
   {
     if (error && error != HA_ERR_FOUND_DUPP_KEY) /* purecov: inspected */
       goto table_error; /* purecov: deadcode */
@@ -2313,7 +2321,8 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
     uint key_prefix_len;
     KEY_PART_INFO *key_part= col_privs->key_info->key_part;
     col_privs->field[0]->store(host.hostname,
-                               host.hostname ? (uint) strlen(host.hostname) : 0,
+                               host.hostname ? (uint) strlen(host.hostname) :
+                               0,
                                system_charset_info);
     col_privs->field[1]->store(db,(uint) strlen(db), system_charset_info);
     col_privs->field[2]->store(user,(uint) strlen(user), system_charset_info);
@@ -2454,6 +2463,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
   KEY_PART_INFO *key_part= table->key_info->key_part;
   DBUG_ENTER("replace_column_table");
 
+  table->use_all_columns();
   table->field[0]->store(combo.host.str,combo.host.length,
                          system_charset_info);
   table->field[1]->store(db,(uint) strlen(db),
@@ -2489,7 +2499,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
     key_copy(user_key, table->record[0], table->key_info,
              table->key_info->key_length);
 
-    table->file->ha_retrieve_all_cols();
     if (table->file->index_read(table->record[0], user_key,
                                 table->key_info->key_length,
                                 HA_READ_KEY_EXACT))
@@ -2567,7 +2576,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
     key_copy(user_key, table->record[0], table->key_info,
              key_prefix_length);
 
-    table->file->ha_retrieve_all_cols();
     if (table->file->index_read(table->record[0], user_key,
                                 key_prefix_length,
                                 HA_READ_KEY_EXACT))
@@ -2657,16 +2665,19 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
     DBUG_RETURN(-1);                            /* purecov: deadcode */
   }
 
+  table->use_all_columns();
   restore_record(table, s->default_values);     // Get empty record
-  table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+  table->field[0]->store(combo.host.str,combo.host.length,
+                         system_charset_info);
   table->field[1]->store(db,(uint) strlen(db), system_charset_info);
-  table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
-  table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info);
+  table->field[2]->store(combo.user.str,combo.user.length,
+                         system_charset_info);
+  table->field[3]->store(table_name,(uint) strlen(table_name),
+                         system_charset_info);
   store_record(table,record[1]);                // store at pos 1
   key_copy(user_key, table->record[0], table->key_info,
            table->key_info->key_length);
 
-  table->file->ha_retrieve_all_cols();
   if (table->file->index_read_idx(table->record[0], 0,
                                   user_key, table->key_info->key_length,
                                   HA_READ_KEY_EXACT))
@@ -2779,6 +2790,7 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
     DBUG_RETURN(-1);
   }
 
+  table->use_all_columns();
   restore_record(table, s->default_values);     // Get empty record
   table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1);
   table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1);
@@ -3475,10 +3487,14 @@ static my_bool grant_load(TABLE_LIST *tables)
                          0,0);
   init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0);
 
-  t_table = tables[0].table; c_table = tables[1].table;
+  t_table = tables[0].table;
+  c_table = tables[1].table;
   p_table= tables[2].table;
   t_table->file->ha_index_init(0, 1);
   p_table->file->ha_index_init(0, 1);
+  t_table->use_all_columns();
+  c_table->use_all_columns();
+  p_table->use_all_columns();
   if (!t_table->file->index_first(t_table->record[0]))
   {
     memex_ptr= &memex;
@@ -3486,7 +3502,7 @@ static my_bool grant_load(TABLE_LIST *tables)
     do
     {
       GRANT_TABLE *mem_check;
-      if (!(mem_check=new GRANT_TABLE(t_table,c_table)))
+      if (!(mem_check=new (memex_ptr) GRANT_TABLE(t_table,c_table)))
       {
         /* This could only happen if we are out of memory */
         grant_option= FALSE;
@@ -3524,7 +3540,7 @@ static my_bool grant_load(TABLE_LIST *tables)
       {
         GRANT_NAME *mem_check;
         HASH *hash;
-        if (!(mem_check=new GRANT_NAME(p_table)))
+        if (!(mem_check=new (&memex) GRANT_NAME(p_table)))
         {
           /* This could only happen if we are out of memory */
           grant_option= FALSE;
@@ -4884,6 +4900,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
   uint key_prefix_length;
   DBUG_ENTER("handle_grant_table");
 
+  table->use_all_columns();
   if (! table_no) // mysql.user table
   {
     /*
@@ -5533,7 +5550,8 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
         if (!strcmp(lex_user->user.str,user) &&
             !my_strcasecmp(system_charset_info, lex_user->host.str, host))
         {
-          if (!replace_db_table(tables[1].table, acl_db->db, *lex_user, ~(ulong)0, 1))
+          if (!replace_db_table(tables[1].table, acl_db->db, *lex_user,
+                                ~(ulong)0, 1))
           {
             /*
               Don't increment counter as replace_db_table deleted the

sql/sql_base.cc (172 changed lines)
@@ -948,8 +948,13 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
 static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
 {
   for (; table ; table= table->next)
   {
-    table->query_id= 0;
+    if (table->query_id == thd->query_id)
+    {
+      table->query_id= 0;
+      table->file->ha_reset();
+    }
   }
 }
 
 
@@ -1029,21 +1034,13 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
     */
     ha_commit_stmt(thd);
 
+    /* Ensure we are calling ha_reset() for all used tables */
+    mark_used_tables_as_free_for_reuse(thd, thd->open_tables);
+
     /* We are under simple LOCK TABLES so should not do anything else. */
-    if (!prelocked_mode)
+    if (!prelocked_mode || !thd->lex->requires_prelocking())
       DBUG_VOID_RETURN;
 
-    if (!thd->lex->requires_prelocking())
-    {
-      /*
-        If we are executing one of substatements we have to mark
-        all tables which it used as free for reuse.
-      */
-      mark_used_tables_as_free_for_reuse(thd, thd->open_tables);
-      DBUG_VOID_RETURN;
-    }
-
     DBUG_ASSERT(prelocked_mode);
     /*
       We are in prelocked mode, so we have to leave it now with doing
       implicit UNLOCK TABLES if needed.
@@ -1097,7 +1094,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
 
   found_old_table= 0;
   while (thd->open_tables)
-    found_old_table|=close_thread_table(thd, &thd->open_tables);
+    found_old_table|= close_thread_table(thd, &thd->open_tables);
   thd->some_tables_deleted=0;
 
   /* Free tables to hold down open files */
@@ -1126,6 +1123,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
   DBUG_VOID_RETURN;
 }
 
+
 /* move one table to free list */
 
 bool close_thread_table(THD *thd, TABLE **table_ptr)
@@ -1150,11 +1148,8 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
     table->s->flush_version= flush_version;
     table->file->extra(HA_EXTRA_FLUSH);
   }
-  else
-  {
-    // Free memory and reset for next loop
-    table->file->ha_reset();
-  }
+  // Free memory and reset for next loop
+  table->file->ha_reset();
   table->in_use=0;
   if (unused_tables)
   {
@@ -1184,10 +1179,8 @@ static inline uint tmpkeyval(THD *thd, TABLE *table)
 
 void close_temporary_tables(THD *thd)
 {
-  TABLE *next,
-    *prev_table /* prev link is not maintained in TABLE's double-linked list */,
-    *table;
-  char *query= (gptr) 0, *end;
+  TABLE *next, *prev_table, *table;
+  char *query= 0, *end;
   uint query_buf_size, max_names_len;
   bool found_user_tables;
 
@@ -2096,6 +2089,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
     if (table->timestamp_field)
       table->timestamp_field_type= table->timestamp_field->get_auto_set_type();
     table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
+    table->clear_column_bitmaps();
     DBUG_ASSERT(table->key_read == 0);
     DBUG_RETURN(table);
   }
@@ -2193,6 +2187,7 @@ static bool reopen_table(TABLE *table)
   VOID(closefrm(table, 1));             // close file, free everything
 
   *table= tmp;
+  table->default_column_bitmaps();
   table->file->change_table_ptr(table, table->s);
 
   DBUG_ASSERT(table->alias != 0);
@@ -3560,22 +3555,50 @@ Field *view_ref_found= (Field*) 0x2;
 
 static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
 {
-  if (thd->set_query_id)
+  DBUG_ENTER("update_field_dependencies");
+  if (thd->mark_used_columns != MARK_COLUMNS_NONE)
   {
-    table->file->ha_set_bit_in_rw_set(field->fieldnr,
-                                      (bool)(thd->set_query_id-1));
-    if (field->query_id != thd->query_id)
+    MY_BITMAP *current_bitmap, *other_bitmap;
+
+    /*
+      We always want to register the used keys, as the column bitmap may have
+      been set for all fields (for example for view).
+    */
+
+    table->used_keys.intersect(field->part_of_key);
+    table->merge_keys.merge(field->part_of_key);
+
+    if (thd->mark_used_columns == MARK_COLUMNS_READ)
     {
-      if (table->get_fields_in_item_tree)
-        field->flags|= GET_FIXED_FIELDS_FLAG;
-      field->query_id= thd->query_id;
-      table->used_fields++;
-      table->used_keys.intersect(field->part_of_key);
+      current_bitmap= table->read_set;
+      other_bitmap=   table->write_set;
     }
     else
-      thd->dupp_field= field;
-  } else if (table->get_fields_in_item_tree)
+    {
+      current_bitmap= table->write_set;
+      other_bitmap=   table->read_set;
+    }
+
+    if (bitmap_fast_test_and_set(current_bitmap, field->field_index))
+    {
+      if (thd->mark_used_columns == MARK_COLUMNS_WRITE)
+      {
+        DBUG_PRINT("warning", ("Found duplicated field"));
+        thd->dup_field= field;
+      }
+      else
+      {
+        DBUG_PRINT("note", ("Field found before"));
+      }
+      DBUG_VOID_RETURN;
+    }
+    if (table->get_fields_in_item_tree)
+      field->flags|= GET_FIXED_FIELDS_FLAG;
+    table->used_fields++;
+  }
+  else if (table->get_fields_in_item_tree)
     field->flags|= GET_FIXED_FIELDS_FLAG;
+  DBUG_VOID_RETURN;
 }
 
 
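This rewrite replaces the old per-field query_id bookkeeping with two per-table bitmaps: columns a statement reads go into read_set, columns it writes into write_set, and bitmap_fast_test_and_set() doubles as the duplicate-field check. A reduced model of that bookkeeping, with std::bitset standing in for MY_BITMAP (names and the MAX_FIELDS bound are illustrative):

    #include <bitset>
    #include <cstddef>

    enum enum_mark_columns { MARK_COLUMNS_NONE, MARK_COLUMNS_READ,
                             MARK_COLUMNS_WRITE };

    constexpr std::size_t MAX_FIELDS= 64;    // illustrative bound

    struct MiniTable
    {
      std::bitset<MAX_FIELDS> read_set, write_set;
    };

    // Returns true if the column was already marked (the "duplicate
    // field" case the real code reports for MARK_COLUMNS_WRITE).
    bool mark_column(MiniTable &t, std::size_t field_index,
                     enum_mark_columns mode)
    {
      if (mode == MARK_COLUMNS_NONE)
        return false;
      auto &bitmap= (mode == MARK_COLUMNS_READ) ? t.read_set : t.write_set;
      bool was_set= bitmap.test(field_index);   // test-and-set, like
      bitmap.set(field_index);                  // bitmap_fast_test_and_set()
      return was_set;
    }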
@@ -3984,12 +4007,12 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
       fld= WRONG_GRANT;
     else
 #endif
-    if (thd->set_query_id)
+    if (thd->mark_used_columns != MARK_COLUMNS_NONE)
     {
       /*
-       * get rw_set correct for this field so that the handler
-       * knows that this field is involved in the query and gets
-       * retrieved/updated
+        Get rw_set correct for this field so that the handler
+        knows that this field is involved in the query and gets
+        retrieved/updated
       */
       Field *field_to_set= NULL;
       if (fld == view_ref_found)
@@ -3997,13 +4020,22 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
         Item *it= (*ref)->real_item();
         if (it->type() == Item::FIELD_ITEM)
           field_to_set= ((Item_field*)it)->field;
         else
+        {
+          if (thd->mark_used_columns == MARK_COLUMNS_READ)
+            it->walk(&Item::register_field_in_read_map, 1, (byte *) 0);
+        }
       }
       else
         field_to_set= fld;
       if (field_to_set)
-        field_to_set->table->file->
-          ha_set_bit_in_rw_set(field_to_set->fieldnr,
-                               (bool)(thd->set_query_id-1));
+      {
+        TABLE *table= field_to_set->table;
+        if (thd->mark_used_columns == MARK_COLUMNS_READ)
+          bitmap_set_bit(table->read_set, field_to_set->field_index);
+        else
+          bitmap_set_bit(table->write_set, field_to_set->field_index);
+      }
     }
   }
   DBUG_RETURN(fld);
@@ -4696,17 +4728,17 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
     {
       TABLE *table_1= nj_col_1->table_ref->table;
       /* Mark field_1 used for table cache. */
-      field_1->query_id= thd->query_id;
-      table_1->file->ha_set_bit_in_read_set(field_1->fieldnr);
+      bitmap_set_bit(table_1->read_set, field_1->field_index);
       table_1->used_keys.intersect(field_1->part_of_key);
+      table_1->merge_keys.merge(field_1->part_of_key);
     }
     if (field_2)
     {
       TABLE *table_2= nj_col_2->table_ref->table;
       /* Mark field_2 used for table cache. */
-      field_2->query_id= thd->query_id;
-      table_2->file->ha_set_bit_in_read_set(field_2->fieldnr);
+      bitmap_set_bit(table_2->read_set, field_2->field_index);
       table_2->used_keys.intersect(field_2->part_of_key);
+      table_2->merge_keys.merge(field_2->part_of_key);
     }
 
     if (using_fields != NULL)
@@ -5174,17 +5206,17 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
 ****************************************************************************/
 
 bool setup_fields(THD *thd, Item **ref_pointer_array,
-                  List<Item> &fields, ulong set_query_id,
+                  List<Item> &fields, enum_mark_columns mark_used_columns,
                   List<Item> *sum_func_list, bool allow_sum_func)
 {
   reg2 Item *item;
-  ulong save_set_query_id= thd->set_query_id;
+  enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
   nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
   List_iterator<Item> it(fields);
   DBUG_ENTER("setup_fields");
 
-  thd->set_query_id=set_query_id;
-  DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+  thd->mark_used_columns= mark_used_columns;
+  DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
   if (allow_sum_func)
     thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level;
   thd->where= THD::DEFAULT_WHERE;
@@ -5210,8 +5242,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
         (item= *(it.ref()))->check_cols(1))
     {
       thd->lex->allow_sum_func= save_allow_sum_func;
-      thd->set_query_id= save_set_query_id;
-      DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+      thd->mark_used_columns= save_mark_used_columns;
+      DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
       DBUG_RETURN(TRUE); /* purecov: inspected */
     }
     if (ref)
@@ -5222,8 +5254,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
       thd->used_tables|= item->used_tables();
   }
   thd->lex->allow_sum_func= save_allow_sum_func;
-  thd->set_query_id= save_set_query_id;
-  DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+  thd->mark_used_columns= save_mark_used_columns;
+  DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
   DBUG_RETURN(test(thd->net.report_error));
 }
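
Both hunks above follow the same discipline: setup_fields() saves thd->mark_used_columns on entry, installs the caller's mode, and restores the saved value on every return path (the error return and the normal return alike). A standalone sketch of that save/restore pattern, using an invented guard type instead of the server's manual assignments:

    #include <cstdio>

    enum mark_columns { COLS_NONE, COLS_READ, COLS_WRITE };

    struct toy_thd { mark_columns mark_used_columns; };

    // Install a marking mode for the lifetime of a scope, restoring the old
    // one on every exit path, which is what setup_fields() does by hand.
    struct mark_columns_guard {
      toy_thd &thd;
      mark_columns saved;
      mark_columns_guard(toy_thd &t, mark_columns m)
        : thd(t), saved(t.mark_used_columns) { thd.mark_used_columns= m; }
      ~mark_columns_guard() { thd.mark_used_columns= saved; }
    };

    int main()
    {
      toy_thd thd= { COLS_NONE };
      {
        mark_columns_guard guard(thd, COLS_READ);   // like entering setup_fields()
        std::printf("inside: mode=%d\n", (int) thd.mark_used_columns);
      }                                             // restored here
      std::printf("after:  mode=%d\n", (int) thd.mark_used_columns);
      return 0;
    }
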
@@ -5267,7 +5299,6 @@ TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables)
     context       name resolution context to setup table list there
     from_clause   Top-level list of table references in the FROM clause
     tables        Table list (select_lex->table_list)
-    conds         Condition of current SELECT (can be changed by VIEW)
     leaves        List of join table leaves list (select_lex->leaf_tables)
     refresh       It is only refresh for subquery
     select_insert It is SELECT ... INSERT command
@@ -5289,7 +5320,7 @@ TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables)
 
 bool setup_tables(THD *thd, Name_resolution_context *context,
                   List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
-                  Item **conds, TABLE_LIST **leaves, bool select_insert)
+                  TABLE_LIST **leaves, bool select_insert)
 {
   uint tablenr= 0;
   DBUG_ENTER("setup_tables");
@@ -5321,6 +5352,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
     }
     setup_table_map(table, table_list, tablenr);
     table->used_keys= table->s->keys_for_keyread;
+    table->merge_keys.clear_all();
     if (table_list->use_index)
     {
       key_map map;
@@ -5546,7 +5578,6 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
     }
 #endif
 
-
   /*
     Update the tables used in the query based on the referenced fields. For
     views and natural joins this update is performed inside the loop below.
@@ -5612,18 +5643,13 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
 
       if ((field= field_iterator.field()))
       {
-        /*
-          Mark if field used before in this select.
-          Used by 'insert' to verify if a field name is used twice.
-        */
-        if (field->query_id == thd->query_id)
-          thd->dupp_field= field;
-        field->query_id= thd->query_id;
-        field->table->file->ha_set_bit_in_read_set(field->fieldnr);
-
+        /* Mark fields as used to allow storage engine to optimize access */
+        bitmap_set_bit(field->table->read_set, field->field_index);
         if (table)
         {
           table->used_keys.intersect(field->part_of_key);
+          table->merge_keys.merge(field->part_of_key);
         }
         if (tables->is_natural_join)
         {
           TABLE *field_table;
@@ -5640,16 +5666,13 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
           {
             thd->used_tables|= field_table->map;
             field_table->used_keys.intersect(field->part_of_key);
+            field_table->merge_keys.merge(field->part_of_key);
             field_table->used_fields++;
           }
         }
       }
       else
       {
         thd->used_tables|= item->used_tables();
-        item->walk(&Item::reset_query_id_processor,
-                   (byte *)(&thd->query_id));
       }
     }
     /*
       In case of stored tables, all fields are considered as used,
@@ -5658,10 +5681,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
       For NATURAL joins, used_tables is updated in the IF above.
     */
     if (table)
-    {
       table->used_fields= table->s->fields;
-      table->file->ha_set_all_bits_in_read_set();
-    }
   }
   if (found)
     DBUG_RETURN(FALSE);
@@ -5720,8 +5740,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
       arena->is_conventional())
     arena= 0;                                   // For easier test
 
-  thd->set_query_id=1;
-  DBUG_PRINT("info", ("thd->set_query_id: %d", thd->set_query_id));
+  thd->mark_used_columns= MARK_COLUMNS_READ;
+  DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
   select_lex->cond_count= 0;
 
   for (table= tables; table; table= table->next_local)
@@ -5976,7 +5996,7 @@ static void mysql_rm_tmp_tables(void)
 
     if (!bcmp(file->name,tmp_file_prefix,tmp_file_prefix_length))
     {
-      sprintf(filePath,"%s%s",tmpdir,file->name);
+      sprintf(filePath,"%s%c%s",tmpdir,FN_LIBCHAR,file->name);
       VOID(my_delete(filePath,MYF(MY_WME)));
     }
   }
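
The one-line fix above inserts a directory separator between tmpdir and the file name; FN_LIBCHAR is MySQL's platform path-separator macro. A standalone sketch of the difference, with a local stand-in constant since this does not include MySQL headers:

    #include <cstdio>

    // Stand-in for MySQL's FN_LIBCHAR (the platform directory separator).
    static const char DIR_SEP= '/';

    int main()
    {
      char buggy[64], fixed[64];
      const char *tmpdir= "/var/tmp";               // no trailing separator
      const char *name= "#sql_1234_0.frm";          // invented temp-file name
      std::sprintf(buggy, "%s%s", tmpdir, name);    // old form: names run together
      std::sprintf(fixed, "%s%c%s", tmpdir, DIR_SEP, name);  // fixed form
      std::printf("old: %s\nnew: %s\n", buggy, fixed);
      return 0;
    }
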
@@ -6232,7 +6252,7 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
   alias         alias for table
   db            database
   table_name    name of table
-  db_stat       open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..)
+  db_stat       open flags (for example ->OPEN_KEYFILE|HA_OPEN_RNDFILE..)
                 can be 0 (example in ha_example_table)
   prgflag       READ_ALL etc..
   ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc..
@@ -66,6 +66,7 @@ public:
   my_bool is_clear_all() const { return bitmap_is_clear_all(&map); }
   my_bool is_set_all() const { return bitmap_is_set_all(&map); }
   my_bool is_subset(const Bitmap& map2) const { return bitmap_is_subset(&map, &map2.map); }
+  my_bool is_overlapping(const Bitmap& map2) const { return bitmap_is_overlapping(&map, &map2.map); }
   my_bool operator==(const Bitmap& map2) const { return bitmap_cmp(&map, &map2.map); }
   char *print(char *buf) const
   {
@@ -132,6 +133,7 @@ public:
   my_bool is_clear_all() const { return map == (ulonglong)0; }
   my_bool is_set_all() const { return map == ~(ulonglong)0; }
   my_bool is_subset(const Bitmap<64>& map2) const { return !(map & ~map2.map); }
+  my_bool is_overlapping(const Bitmap<64>& map2) const { return (map & map2.map)!= 0; }
   my_bool operator==(const Bitmap<64>& map2) const { return map == map2.map; }
   char *print(char *buf) const { longlong2str(map,buf,16); return buf; }
   ulonglong to_ulonglong() const { return map; }
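
For the Bitmap<64> specialization, both tests added above reduce to a single bitwise operation on the underlying word. A standalone sketch with plain uint64_t values (illustrative key sets, not server types):

    #include <cassert>
    #include <stdint.h>

    // Same semantics as the one-word specialization above.
    static bool is_subset(uint64_t map, uint64_t map2)      { return !(map & ~map2); }
    static bool is_overlapping(uint64_t map, uint64_t map2) { return (map & map2) != 0; }

    int main()
    {
      uint64_t keys_ab= 0x3;                 // keys 0 and 1
      uint64_t key_a=   0x1;                 // key 0
      uint64_t key_c=   0x4;                 // key 2
      assert(is_subset(key_a, keys_ab));     // {0} is contained in {0,1}
      assert(!is_subset(keys_ab, key_a));
      assert(is_overlapping(keys_ab, key_a));
      assert(!is_overlapping(keys_ab, key_c));
      return 0;
    }
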
@@ -248,6 +248,7 @@ THD::THD()
   bzero(ha_data, sizeof(ha_data));
   mysys_var=0;
+  binlog_evt_union.do_union= FALSE;
   enable_slow_log= 0;
 #ifndef DBUG_OFF
   dbug_sentry=THD_SENTRY_MAGIC;
 #endif
@@ -1646,7 +1647,7 @@ Statement::Statement(enum enum_state state_arg, ulong id_arg,
                      ulong alloc_block_size, ulong prealloc_size)
   :Query_arena(&main_mem_root, state_arg),
   id(id_arg),
-  set_query_id(1),
+  mark_used_columns(MARK_COLUMNS_READ),
   lex(&main_lex),
   query(0),
   query_length(0),
@@ -1666,7 +1667,7 @@ Query_arena::Type Statement::type() const
 void Statement::set_statement(Statement *stmt)
 {
   id=             stmt->id;
-  set_query_id=   stmt->set_query_id;
+  mark_used_columns= stmt->mark_used_columns;
   lex=            stmt->lex;
   query=          stmt->query;
   query_length=   stmt->query_length;
@@ -2449,6 +2450,7 @@ field_type_name(enum_field_types type)
   return "Unknown";
 }
 
+
 my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const
 {
   my_size_t length= 0;
@@ -2465,53 +2467,52 @@ my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const
   return length;
 }
 
 
 my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data,
                         const byte *record) const
 {
-  Field **p_field= table->field, *field= *p_field;
+  Field **p_field= table->field, *field;
   int n_null_bytes= table->s->null_bytes;
-  my_ptrdiff_t const offset= record - (byte*) table->record[0];
-
+  byte *ptr;
+  uint i;
+  my_ptrdiff_t const offset= (my_ptrdiff_t) (record - (byte*)
+                                             table->record[0]);
   memcpy(row_data, record, n_null_bytes);
-  byte *ptr= row_data+n_null_bytes;
+  ptr= row_data+n_null_bytes;
 
-  for (int i= 0 ; field ; i++, p_field++, field= *p_field)
+  for (i= 0 ; (field= *p_field) ; i++, p_field++)
   {
     if (bitmap_is_set(cols,i))
       ptr= (byte*)field->pack((char *) ptr, field->ptr + offset);
   }
 
   /*
     my_ptrdiff_t is signed, size_t is unsigned. Assert that the
    conversion will work correctly.
   */
   DBUG_ASSERT(ptr - row_data >= 0);
-  return (static_cast<size_t>(ptr - row_data));
+  return (static_cast<my_size_t>(ptr - row_data));
 }
 
 
 int THD::binlog_write_row(TABLE* table, bool is_trans,
                           MY_BITMAP const* cols, my_size_t colcnt,
                           byte const *record)
 {
   DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
 
-  /*
-     Pack records into format for transfer. We are allocating more
-     memory than needed, but that doesn't matter.
+  /*
+    Pack records into format for transfer. We are allocating more
+    memory than needed, but that doesn't matter.
   */
   bool error= 0;
   byte *row_data= table->write_row_record;
   my_size_t const max_len= max_row_length(table, record);
+  my_size_t len;
+  Rows_log_event *ev;
 
-  /*
-   * Allocate room for a row (if needed)
-   */
+  /* Allocate room for a row (if needed) */
   if (!row_data)
   {
     if (!table->s->blob_fields)
    {
       /* multiply max_len by 2 so it can be used for update_row as well */
-      table->write_row_record= (byte *) alloc_root(&table->mem_root, 2*max_len);
+      table->write_row_record= (byte *) alloc_root(&table->mem_root,
+                                                   2*max_len);
       if (!table->write_row_record)
         return HA_ERR_OUT_OF_MEM;
       row_data= table->write_row_record;
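
The rewritten loop above drops the pre-dereference of *p_field (which read one array element before checking the terminator) and instead tests the pointer as it loads it each iteration. A standalone toy model of the packing loop's control flow (all types invented):

    #include <cstdio>
    #include <cstring>

    struct toy_field { const char *data; size_t len; };  // invented field type

    // Walk a null-terminated field array and append only the columns whose
    // flag is set; return the packed length as a pointer difference.
    static size_t pack_row_sketch(toy_field **fields, const unsigned char *cols,
                                  char *out)
    {
      char *ptr= out;
      toy_field *field;
      unsigned i;
      for (i= 0; (field= fields[i]) != 0; i++)  // test the pointer as loaded
      {
        if (cols[i])                            // stand-in for bitmap_is_set(cols, i)
        {
          std::memcpy(ptr, field->data, field->len);
          ptr+= field->len;
        }
      }
      return (size_t)(ptr - out);
    }

    int main()
    {
      toy_field a= { "aa", 2 }, b= { "bbb", 3 };
      toy_field *fields[]= { &a, &b, 0 };
      unsigned char cols[]= { 1, 0 };           // pack only the first column
      char buf[16];
      std::printf("packed %u bytes\n",
                  (unsigned) pack_row_sketch(fields, cols, buf));
      return 0;
    }
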
@@ -2519,12 +2520,11 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
     else if (unlikely(!(row_data= (byte *) my_malloc(max_len, MYF(MY_WME)))))
       return HA_ERR_OUT_OF_MEM;
   }
-  my_size_t const len= pack_row(table, cols, row_data, record);
+  len= pack_row(table, cols, row_data, record);
 
-  Rows_log_event* const ev=
-    binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
-                                      len, is_trans,
-                                      static_cast<Write_rows_log_event*>(0));
+  ev= binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+                                        len, is_trans,
+                                        static_cast<Write_rows_log_event*>(0));
 
   /* add_row_data copies row_data to internal buffer */
   error= likely(ev != 0) ? ev->add_row_data(row_data,len) : HA_ERR_OUT_OF_MEM ;
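
The comment in the allocation hunk above explains the 2*max_len sizing: one buffer can then serve both the single image write_row logs and the two images update_row needs. A standalone sketch of that sizing decision (malloc stands in for alloc_root; sizes invented):

    #include <cstdio>
    #include <cstdlib>

    int main()
    {
      size_t max_len= 64;                 // stand-in for max_row_length(table, record)
      // One allocation sized for two row images, like alloc_root(..., 2*max_len):
      unsigned char *write_row_record= (unsigned char *) std::malloc(2 * max_len);
      if (!write_row_record)
        return 1;                         // the server returns HA_ERR_OUT_OF_MEM
      unsigned char *before_image= write_row_record;            // first half
      unsigned char *after_image=  write_row_record + max_len;  // second half
      std::printf("before=%p after=%p\n",
                  (void *) before_image, (void *) after_image);
      std::free(write_row_record);
      return 0;
    }
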
@@ -37,9 +37,10 @@ enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
 enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
 enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
                             DELAY_KEY_WRITE_ALL };
-
-enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN,
-                         CHECK_FIELD_ERROR_FOR_NULL };
+enum enum_check_fields
+{ CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, CHECK_FIELD_ERROR_FOR_NULL };
+enum enum_mark_columns
+{ MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE};
 
 extern char internal_table_name[2];
 extern const char **errmesg;
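
The new enum replaces the old 0/1/2 ulong protocol, where call sites did arithmetic such as (set_query_id-1) to pick a bitmap. A standalone sketch of how an explicit enum reads at a dispatch site (names invented):

    #include <cstdio>

    enum mark_columns { COLS_NONE, COLS_READ, COLS_WRITE };

    // With an enum, the target bitmap is chosen by name instead of by
    // arithmetic on a magic integer.
    static const char *target_set(mark_columns m)
    {
      switch (m)
      {
      case COLS_NONE:  return "no marking";
      case COLS_READ:  return "read_set";
      case COLS_WRITE: return "write_set";
      }
      return "unreachable";
    }

    int main()
    {
      for (int m= COLS_NONE; m <= COLS_WRITE; m++)
        std::printf("%d -> %s\n", m, target_set((mark_columns) m));
      return 0;
    }
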
@@ -465,17 +466,17 @@ public:
   ulong id;
 
   /*
-    - if set_query_id=1, we set field->query_id for all fields. In that case
-      field list can not contain duplicates.
-    0: Means query_id is not set and no indicator to handler of fields used
-       is set
-    1: Means query_id is set for fields in list and bit in read set is set
-       to inform handler of that field is to be read
-    2: Means query is set for fields in list and bit is set in update set
-       to inform handler that it needs to update this field in write_row
-       and update_row
+    MARK_COLUMNS_NONE:  Means mark_used_columns is not set and no indicator
+                        to handler of fields used is set
+    MARK_COLUMNS_READ:  Means a bit in read set is set to inform handler
+                        that the field is to be read. If field list contains
+                        duplicates, then thd->dup_field is set to point
+                        to the last found duplicate.
+    MARK_COLUMNS_WRITE: Means a bit is set in write set to inform handler
+                        that it needs to update this field in write_row
+                        and update_row.
   */
-  ulong set_query_id;
+  enum enum_mark_columns mark_used_columns;
 
   LEX_STRING name; /* name for named prepared statements */
   LEX *lex;                                     // parse tree descriptor
@@ -1027,7 +1028,7 @@ public:
 #endif
     }
   } transaction;
-  Field      *dupp_field;
+  Field      *dup_field;
 #ifndef __WIN__
   sigset_t signals,block_signals;
 #endif
@@ -1408,7 +1409,8 @@ public:
   }
   inline void reset_current_stmt_binlog_row_based()
   {
-    current_stmt_binlog_row_based= test(variables.binlog_format == BINLOG_FORMAT_ROW);
+    current_stmt_binlog_row_based= test(variables.binlog_format ==
+                                        BINLOG_FORMAT_ROW);
   }
 #endif /*HAVE_ROW_BASED_REPLICATION*/
 };
@@ -1570,6 +1572,7 @@ class select_insert :public select_result_interceptor {
   int prepare2(void);
   bool send_data(List<Item> &items);
   virtual void store_values(List<Item> &values);
+  virtual bool can_rollback_data() { return 0; }
   void send_error(uint errcode,const char *err);
   bool send_eof();
   /* not implemented: select_insert is never re-used in prepared statements */
@@ -1591,17 +1594,19 @@ public:
                 List<create_field> &fields_par,
                 List<Key> &keys_par,
                 List<Item> &select_fields,enum_duplicates duplic, bool ignore)
-    :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore), create_table(table),
-    extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par),
+    :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore),
+    create_table(table), extra_fields(&fields_par),keys(&keys_par),
+    create_info(create_info_par),
     lock(0)
     {}
   int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
 
   void binlog_show_create_table(TABLE **tables, uint count);
   void store_values(List<Item> &values);
   void send_error(uint errcode,const char *err);
   bool send_eof();
   void abort();
+  virtual bool can_rollback_data() { return 1; }
 
   // Needed for access from local class MY_HOOKS in prepare(), since thd is protected.
   THD *get_thd(void) { return thd; }
 };
@@ -80,9 +80,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
       !(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) &&
       !(table->triggers && table->triggers->has_delete_triggers()))
   {
-    /* Update the table->file->records number */
+    /* Update the table->file->stats.records number */
     table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
-    ha_rows const maybe_deleted= table->file->records;
+    ha_rows const maybe_deleted= table->file->stats.records;
     /*
       If all rows shall be deleted, we (almost) always log this
       statement-based (see [binlog], below), so we set this flag and
@@ -113,7 +113,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
     DBUG_RETURN(0);
   }
 #endif
-  /* Update the table->file->records number */
+  /* Update the table->file->stats.records number */
   table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
 
   table->used_keys.clear_all();
@@ -184,7 +184,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
     if (!(sortorder= make_unireg_sortorder((ORDER*) order->first,
                                            &length)) ||
         (table->sort.found_records = filesort(thd, table, sortorder, length,
-                                              select, HA_POS_ERROR,
+                                              select, HA_POS_ERROR, 1,
                                               &examined_rows))
         == HA_POS_ERROR)
     {
@@ -226,6 +226,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
   if (ha_delete_all_rows)
     thd->options&= ~static_cast<ulonglong>(OPTION_BIN_LOG);
 
+  table->mark_columns_needed_for_delete();
+
   while (!(error=info.read_record(&info)) && !thd->killed &&
          !thd->net.report_error)
   {
@@ -285,7 +287,6 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
   }
   thd->proc_info= "end";
   end_read_record(&info);
-  free_io_cache(table);				// Will not do any harm
   if (options & OPTION_QUICK)
     (void) table->file->extra(HA_EXTRA_NORMAL);
@@ -396,7 +397,7 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds)
   thd->lex->allow_sum_func= 0;
   if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
                                     &thd->lex->select_lex.top_join_list,
-                                    table_list, conds,
+                                    table_list,
                                     &select_lex->leaf_tables, FALSE,
                                     DELETE_ACL) ||
       setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
@@ -459,7 +460,7 @@ bool mysql_multi_delete_prepare(THD *thd)
   */
   if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
                                     &thd->lex->select_lex.top_join_list,
-                                    lex->query_tables, &lex->select_lex.where,
+                                    lex->query_tables,
                                     &lex->select_lex.leaf_tables, FALSE,
                                     DELETE_ACL))
     DBUG_RETURN(TRUE);
@@ -567,6 +568,8 @@ multi_delete::initialize_tables(JOIN *join)
         transactional_tables= 1;
       else
         normal_tables= 1;
+      tbl->prepare_for_position();
+      tbl->mark_columns_needed_for_delete();
     }
     else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
              walk == delete_tables)
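
mark_columns_needed_for_delete() together with prepare_for_position() ensures the columns an engine needs to re-locate a row (for example primary-key parts) are in the read set before rows are fetched. A standalone toy model of that idea, not the server implementation:

    #include <bitset>
    #include <cstdio>
    #include <string>

    struct toy_table {
      std::bitset<16> read_set;           // invented; stands in for TABLE bitmaps
      int pk_parts[2];                    // column indexes forming the primary key
    };

    // Before fetching rows to delete, make sure the columns needed to
    // re-locate each row are read too.
    static void mark_columns_needed_for_delete_sketch(toy_table &t)
    {
      for (int i= 0; i < 2; i++)
        t.read_set.set(t.pk_parts[i]);
    }

    int main()
    {
      toy_table t;
      t.pk_parts[0]= 0;
      t.pk_parts[1]= 5;
      mark_columns_needed_for_delete_sketch(t);
      std::printf("read_set=%s\n", t.read_set.to_string().c_str());
      return 0;
    }
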
@@ -606,7 +609,6 @@ multi_delete::~multi_delete()
        table_being_deleted= table_being_deleted->next_local)
   {
     TABLE *table= table_being_deleted->table;
-    free_io_cache(table);			// Alloced by unique
     table->no_keyread=0;
   }
@@ -24,7 +24,7 @@ bool mysql_do(THD *thd, List<Item> &values)
   List_iterator<Item> li(values);
   Item *value;
   DBUG_ENTER("mysql_do");
-  if (setup_fields(thd, 0, values, 0, 0, 0))
+  if (setup_fields(thd, 0, values, MARK_COLUMNS_NONE, 0, 0))
     DBUG_RETURN(TRUE);
   while ((value = li++))
     value->val_int();
@@ -188,13 +188,13 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen)
   /* for now HANDLER can be used only for real TABLES */
   tables->required_type= FRMTYPE_TABLE;
   error= open_tables(thd, &tables, &counter, 0);
 
   HANDLER_TABLES_HACK(thd);
 
   if (error)
     goto err;
 
   /* There can be only one table in '*tables'. */
-  if (! (tables->table->file->table_flags() & HA_CAN_SQL_HANDLER))
+  if (! (tables->table->file->ha_table_flags() & HA_CAN_SQL_HANDLER))
   {
     if (! reopen)
       my_error(ER_ILLEGAL_HA, MYF(0), tables->alias);
@@ -421,6 +421,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
   if (!lock)
      goto err0; // mysql_lock_tables() printed error message already
 
+  // Always read all columns
+  tables->table->read_set= &tables->table->s->all_set;
+
   if (cond)
   {
     if (table->query_id != thd->query_id)
@@ -514,6 +517,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
       Item *item;
       for (key_len=0 ; (item=it_ke++) ; key_part++)
       {
+        my_bitmap_map *old_map;
         // 'item' can be changed by fix_fields() call
         if ((!item->fixed &&
              item->fix_fields(thd, it_ke.ref())) ||
@@ -524,16 +528,19 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
           my_error(ER_WRONG_ARGUMENTS,MYF(0),"HANDLER ... READ");
           goto err;
         }
+        old_map= dbug_tmp_use_all_columns(table, table->write_set);
         (void) item->save_in_field(key_part->field, 1);
+        dbug_tmp_restore_column_map(table->write_set, old_map);
         key_len+=key_part->store_length;
       }
 
       if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
         goto err;
       table->file->ha_index_or_rnd_end();
       table->file->ha_index_init(keyno, 1);
       key_copy(key, table->record[0], table->key_info + keyno, key_len);
       error= table->file->index_read(table->record[0],
-				     key,key_len,ha_rkey_mode);
+                                     key,key_len,ha_rkey_mode);
       mode=rkey_to_rnext[(int)ha_rkey_mode];
       break;
     }
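
dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map() in the hunk above temporarily widen the table's write set so save_in_field() may store into a column the statement did not mark. A standalone model of that swap-and-restore pattern (invented helpers that only mirror the real ones):

    #include <bitset>
    #include <cstdio>
    #include <string>

    typedef std::bitset<8> column_map;    // invented; stands in for MY_BITMAP

    // Swap in a wider map, returning the old one for the caller to restore.
    static column_map *tmp_use_all_columns(column_map **slot, column_map *all_set)
    {
      column_map *old= *slot;
      *slot= all_set;
      return old;
    }

    static void tmp_restore_column_map(column_map **slot, column_map *old)
    {
      *slot= old;
    }

    int main()
    {
      column_map narrow, all;
      all.set();                          // every column writable
      column_map *write_set= &narrow;     // statement marked no columns
      column_map *saved= tmp_use_all_columns(&write_set, &all);
      std::printf("during: %s\n", write_set->to_string().c_str());
      tmp_restore_column_map(&write_set, saved);
      std::printf("after:  %s\n", write_set->to_string().c_str());
      return 0;
    }
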
@@ -94,6 +94,11 @@ static bool init_fields(THD *thd, TABLE_LIST *tables,
                                              0, REPORT_ALL_ERRORS, 1,
                                              TRUE)))
       DBUG_RETURN(1);
+    bitmap_set_bit(find_fields->field->table->read_set,
+                   find_fields->field->field_index);
+    /* To make life easier when setting values in keys */
+    bitmap_set_bit(find_fields->field->table->write_set,
+                   find_fields->field->field_index);
   }
   DBUG_RETURN(0);
 }
@@ -272,7 +277,6 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
   int count= 0;
   int iindex_topic, iindex_relations;
   Field *rtopic_id, *rkey_id;
-
   DBUG_ENTER("get_topics_for_keyword");
 
   if ((iindex_topic= find_type((char*) primary_key_name,
@@ -292,8 +296,9 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
   rkey_id->store((longlong) key_id, TRUE);
   rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW);
   int key_res= relations->file->index_read(relations->record[0],
-                                           (byte *)buff, rkey_id->pack_length(),
-                                           HA_READ_KEY_EXACT);
+                                           (byte *) buff,
+                                           rkey_id->pack_length(),
+                                           HA_READ_KEY_EXACT);
 
   for ( ;
         !key_res && key_id == (int16) rkey_id->val_int() ;
@@ -653,13 +658,15 @@ bool mysqld_help(THD *thd, const char *mask)
 
     if (open_and_lock_tables(thd, tables))
       goto error;
+
     /*
       Init tables and fields to be usable from items
       tables do not contain VIEWs => we can pass 0 as conds
     */
-    setup_tables(thd, &thd->lex->select_lex.context,
-                 &thd->lex->select_lex.top_join_list,
-                 tables, 0, &leaves, FALSE);
+    if (setup_tables(thd, &thd->lex->select_lex.context,
+                     &thd->lex->select_lex.top_join_list,
+                     tables, &leaves, FALSE))
+      goto error;
     memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
     if (init_fields(thd, tables, used_fields, array_elements(used_fields)))
       goto error;
@@ -681,10 +688,12 @@ bool mysqld_help(THD *thd, const char *mask)
     int key_id;
     if (!(select=
           prepare_select_for_name(thd,mask,mlen,tables,tables[3].table,
-                                  used_fields[help_keyword_name].field,&error)))
+                                  used_fields[help_keyword_name].field,
+                                  &error)))
       goto error;
 
-    count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id);
+    count_topics= search_keyword(thd,tables[3].table, used_fields, select,
+                                 &key_id);
     delete select;
     count_topics= (count_topics != 1) ? 0 :
       get_topics_for_keyword(thd,tables[0].table,tables[2].table,
@@ -698,7 +707,8 @@ bool mysqld_help(THD *thd, const char *mask)
     Field *cat_cat_id= used_fields[help_category_parent_category_id].field;
     if (!(select=
           prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
-                                  used_fields[help_category_name].field,&error)))
+                                  used_fields[help_category_name].field,
+                                  &error)))
       goto error;
 
     count_categories= search_categories(thd, tables[1].table, used_fields,