Mirror of https://github.com/MariaDB/server.git (synced 2025-01-29 02:05:57 +01:00)

Commit dbe73588cd
Merge branch 'bb-10.2-mariarocks-merge' of github.com:MariaDB/server into 10.2
Manually resolved the conflicts.

142 changed files with 5195 additions and 1734 deletions

@@ -209,7 +209,7 @@ extern int max_user_connections;
extern volatile ulong cached_thread_count;
extern ulong what_to_log,flush_time;
extern uint max_prepared_stmt_count, prepared_stmt_count;
extern ulong open_files_limit;
extern MYSQL_PLUGIN_IMPORT ulong open_files_limit;
extern ulonglong binlog_cache_size, binlog_stmt_cache_size;
extern ulonglong max_binlog_cache_size, max_binlog_stmt_cache_size;
extern ulong max_binlog_size;

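The only change in this hunk is the MYSQL_PLUGIN_IMPORT attribute on open_files_limit. As a hedged illustration (the exact definition in the server headers is authoritative), the macro usually expands to __declspec(dllimport) when a storage engine such as MyRocks is built as a dynamic plugin on Windows, so the plugin can reference a data symbol that is defined inside the server binary:

#if defined(_WIN32) && defined(MYSQL_DYNAMIC_PLUGIN)
#define MYSQL_PLUGIN_IMPORT __declspec(dllimport)
#else
#define MYSQL_PLUGIN_IMPORT
#endif

/* defined in the server executable, consumed from the plugin DLL */
extern MYSQL_PLUGIN_IMPORT unsigned long open_files_limit;
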
@@ -95,6 +95,8 @@ SET(ROCKSDB_SE_SOURCES
rdb_threads.h
rdb_psi.h
rdb_psi.cc
rdb_sst_info.cc
rdb_sst_info.h
)

# MariaDB: the following is added in build_rocksdb.cmake, when appropriate:

@@ -137,8 +139,6 @@ ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib
event_listener.h
rdb_perf_context.cc
rdb_perf_context.h
rdb_sst_info.cc
rdb_sst_info.h
rdb_buff.h
rdb_mariadb_port.h
)

@@ -178,9 +178,9 @@ IF(HAVE_SCHED_GETCPU)
ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1)
ENDIF()

IF (NOT "$ENV{WITH_TBB}" STREQUAL "")
IF (WITH_TBB)
SET(rocksdb_static_libs ${rocksdb_static_libs}
$ENV{WITH_TBB}/libtbb${PIC_EXT}.a)
${WITH_TBB}/lib/libtbb${PIC_EXT}.a)
ADD_DEFINITIONS(-DTBB)
ENDIF()

@@ -12,11 +12,6 @@ INCLUDE_DIRECTORIES(
${ROCKSDB_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src
)

IF(WIN32)
INCLUDE_DIRECTORIES(BEFORE
${CMAKE_CURRENT_SOURCE_DIR}/patch)
ENDIF()

list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")

if(WIN32)

@@ -245,6 +240,7 @@ set(ROCKSDB_SOURCES
table/block_based_table_factory.cc
table/block_based_table_reader.cc
table/block_builder.cc
table/block_fetcher.cc
table/block_prefix_index.cc
table/bloom_block.cc
table/cuckoo_table_builder.cc

@@ -340,15 +336,16 @@ set(ROCKSDB_SOURCES
utilities/transactions/optimistic_transaction_db_impl.cc
utilities/transactions/pessimistic_transaction.cc
utilities/transactions/pessimistic_transaction_db.cc
utilities/transactions/snapshot_checker.cc
utilities/transactions/transaction_base.cc
utilities/transactions/transaction_db_mutex_impl.cc
utilities/transactions/transaction_lock_mgr.cc
utilities/transactions/transaction_util.cc
utilities/transactions/write_prepared_txn.cc
utilities/transactions/write_prepared_txn_db.cc
utilities/ttl/db_ttl_impl.cc
utilities/write_batch_with_index/write_batch_with_index.cc
utilities/write_batch_with_index/write_batch_with_index_internal.cc

)

if(WIN32)

@@ -83,4 +83,14 @@ void Rdb_event_listener::OnExternalFileIngested(
DBUG_ASSERT(db != nullptr);
update_index_stats(info.table_properties);
}

void Rdb_event_listener::OnBackgroundError(
rocksdb::BackgroundErrorReason reason, rocksdb::Status *status) {
rdb_log_status_error(*status, "Error detected in background");
sql_print_error("RocksDB: BackgroundErrorReason: %d", (int)reason);
if (status->IsCorruption()) {
rdb_persist_corruption_marker();
abort();
}
}
} // namespace myrocks

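For context, a minimal, self-contained sketch of the rocksdb::EventListener hook used above; the class name and database path below are illustrative, not part of the patch. MyRocks registers its Rdb_event_listener the same way, through rocksdb::Options::listeners, and its OnBackgroundError() additionally persists a corruption marker before aborting:

#include <cstdio>
#include <cstdlib>
#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/listener.h"

class BackgroundErrorLogger : public rocksdb::EventListener {
 public:
  void OnBackgroundError(rocksdb::BackgroundErrorReason reason,
                         rocksdb::Status *status) override {
    // Called when a background operation (flush, compaction, ...) fails.
    std::fprintf(stderr, "RocksDB background error (reason %d): %s\n",
                 static_cast<int>(reason), status->ToString().c_str());
    if (status->IsCorruption()) {
      // MyRocks persists a corruption marker here (rdb_persist_corruption_marker())
      // before aborting, so a later restart can refuse to run on damaged data.
      std::abort();
    }
  }
};

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // The listener is registered on the Options used to open the database.
  options.listeners.push_back(std::make_shared<BackgroundErrorLogger>());
  rocksdb::DB *db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/listener_demo", &db);
  delete db;
  return s.ok() ? 0 : 1;
}
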
@@ -37,6 +37,9 @@ public:
rocksdb::DB *db,
const rocksdb::ExternalFileIngestionInfo &ingestion_info) override;

void OnBackgroundError(rocksdb::BackgroundErrorReason reason,
rocksdb::Status *status) override;

private:
Rdb_ddl_manager *m_ddl_manager;

(File diff suppressed because it is too large.)

@@ -41,6 +41,7 @@

/* RocksDB header files */
#include "rocksdb/cache.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/sst_file_manager.h"
#include "rocksdb/statistics.h"

@@ -92,6 +93,25 @@ struct Rdb_trx_info {

std::vector<Rdb_trx_info> rdb_get_all_trx_info();

/*
* class for exporting deadlock transaction information for
* information_schema.rocksdb_deadlock
*/
struct Rdb_deadlock_info {
struct Rdb_dl_trx_info {
ulonglong trx_id;
std::string cf_name;
std::string waiting_key;
bool exclusive_lock;
std::string index_name;
std::string table_name;
};
std::vector <Rdb_dl_trx_info> path;
ulonglong victim_trx_id;
};

std::vector<Rdb_deadlock_info> rdb_get_deadlock_info();

/*
This is
- the name of the default Column Family (the CF which stores indexes which

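As a hedged sketch of how this structure is meant to be consumed (for example by the INFORMATION_SCHEMA.ROCKSDB_DEADLOCK table), each Rdb_deadlock_info describes one deadlock cycle and each Rdb_dl_trx_info one transaction in that cycle. Flattening it into rows could look like the following; the mirrored struct and print_deadlocks() are illustrative only, not the patch's own code:

#include <cstdio>
#include <string>
#include <vector>

// Mirrors the struct declared in the hunk above (ulonglong written out).
struct Rdb_deadlock_info {
  struct Rdb_dl_trx_info {
    unsigned long long trx_id;
    std::string cf_name;
    std::string waiting_key;
    bool exclusive_lock;
    std::string index_name;
    std::string table_name;
  };
  std::vector<Rdb_dl_trx_info> path;   // one entry per transaction in the cycle
  unsigned long long victim_trx_id;    // the transaction that was rolled back
};

// Illustrative consumer: one output row per transaction in each recorded deadlock.
static void print_deadlocks(const std::vector<Rdb_deadlock_info> &deadlocks) {
  int deadlock_id = 0;
  for (const auto &dl : deadlocks) {
    for (const auto &trx : dl.path) {
      std::printf("deadlock=%d trx=%llu table=%s index=%s cf=%s key=%s lock=%s%s\n",
                  deadlock_id, trx.trx_id, trx.table_name.c_str(),
                  trx.index_name.c_str(), trx.cf_name.c_str(),
                  trx.waiting_key.c_str(),
                  trx.exclusive_lock ? "EXCLUSIVE" : "SHARED",
                  trx.trx_id == dl.victim_trx_id ? " (victim)" : "");
    }
    ++deadlock_id;
  }
}

int main() {
  Rdb_deadlock_info dl;
  dl.path.push_back({10, "default", "0x0100ab", true, "PRIMARY", "test.t1"});
  dl.path.push_back({11, "default", "0x0100cd", true, "PRIMARY", "test.t1"});
  dl.victim_trx_id = 11;
  print_deadlocks({dl});
  return 0;
}
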
@@ -250,6 +270,8 @@ enum collations_used {
*/
#define ROCKSDB_SIZEOF_TTL_RECORD sizeof(longlong)

#define ROCKSDB_SIZEOF_AUTOINC_VALUE sizeof(longlong)

/*
Maximum index prefix length in bytes.
*/

@@ -367,6 +389,7 @@ enum operation_type : int {
ROWS_UPDATED,
ROWS_DELETED_BLIND,
ROWS_EXPIRED,
ROWS_FILTERED,
ROWS_HIDDEN_NO_SNAPSHOT,
ROWS_MAX
};

@@ -400,6 +423,7 @@ struct st_export_stats {
ulonglong rows_updated;
ulonglong rows_deleted_blind;
ulonglong rows_expired;
ulonglong rows_filtered;
ulonglong rows_hidden_no_snapshot;

ulonglong system_rows_deleted;

@@ -538,6 +562,12 @@ class ha_rocksdb : public my_core::handler {
uchar *m_dup_sk_packed_tuple;
uchar *m_dup_sk_packed_tuple_old;

/* Buffers used for passing upper/bound eq conditions. */
uchar *m_eq_cond_lower_bound;
uchar *m_eq_cond_upper_bound;
rocksdb::Slice m_eq_cond_lower_bound_slice;
rocksdb::Slice m_eq_cond_upper_bound_slice;

/*
Temporary space for packing VARCHARs (we provide it to
pack_record()/pack_index_tuple() calls).

@@ -567,7 +597,7 @@ class ha_rocksdb : public my_core::handler {
This is used only when we get the record with rocksdb's Get() call (The
other option is when we get a rocksdb::Slice from an iterator)
*/
std::string m_retrieved_record;
rocksdb::PinnableSlice m_retrieved_record;

/* Type of locking to apply to rows */
enum { RDB_LOCK_NONE, RDB_LOCK_READ, RDB_LOCK_WRITE } m_lock_rows;

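m_retrieved_record changes from std::string to rocksdb::PinnableSlice here. A hedged, standalone fragment (illustrative, not the handler code) shows the difference: with a PinnableSlice, DB::Get() can pin the block-cache-resident value and expose it without copying, and Reset() releases the pin, which is what the reset() change further down now does:

#include <string>
#include "rocksdb/db.h"

void read_both_ways(rocksdb::DB *db, const rocksdb::Slice &key) {
  rocksdb::ReadOptions ro;

  // Old pattern: Get() always copies the value into the std::string.
  std::string copied;
  rocksdb::Status s1 = db->Get(ro, db->DefaultColumnFamily(), key, &copied);

  // New pattern: the PinnableSlice can reference the block-cache-resident
  // value directly, avoiding the copy; the data stays valid until Reset().
  rocksdb::PinnableSlice pinned;
  rocksdb::Status s2 = db->Get(ro, db->DefaultColumnFamily(), key, &pinned);
  if (s2.ok()) {
    // ... use pinned.data() / pinned.size() ...
    pinned.Reset();  // release the pin, as ha_rocksdb::reset() now does
  }
  (void)s1;
}
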
@@ -600,12 +630,7 @@ class ha_rocksdb : public my_core::handler {
bool m_update_scope_is_valid;

/* SST information used for bulk loading the primary key */
std::unique_ptr<Rdb_sst_info> m_sst_info;
/* External merge sorts for bulk load: key ID -> merge sort instance */
std::unordered_map<GL_INDEX_ID, Rdb_index_merge> m_key_merge;
Rdb_transaction *m_bulk_load_tx;
/* Mutex to protect finalizing bulk load */
mysql_mutex_t m_bulk_load_mutex;
std::shared_ptr<Rdb_sst_info> m_sst_info;

/*
MySQL index number for duplicate key error

@@ -620,13 +645,17 @@ class ha_rocksdb : public my_core::handler {
int secondary_index_read(const int keyno, uchar *const buf)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
void setup_iterator_for_rnd_scan();
void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *const slice)
MY_ATTRIBUTE((__nonnull__)) {
setup_scan_iterator(kd, slice, false, 0);
}
bool is_ascending(const Rdb_key_def &keydef,
enum ha_rkey_function find_flag) const
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
void setup_iterator_bounds(const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond);
bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond,
const bool use_all_keys);
bool check_bloom_and_set_bounds(THD *thd, const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond,
const bool use_all_keys);
void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *slice,
const bool use_all_keys, const uint eq_cond_len)
MY_ATTRIBUTE((__nonnull__));

@@ -635,7 +664,8 @@ class ha_rocksdb : public my_core::handler {
rocksdb::Status
get_for_update(Rdb_transaction *const tx,
rocksdb::ColumnFamilyHandle *const column_family,
const rocksdb::Slice &key, std::string *const value) const;
const rocksdb::Slice &key,
rocksdb::PinnableSlice *value) const;

int get_row_by_rowid(uchar *const buf, const char *const rowid,
const uint rowid_size, const bool skip_lookup = false,

@@ -649,8 +679,13 @@ class ha_rocksdb : public my_core::handler {
rowid_size, skip_lookup, skip_ttl_check);
}

void update_auto_incr_val();
void load_auto_incr_value();
ulonglong load_auto_incr_value_from_index();
void update_auto_incr_val(ulonglong val);
void update_auto_incr_val_from_field();
rocksdb::Status get_datadic_auto_incr(Rdb_transaction *const tx,
const GL_INDEX_ID &gl_index_id,
ulonglong *new_val) const;
longlong update_hidden_pk_val();
int load_hidden_pk_value() MY_ATTRIBUTE((__warn_unused_result__));
int read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id)

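This hunk replaces the argument-less update_auto_incr_val() with update_auto_incr_val(ulonglong) plus update_auto_incr_val_from_field(), and adds get_datadic_auto_incr() for reading the persisted counter. As a hedged sketch only (the variable name below is illustrative, not taken from the patch), such an update is typically a monotonic "raise to at least val" on an atomic counter shared by concurrent inserts:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> table_auto_incr{1};  // illustrative stand-in for the per-table counter

void update_auto_incr_val(uint64_t val) {
  uint64_t expected = table_auto_incr.load();
  // Retry until we either publish the larger value or observe that another
  // thread already raised the counter past val; the counter never moves backwards.
  while (expected < val &&
         !table_auto_incr.compare_exchange_weak(expected, val)) {
    // compare_exchange_weak reloaded 'expected'; the loop condition re-checks it.
  }
}
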
@@ -696,6 +731,12 @@ class ha_rocksdb : public my_core::handler {
*/
std::vector<READ_FIELD> m_decoders_vect;

/*
This tells if any field which is part of the key needs to be unpacked and
decoded.
*/
bool m_key_requested = false;

/* Setup field_decoders based on type of scan and table->read_set */
void setup_read_decoders();

@@ -766,9 +807,12 @@ public:
my_core::TABLE_SHARE *const table_arg);
~ha_rocksdb() {
int err MY_ATTRIBUTE((__unused__));
err = finalize_bulk_load();
DBUG_ASSERT(err == 0);
mysql_mutex_destroy(&m_bulk_load_mutex);
err = finalize_bulk_load(false);
if (err != 0) {
sql_print_error("RocksDB: Error %d finalizing bulk load while closing "
"handler.",
err);
}
}

/** @brief

@@ -794,10 +838,9 @@ public:
*/

/*
See if this is the same base table - this should only be true for different
partitions of the same table.
Returns the name of the table's base name
*/
bool same_table(const ha_rocksdb &other) const;
const std::string &get_table_basename() const;

/** @brief
This is a list of flags that indicate what functionality the storage engine

@@ -1210,8 +1253,6 @@ private:
Rdb_tbl_def *get_table_if_exists(const char *const tablename)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
void read_thd_vars(THD *const thd) MY_ATTRIBUTE((__nonnull__));
const char *thd_rocksdb_tmpdir()
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

bool contains_foreign_key(THD *const thd)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

@@ -1221,6 +1262,9 @@ private:
const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));

int finalize_bulk_load(bool print_client_error = true)
MY_ATTRIBUTE((__warn_unused_result__));

public:
int index_init(uint idx, bool sorted) override
MY_ATTRIBUTE((__warn_unused_result__));

@@ -1264,7 +1308,7 @@ public:
DBUG_ENTER_FUNC();

/* Free blob data */
m_retrieved_record.clear();
m_retrieved_record.Reset();

DBUG_RETURN(HA_EXIT_SUCCESS);
}

@@ -1335,8 +1379,6 @@ public:
my_core::Alter_inplace_info *const ha_alter_info,
bool commit) override;

int finalize_bulk_load() MY_ATTRIBUTE((__warn_unused_result__));

#ifdef MARIAROCKS_NOT_YET // MDEV-10976
void set_use_read_free_rpl(const char *const whitelist);
#endif

@@ -1391,18 +1433,22 @@ struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx {
/* Stores number of keys to drop */
const uint m_n_dropped_keys;

/* Stores the largest current auto increment value in the index */
const ulonglong m_max_auto_incr;

Rdb_inplace_alter_ctx(
Rdb_tbl_def *new_tdef, std::shared_ptr<Rdb_key_def> *old_key_descr,
std::shared_ptr<Rdb_key_def> *new_key_descr, uint old_n_keys,
uint new_n_keys,
std::unordered_set<std::shared_ptr<Rdb_key_def>> added_indexes,
std::unordered_set<GL_INDEX_ID> dropped_index_ids, uint n_added_keys,
uint n_dropped_keys)
uint n_dropped_keys, ulonglong max_auto_incr)
: my_core::inplace_alter_handler_ctx(), m_new_tdef(new_tdef),
m_old_key_descr(old_key_descr), m_new_key_descr(new_key_descr),
m_old_n_keys(old_n_keys), m_new_n_keys(new_n_keys),
m_added_indexes(added_indexes), m_dropped_index_ids(dropped_index_ids),
m_n_added_keys(n_added_keys), m_n_dropped_keys(n_dropped_keys) {}
m_n_added_keys(n_added_keys), m_n_dropped_keys(n_dropped_keys),
m_max_auto_incr(max_auto_incr) {}

~Rdb_inplace_alter_ctx() {}

@@ -1412,6 +1458,9 @@ private:
Rdb_inplace_alter_ctx &operator=(const Rdb_inplace_alter_ctx &);
};

// file name indicating RocksDB data corruption
std::string rdb_corruption_marker_file_name();

const int MYROCKS_MARIADB_PLUGIN_MATURITY_LEVEL= MariaDB_PLUGIN_MATURITY_GAMMA;

extern bool prevent_myrocks_loading;

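rdb_corruption_marker_file_name() and prevent_myrocks_loading tie into the OnBackgroundError()/rdb_persist_corruption_marker() change above: once corruption has been observed, a marker file keeps the plugin from silently starting on damaged data. A hedged sketch of that startup gate follows; the file name and function below are illustrative, the real name comes from rdb_corruption_marker_file_name():

#include <cstdio>
#include <string>
#include <sys/stat.h>

static bool file_exists(const std::string &path) {
  struct stat st;
  return ::stat(path.c_str(), &st) == 0;
}

bool myrocks_allowed_to_start(const std::string &datadir) {
  // Assumed, illustrative marker location.
  const std::string marker = datadir + "/#rocksdb/ROCKSDB_CORRUPTED";
  if (file_exists(marker)) {
    std::fprintf(stderr,
                 "RocksDB: corruption marker %s found; refusing to initialize. "
                 "Restore from backup, then remove the marker file.\n",
                 marker.c_str());
    return false;  // caller would set prevent_myrocks_loading / fail plugin init
  }
  return true;
}
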
@ -0,0 +1,150 @@
|
|||
--echo #
|
||||
--echo # Testing concurrent transactions.
|
||||
--echo #
|
||||
|
||||
--source include/count_sessions.inc
|
||||
connect (con1,localhost,root,,);
|
||||
connect (con2,localhost,root,,);
|
||||
connect (con3,localhost,root,,);
|
||||
|
||||
connection con1;
|
||||
begin;
|
||||
insert into t values (); # 1
|
||||
|
||||
connection con2;
|
||||
begin;
|
||||
insert into t values (); # 2
|
||||
|
||||
connection con3;
|
||||
begin;
|
||||
insert into t values (); # 3
|
||||
|
||||
connection con1;
|
||||
insert into t values (); # 4
|
||||
|
||||
connection con2;
|
||||
insert into t values (); # 5
|
||||
|
||||
connection con3;
|
||||
insert into t values (); # 6
|
||||
|
||||
connection con2;
|
||||
commit;
|
||||
|
||||
connection con3;
|
||||
rollback;
|
||||
|
||||
connection con1;
|
||||
commit;
|
||||
|
||||
delete from t;
|
||||
|
||||
--echo # Master value before restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--echo # Slave value before restart
|
||||
sync_slave_with_master;
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
connection slave;
|
||||
--source include/stop_slave.inc
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
|
||||
connection default;
|
||||
--echo # Master value after restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--let $rpl_server_number = 2
|
||||
--source include/rpl_restart_server.inc
|
||||
|
||||
connection slave;
|
||||
--source include/start_slave.inc
|
||||
--echo # Slave value after restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
disconnect con1;
|
||||
disconnect con2;
|
||||
disconnect con3;
|
||||
--source include/wait_until_count_sessions.inc
|
||||
|
||||
--echo #
|
||||
--echo # Testing interaction of merge markers with various DDL statements.
|
||||
--echo #
|
||||
connection slave;
|
||||
--source include/stop_slave.inc
|
||||
|
||||
connection default;
|
||||
|
||||
--echo # Drop and add primary key.
|
||||
alter table t modify i int;
|
||||
alter table t drop primary key;
|
||||
alter table t add primary key (i);
|
||||
alter table t modify i int auto_increment;
|
||||
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--echo # Remove auto_increment property.
|
||||
alter table t modify i int;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--echo # Add auto_increment property.
|
||||
insert into t values (123);
|
||||
alter table t modify i int auto_increment;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--echo # Add column j.
|
||||
alter table t add column j int;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--echo # Rename tables.
|
||||
rename table t to t2;
|
||||
rename table t2 to t;
|
||||
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--echo # Change auto_increment property
|
||||
alter table t auto_increment = 1000;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
alter table t auto_increment = 1;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
alter table t drop primary key, add key (i), auto_increment = 1;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
alter table t add key (j), auto_increment = 1;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
alter table t modify i int;
|
||||
alter table t add column (k int auto_increment), add key(k), auto_increment=15;
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
|
||||
--echo # Drop table.
|
||||
drop table t;
|
||||
|
||||
--let $rpl_server_number = 1
|
||||
--source include/rpl_restart_server.inc
|
||||
|
||||
connection slave;
|
||||
--source include/start_slave.inc
|
|
@ -1,6 +1,4 @@
|
|||
--disable_warnings
|
||||
DROP TABLE IF EXISTS t1, t2, t3;
|
||||
--enable_warnings
|
||||
--source include/count_sessions.inc
|
||||
|
||||
if ($data_order_desc)
|
||||
{
|
||||
|
@ -20,7 +18,7 @@ eval CREATE TABLE t1(
|
|||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "$pk_cf",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
|
||||
# Create a second identical table to validate that bulk loading different
|
||||
# tables in the same session works
|
||||
|
@ -30,7 +28,7 @@ eval CREATE TABLE t2(
|
|||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "$pk_cf",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
|
||||
# Create a third table using partitions to validate that bulk loading works
|
||||
# across a partitioned table
|
||||
|
@ -40,7 +38,7 @@ eval CREATE TABLE t3(
|
|||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "$pk_cf",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
|
||||
--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")`
|
||||
|
||||
|
@ -154,3 +152,5 @@ EOF
|
|||
# Cleanup
|
||||
disconnect other;
|
||||
DROP TABLE t1, t2, t3;
|
||||
|
||||
--source include/wait_until_count_sessions.inc
|
|
@ -0,0 +1,144 @@
|
|||
--source include/have_partition.inc
|
||||
--source include/count_sessions.inc
|
||||
|
||||
SET rocksdb_bulk_load_size=3;
|
||||
SET rocksdb_bulk_load_allow_unsorted=1;
|
||||
|
||||
### Test individual INSERTs ###
|
||||
|
||||
# A table with only a PK won't have rows until the bulk load is finished
|
||||
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf")
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
--disable_query_log
|
||||
let $sign = 1;
|
||||
let $max = 5;
|
||||
let $i = 1;
|
||||
while ($i <= $max) {
|
||||
let $a = 1 + $sign * $i;
|
||||
let $b = 1 - $sign * $i;
|
||||
let $sign = -$sign;
|
||||
let $insert = INSERT INTO t1 VALUES ($a, $b);
|
||||
eval $insert;
|
||||
inc $i;
|
||||
}
|
||||
--enable_query_log
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
DROP TABLE t1;
|
||||
|
||||
# A table with a PK and a SK shows rows immediately
|
||||
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf", KEY(b))
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
--disable_query_log
|
||||
let $sign = 1;
|
||||
let $max = 5;
|
||||
let $i = 1;
|
||||
while ($i <= $max) {
|
||||
let $a = 1 + $sign * $i;
|
||||
let $b = 1 - $sign * $i;
|
||||
let $sign = -$sign;
|
||||
let $insert = INSERT INTO t1 VALUES ($a, $b);
|
||||
eval $insert;
|
||||
inc $i;
|
||||
}
|
||||
--enable_query_log
|
||||
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
DROP TABLE t1;
|
||||
|
||||
# Inserting into another table finishes bulk load to the previous table
|
||||
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf")
|
||||
ENGINE=ROCKSDB;
|
||||
eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf")
|
||||
ENGINE=ROCKSDB;
|
||||
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
INSERT INTO t2 VALUES (1,1);
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
SELECT * FROM t2 FORCE INDEX (PRIMARY);
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
### Test bulk load from a file ###
|
||||
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf")
|
||||
ENGINE=ROCKSDB;
|
||||
eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "$pk_cf")
|
||||
ENGINE=ROCKSDB;
|
||||
eval CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf")
|
||||
ENGINE=ROCKSDB PARTITION BY KEY() PARTITIONS 4;
|
||||
|
||||
--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")`
|
||||
# Create a text file with data to import into the table.
|
||||
# PK and SK are not in any order
|
||||
--let ROCKSDB_INFILE = $file
|
||||
perl;
|
||||
my $fn = $ENV{'ROCKSDB_INFILE'};
|
||||
open(my $fh, '>', $fn) || die "perl open($fn): $!";
|
||||
my $max = 5000000;
|
||||
my $sign = 1;
|
||||
for (my $ii = 0; $ii < $max; $ii++)
|
||||
{
|
||||
my $a = 1 + $sign * $ii;
|
||||
my $b = 1 - $sign * $ii;
|
||||
$sign = -$sign;
|
||||
print $fh "$a\t$b\n";
|
||||
}
|
||||
close($fh);
|
||||
EOF
|
||||
--file_exists $file
|
||||
|
||||
# Make sure a snapshot held by another user doesn't block the bulk load
|
||||
connect (other,localhost,root,,);
|
||||
set session transaction isolation level repeatable read;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
start transaction with consistent snapshot;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
|
||||
connection default;
|
||||
set rocksdb_bulk_load=1;
|
||||
set rocksdb_bulk_load_size=100000;
|
||||
--disable_query_log
|
||||
--echo LOAD DATA INFILE <input_file> INTO TABLE t1;
|
||||
eval LOAD DATA INFILE '$file' INTO TABLE t1;
|
||||
--echo LOAD DATA INFILE <input_file> INTO TABLE t2;
|
||||
eval LOAD DATA INFILE '$file' INTO TABLE t2;
|
||||
--echo LOAD DATA INFILE <input_file> INTO TABLE t3;
|
||||
eval LOAD DATA INFILE '$file' INTO TABLE t3;
|
||||
--enable_query_log
|
||||
set rocksdb_bulk_load=0;
|
||||
|
||||
--remove_file $file
|
||||
|
||||
# Make sure row count index stats are correct
|
||||
--replace_column 6 # 7 # 8 # 9 #
|
||||
SHOW TABLE STATUS WHERE name LIKE 't%';
|
||||
|
||||
ANALYZE TABLE t1, t2, t3;
|
||||
|
||||
--replace_column 6 # 7 # 8 # 9 #
|
||||
SHOW TABLE STATUS WHERE name LIKE 't%';
|
||||
|
||||
# Make sure all the data is there.
|
||||
select count(a) from t1;
|
||||
select count(b) from t1;
|
||||
select count(a) from t2;
|
||||
select count(b) from t2;
|
||||
select count(a) from t3;
|
||||
select count(b) from t3;
|
||||
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY) LIMIT 3;
|
||||
SELECT * FROM t2 FORCE INDEX (PRIMARY) LIMIT 3;
|
||||
|
||||
disconnect other;
|
||||
DROP TABLE t1, t2, t3;
|
||||
|
||||
--source include/wait_until_count_sessions.inc
|
|
@@ -0,0 +1,8 @@
--source include/shutdown_mysqld.inc

# Expect the server to fail to come up with these options
--error 1
--exec $MYSQLD_CMD --plugin_load=$HA_ROCKSDB_SO $_mysqld_option

# Restart the server with the default options
--source include/start_mysqld.inc
|
@@ -0,0 +1,14 @@
# Include this script only after using shutdown_mysqld.inc
# where $_expect_file_name was initialized.
# Write file to make mysql-test-run.pl start up the server again
--exec echo "restart:$_mysqld_option" > $_expect_file_name

# Turn on reconnect
--enable_reconnect

# Call script that will poll the server waiting for it to be back online again
--source include/wait_until_connected_again.inc

# Turn off reconnect again
--disable_reconnect
|
|
@ -15,6 +15,10 @@ count(b)
|
|||
300000
|
||||
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
|
||||
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
|
||||
ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Failed to acquire lock due to max_num_locks limit
|
||||
set session rocksdb_bulk_load=1;
|
||||
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
|
||||
set session rocksdb_bulk_load=0;
|
||||
SELECT COUNT(*) as c FROM
|
||||
(SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`)
|
||||
UNION DISTINCT
|
||||
|
|
|
@ -778,3 +778,20 @@ set global rocksdb_force_flush_memtable_now = true;
|
|||
select * from t1;
|
||||
col1 col2 extra
|
||||
DROP TABLE t1;
|
||||
create table t1 (i int auto_increment, key(i)) engine=rocksdb;
|
||||
insert into t1 values();
|
||||
insert into t1 values();
|
||||
insert into t1 values();
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`i` int(11) NOT NULL AUTO_INCREMENT,
|
||||
KEY `i` (`i`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`i` int(11) NOT NULL AUTO_INCREMENT,
|
||||
KEY `i` (`i`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
|
||||
drop table t1;
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
#
|
||||
# Test how MyRocks behaves when RocksDB reports corrupted data.
|
||||
#
|
||||
#
|
||||
# Test server crashes on corrupted data and restarts
|
||||
#
|
||||
create table t1 (
|
||||
pk int not null primary key,
|
||||
col1 varchar(10)
|
||||
) engine=rocksdb;
|
||||
insert into t1 values (1,1),(2,2),(3,3);
|
||||
select * from t1 where pk=1;
|
||||
pk col1
|
||||
1 1
|
||||
set session debug_dbug= "+d,rocksdb_return_status_corrupted";
|
||||
select * from t1 where pk=1;
|
||||
ERROR HY000: Lost connection to MySQL server during query
|
||||
FOUND 1 /data corruption detected/ in allow_to_start_after_corruption_debug.err
|
||||
#
|
||||
# The same for scan queries
|
||||
#
|
||||
select * from t1;
|
||||
pk col1
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
set session debug_dbug= "+d,rocksdb_return_status_corrupted";
|
||||
select * from t1;
|
||||
ERROR HY000: Lost connection to MySQL server during query
|
||||
FOUND 1 /data corruption detected/ in allow_to_start_after_corruption_debug.err
|
||||
#
|
||||
# Test restart failure. The server is shutdown at this point.
|
||||
#
|
||||
FOUND 1 /The server will exit normally and stop restart attempts/ in allow_to_start_after_corruption_debug.err
|
||||
#
|
||||
# Remove corruption file and restart cleanly
|
||||
#
|
||||
drop table t1;
|
storage/rocksdb/mysql-test/rocksdb/r/autoinc_crash_safe.result (new file, 132 lines)
|
@ -0,0 +1,132 @@
|
|||
include/master-slave.inc
|
||||
[connection master]
|
||||
create table t (i int primary key auto_increment) engine=rocksdb;
|
||||
#
|
||||
# Testing concurrent transactions.
|
||||
#
|
||||
connect con1,localhost,root,,;
|
||||
connect con2,localhost,root,,;
|
||||
connect con3,localhost,root,,;
|
||||
connection con1;
|
||||
begin;
|
||||
insert into t values ();
|
||||
connection con2;
|
||||
begin;
|
||||
insert into t values ();
|
||||
connection con3;
|
||||
begin;
|
||||
insert into t values ();
|
||||
connection con1;
|
||||
insert into t values ();
|
||||
connection con2;
|
||||
insert into t values ();
|
||||
connection con3;
|
||||
insert into t values ();
|
||||
connection con2;
|
||||
commit;
|
||||
connection con3;
|
||||
rollback;
|
||||
connection con1;
|
||||
commit;
|
||||
delete from t;
|
||||
# Master value before restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 7
|
||||
# Slave value before restart
|
||||
connection slave;
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
connection default;
|
||||
# Master value after restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
include/rpl_restart_server.inc [server_number=2]
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
# Slave value after restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
disconnect con1;
|
||||
disconnect con2;
|
||||
disconnect con3;
|
||||
#
|
||||
# Testing interaction of merge markers with various DDL statements.
|
||||
#
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection default;
|
||||
# Drop and add primary key.
|
||||
alter table t modify i int;
|
||||
alter table t drop primary key;
|
||||
alter table t add primary key (i);
|
||||
alter table t modify i int auto_increment;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
# Remove auto_increment property.
|
||||
alter table t modify i int;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t NULL
|
||||
# Add auto_increment property.
|
||||
insert into t values (123);
|
||||
alter table t modify i int auto_increment;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
# Add column j.
|
||||
alter table t add column j int;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
# Rename tables.
|
||||
rename table t to t2;
|
||||
rename table t2 to t;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
# Change auto_increment property
|
||||
alter table t auto_increment = 1000;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 1000
|
||||
alter table t auto_increment = 1;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
alter table t drop primary key, add key (i), auto_increment = 1;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
alter table t add key (j), auto_increment = 1;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
alter table t modify i int;
|
||||
alter table t add column (k int auto_increment), add key(k), auto_increment=15;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 16
|
||||
# Drop table.
|
||||
drop table t;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/rpl_end.inc
|
|
@ -0,0 +1,132 @@
|
|||
include/master-slave.inc
|
||||
[connection master]
|
||||
create table t (i int primary key auto_increment) engine=rocksdb partition by key (i) partitions 3;
|
||||
#
|
||||
# Testing concurrent transactions.
|
||||
#
|
||||
connect con1,localhost,root,,;
|
||||
connect con2,localhost,root,,;
|
||||
connect con3,localhost,root,,;
|
||||
connection con1;
|
||||
begin;
|
||||
insert into t values ();
|
||||
connection con2;
|
||||
begin;
|
||||
insert into t values ();
|
||||
connection con3;
|
||||
begin;
|
||||
insert into t values ();
|
||||
connection con1;
|
||||
insert into t values ();
|
||||
connection con2;
|
||||
insert into t values ();
|
||||
connection con3;
|
||||
insert into t values ();
|
||||
connection con2;
|
||||
commit;
|
||||
connection con3;
|
||||
rollback;
|
||||
connection con1;
|
||||
commit;
|
||||
delete from t;
|
||||
# Master value before restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 7
|
||||
# Slave value before restart
|
||||
connection slave;
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
connection default;
|
||||
# Master value after restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
include/rpl_restart_server.inc [server_number=2]
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
# Slave value after restart
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
disconnect con1;
|
||||
disconnect con2;
|
||||
disconnect con3;
|
||||
#
|
||||
# Testing interaction of merge markers with various DDL statements.
|
||||
#
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection default;
|
||||
# Drop and add primary key.
|
||||
alter table t modify i int;
|
||||
alter table t drop primary key;
|
||||
alter table t add primary key (i);
|
||||
alter table t modify i int auto_increment;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 1
|
||||
# Remove auto_increment property.
|
||||
alter table t modify i int;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t NULL
|
||||
# Add auto_increment property.
|
||||
insert into t values (123);
|
||||
alter table t modify i int auto_increment;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
# Add column j.
|
||||
alter table t add column j int;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
# Rename tables.
|
||||
rename table t to t2;
|
||||
rename table t2 to t;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
# Change auto_increment property
|
||||
alter table t auto_increment = 1000;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 1000
|
||||
alter table t auto_increment = 1;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
alter table t drop primary key, add key (i), auto_increment = 1;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
alter table t add key (j), auto_increment = 1;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 124
|
||||
alter table t modify i int;
|
||||
alter table t add column (k int auto_increment), add key(k), auto_increment=15;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 16
|
||||
# Drop table.
|
||||
drop table t;
|
||||
include/rpl_restart_server.inc [server_number=1]
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/rpl_end.inc
|
storage/rocksdb/mysql-test/rocksdb/r/autoinc_debug.result (new file, 107 lines)
|
@ -0,0 +1,107 @@
|
|||
#
|
||||
# Testing upgrading from server without merges for auto_increment
|
||||
# to new server with such support.
|
||||
#
|
||||
set debug_dbug='+d,myrocks_autoinc_upgrade';
|
||||
create table t (i int primary key auto_increment);
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
select * from t;
|
||||
i
|
||||
1
|
||||
2
|
||||
3
|
||||
delete from t where i > 1;
|
||||
select * from t;
|
||||
i
|
||||
1
|
||||
select table_name, index_name, auto_increment
|
||||
from information_schema.rocksdb_ddl where table_name = 't';
|
||||
table_name index_name auto_increment
|
||||
t PRIMARY NULL
|
||||
set debug_dbug='-d,myrocks_autoinc_upgrade';
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
select * from t;
|
||||
i
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
select table_name, index_name, auto_increment
|
||||
from information_schema.rocksdb_ddl where table_name = 't';
|
||||
table_name index_name auto_increment
|
||||
t PRIMARY 5
|
||||
delete from t where i > 1;
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
select * from t;
|
||||
i
|
||||
1
|
||||
5
|
||||
6
|
||||
7
|
||||
drop table t;
|
||||
#
|
||||
# Testing crash safety of transactions.
|
||||
#
|
||||
create table t (i int primary key auto_increment);
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
# Before anything
|
||||
begin;
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
set debug_dbug="+d,crash_commit_before";
|
||||
commit;
|
||||
ERROR HY000: Lost connection to MySQL server during query
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 4
|
||||
select max(i) from t;
|
||||
max(i)
|
||||
3
|
||||
# After engine prepare
|
||||
begin;
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
set debug_dbug="+d,crash_commit_after_prepare";
|
||||
commit;
|
||||
ERROR HY000: Lost connection to MySQL server during query
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 4
|
||||
select max(i) from t;
|
||||
max(i)
|
||||
3
|
||||
# After binlog
|
||||
begin;
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
set debug_dbug="+d,crash_commit_after_log";
|
||||
commit;
|
||||
ERROR HY000: Lost connection to MySQL server during query
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 6
|
||||
select max(i) from t;
|
||||
max(i)
|
||||
5
|
||||
# After everything
|
||||
begin;
|
||||
insert into t values ();
|
||||
insert into t values ();
|
||||
set debug_dbug="+d,crash_commit_after";
|
||||
commit;
|
||||
ERROR HY000: Lost connection to MySQL server during query
|
||||
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
|
||||
table_schema table_name auto_increment
|
||||
test t 8
|
||||
select max(i) from t;
|
||||
max(i)
|
||||
7
|
||||
drop table t;
|
|
@ -61,3 +61,82 @@ LAST_INSERT_ID()
|
|||
SELECT a FROM t1 ORDER BY a;
|
||||
a
|
||||
DROP TABLE t1;
|
||||
#---------------------------
|
||||
# test large autoincrement values
|
||||
#---------------------------
|
||||
SET auto_increment_increment = 1;
|
||||
SET auto_increment_offset = 1;
|
||||
CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
|
||||
INSERT INTO t1 VALUES (18446744073709551613, 'a');
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551614 DEFAULT CHARSET=latin1
|
||||
INSERT INTO t1 VALUES (NULL, 'b');
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1
|
||||
INSERT INTO t1 VALUES (NULL, 'c');
|
||||
ERROR HY000: Failed to read auto-increment value from storage engine
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
18446744073709551613 a
|
||||
18446744073709551614 b
|
||||
DROP TABLE t1;
|
||||
SET auto_increment_increment = 300;
|
||||
CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
|
||||
INSERT INTO t1 VALUES (18446744073709551613, 'a');
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551614 DEFAULT CHARSET=latin1
|
||||
INSERT INTO t1 VALUES (NULL, 'b');
|
||||
ERROR HY000: Failed to read auto-increment value from storage engine
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1
|
||||
INSERT INTO t1 VALUES (NULL, 'c');
|
||||
ERROR HY000: Failed to read auto-increment value from storage engine
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
18446744073709551613 a
|
||||
DROP TABLE t1;
|
||||
SET auto_increment_offset = 200;
|
||||
CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
|
||||
INSERT INTO t1 VALUES (18446744073709551613, 'a');
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551614 DEFAULT CHARSET=latin1
|
||||
INSERT INTO t1 VALUES (NULL, 'b');
|
||||
ERROR HY000: Failed to read auto-increment value from storage engine
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
|
||||
`b` char(8) DEFAULT NULL,
|
||||
PRIMARY KEY (`a`)
|
||||
) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1
|
||||
INSERT INTO t1 VALUES (NULL, 'c');
|
||||
ERROR HY000: Failed to read auto-increment value from storage engine
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
18446744073709551613 a
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
# The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE.
|
storage/rocksdb/mysql-test/rocksdb/r/bloomfilter5.result (new file, 62 lines)
|
@ -0,0 +1,62 @@
|
|||
#
|
||||
# Issue #809: Wrong query result with bloom filters
|
||||
#
|
||||
create table t1 (
|
||||
id1 bigint not null,
|
||||
id2 bigint not null,
|
||||
id3 varchar(100) not null,
|
||||
id4 int not null,
|
||||
id5 int not null,
|
||||
value bigint,
|
||||
value2 varchar(100),
|
||||
primary key (id1, id2, id3, id4) COMMENT 'rev:bf5_1'
|
||||
) engine=ROCKSDB;
|
||||
create table t2(a int);
|
||||
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t3(seq int);
|
||||
insert into t3
|
||||
select
|
||||
1+ A.a + B.a* 10 + C.a * 100 + D.a * 1000
|
||||
from t2 A, t2 B, t2 C, t2 D;
|
||||
insert t1
|
||||
select
|
||||
(seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc"
|
||||
from t3;
|
||||
set global rocksdb_force_flush_memtable_now=1;
|
||||
# Full table scan
|
||||
explain
|
||||
select * from t1 limit 10;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL NULL NULL NULL NULL 10000
|
||||
select * from t1 limit 10;
|
||||
id1 id2 id3 id4 id5 value value2
|
||||
1000 2000 2000 10000 10000 1000 aaabbbccc
|
||||
1000 2000 2000 9999 9999 1000 aaabbbccc
|
||||
1000 2000 2000 9998 9998 1000 aaabbbccc
|
||||
1000 2000 2000 9997 9997 1000 aaabbbccc
|
||||
1000 2000 2000 9996 9996 1000 aaabbbccc
|
||||
1000 1999 1999 9995 9995 1000 aaabbbccc
|
||||
1000 1999 1999 9994 9994 1000 aaabbbccc
|
||||
1000 1999 1999 9993 9993 1000 aaabbbccc
|
||||
1000 1999 1999 9992 9992 1000 aaabbbccc
|
||||
1000 1999 1999 9991 9991 1000 aaabbbccc
|
||||
# An index scan starting from the end of the table:
|
||||
explain
|
||||
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index NULL PRIMARY 122 NULL 1
|
||||
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;
|
||||
id1 id2 id3 id4 id5 value value2
|
||||
1000 2000 2000 10000 10000 1000 aaabbbccc
|
||||
create table t4 (
|
||||
pk int unsigned not null primary key,
|
||||
kp1 int unsigned not null,
|
||||
kp2 int unsigned not null,
|
||||
col1 int unsigned,
|
||||
key(kp1, kp2) comment 'rev:bf5_2'
|
||||
) engine=rocksdb;
|
||||
insert into t4 values (1, 0xFFFF, 0xFFF, 12345);
|
||||
# This must not fail an assert:
|
||||
select * from t4 force index(kp1) where kp1=0xFFFFFFFF and kp2<=0xFFFFFFFF order by kp2 desc;
|
||||
pk kp1 kp2 col1
|
||||
drop table t1,t2,t3,t4;
|
|
@ -1,4 +1,3 @@
|
|||
DROP TABLE IF EXISTS t1, t2, t3;
|
||||
Data will be ordered in ascending order
|
||||
CREATE TABLE t1(
|
||||
pk CHAR(5),
|
||||
|
@ -6,21 +5,21 @@ a CHAR(30),
|
|||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t2(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t3(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
connect other,localhost,root,,;
|
||||
set session transaction isolation level repeatable read;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load_allow_unsorted=1;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
connect con1,localhost,root,,;
|
||||
DROP TABLE t1;
|
||||
connection default;
|
||||
disconnect con1;
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist
|
|
@ -1,4 +1,4 @@
|
|||
CREATE TABLE t1(pk INT, PRIMARY KEY(pk));
|
||||
CREATE TABLE t1(pk INT, PRIMARY KEY(pk)) ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES(10);
|
||||
INSERT INTO t1 VALUES(11);
|
||||
|
@ -14,18 +14,30 @@ INSERT INTO t1 VALUES(1);
|
|||
INSERT INTO t1 VALUES(2);
|
||||
INSERT INTO t1 VALUES(20);
|
||||
INSERT INTO t1 VALUES(21);
|
||||
#
|
||||
# In MyRocks, the following statement will intentionally crash the server.
|
||||
# In MariaDB, it will cause an error
|
||||
SET rocksdb_bulk_load=0;
|
||||
ERROR HY000: Rows inserted during bulk load must not overlap existing rows
|
||||
#
|
||||
# Despite the error, bulk load operation is over so the variable value
|
||||
# will be 0:
|
||||
select @@rocksdb_bulk_load;
|
||||
@@rocksdb_bulk_load
|
||||
0
|
||||
SHOW VARIABLES LIKE 'rocksdb_bulk_load';
|
||||
Variable_name Value
|
||||
rocksdb_bulk_load OFF
|
||||
call mtr.add_suppression('finalizing last SST file while setting bulk loading variable');
|
||||
SELECT * FROM t1;
|
||||
pk
|
||||
10
|
||||
11
|
||||
FOUND 1 /RocksDB: Error [0-9]+ finalizing last SST file while setting bulk loading variable/ in rocksdb.bulk_load_errors.1.err
|
||||
connect con1,localhost,root,,;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES(1);
|
||||
INSERT INTO t1 VALUES(2);
|
||||
INSERT INTO t1 VALUES(20);
|
||||
INSERT INTO t1 VALUES(21);
|
||||
connection default;
|
||||
disconnect con1;
|
||||
SELECT * FROM t1;
|
||||
pk
|
||||
10
|
||||
11
|
||||
FOUND 1 /RocksDB: Error [0-9]+ finalizing last SST file while disconnecting/ in rocksdb.bulk_load_errors.2.err
|
||||
TRUNCATE TABLE t1;
|
||||
SET rocksdb_bulk_load_allow_unsorted=1;
|
||||
SET rocksdb_bulk_load=1;
|
||||
|
@ -53,3 +65,35 @@ pk
|
|||
202
|
||||
SET rocksdb_bulk_load_allow_unsorted=DEFAULT;
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1(c1 INT KEY) ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES (),(),();
|
||||
ERROR HY000: Rows must be inserted in primary key order during bulk load operation
|
||||
SET rocksdb_bulk_load=0;
|
||||
DROP TABLE t1;
|
||||
SET @orig_table_open_cache=@@global.table_open_cache;
|
||||
CREATE TABLE t1(a INT AUTO_INCREMENT, b INT, PRIMARY KEY (a)) ENGINE=ROCKSDB DEFAULT CHARSET=latin1;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES(13, 0);
|
||||
INSERT INTO t1 VALUES(2, 'test 2');
|
||||
Warnings:
|
||||
Warning 1366 Incorrect integer value: 'test 2' for column 'b' at row 1
|
||||
INSERT INTO t1 VALUES(@id, @arg04);
|
||||
SET @@global.table_open_cache=FALSE;
|
||||
Warnings:
|
||||
Warning 1292 Truncated incorrect table_open_cache value: '0'
|
||||
INSERT INTO t1 VALUES(51479+0.333333333,1);
|
||||
DROP TABLE t1;
|
||||
SET @@global.table_open_cache=@orig_table_open_cache;
|
||||
FOUND 1 /RocksDB: Error [0-9]+ finalizing bulk load while closing handler/ in rocksdb.bulk_load_errors.3.err
|
||||
CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;
|
||||
CREATE TABLE t2 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES (1), (2);
|
||||
INSERT INTO t2 VALUES (1), (2);
|
||||
INSERT INTO t1 VALUES (1);
|
||||
INSERT INTO t2 VALUES (3);
|
||||
ERROR HY000: Rows inserted during bulk load must not overlap existing rows
|
||||
SET rocksdb_bulk_load=0;
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t2;
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
DROP TABLE IF EXISTS t1, t2, t3;
|
||||
Data will be ordered in ascending order
|
||||
CREATE TABLE t1(
|
||||
pk CHAR(5),
|
||||
|
@ -6,21 +5,21 @@ a CHAR(30),
|
|||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "rev:cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t2(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "rev:cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t3(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "rev:cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
connect other,localhost,root,,;
|
||||
set session transaction isolation level repeatable read;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
DROP TABLE IF EXISTS t1, t2, t3;
|
||||
Data will be ordered in descending order
|
||||
CREATE TABLE t1(
|
||||
pk CHAR(5),
|
||||
|
@ -6,21 +5,21 @@ a CHAR(30),
|
|||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "rev:cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t2(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "rev:cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t3(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "rev:cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
connect other,localhost,root,,;
|
||||
set session transaction isolation level repeatable read;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
DROP TABLE IF EXISTS t1, t2, t3;
|
||||
Data will be ordered in descending order
|
||||
CREATE TABLE t1(
|
||||
pk CHAR(5),
|
||||
|
@ -6,21 +5,21 @@ a CHAR(30),
|
|||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t2(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin';
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin';
|
||||
CREATE TABLE t3(
|
||||
pk CHAR(5),
|
||||
a CHAR(30),
|
||||
b CHAR(30),
|
||||
PRIMARY KEY(pk) COMMENT "cf1",
|
||||
KEY(a)
|
||||
) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
|
||||
connect other,localhost,root,,;
|
||||
set session transaction isolation level repeatable read;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
DROP TABLE IF EXISTS t1;
|
||||
SET rocksdb_bulk_load_size=3;
|
||||
SET rocksdb_bulk_load_allow_unsorted=1;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1");
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
-3 5
|
||||
-1 3
|
||||
|
@ -14,42 +14,49 @@ a b
|
|||
4 -2
|
||||
6 -4
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1", KEY(b));
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1", KEY(b))
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
6 -4
|
||||
4 -2
|
||||
2 0
|
||||
-1 3
|
||||
-3 5
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
-3 5
|
||||
-1 3
|
||||
2 0
|
||||
4 -2
|
||||
6 -4
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1");
|
||||
CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1");
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
INSERT INTO t2 VALUES (1,1);
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
1 1
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
SELECT * FROM t2;
|
||||
SELECT * FROM t2 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
1 1
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
1 1
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
1 1
|
||||
2 2
|
||||
DROP TABLE t1, t2;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1");
|
||||
CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "cf1");
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1")
|
||||
PARTITION BY KEY() PARTITIONS 4;
|
||||
ENGINE=ROCKSDB PARTITION BY KEY() PARTITIONS 4;
|
||||
connect other,localhost,root,,;
|
||||
set session transaction isolation level repeatable read;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
|
@ -99,5 +106,15 @@ count(a)
|
|||
select count(b) from t3;
|
||||
count(b)
|
||||
5000000
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY) LIMIT 3;
|
||||
a b
|
||||
-4999998 5000000
|
||||
-4999996 4999998
|
||||
-4999994 4999996
|
||||
SELECT * FROM t2 FORCE INDEX (PRIMARY) LIMIT 3;
|
||||
a b
|
||||
4999999 -4999997
|
||||
4999997 -4999995
|
||||
4999995 -4999993
|
||||
disconnect other;
|
||||
DROP TABLE t1, t2, t3;
|
||||
SET rocksdb_bulk_load_allow_unsorted=0;
|
||||
|
|
|
@ -0,0 +1,120 @@
|
|||
SET rocksdb_bulk_load_size=3;
|
||||
SET rocksdb_bulk_load_allow_unsorted=1;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
6 -4
|
||||
4 -2
|
||||
2 0
|
||||
-1 3
|
||||
-3 5
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1", KEY(b))
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
6 -4
|
||||
4 -2
|
||||
2 0
|
||||
-1 3
|
||||
-3 5
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
SET rocksdb_bulk_load=1;
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
INSERT INTO t2 VALUES (1,1);
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
1 1
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
SELECT * FROM t2 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
1 1
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
1 1
|
||||
SET rocksdb_bulk_load=0;
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY);
|
||||
a b
|
||||
2 2
|
||||
1 1
|
||||
DROP TABLE t1, t2;
|
||||
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "rev:cf1")
|
||||
ENGINE=ROCKSDB;
|
||||
CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1")
|
||||
ENGINE=ROCKSDB PARTITION BY KEY() PARTITIONS 4;
|
||||
connect other,localhost,root,,;
|
||||
set session transaction isolation level repeatable read;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
STAT_TYPE VALUE
|
||||
DB_NUM_SNAPSHOTS 0
|
||||
start transaction with consistent snapshot;
|
||||
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
|
||||
STAT_TYPE VALUE
|
||||
DB_NUM_SNAPSHOTS 1
|
||||
connection default;
|
||||
set rocksdb_bulk_load=1;
|
||||
set rocksdb_bulk_load_size=100000;
|
||||
LOAD DATA INFILE <input_file> INTO TABLE t1;
|
||||
LOAD DATA INFILE <input_file> INTO TABLE t2;
|
||||
LOAD DATA INFILE <input_file> INTO TABLE t3;
|
||||
set rocksdb_bulk_load=0;
|
||||
SHOW TABLE STATUS WHERE name LIKE 't%';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
|
||||
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
|
||||
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
|
||||
ANALYZE TABLE t1, t2, t3;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
test.t2 analyze status OK
|
||||
test.t3 analyze status OK
|
||||
SHOW TABLE STATUS WHERE name LIKE 't%';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
|
||||
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
|
||||
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
|
||||
select count(a) from t1;
|
||||
count(a)
|
||||
5000000
|
||||
select count(b) from t1;
|
||||
count(b)
|
||||
5000000
|
||||
select count(a) from t2;
|
||||
count(a)
|
||||
5000000
|
||||
select count(b) from t2;
|
||||
count(b)
|
||||
5000000
|
||||
select count(a) from t3;
|
||||
count(a)
|
||||
5000000
|
||||
select count(b) from t3;
|
||||
count(b)
|
||||
5000000
|
||||
SELECT * FROM t1 FORCE INDEX (PRIMARY) LIMIT 3;
|
||||
a b
|
||||
4999999 -4999997
|
||||
4999997 -4999995
|
||||
4999995 -4999993
|
||||
SELECT * FROM t2 FORCE INDEX (PRIMARY) LIMIT 3;
|
||||
a b
|
||||
-4999998 5000000
|
||||
-4999996 4999998
|
||||
-4999994 4999996
|
||||
disconnect other;
|
||||
DROP TABLE t1, t2, t3;
|
|
@ -1,3 +1,38 @@
|
|||
CREATE TABLE t0 (id int PRIMARY KEY, a int, INDEX ix_a (a)) engine=rocksdb;
|
||||
insert into t0 values (0, 0),(1, 1),(2, 2),(3, 3),(4, 4),
|
||||
(5, 4),(6, 4),(7, 4),(8, 4),(9, 4);
|
||||
SELECT cardinality FROM information_schema.statistics where table_name="t0" and
|
||||
column_name="id";
|
||||
cardinality
|
||||
NULL
|
||||
SELECT cardinality FROM information_schema.statistics where table_name="t0" and
|
||||
column_name="a";
|
||||
cardinality
|
||||
NULL
|
||||
ANALYZE TABLE t0;
|
||||
SELECT table_rows into @N FROM information_schema.tables
|
||||
WHERE table_name = "t0";
|
||||
SELECT FLOOR(@N/cardinality) FROM
|
||||
information_schema.statistics where table_name="t0" and column_name="id";
|
||||
FLOOR(@N/cardinality)
|
||||
1
|
||||
SELECT FLOOR(@N/cardinality) FROM
|
||||
information_schema.statistics where table_name="t0" and column_name="a";
|
||||
FLOOR(@N/cardinality)
|
||||
2
|
||||
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
|
||||
ANALYZE TABLE t0;
|
||||
SELECT table_rows into @N FROM information_schema.tables
|
||||
WHERE table_name = "t0";
|
||||
SELECT FLOOR(@N/cardinality) FROM
|
||||
information_schema.statistics where table_name="t0" and column_name="id";
|
||||
FLOOR(@N/cardinality)
|
||||
1
|
||||
SELECT FLOOR(@N/cardinality) FROM
|
||||
information_schema.statistics where table_name="t0" and column_name="a";
|
||||
FLOOR(@N/cardinality)
|
||||
2
|
||||
drop table t0;
|
||||
DROP TABLE IF EXISTS t1,t10,t11;
|
||||
create table t1(
|
||||
id bigint not null primary key,
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";
|
||||
variable_name variable_value
|
||||
ROCKSDB_IGNORE_UNKNOWN_OPTIONS ON
|
||||
FOUND 1 /RocksDB: Compatibility check against existing database options failed/ in my_restart.err
|
||||
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";
|
||||
variable_name variable_value
|
||||
ROCKSDB_IGNORE_UNKNOWN_OPTIONS ON
|
|
@ -66,13 +66,6 @@ KEY
|
|||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
-----------------------------------------
|
||||
|
@ -122,13 +115,6 @@ KEY
|
|||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
|
||||
|
@ -147,13 +133,6 @@ KEY
|
|||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
-----------------------------------------
|
||||
|
@ -204,13 +183,6 @@ KEY
|
|||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
|
||||
|
@ -229,13 +201,6 @@ KEY
|
|||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
|
||||
|
@ -254,13 +219,6 @@ KEY
|
|||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
-----------------------------------------
|
||||
|
@ -295,13 +253,6 @@ KEY
|
|||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
-----------------------------------------
|
||||
|
@ -324,8 +275,12 @@ i
|
|||
3
|
||||
select * from t where i=2 for update;
|
||||
select * from t where i=3 for update;
|
||||
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks';
|
||||
select * from t where i=1 for update;
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
select case when variable_value-@a = 1 then 'true' else 'false' end as deadlocks from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks';
|
||||
deadlocks
|
||||
true
|
||||
rollback;
|
||||
i
|
||||
3
|
||||
|
@ -410,13 +365,6 @@ KEY
|
|||
LOCK TYPE: SHARED
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: PRIMARY
|
||||
TABLE NAME: test.t
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
|
||||
|
@ -455,13 +403,6 @@ KEY
|
|||
LOCK TYPE: SHARED
|
||||
INDEX NAME: NOT FOUND; IDX_ID
|
||||
TABLE NAME: NOT FOUND; IDX_ID
|
||||
---------------WAITING FOR---------------
|
||||
TXN_ID
|
||||
COLUMN FAMILY NAME: default
|
||||
KEY
|
||||
LOCK TYPE: EXCLUSIVE
|
||||
INDEX NAME: NOT FOUND; IDX_ID
|
||||
TABLE NAME: NOT FOUND; IDX_ID
|
||||
|
||||
--------TXN_ID GOT DEADLOCK---------
|
||||
|
||||
|
|
|
@ -1,17 +1,22 @@
|
|||
DROP TABLE IF EXISTS is_ddl_t1;
|
||||
DROP TABLE IF EXISTS is_ddl_t2;
|
||||
DROP TABLE IF EXISTS is_ddl_t3;
|
||||
CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT,
|
||||
PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf')
|
||||
ENGINE = ROCKSDB;
|
||||
CREATE TABLE is_ddl_t2 (x INT, y INT, z INT,
|
||||
PRIMARY KEY (z, y) COMMENT 'zy_cf',
|
||||
KEY (x)) ENGINE = ROCKSDB;
|
||||
SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%';
|
||||
TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF
|
||||
test is_ddl_t1 NULL PRIMARY 1 13 default
|
||||
test is_ddl_t1 NULL j 2 13 default
|
||||
test is_ddl_t1 NULL k 2 13 kl_cf
|
||||
test is_ddl_t2 NULL PRIMARY 1 13 zy_cf
|
||||
test is_ddl_t2 NULL x 2 13 default
|
||||
CREATE TABLE is_ddl_t3 (a INT, b INT, c INT, PRIMARY KEY (a)) ENGINE = ROCKSDB
|
||||
COMMENT "ttl_duration=3600;";
|
||||
SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF,TTL_DURATION,INDEX_FLAGS FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%';
|
||||
TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF TTL_DURATION INDEX_FLAGS
|
||||
test is_ddl_t1 NULL PRIMARY 1 13 default 0 0
|
||||
test is_ddl_t1 NULL j 2 13 default 0 0
|
||||
test is_ddl_t1 NULL k 2 13 kl_cf 0 0
|
||||
test is_ddl_t2 NULL PRIMARY 1 13 zy_cf 0 0
|
||||
test is_ddl_t2 NULL x 2 13 default 0 0
|
||||
test is_ddl_t3 NULL PRIMARY 1 13 default 3600 1
|
||||
DROP TABLE is_ddl_t1;
|
||||
DROP TABLE is_ddl_t2;
|
||||
DROP TABLE is_ddl_t3;
|
||||
|
|
215
storage/rocksdb/mysql-test/rocksdb/r/i_s_deadlock.result
Normal file
|
@ -0,0 +1,215 @@
|
|||
set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
|
||||
set @prior_deadlock_detect = @@rocksdb_deadlock_detect;
|
||||
set @prior_max_latest_deadlocks = @@rocksdb_max_latest_deadlocks;
|
||||
set global rocksdb_deadlock_detect = on;
|
||||
set global rocksdb_lock_wait_timeout = 10000;
|
||||
# Clears deadlock buffer of any prior deadlocks.
|
||||
set global rocksdb_max_latest_deadlocks = 0;
|
||||
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;
|
||||
connect con1,localhost,root,,;
|
||||
connect con2,localhost,root,,;
|
||||
connect con3,localhost,root,,;
|
||||
connection default;
|
||||
show create table information_schema.rocksdb_deadlock;
|
||||
Table Create Table
|
||||
ROCKSDB_DEADLOCK CREATE TEMPORARY TABLE `ROCKSDB_DEADLOCK` (
|
||||
`DEADLOCK_ID` bigint(8) NOT NULL DEFAULT 0,
|
||||
`TRANSACTION_ID` bigint(8) NOT NULL DEFAULT 0,
|
||||
`CF_NAME` varchar(193) NOT NULL DEFAULT '',
|
||||
`WAITING_KEY` varchar(513) NOT NULL DEFAULT '',
|
||||
`LOCK_TYPE` varchar(193) NOT NULL DEFAULT '',
|
||||
`INDEX_NAME` varchar(193) NOT NULL DEFAULT '',
|
||||
`TABLE_NAME` varchar(193) NOT NULL DEFAULT '',
|
||||
`ROLLED_BACK` bigint(8) NOT NULL DEFAULT 0
|
||||
) ENGINE=MEMORY DEFAULT CHARSET=utf8
|
||||
create table t (i int primary key) engine=rocksdb;
|
||||
insert into t values (1), (2), (3);
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
Deadlock #1
|
||||
connection con1;
|
||||
begin;
|
||||
select * from t where i=1 for update;
|
||||
i
|
||||
1
|
||||
connection con2;
|
||||
begin;
|
||||
select * from t where i=2 for update;
|
||||
i
|
||||
2
|
||||
connection con1;
|
||||
select * from t where i=2 for update;
|
||||
connection con2;
|
||||
select * from t where i=1 for update;
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
rollback;
|
||||
connection con1;
|
||||
i
|
||||
2
|
||||
rollback;
|
||||
connection default;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
|
||||
Deadlock #2
|
||||
connection con1;
|
||||
begin;
|
||||
select * from t where i=1 for update;
|
||||
i
|
||||
1
|
||||
connection con2;
|
||||
begin;
|
||||
select * from t where i=2 for update;
|
||||
i
|
||||
2
|
||||
connection con1;
|
||||
select * from t where i=2 for update;
|
||||
connection con2;
|
||||
select * from t where i=1 for update;
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
rollback;
|
||||
connection con1;
|
||||
i
|
||||
2
|
||||
rollback;
|
||||
connection default;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
|
||||
set global rocksdb_max_latest_deadlocks = 10;
|
||||
Deadlock #3
|
||||
connection con1;
|
||||
begin;
|
||||
select * from t where i=1 for update;
|
||||
i
|
||||
1
|
||||
connection con2;
|
||||
begin;
|
||||
select * from t where i=2 for update;
|
||||
i
|
||||
2
|
||||
connection con1;
|
||||
select * from t where i=2 for update;
|
||||
connection con2;
|
||||
select * from t where i=1 for update;
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
rollback;
|
||||
connection con1;
|
||||
i
|
||||
2
|
||||
rollback;
|
||||
connection default;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
|
||||
set global rocksdb_max_latest_deadlocks = 1;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
|
||||
connection con3;
|
||||
set rocksdb_deadlock_detect_depth = 2;
|
||||
Deadlock #4
|
||||
connection con1;
|
||||
begin;
|
||||
select * from t where i=1 for update;
|
||||
i
|
||||
1
|
||||
connection con2;
|
||||
begin;
|
||||
select * from t where i=2 for update;
|
||||
i
|
||||
2
|
||||
connection con3;
|
||||
begin;
|
||||
select * from t where i=3 for update;
|
||||
i
|
||||
3
|
||||
connection con1;
|
||||
select * from t where i=2 for update;
|
||||
connection con2;
|
||||
select * from t where i=3 for update;
|
||||
connection con3;
|
||||
select * from t where i=1 for update;
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
rollback;
|
||||
connection con2;
|
||||
i
|
||||
3
|
||||
rollback;
|
||||
connection con1;
|
||||
i
|
||||
2
|
||||
rollback;
|
||||
connection default;
|
||||
set global rocksdb_max_latest_deadlocks = 5;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
Deadlock #5
|
||||
connection con1;
|
||||
begin;
|
||||
select * from t where i=1 for update;
|
||||
i
|
||||
1
|
||||
connection con2;
|
||||
begin;
|
||||
select * from t where i=2 for update;
|
||||
i
|
||||
2
|
||||
connection con3;
|
||||
begin;
|
||||
select * from t where i=3 lock in share mode;
|
||||
i
|
||||
3
|
||||
connection con1;
|
||||
select * from t where i=100 for update;
|
||||
i
|
||||
select * from t where i=101 for update;
|
||||
i
|
||||
select * from t where i=2 for update;
|
||||
connection con2;
|
||||
select * from t where i=3 lock in share mode;
|
||||
i
|
||||
3
|
||||
select * from t where i=200 for update;
|
||||
i
|
||||
select * from t where i=201 for update;
|
||||
i
|
||||
select * from t where i=1 lock in share mode;
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
rollback;
|
||||
connection con1;
|
||||
i
|
||||
2
|
||||
rollback;
|
||||
connection con3;
|
||||
rollback;
|
||||
connection default;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED PRIMARY test.t 1
|
||||
disconnect con1;
|
||||
disconnect con2;
|
||||
disconnect con3;
|
||||
set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
|
||||
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
|
||||
drop table t;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE INDEX_NAME TABLE_NAME 0
|
||||
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED INDEX_NAME TABLE_NAME 1
|
||||
set global rocksdb_max_latest_deadlocks = 0;
|
||||
# Clears deadlock buffer of any existent deadlocks.
|
||||
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;
|
||||
select * from information_schema.rocksdb_deadlock;
|
||||
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
|
|
@ -25,10 +25,10 @@ UPDATE t1 SET filler1='to be deleted' WHERE key1=100 and key2=100;
|
|||
DROP TABLE t0, t1;
|
||||
create table t1 (key1 int, key2 int, key3 int, key (key1), key (key2), key(key3)) engine=rocksdb;
|
||||
insert into t1 values (1, 100, 100), (1, 200, 200), (1, 300, 300);
|
||||
set global rocksdb_force_flush_memtable_now=1;
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
set global rocksdb_force_flush_memtable_now=1;
|
||||
explain select * from t1 where key1 = 1;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref key1 key1 5 const #
|
||||
|
|
|
@ -4,15 +4,14 @@ DROP TABLE IF EXISTS t3;
|
|||
SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1;
|
||||
create table t1 (a int) engine=rocksdb;
|
||||
drop table t1;
|
||||
select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
|
||||
select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING';
|
||||
TYPE NAME VALUE
|
||||
MAX_INDEX_ID MAX_INDEX_ID max_index_id
|
||||
CF_FLAGS 0 default [0]
|
||||
CF_FLAGS 1 __system__ [0]
|
||||
DDL_DROP_INDEX_ONGOING cf_id:0,index_id:max_index_id
|
||||
select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
|
||||
select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING';
|
||||
count(*)
|
||||
4
|
||||
3
|
||||
SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0;
|
||||
select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn';
|
||||
CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
|
||||
|
|
|
@ -6,6 +6,19 @@ t1 ROCKSDB # Fixed 1 # # # # # 6 NULL NULL NULL latin1_swedish_ci NULL
|
|||
INSERT INTO t1 VALUES ('538647864786478647864');
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'pk' at row 1
|
||||
SELECT * FROM t1;
|
||||
pk
|
||||
5
|
||||
9223372036854775807
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB 10 Fixed 2 22 44 0 0 0 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL
|
||||
INSERT INTO t1 VALUES ();
|
||||
ERROR 23000: Duplicate entry '9223372036854775807' for key 'PRIMARY'
|
||||
SELECT * FROM t1;
|
||||
pk
|
||||
5
|
||||
9223372036854775807
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL
|
||||
|
@ -19,3 +32,37 @@ SHOW TABLE STATUS LIKE 't1';
|
|||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (pk TINYINT NOT NULL PRIMARY KEY AUTO_INCREMENT);
|
||||
INSERT INTO t1 VALUES (5);
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB # Fixed 1 # # # # # 6 NULL NULL NULL latin1_swedish_ci NULL
|
||||
INSERT INTO t1 VALUES (1000);
|
||||
Warnings:
|
||||
Warning 1264 Out of range value for column 'pk' at row 1
|
||||
SELECT * FROM t1;
|
||||
pk
|
||||
5
|
||||
127
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB 10 Fixed 2 15 30 0 0 0 127 NULL NULL NULL latin1_swedish_ci NULL
|
||||
INSERT INTO t1 VALUES ();
|
||||
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||
SELECT * FROM t1;
|
||||
pk
|
||||
5
|
||||
127
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB # Fixed 2 # # # # # 127 NULL NULL NULL latin1_swedish_ci NULL
|
||||
INSERT INTO t1 VALUES ();
|
||||
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
|
||||
SELECT * FROM t1;
|
||||
pk
|
||||
5
|
||||
127
|
||||
SHOW TABLE STATUS LIKE 't1';
|
||||
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
|
||||
t1 ROCKSDB # Fixed 2 # # # # # 127 NULL NULL NULL latin1_swedish_ci NULL
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -8,6 +8,7 @@ ROW_LOCK_WAIT_TIMEOUTS
|
|||
begin;
|
||||
set @@rocksdb_lock_wait_timeout=1;
|
||||
begin;
|
||||
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';
|
||||
insert into t values(0);
|
||||
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY
|
||||
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
|
||||
|
@ -16,6 +17,10 @@ ROW_LOCK_WAIT_TIMEOUTS
|
|||
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
|
||||
ROW_LOCK_WAIT_TIMEOUTS
|
||||
1
|
||||
select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';
|
||||
waits
|
||||
true
|
||||
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';
|
||||
insert into t values(0);
|
||||
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY
|
||||
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
|
||||
|
@ -24,4 +29,7 @@ ROW_LOCK_WAIT_TIMEOUTS
|
|||
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
|
||||
ROW_LOCK_WAIT_TIMEOUTS
|
||||
2
|
||||
select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';
|
||||
waits
|
||||
true
|
||||
drop table t;
|
||||
|
|
|
@ -81,6 +81,7 @@ ROCKSDB_DDL Gamma
|
|||
ROCKSDB_INDEX_FILE_MAP Gamma
|
||||
ROCKSDB_LOCKS Gamma
|
||||
ROCKSDB_TRX Gamma
|
||||
ROCKSDB_DEADLOCK Gamma
|
||||
#
|
||||
# MDEV-12466 : Assertion `thd->transaction.stmt.is_empty() || thd->in_sub_stmt || ...
|
||||
#
|
||||
|
|
21
storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result
Normal file
|
@ -0,0 +1,21 @@
|
|||
CALL mtr.add_suppression("RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit*");
|
||||
FOUND 1 /RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit/ in rocksdb.max_open_files.err
|
||||
SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files;
|
||||
FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files
|
||||
1
|
||||
SELECT @@global.open_files_limit - 1 = @@global.rocksdb_max_open_files;
|
||||
@@global.open_files_limit - 1 = @@global.rocksdb_max_open_files
|
||||
1
|
||||
SELECT @@global.rocksdb_max_open_files;
|
||||
@@global.rocksdb_max_open_files
|
||||
0
|
||||
CREATE TABLE t1(a INT) ENGINE=ROCKSDB;
|
||||
INSERT INTO t1 VALUES(0),(1),(2),(3),(4);
|
||||
SET GLOBAL rocksdb_force_flush_memtable_and_lzero_now=1;
|
||||
DROP TABLE t1;
|
||||
SELECT @@global.rocksdb_max_open_files;
|
||||
@@global.rocksdb_max_open_files
|
||||
-1
|
||||
SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files;
|
||||
FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files
|
||||
1
|
|
@ -36,7 +36,7 @@ explain select b, d from t where d > 4;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
1509
|
||||
1505
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -44,7 +44,7 @@ explain select a, b, c, d from t where a = 5 and d <= 3;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a, b, c, d from t where a = 5 and d <= 3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
|
@ -58,13 +58,13 @@ explain select e from t where a = 5 and d <= 3;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select e from t where a = 5 and d <= 3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -72,13 +72,13 @@ explain select a, b, c, d from t where a = 5 and d >= 98;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a, b, c, d from t where a = 5 and d >= 98;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
51
|
||||
26
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -86,13 +86,13 @@ explain select e from t where a = 5 and d >= 98;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select e from t where a = 5 and d >= 98;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -100,13 +100,13 @@ explain select a, b, c, d from t where a in (1, 5) and d >= 98;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index
|
||||
rows_read
|
||||
502
|
||||
500
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a, b, c, d from t where a in (1, 5) and d >= 98;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
102
|
||||
52
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -114,13 +114,13 @@ explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index
|
||||
rows_read
|
||||
753
|
||||
750
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
153
|
||||
78
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -128,13 +128,13 @@ explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index
|
||||
rows_read
|
||||
204
|
||||
200
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
44
|
||||
24
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -142,13 +142,13 @@ explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) a
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index
|
||||
rows_read
|
||||
765
|
||||
750
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
165
|
||||
90
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -156,13 +156,13 @@ explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using where; Using index
|
||||
rows_read
|
||||
51
|
||||
50
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
11
|
||||
6
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=off';
|
||||
|
@ -170,7 +170,7 @@ explain select a+1, b, c, d from t where a = 5 and d < 3;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select a+1, b, c, d from t where a = 5 and d < 3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
|
@ -184,7 +184,7 @@ explain select b, c, d from t where a = 5 and d < 3;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index
|
||||
rows_read
|
||||
251
|
||||
250
|
||||
set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off';
|
||||
explain select b, c, d from t where a = 5 and d < 3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
|
@ -204,7 +204,7 @@ explain select a, b, c, d from t where a = b and d >= 98;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan
|
||||
rows_read
|
||||
9
|
||||
5
|
||||
include/diff_tables.inc [temp_orig, temp_skip]
|
||||
set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on';
|
||||
set optimizer_switch = 'skip_scan=on';
|
||||
|
|
|
@ -14,8 +14,13 @@ test t1 NULL BLOCK_READ_BYTE #
|
|||
test t1 NULL BLOCK_READ_TIME #
|
||||
test t1 NULL BLOCK_CHECKSUM_TIME #
|
||||
test t1 NULL BLOCK_DECOMPRESS_TIME #
|
||||
test t1 NULL GET_READ_BYTES #
|
||||
test t1 NULL MULTIGET_READ_BYTES #
|
||||
test t1 NULL ITER_READ_BYTES #
|
||||
test t1 NULL INTERNAL_KEY_SKIPPED_COUNT #
|
||||
test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT #
|
||||
test t1 NULL INTERNAL_RECENT_SKIPPED_COUNT #
|
||||
test t1 NULL INTERNAL_MERGE_COUNT #
|
||||
test t1 NULL GET_SNAPSHOT_TIME #
|
||||
test t1 NULL GET_FROM_MEMTABLE_TIME #
|
||||
test t1 NULL GET_FROM_MEMTABLE_COUNT #
|
||||
|
@ -23,9 +28,12 @@ test t1 NULL GET_POST_PROCESS_TIME #
|
|||
test t1 NULL GET_FROM_OUTPUT_FILES_TIME #
|
||||
test t1 NULL SEEK_ON_MEMTABLE_TIME #
|
||||
test t1 NULL SEEK_ON_MEMTABLE_COUNT #
|
||||
test t1 NULL NEXT_ON_MEMTABLE_COUNT #
|
||||
test t1 NULL PREV_ON_MEMTABLE_COUNT #
|
||||
test t1 NULL SEEK_CHILD_SEEK_TIME #
|
||||
test t1 NULL SEEK_CHILD_SEEK_COUNT #
|
||||
test t1 NULL SEEK_IN_HEAP_TIME #
|
||||
test t1 NULL SEEK_MIN_HEAP_TIME #
|
||||
test t1 NULL SEEK_MAX_HEAP_TIME #
|
||||
test t1 NULL SEEK_INTERNAL_SEEK_TIME #
|
||||
test t1 NULL FIND_NEXT_USER_ENTRY_TIME #
|
||||
test t1 NULL WRITE_WAL_TIME #
|
||||
|
@ -41,6 +49,12 @@ test t1 NULL NEW_TABLE_BLOCK_ITER_NANOS #
|
|||
test t1 NULL NEW_TABLE_ITERATOR_NANOS #
|
||||
test t1 NULL BLOCK_SEEK_NANOS #
|
||||
test t1 NULL FIND_TABLE_NANOS #
|
||||
test t1 NULL BLOOM_MEMTABLE_HIT_COUNT #
|
||||
test t1 NULL BLOOM_MEMTABLE_MISS_COUNT #
|
||||
test t1 NULL BLOOM_SST_HIT_COUNT #
|
||||
test t1 NULL BLOOM_SST_MISS_COUNT #
|
||||
test t1 NULL KEY_LOCK_WAIT_TIME #
|
||||
test t1 NULL KEY_LOCK_WAIT_COUNT #
|
||||
test t1 NULL IO_THREAD_POOL_ID #
|
||||
test t1 NULL IO_BYTES_WRITTEN #
|
||||
test t1 NULL IO_BYTES_READ #
|
||||
|
@ -59,8 +73,13 @@ BLOCK_READ_BYTE #
|
|||
BLOCK_READ_TIME #
|
||||
BLOCK_CHECKSUM_TIME #
|
||||
BLOCK_DECOMPRESS_TIME #
|
||||
GET_READ_BYTES #
|
||||
MULTIGET_READ_BYTES #
|
||||
ITER_READ_BYTES #
|
||||
INTERNAL_KEY_SKIPPED_COUNT #
|
||||
INTERNAL_DELETE_SKIPPED_COUNT #
|
||||
INTERNAL_RECENT_SKIPPED_COUNT #
|
||||
INTERNAL_MERGE_COUNT #
|
||||
GET_SNAPSHOT_TIME #
|
||||
GET_FROM_MEMTABLE_TIME #
|
||||
GET_FROM_MEMTABLE_COUNT #
|
||||
|
@ -68,9 +87,12 @@ GET_POST_PROCESS_TIME #
|
|||
GET_FROM_OUTPUT_FILES_TIME #
|
||||
SEEK_ON_MEMTABLE_TIME #
|
||||
SEEK_ON_MEMTABLE_COUNT #
|
||||
NEXT_ON_MEMTABLE_COUNT #
|
||||
PREV_ON_MEMTABLE_COUNT #
|
||||
SEEK_CHILD_SEEK_TIME #
|
||||
SEEK_CHILD_SEEK_COUNT #
|
||||
SEEK_IN_HEAP_TIME #
|
||||
SEEK_MIN_HEAP_TIME #
|
||||
SEEK_MAX_HEAP_TIME #
|
||||
SEEK_INTERNAL_SEEK_TIME #
|
||||
FIND_NEXT_USER_ENTRY_TIME #
|
||||
WRITE_WAL_TIME #
|
||||
|
@ -86,6 +108,12 @@ NEW_TABLE_BLOCK_ITER_NANOS #
|
|||
NEW_TABLE_ITERATOR_NANOS #
|
||||
BLOCK_SEEK_NANOS #
|
||||
FIND_TABLE_NANOS #
|
||||
BLOOM_MEMTABLE_HIT_COUNT #
|
||||
BLOOM_MEMTABLE_MISS_COUNT #
|
||||
BLOOM_SST_HIT_COUNT #
|
||||
BLOOM_SST_MISS_COUNT #
|
||||
KEY_LOCK_WAIT_TIME #
|
||||
KEY_LOCK_WAIT_COUNT #
|
||||
IO_THREAD_POOL_ID #
|
||||
IO_BYTES_WRITTEN #
|
||||
IO_BYTES_READ #
|
||||
|
|
|
@ -573,9 +573,6 @@ insert into t30 values
|
|||
('row3', 'row3-key', 'row3-data'),
|
||||
('row4', 'row4-key', 'row4-data'),
|
||||
('row5', 'row5-key', 'row5-data');
|
||||
analyze table t30;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t30 analyze status OK
|
||||
explain
|
||||
select * from t30 where key1 <='row3-key';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
|
@ -868,6 +865,7 @@ ERROR 42S02: Unknown table 'test.t45'
|
|||
show variables
|
||||
where
|
||||
variable_name like 'rocksdb%' and
|
||||
variable_name not like 'rocksdb_max_open_files' and
|
||||
variable_name not like 'rocksdb_supported_compression_types';
|
||||
Variable_name Value
|
||||
rocksdb_access_hint_on_compaction_start 1
|
||||
|
@ -875,6 +873,7 @@ rocksdb_advise_random_on_open ON
|
|||
rocksdb_allow_concurrent_memtable_write OFF
|
||||
rocksdb_allow_mmap_reads OFF
|
||||
rocksdb_allow_mmap_writes OFF
|
||||
rocksdb_allow_to_start_after_corruption OFF
|
||||
rocksdb_blind_delete_primary_key OFF
|
||||
rocksdb_block_cache_size 536870912
|
||||
rocksdb_block_restart_interval 16
|
||||
|
@ -894,7 +893,6 @@ rocksdb_compaction_sequential_deletes 0
|
|||
rocksdb_compaction_sequential_deletes_count_sd OFF
|
||||
rocksdb_compaction_sequential_deletes_file_size 0
|
||||
rocksdb_compaction_sequential_deletes_window 0
|
||||
rocksdb_concurrent_prepare ON
|
||||
rocksdb_create_checkpoint
|
||||
rocksdb_create_if_missing ON
|
||||
rocksdb_create_missing_column_families OFF
|
||||
|
@ -918,7 +916,6 @@ rocksdb_enable_ttl_read_filtering ON
|
|||
rocksdb_enable_write_thread_adaptive_yield OFF
|
||||
rocksdb_error_if_exists OFF
|
||||
rocksdb_flush_log_at_trx_commit 0
|
||||
rocksdb_flush_memtable_on_analyze ON
|
||||
rocksdb_force_compute_memtable_stats ON
|
||||
rocksdb_force_compute_memtable_stats_cachetime 0
|
||||
rocksdb_force_flush_memtable_and_lzero_now OFF
|
||||
|
@ -926,6 +923,7 @@ rocksdb_force_flush_memtable_now OFF
|
|||
rocksdb_force_index_records_in_range 0
|
||||
rocksdb_git_hash #
|
||||
rocksdb_hash_index_allow_collision ON
|
||||
rocksdb_ignore_unknown_options ON
|
||||
rocksdb_index_type kBinarySearch
|
||||
rocksdb_info_log_level error_level
|
||||
rocksdb_io_write_timeout 0
|
||||
|
@ -942,8 +940,7 @@ rocksdb_max_background_jobs 2
|
|||
rocksdb_max_latest_deadlocks 5
|
||||
rocksdb_max_log_file_size 0
|
||||
rocksdb_max_manifest_file_size 18446744073709551615
|
||||
rocksdb_max_open_files -1
|
||||
rocksdb_max_row_locks 1073741824
|
||||
rocksdb_max_row_locks 1048576
|
||||
rocksdb_max_subcompactions 1
|
||||
rocksdb_max_total_wal_size 0
|
||||
rocksdb_merge_buf_size 67108864
|
||||
|
@ -978,6 +975,7 @@ rocksdb_table_cache_numshardbits 6
|
|||
rocksdb_table_stats_sampling_pct 10
|
||||
rocksdb_tmpdir
|
||||
rocksdb_trace_sst_api OFF
|
||||
rocksdb_two_write_queues ON
|
||||
rocksdb_unsafe_for_binlog OFF
|
||||
rocksdb_update_cf_options
|
||||
rocksdb_use_adaptive_mutex OFF
|
||||
|
@ -1464,6 +1462,7 @@ Rocksdb_rows_read #
|
|||
Rocksdb_rows_updated #
|
||||
Rocksdb_rows_deleted_blind #
|
||||
Rocksdb_rows_expired #
|
||||
Rocksdb_rows_filtered #
|
||||
Rocksdb_system_rows_deleted #
|
||||
Rocksdb_system_rows_inserted #
|
||||
Rocksdb_system_rows_read #
|
||||
|
@ -1474,11 +1473,22 @@ Rocksdb_queries_point #
|
|||
Rocksdb_queries_range #
|
||||
Rocksdb_covered_secondary_key_lookups #
|
||||
Rocksdb_block_cache_add #
|
||||
Rocksdb_block_cache_add_failures #
|
||||
Rocksdb_block_cache_bytes_read #
|
||||
Rocksdb_block_cache_bytes_write #
|
||||
Rocksdb_block_cache_data_add #
|
||||
Rocksdb_block_cache_data_bytes_insert #
|
||||
Rocksdb_block_cache_data_hit #
|
||||
Rocksdb_block_cache_data_miss #
|
||||
Rocksdb_block_cache_filter_add #
|
||||
Rocksdb_block_cache_filter_bytes_evict #
|
||||
Rocksdb_block_cache_filter_bytes_insert #
|
||||
Rocksdb_block_cache_filter_hit #
|
||||
Rocksdb_block_cache_filter_miss #
|
||||
Rocksdb_block_cache_hit #
|
||||
Rocksdb_block_cache_index_add #
|
||||
Rocksdb_block_cache_index_bytes_evict #
|
||||
Rocksdb_block_cache_index_bytes_insert #
|
||||
Rocksdb_block_cache_index_hit #
|
||||
Rocksdb_block_cache_index_miss #
|
||||
Rocksdb_block_cache_miss #
|
||||
|
@ -1495,7 +1505,11 @@ Rocksdb_compaction_key_drop_new #
|
|||
Rocksdb_compaction_key_drop_obsolete #
|
||||
Rocksdb_compaction_key_drop_user #
|
||||
Rocksdb_flush_write_bytes #
|
||||
Rocksdb_get_hit_l0 #
|
||||
Rocksdb_get_hit_l1 #
|
||||
Rocksdb_get_hit_l2_and_up #
|
||||
Rocksdb_getupdatessince_calls #
|
||||
Rocksdb_iter_bytes_read #
|
||||
Rocksdb_memtable_hit #
|
||||
Rocksdb_memtable_miss #
|
||||
Rocksdb_no_file_closes #
|
||||
|
@ -1503,6 +1517,12 @@ Rocksdb_no_file_errors #
|
|||
Rocksdb_no_file_opens #
|
||||
Rocksdb_num_iterators #
|
||||
Rocksdb_number_block_not_compressed #
|
||||
Rocksdb_number_db_next #
|
||||
Rocksdb_number_db_next_found #
|
||||
Rocksdb_number_db_prev #
|
||||
Rocksdb_number_db_prev_found #
|
||||
Rocksdb_number_db_seek #
|
||||
Rocksdb_number_db_seek_found #
|
||||
Rocksdb_number_deletes_filtered #
|
||||
Rocksdb_number_keys_read #
|
||||
Rocksdb_number_keys_updated #
|
||||
|
@ -1517,11 +1537,11 @@ Rocksdb_number_sst_entry_merge #
|
|||
Rocksdb_number_sst_entry_other #
|
||||
Rocksdb_number_sst_entry_put #
|
||||
Rocksdb_number_sst_entry_singledelete #
|
||||
Rocksdb_number_stat_computes #
|
||||
Rocksdb_number_superversion_acquires #
|
||||
Rocksdb_number_superversion_cleanups #
|
||||
Rocksdb_number_superversion_releases #
|
||||
Rocksdb_rate_limit_delay_millis #
|
||||
Rocksdb_row_lock_deadlocks #
|
||||
Rocksdb_row_lock_wait_timeouts #
|
||||
Rocksdb_snapshot_conflict_errors #
|
||||
Rocksdb_stall_l0_file_count_limit_slowdowns #
|
||||
Rocksdb_stall_locked_l0_file_count_limit_slowdowns #
|
||||
|
@ -1549,6 +1569,7 @@ ROCKSDB_ROWS_READ
|
|||
ROCKSDB_ROWS_UPDATED
|
||||
ROCKSDB_ROWS_DELETED_BLIND
|
||||
ROCKSDB_ROWS_EXPIRED
|
||||
ROCKSDB_ROWS_FILTERED
|
||||
ROCKSDB_SYSTEM_ROWS_DELETED
|
||||
ROCKSDB_SYSTEM_ROWS_INSERTED
|
||||
ROCKSDB_SYSTEM_ROWS_READ
|
||||
|
@ -1559,11 +1580,22 @@ ROCKSDB_QUERIES_POINT
|
|||
ROCKSDB_QUERIES_RANGE
|
||||
ROCKSDB_COVERED_SECONDARY_KEY_LOOKUPS
|
||||
ROCKSDB_BLOCK_CACHE_ADD
|
||||
ROCKSDB_BLOCK_CACHE_ADD_FAILURES
|
||||
ROCKSDB_BLOCK_CACHE_BYTES_READ
|
||||
ROCKSDB_BLOCK_CACHE_BYTES_WRITE
|
||||
ROCKSDB_BLOCK_CACHE_DATA_ADD
|
||||
ROCKSDB_BLOCK_CACHE_DATA_BYTES_INSERT
|
||||
ROCKSDB_BLOCK_CACHE_DATA_HIT
|
||||
ROCKSDB_BLOCK_CACHE_DATA_MISS
|
||||
ROCKSDB_BLOCK_CACHE_FILTER_ADD
|
||||
ROCKSDB_BLOCK_CACHE_FILTER_BYTES_EVICT
|
||||
ROCKSDB_BLOCK_CACHE_FILTER_BYTES_INSERT
|
||||
ROCKSDB_BLOCK_CACHE_FILTER_HIT
|
||||
ROCKSDB_BLOCK_CACHE_FILTER_MISS
|
||||
ROCKSDB_BLOCK_CACHE_HIT
|
||||
ROCKSDB_BLOCK_CACHE_INDEX_ADD
|
||||
ROCKSDB_BLOCK_CACHE_INDEX_BYTES_EVICT
|
||||
ROCKSDB_BLOCK_CACHE_INDEX_BYTES_INSERT
|
||||
ROCKSDB_BLOCK_CACHE_INDEX_HIT
|
||||
ROCKSDB_BLOCK_CACHE_INDEX_MISS
|
||||
ROCKSDB_BLOCK_CACHE_MISS
|
||||
|
@ -1580,7 +1612,11 @@ ROCKSDB_COMPACTION_KEY_DROP_NEW
|
|||
ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE
|
||||
ROCKSDB_COMPACTION_KEY_DROP_USER
|
||||
ROCKSDB_FLUSH_WRITE_BYTES
|
||||
ROCKSDB_GET_HIT_L0
|
||||
ROCKSDB_GET_HIT_L1
|
||||
ROCKSDB_GET_HIT_L2_AND_UP
|
||||
ROCKSDB_GETUPDATESSINCE_CALLS
|
||||
ROCKSDB_ITER_BYTES_READ
|
||||
ROCKSDB_MEMTABLE_HIT
|
||||
ROCKSDB_MEMTABLE_MISS
|
||||
ROCKSDB_NO_FILE_CLOSES
|
||||
|
@ -1588,6 +1624,12 @@ ROCKSDB_NO_FILE_ERRORS
|
|||
ROCKSDB_NO_FILE_OPENS
|
||||
ROCKSDB_NUM_ITERATORS
|
||||
ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED
|
||||
ROCKSDB_NUMBER_DB_NEXT
ROCKSDB_NUMBER_DB_NEXT_FOUND
ROCKSDB_NUMBER_DB_PREV
ROCKSDB_NUMBER_DB_PREV_FOUND
ROCKSDB_NUMBER_DB_SEEK
ROCKSDB_NUMBER_DB_SEEK_FOUND
ROCKSDB_NUMBER_DELETES_FILTERED
ROCKSDB_NUMBER_KEYS_READ
ROCKSDB_NUMBER_KEYS_UPDATED
@ -1602,11 +1644,11 @@ ROCKSDB_NUMBER_SST_ENTRY_MERGE
ROCKSDB_NUMBER_SST_ENTRY_OTHER
ROCKSDB_NUMBER_SST_ENTRY_PUT
ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE
ROCKSDB_NUMBER_STAT_COMPUTES
ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES
ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS
ROCKSDB_NUMBER_SUPERVERSION_RELEASES
ROCKSDB_RATE_LIMIT_DELAY_MILLIS
ROCKSDB_ROW_LOCK_DEADLOCKS
ROCKSDB_ROW_LOCK_WAIT_TIMEOUTS
ROCKSDB_SNAPSHOT_CONFLICT_ERRORS
ROCKSDB_STALL_L0_FILE_COUNT_LIMIT_SLOWDOWNS
ROCKSDB_STALL_LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS
@ -1636,6 +1678,7 @@ ROCKSDB_ROWS_READ
ROCKSDB_ROWS_UPDATED
ROCKSDB_ROWS_DELETED_BLIND
ROCKSDB_ROWS_EXPIRED
ROCKSDB_ROWS_FILTERED
ROCKSDB_SYSTEM_ROWS_DELETED
ROCKSDB_SYSTEM_ROWS_INSERTED
ROCKSDB_SYSTEM_ROWS_READ
@ -1646,11 +1689,22 @@ ROCKSDB_QUERIES_POINT
ROCKSDB_QUERIES_RANGE
ROCKSDB_COVERED_SECONDARY_KEY_LOOKUPS
ROCKSDB_BLOCK_CACHE_ADD
ROCKSDB_BLOCK_CACHE_ADD_FAILURES
ROCKSDB_BLOCK_CACHE_BYTES_READ
ROCKSDB_BLOCK_CACHE_BYTES_WRITE
ROCKSDB_BLOCK_CACHE_DATA_ADD
ROCKSDB_BLOCK_CACHE_DATA_BYTES_INSERT
ROCKSDB_BLOCK_CACHE_DATA_HIT
ROCKSDB_BLOCK_CACHE_DATA_MISS
ROCKSDB_BLOCK_CACHE_FILTER_ADD
ROCKSDB_BLOCK_CACHE_FILTER_BYTES_EVICT
ROCKSDB_BLOCK_CACHE_FILTER_BYTES_INSERT
ROCKSDB_BLOCK_CACHE_FILTER_HIT
ROCKSDB_BLOCK_CACHE_FILTER_MISS
ROCKSDB_BLOCK_CACHE_HIT
ROCKSDB_BLOCK_CACHE_INDEX_ADD
ROCKSDB_BLOCK_CACHE_INDEX_BYTES_EVICT
ROCKSDB_BLOCK_CACHE_INDEX_BYTES_INSERT
ROCKSDB_BLOCK_CACHE_INDEX_HIT
ROCKSDB_BLOCK_CACHE_INDEX_MISS
ROCKSDB_BLOCK_CACHE_MISS
@ -1667,7 +1721,11 @@ ROCKSDB_COMPACTION_KEY_DROP_NEW
ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE
ROCKSDB_COMPACTION_KEY_DROP_USER
ROCKSDB_FLUSH_WRITE_BYTES
ROCKSDB_GET_HIT_L0
ROCKSDB_GET_HIT_L1
ROCKSDB_GET_HIT_L2_AND_UP
ROCKSDB_GETUPDATESSINCE_CALLS
ROCKSDB_ITER_BYTES_READ
ROCKSDB_MEMTABLE_HIT
ROCKSDB_MEMTABLE_MISS
ROCKSDB_NO_FILE_CLOSES
@ -1675,6 +1733,12 @@ ROCKSDB_NO_FILE_ERRORS
ROCKSDB_NO_FILE_OPENS
ROCKSDB_NUM_ITERATORS
ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED
ROCKSDB_NUMBER_DB_NEXT
ROCKSDB_NUMBER_DB_NEXT_FOUND
ROCKSDB_NUMBER_DB_PREV
ROCKSDB_NUMBER_DB_PREV_FOUND
ROCKSDB_NUMBER_DB_SEEK
ROCKSDB_NUMBER_DB_SEEK_FOUND
ROCKSDB_NUMBER_DELETES_FILTERED
ROCKSDB_NUMBER_KEYS_READ
ROCKSDB_NUMBER_KEYS_UPDATED
@ -1689,11 +1753,11 @@ ROCKSDB_NUMBER_SST_ENTRY_MERGE
ROCKSDB_NUMBER_SST_ENTRY_OTHER
ROCKSDB_NUMBER_SST_ENTRY_PUT
ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE
ROCKSDB_NUMBER_STAT_COMPUTES
ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES
ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS
ROCKSDB_NUMBER_SUPERVERSION_RELEASES
ROCKSDB_RATE_LIMIT_DELAY_MILLIS
ROCKSDB_ROW_LOCK_DEADLOCKS
ROCKSDB_ROW_LOCK_WAIT_TIMEOUTS
ROCKSDB_SNAPSHOT_CONFLICT_ERRORS
ROCKSDB_STALL_L0_FILE_COUNT_LIMIT_SLOWDOWNS
ROCKSDB_STALL_LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS
11 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_debug.result Normal file
@ -0,0 +1,11 @@
#
# Issue #728: Assertion `covers_key(b)' failed in int
# myrocks::Rdb_key_def::cmp_full_keys(const rocksdb::Slice&,
# const rocksdb::Slice&)
#
CREATE TABLE t2(c1 TINYINT SIGNED KEY,c2 TINYINT UNSIGNED,c3 INT);
INSERT INTO t2(c1)VALUES(0);
SELECT * FROM t2 WHERE c1<=127 ORDER BY c1 DESC;
c1 c2 c3
0 NULL NULL
DROP TABLE t2;
@ -7,5 +7,5 @@ count(*)
10000
explain select c1 from t1 where c1 > 5 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range i i 9 NULL 9900 Using where; Using index
1 SIMPLE t1 range i i 9 NULL # Using where; Using index
drop table t1;

@ -83,12 +83,12 @@ FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
WHERE TABLE_SCHEMA = 'test'
GROUP BY TABLE_NAME, PARTITION_NAME;
TABLE_SCHEMA TABLE_NAME PARTITION_NAME COUNT(STAT_TYPE)
test t1 NULL 43
test t2 NULL 43
test t4 p0 43
test t4 p1 43
test t4 p2 43
test t4 p3 43
test t1 NULL 57
test t2 NULL 57
test t4 p0 57
test t4 p1 57
test t4 p2 57
test t4 p3 57
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS;
CF_NAME OPTION_TYPE VALUE
__system__ COMPARATOR #
@ -153,9 +153,15 @@ __system__ TABLE_FACTORY::BLOCK_SIZE #
__system__ TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
__system__ TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
__system__ TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL #
__system__ TABLE_FACTORY::METADATA_BLOCK_SIZE #
__system__ TABLE_FACTORY::PARTITION_FILTERS #
__system__ TABLE_FACTORY::USE_DELTA_ENCODING #
__system__ TABLE_FACTORY::FILTER_POLICY #
__system__ TABLE_FACTORY::WHOLE_KEY_FILTERING #
__system__ TABLE_FACTORY::VERIFY_COMPRESSION #
__system__ TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
__system__ TABLE_FACTORY::FORMAT_VERSION #
__system__ TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
cf_t1 COMPARATOR #
cf_t1 MERGE_OPERATOR #
cf_t1 COMPACTION_FILTER #
@ -218,9 +224,15 @@ cf_t1 TABLE_FACTORY::BLOCK_SIZE #
cf_t1 TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
cf_t1 TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
cf_t1 TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL #
cf_t1 TABLE_FACTORY::METADATA_BLOCK_SIZE #
cf_t1 TABLE_FACTORY::PARTITION_FILTERS #
cf_t1 TABLE_FACTORY::USE_DELTA_ENCODING #
cf_t1 TABLE_FACTORY::FILTER_POLICY #
cf_t1 TABLE_FACTORY::WHOLE_KEY_FILTERING #
cf_t1 TABLE_FACTORY::VERIFY_COMPRESSION #
cf_t1 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
cf_t1 TABLE_FACTORY::FORMAT_VERSION #
cf_t1 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
default COMPARATOR #
default MERGE_OPERATOR #
default COMPACTION_FILTER #
@ -283,9 +295,15 @@ default TABLE_FACTORY::BLOCK_SIZE #
default TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
default TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
default TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL #
default TABLE_FACTORY::METADATA_BLOCK_SIZE #
default TABLE_FACTORY::PARTITION_FILTERS #
default TABLE_FACTORY::USE_DELTA_ENCODING #
default TABLE_FACTORY::FILTER_POLICY #
default TABLE_FACTORY::WHOLE_KEY_FILTERING #
default TABLE_FACTORY::VERIFY_COMPRESSION #
default TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
default TABLE_FACTORY::FORMAT_VERSION #
default TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
rev:cf_t2 COMPARATOR #
rev:cf_t2 MERGE_OPERATOR #
rev:cf_t2 COMPACTION_FILTER #
@ -348,9 +366,15 @@ rev:cf_t2 TABLE_FACTORY::BLOCK_SIZE #
rev:cf_t2 TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
rev:cf_t2 TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
rev:cf_t2 TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL #
rev:cf_t2 TABLE_FACTORY::METADATA_BLOCK_SIZE #
rev:cf_t2 TABLE_FACTORY::PARTITION_FILTERS #
rev:cf_t2 TABLE_FACTORY::USE_DELTA_ENCODING #
rev:cf_t2 TABLE_FACTORY::FILTER_POLICY #
rev:cf_t2 TABLE_FACTORY::WHOLE_KEY_FILTERING #
rev:cf_t2 TABLE_FACTORY::VERIFY_COMPRESSION #
rev:cf_t2 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
rev:cf_t2 TABLE_FACTORY::FORMAT_VERSION #
rev:cf_t2 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
@ -1,4 +1,20 @@
CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB;
create table t1 (pk int primary key) engine=rocksdb;
show tables;
Tables_in_test
#mysql50#t1#sql-test
t1
call mtr.add_suppression('Invalid .old.. table or database name .t1#sql-test.');
set session debug_dbug="+d,gen_sql_table_name";
rename table t1 to t2;
set session debug_dbug= "-d,gen_sql_table_name";
show tables;
Tables_in_test
#mysql50#t1#sql-test
t2
show tables;
Tables_in_test
create table t2 (pk int primary key) engine=rocksdb;
show tables;
Tables_in_test
t2
drop table t2;

@ -51,18 +51,30 @@ INSERT INTO t1 values (3);
INSERT INTO t1 values (5);
INSERT INTO t1 values (7);
set global rocksdb_debug_ttl_rec_ts = 0;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
SELECT * FROM t1;
a
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
variable_value-@a
4
set global rocksdb_enable_ttl_read_filtering=0;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
SELECT * FROM t1;
a
1
3
5
7
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
variable_value-@a
0
set global rocksdb_enable_ttl_read_filtering=1;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
SELECT * FROM t1;
a
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
variable_value-@a
4
DROP TABLE t1;
CREATE TABLE t1 (
a int,
@ -191,20 +203,36 @@ a
connection con2;
set global rocksdb_force_flush_memtable_now=1;
set global rocksdb_compact_cf='default';
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
SELECT * FROM t1;
a
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
variable_value-@a
1
# Switching to connection 1
connection con1;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
SELECT * FROM t1;
a
1
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
variable_value-@a
0
UPDATE t1 set a = a + 1;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
SELECT * FROM t1;
a
2
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
variable_value-@a
0
COMMIT;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
SELECT * FROM t1;
a
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
variable_value-@a
1
DROP TABLE t1;
disconnect con1;
disconnect con2;
@ -1,4 +1,3 @@
drop table if exists t1,t2;
#
# A basic test whether endspace-aware variable length encoding
# works when in PK
@ -756,3 +755,16 @@ email_i 1
drop table t;
set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct;
set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums;
drop table if exists t;
Warnings:
Note 1051 Unknown table 'test.t'
create table t (h varchar(31) character set utf8 collate utf8_bin not null, i varchar(19) collate latin1_bin not null, primary key(i), key(h)) engine=rocksdb;
insert into t(i,h) values('a','b');
check table t;
Table Op Msg_type Msg_text
test.t check status OK
alter table t modify h varchar(31) character set cp1257 collate cp1257_bin not null;
check table t;
Table Op Msg_type Msg_text
test.t check status OK
drop table t;
@ -4,23 +4,15 @@ call mtr.add_suppression("Aborting");
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';
plugin_name plugin_type
ROCKSDB STORAGE ENGINE
# Check that ROCKSDB plugin is not loaded:
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';
plugin_name plugin_type
# Check that MyRocks has printed an error message into server error log:
FOUND 1 /enable both use_direct_reads/ in mysqld.1.err
# Now, restart the server back with regular settings
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';
plugin_name plugin_type
ROCKSDB STORAGE ENGINE
#
# Now, repeat the same with another set of invalid arguments
#
# Check that ROCKSDB plugin is not loaded:
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';
plugin_name plugin_type
FOUND 1 /enable both use_direct_io_for_flush_and_compaction/ in mysqld.1.err
# Now, restart the server back with regular settings
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';
plugin_name plugin_type
ROCKSDB STORAGE ENGINE
Checking direct reads
FOUND 1 /enable both use_direct_reads/ in use_direct_reads_writes.err
Checking direct writes
FOUND 1 /enable both use_direct_io_for_flush_and_compaction/ in use_direct_reads_writes.err
Checking rocksdb_flush_log_at_trx_commit
FOUND 1 /rocksdb_flush_log_at_trx_commit needs to be/ in use_direct_reads_writes.err
Validate flush_log settings when direct writes is enabled
set global rocksdb_flush_log_at_trx_commit=0;
set global rocksdb_flush_log_at_trx_commit=1;
ERROR 42000: Variable 'rocksdb_flush_log_at_trx_commit' can't be set to the value of '1'
set global rocksdb_flush_log_at_trx_commit=2;
ERROR 42000: Variable 'rocksdb_flush_log_at_trx_commit' can't be set to the value of '2'
@ -3,6 +3,7 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
create table aaa (id int primary key, i int) engine rocksdb;
set @save_rocksdb_flush_log_at_trx_commit=@@global.rocksdb_flush_log_at_trx_commit;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
insert aaa(id, i) values(0,1);
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
insert aaa(id, i) values(1,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
@ -16,11 +17,11 @@ insert aaa(id, i) values(3,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
variable_value-@a
3
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
insert aaa(id, i) values(4,1);
SET GLOBAL rocksdb_flush_log_at_trx_commit=2;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
SET GLOBAL rocksdb_flush_log_at_trx_commit=2;
insert aaa(id, i) values(5,1);
truncate table aaa;
drop table aaa;
@ -65,7 +65,12 @@ ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
# disable duplicate index warning
--disable_warnings
# now do same index using copy algorithm
# hitting max row locks (1M)
--error ER_RDB_STATUS_GENERAL
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
set session rocksdb_bulk_load=1;
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
set session rocksdb_bulk_load=0;
--enable_warnings

# checksum testing

@ -135,3 +135,15 @@ set global rocksdb_force_flush_memtable_now = true;

select * from t1;
DROP TABLE t1;

## https://github.com/facebook/mysql-5.6/issues/736
create table t1 (i int auto_increment, key(i)) engine=rocksdb;
insert into t1 values();
insert into t1 values();
insert into t1 values();

show create table t1;
--source include/restart_mysqld.inc
show create table t1;

drop table t1;
@ -0,0 +1,75 @@
--source include/have_rocksdb.inc
--source include/not_valgrind.inc

--echo #
--echo # Test how MyRocks behaves when RocksDB reports corrupted data.
--echo #

--source include/have_debug.inc

# use custom error log to assert on error message in search_pattern_in_file.inc
--let LOG=$MYSQLTEST_VARDIR/tmp/allow_to_start_after_corruption_debug.err
--let SEARCH_FILE=$LOG

# restart server to change error log and ignore corruption on startup
--let $_mysqld_option=--log-error=$LOG --rocksdb_allow_to_start_after_corruption=1
--source include/restart_mysqld_with_option.inc

--echo #
--echo # Test server crashes on corrupted data and restarts
--echo #
create table t1 (
pk int not null primary key,
col1 varchar(10)
) engine=rocksdb;

insert into t1 values (1,1),(2,2),(3,3);

select * from t1 where pk=1;
set session debug_dbug= "+d,rocksdb_return_status_corrupted";
--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--error 2013
select * from t1 where pk=1;
--source include/wait_until_disconnected.inc
--let SEARCH_PATTERN=data corruption detected
--source include/search_pattern_in_file.inc
--remove_file $LOG

--echo #
--echo # The same for scan queries
--echo #

--source include/start_mysqld_with_option.inc
select * from t1;
set session debug_dbug= "+d,rocksdb_return_status_corrupted";
--exec echo "wait" > $_expect_file_name
--error 2013
select * from t1;
--source include/wait_until_disconnected.inc
--let SEARCH_PATTERN=data corruption detected
--source include/search_pattern_in_file.inc
--remove_file $LOG

--echo #
--echo # Test restart failure. The server is shutdown at this point.
--echo #

# remove flag to ignore corruption
--let $_mysqld_option=--log-error=$LOG
--error 0
--exec $MYSQLD_CMD --plugin_load=$HA_ROCKSDB_SO $_mysqld_option
--let SEARCH_PATTERN=The server will exit normally and stop restart attempts
--source include/search_pattern_in_file.inc
--remove_file $LOG

--echo #
--echo # Remove corruption file and restart cleanly
--echo #

--exec rm $MYSQLTEST_VARDIR/mysqld.$_server_id/data/#rocksdb/ROCKSDB_CORRUPTED
--source include/start_mysqld_with_option.inc

drop table t1;

# Restart mysqld with default options
--source include/restart_mysqld.inc

@ -0,0 +1,8 @@
!include suite/rpl/my.cnf

[mysqld.1]
binlog_format=row
[mysqld.2]
binlog_format=row
slave_parallel_workers=1
#rpl_skip_tx_api=ON

@ -0,0 +1,9 @@
--source include/have_rocksdb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

create table t (i int primary key auto_increment) engine=rocksdb;

--source include/autoinc_crash_safe.inc

--source include/rpl_end.inc

@ -0,0 +1,8 @@
!include suite/rpl/my.cnf

[mysqld.1]
binlog_format=row
[mysqld.2]
binlog_format=row
#slave_parallel_workers=1
#rpl_skip_tx_api=ON

@ -0,0 +1,10 @@
--source include/have_rocksdb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
--source include/have_partition.inc

create table t (i int primary key auto_increment) engine=rocksdb partition by key (i) partitions 3;

--source include/autoinc_crash_safe.inc

--source include/rpl_end.inc

@ -0,0 +1 @@
--binlog-format=row
118 storage/rocksdb/mysql-test/rocksdb/t/autoinc_debug.test Normal file
@ -0,0 +1,118 @@
--source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/have_log_bin.inc

--echo #
--echo # Testing upgrading from server without merges for auto_increment
--echo # to new server with such support.
--echo #

set debug_dbug='+d,myrocks_autoinc_upgrade';
create table t (i int primary key auto_increment);
insert into t values ();
insert into t values ();
insert into t values ();
select * from t;

delete from t where i > 1;
select * from t;

select table_name, index_name, auto_increment
from information_schema.rocksdb_ddl where table_name = 't';

set debug_dbug='-d,myrocks_autoinc_upgrade';

--source include/restart_mysqld.inc

insert into t values ();
insert into t values ();
insert into t values ();
select * from t;

select table_name, index_name, auto_increment
from information_schema.rocksdb_ddl where table_name = 't';

delete from t where i > 1;

--source include/restart_mysqld.inc

insert into t values ();
insert into t values ();
insert into t values ();
select * from t;

drop table t;

--echo #
--echo # Testing crash safety of transactions.
--echo #
create table t (i int primary key auto_increment);
insert into t values ();
insert into t values ();
insert into t values ();

--echo # Before anything
begin;
insert into t values ();
insert into t values ();
set debug_dbug="+d,crash_commit_before";
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--error 2013
commit;
--source include/wait_until_disconnected.inc
--enable_reconnect
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;

--echo # After engine prepare
begin;
insert into t values ();
insert into t values ();
set debug_dbug="+d,crash_commit_after_prepare";
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--error 2013
commit;
--source include/wait_until_disconnected.inc
--enable_reconnect
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;

--echo # After binlog
begin;
insert into t values ();
insert into t values ();
set debug_dbug="+d,crash_commit_after_log";
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--error 2013
commit;
--source include/wait_until_disconnected.inc
--enable_reconnect
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;

--echo # After everything
begin;
insert into t values ();
insert into t values ();
set debug_dbug="+d,crash_commit_after";
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--error 2013
commit;
--source include/wait_until_disconnected.inc
--enable_reconnect
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;

drop table t;

@ -64,4 +64,42 @@ SELECT LAST_INSERT_ID();
SELECT a FROM t1 ORDER BY a;
DROP TABLE t1;

--echo #---------------------------
--echo # test large autoincrement values
--echo #---------------------------

SET auto_increment_increment = 1;
SET auto_increment_offset = 1;
CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
INSERT INTO t1 VALUES (18446744073709551613, 'a');
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (NULL, 'b');
SHOW CREATE TABLE t1;
--error ER_AUTOINC_READ_FAILED
INSERT INTO t1 VALUES (NULL, 'c');
SELECT * FROM t1;
DROP TABLE t1;

SET auto_increment_increment = 300;
CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
INSERT INTO t1 VALUES (18446744073709551613, 'a');
SHOW CREATE TABLE t1;
--error ER_AUTOINC_READ_FAILED
INSERT INTO t1 VALUES (NULL, 'b');
SHOW CREATE TABLE t1;
--error ER_AUTOINC_READ_FAILED
INSERT INTO t1 VALUES (NULL, 'c');
SELECT * FROM t1;
DROP TABLE t1;

SET auto_increment_offset = 200;
CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
INSERT INTO t1 VALUES (18446744073709551613, 'a');
SHOW CREATE TABLE t1;
--error ER_AUTOINC_READ_FAILED
INSERT INTO t1 VALUES (NULL, 'b');
SHOW CREATE TABLE t1;
--error ER_AUTOINC_READ_FAILED
INSERT INTO t1 VALUES (NULL, 'c');
SELECT * FROM t1;
DROP TABLE t1;

@ -1,3 +0,0 @@
--source include/have_rocksdb.inc

--echo # The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE.

@ -0,0 +1 @@
--rocksdb_override_cf_options=rev:bf5_1={prefix_extractor=capped:4;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;}};
61 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter5.test Normal file
@ -0,0 +1,61 @@
--echo #
--echo # Issue #809: Wrong query result with bloom filters
--echo #

create table t1 (
id1 bigint not null,
id2 bigint not null,
id3 varchar(100) not null,
id4 int not null,
id5 int not null,
value bigint,
value2 varchar(100),
primary key (id1, id2, id3, id4) COMMENT 'rev:bf5_1'
) engine=ROCKSDB;

create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);

create table t3(seq int);
insert into t3
select
1+ A.a + B.a* 10 + C.a * 100 + D.a * 1000
from t2 A, t2 B, t2 C, t2 D;

insert t1
select
(seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc"
from t3;

set global rocksdb_force_flush_memtable_now=1;

--echo # Full table scan
explain
select * from t1 limit 10;
select * from t1 limit 10;

--echo # An index scan starting from the end of the table:
explain
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;

# A testcase for an assertion that the fix is removing
# The only requirement for the used column family is that it is reverse-ordered
create table t4 (
pk int unsigned not null primary key,
kp1 int unsigned not null,
kp2 int unsigned not null,
col1 int unsigned,
key(kp1, kp2) comment 'rev:bf5_2'
) engine=rocksdb;

insert into t4 values (1, 0xFFFF, 0xFFF, 12345);

--echo # This must not fail an assert:
select * from t4 force index(kp1) where kp1=0xFFFFFFFF and kp2<=0xFFFFFFFF order by kp2 desc;

drop table t1,t2,t3,t4;

@ -7,4 +7,4 @@
--let pk_cf=cf1
--let data_order_desc=0

--source bulk_load.inc
--source ../include/bulk_load.inc
@ -0,0 +1,19 @@
--source include/have_rocksdb.inc

CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;

SET rocksdb_bulk_load_allow_unsorted=1;
SET rocksdb_bulk_load=1;

INSERT INTO t1 VALUES (1);

--connect (con1,localhost,root,,)
DROP TABLE t1;

--connection default
--disconnect con1

# This would have crashed the server prior to the fix
SET rocksdb_bulk_load=0;
--error ER_NO_SUCH_TABLE
SELECT * FROM t1;

@ -1,7 +1,13 @@
--source include/have_rocksdb.inc
--source include/count_sessions.inc

--let LOG1=$MYSQLTEST_VARDIR/tmp/rocksdb.bulk_load_errors.1.err
--let $_mysqld_option=--log-error=$LOG1
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_option.inc

### Bulk load ###
CREATE TABLE t1(pk INT, PRIMARY KEY(pk));
CREATE TABLE t1(pk INT, PRIMARY KEY(pk)) ENGINE=ROCKSDB;

# Make sure we get an error with out of order keys during bulk load
SET rocksdb_bulk_load=1;
@ -21,19 +27,49 @@ INSERT INTO t1 VALUES(2);
INSERT INTO t1 VALUES(20);
INSERT INTO t1 VALUES(21);

--echo #
--echo # In MyRocks, the following statement will intentionally crash the server.
--echo # In MariaDB, it will cause an error
--error ER_OVERLAPPING_KEYS
SET rocksdb_bulk_load=0;

--echo #
--echo # Despite the error, bulk load operation is over so the variable value
--echo # will be 0:
select @@rocksdb_bulk_load;

SHOW VARIABLES LIKE 'rocksdb_bulk_load';
call mtr.add_suppression('finalizing last SST file while setting bulk loading variable');

SELECT * FROM t1;

--let SEARCH_FILE=$LOG1
--let SEARCH_PATTERN=RocksDB: Error [0-9]+ finalizing last SST file while setting bulk loading variable
--source include/search_pattern_in_file.inc

--let LOG2=$MYSQLTEST_VARDIR/tmp/rocksdb.bulk_load_errors.2.err
--let $_mysqld_option=--log-error=$LOG2
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_option.inc
--remove_file $LOG1

# Make sure we get an error in log when we disconnect and do not assert the server
--connect (con1,localhost,root,,)
SET rocksdb_bulk_load=1;
INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
INSERT INTO t1 VALUES(20);
INSERT INTO t1 VALUES(21);
--connection default
--disconnect con1

SELECT * FROM t1;

--source include/wait_until_count_sessions.inc

--let SEARCH_FILE=$LOG2
--let SEARCH_PATTERN=RocksDB: Error [0-9]+ finalizing last SST file while disconnecting
--source include/search_pattern_in_file.inc

--let LOG3=$MYSQLTEST_VARDIR/tmp/rocksdb.bulk_load_errors.3.err
--let $_mysqld_option=--log-error=$LOG3
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_option.inc
--remove_file $LOG2

TRUNCATE TABLE t1;

### Bulk load with unsorted PKs ###
@ -60,3 +96,46 @@ SELECT * FROM t1;

SET rocksdb_bulk_load_allow_unsorted=DEFAULT;
DROP TABLE t1;

# This would trigger a debug assertion that is just an error in release builds
CREATE TABLE t1(c1 INT KEY) ENGINE=ROCKSDB;
SET rocksdb_bulk_load=1;
--error ER_KEYS_OUT_OF_ORDER
INSERT INTO t1 VALUES (),(),();
SET rocksdb_bulk_load=0;
DROP TABLE t1;

# Crash when table open cache closes handler with bulk load operation not finalized
SET @orig_table_open_cache=@@global.table_open_cache;
CREATE TABLE t1(a INT AUTO_INCREMENT, b INT, PRIMARY KEY (a)) ENGINE=ROCKSDB DEFAULT CHARSET=latin1;
SET rocksdb_bulk_load=1;
INSERT INTO t1 VALUES(13, 0);
INSERT INTO t1 VALUES(2, 'test 2');
INSERT INTO t1 VALUES(@id, @arg04);
SET @@global.table_open_cache=FALSE;
INSERT INTO t1 VALUES(51479+0.333333333,1);
DROP TABLE t1;
SET @@global.table_open_cache=@orig_table_open_cache;

--let SEARCH_FILE=$LOG3
--let SEARCH_PATTERN=RocksDB: Error [0-9]+ finalizing bulk load while closing handler
--source include/search_pattern_in_file.inc

--source include/restart_mysqld.inc

--remove_file $LOG3

# Switch between tables, but also introduce duplicate key errors
CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;
CREATE TABLE t2 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;
SET rocksdb_bulk_load=1;
INSERT INTO t1 VALUES (1), (2);
INSERT INTO t2 VALUES (1), (2);
INSERT INTO t1 VALUES (1);
--error ER_OVERLAPPING_KEYS
INSERT INTO t2 VALUES (3);
SET rocksdb_bulk_load=0;
DROP TABLE t1;
DROP TABLE t2;

--source include/wait_until_count_sessions.inc
@ -6,4 +6,4 @@
--let pk_cf=rev:cf1
--let data_order_desc=0

--source bulk_load.inc
--source ../include/bulk_load.inc

@ -6,4 +6,4 @@
--let pk_cf=rev:cf1
--let data_order_desc=1

--source bulk_load.inc
--source ../include/bulk_load.inc

@ -6,4 +6,4 @@
--let pk_cf=cf1
--let data_order_desc=1

--source bulk_load.inc
--source ../include/bulk_load.inc
@ -3,136 +3,4 @@

--let pk_cf=cf1

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

SET rocksdb_bulk_load_size=3;
SET rocksdb_bulk_load_allow_unsorted=1;

### Test individual INSERTs ###

# A table with only a PK won't have rows until the bulk load is finished
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf");
SET rocksdb_bulk_load=1;
--disable_query_log
let $sign = 1;
let $max = 5;
let $i = 1;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t1 VALUES ($a, $b);
eval $insert;
inc $i;
}
--enable_query_log
SELECT * FROM t1;
SET rocksdb_bulk_load=0;
SELECT * FROM t1;
DROP TABLE t1;

# A table with a PK and a SK shows rows immediately
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf", KEY(b));
SET rocksdb_bulk_load=1;
--disable_query_log
let $sign = 1;
let $max = 5;
let $i = 1;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t1 VALUES ($a, $b);
eval $insert;
inc $i;
}
--enable_query_log

SELECT * FROM t1;
SET rocksdb_bulk_load=0;
DROP TABLE t1;

# Inserting into another table finishes bulk load to the previous table
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf");
eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf");

SET rocksdb_bulk_load=1;
INSERT INTO t1 VALUES (1,1);
INSERT INTO t2 VALUES (1,1);
SELECT * FROM t1;
INSERT INTO t1 VALUES (2,2);
SELECT * FROM t2;
SELECT * FROM t1;
SET rocksdb_bulk_load=0;
SELECT * FROM t1;
DROP TABLE t1, t2;

### Test bulk load from a file ###
eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf");
eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "$pk_cf");
eval CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf")
PARTITION BY KEY() PARTITIONS 4;

--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")`
# Create a text file with data to import into the table.
# PK and SK are not in any order
--let ROCKSDB_INFILE = $file
perl;
my $fn = $ENV{'ROCKSDB_INFILE'};
open(my $fh, '>', $fn) || die "perl open($fn): $!";
binmode $fh;
my $max = 5000000;
my $sign = 1;
for (my $ii = 0; $ii < $max; $ii++)
{
my $a = 1 + $sign * $ii;
my $b = 1 - $sign * $ii;
print $fh "$a\t$b\n";
}
close($fh);
EOF
--file_exists $file

# Make sure a snapshot held by another user doesn't block the bulk load
connect (other,localhost,root,,);
set session transaction isolation level repeatable read;
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
start transaction with consistent snapshot;
select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';

connection default;
set rocksdb_bulk_load=1;
set rocksdb_bulk_load_size=100000;
--disable_query_log
--echo LOAD DATA INFILE <input_file> INTO TABLE t1;
eval LOAD DATA INFILE '$file' INTO TABLE t1;
--echo LOAD DATA INFILE <input_file> INTO TABLE t2;
eval LOAD DATA INFILE '$file' INTO TABLE t2;
--echo LOAD DATA INFILE <input_file> INTO TABLE t3;
eval LOAD DATA INFILE '$file' INTO TABLE t3;
--enable_query_log
set rocksdb_bulk_load=0;

--remove_file $file

# Make sure row count index stats are correct
--replace_column 6 # 7 # 8 # 9 #
SHOW TABLE STATUS WHERE name LIKE 't%';

ANALYZE TABLE t1, t2, t3;

--replace_column 6 # 7 # 8 # 9 #
SHOW TABLE STATUS WHERE name LIKE 't%';

# Make sure all the data is there.
select count(a) from t1;
select count(b) from t1;
select count(a) from t2;
select count(b) from t2;
select count(a) from t3;
select count(b) from t3;

DROP TABLE t1, t2, t3;
SET rocksdb_bulk_load_allow_unsorted=0;
--source ../include/bulk_load_unsorted.inc

@ -0,0 +1,5 @@
--source include/have_rocksdb.inc

--let pk_cf=rev:cf1

--source ../include/bulk_load_unsorted.inc
@ -2,6 +2,48 @@

--source include/restart_mysqld.inc

# Test memtable cardinality statistics
CREATE TABLE t0 (id int PRIMARY KEY, a int, INDEX ix_a (a)) engine=rocksdb;

# populate the table with 10 records where cardinality of id is N and a is N/2.
insert into t0 values (0, 0),(1, 1),(2, 2),(3, 3),(4, 4),
(5, 4),(6, 4),(7, 4),(8, 4),(9, 4);

# Assert no cardinality data exists before ANALYZE TABLE is done
SELECT cardinality FROM information_schema.statistics where table_name="t0" and
column_name="id";
SELECT cardinality FROM information_schema.statistics where table_name="t0" and
column_name="a";

--disable_result_log
ANALYZE TABLE t0;
--enable_result_log

SELECT table_rows into @N FROM information_schema.tables
WHERE table_name = "t0";
SELECT FLOOR(@N/cardinality) FROM
information_schema.statistics where table_name="t0" and column_name="id";
SELECT FLOOR(@N/cardinality) FROM
information_schema.statistics where table_name="t0" and column_name="a";

# Flush the table and re-run the test as statistics is calculated a bit
# differently for memtable and SST files
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
--disable_result_log
ANALYZE TABLE t0;
--enable_result_log

SELECT table_rows into @N FROM information_schema.tables
WHERE table_name = "t0";
SELECT FLOOR(@N/cardinality) FROM
information_schema.statistics where table_name="t0" and column_name="id";
SELECT FLOOR(@N/cardinality) FROM
information_schema.statistics where table_name="t0" and column_name="a";

drop table t0;

# Test big table on SST

--disable_warnings
DROP TABLE IF EXISTS t1,t10,t11;
--enable_warnings
@ -0,0 +1,22 @@
--disable_warnings
let $MYSQLD_DATADIR= `select @@datadir`;
let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;
let $error_log= $MYSQLTEST_VARDIR/log/my_restart.err;
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";

--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "echo hello=world>>{}"

--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--shutdown_server 10

--error 1
--exec $MYSQLD_CMD --plugin_load=$HA_ROCKSDB_SO --rocksdb_ignore_unknown_options=0 --loose-console --log-error=$error_log

let SEARCH_FILE= $error_log;
let SEARCH_PATTERN= RocksDB: Compatibility check against existing database options failed;
--source include/search_pattern_in_file.inc
--enable_reconnect
--exec echo "restart" > $restart_file
--source include/wait_until_connected_again.inc
--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "sed -i '/hello=world/d' {}"
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";
@ -21,29 +21,29 @@ let $con3= `SELECT CONNECTION_ID()`;
connection default;
eval create table t (i int primary key) engine=$engine;
insert into t values (1), (2), (3);
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
show engine rocksdb transaction status;

echo Deadlock #1;
--source include/simple_deadlock.inc
connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
show engine rocksdb transaction status;

echo Deadlock #2;
--source include/simple_deadlock.inc
connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
show engine rocksdb transaction status;
set global rocksdb_max_latest_deadlocks = 10;

echo Deadlock #3;
--source include/simple_deadlock.inc
connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
show engine rocksdb transaction status;
set global rocksdb_max_latest_deadlocks = 1;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
show engine rocksdb transaction status;

connection con3;
@ -77,8 +77,10 @@ let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx
where thread_id = $con2 and waiting_key != "";
--source include/wait_condition.inc

select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks';
--error ER_LOCK_DEADLOCK
select * from t where i=1 for update;
select case when variable_value-@a = 1 then 'true' else 'false' end as deadlocks from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks';
rollback;

connection con2;
@ -91,7 +93,7 @@ rollback;

connection default;
set global rocksdb_max_latest_deadlocks = 5;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
show engine rocksdb transaction status;

echo Deadlock #5;
@ -133,7 +135,7 @@ connection con3;
rollback;

connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
show engine rocksdb transaction status;

disconnect con1;
@ -143,11 +145,11 @@ disconnect con3;
set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
drop table t;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
show engine rocksdb transaction status;
set global rocksdb_max_latest_deadlocks = 0;
--echo # Clears deadlock buffer of any existent deadlocks.
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
show engine rocksdb transaction status;
--source include/wait_until_count_sessions.inc
@ -7,6 +7,7 @@
--disable_warnings
DROP TABLE IF EXISTS is_ddl_t1;
DROP TABLE IF EXISTS is_ddl_t2;
DROP TABLE IF EXISTS is_ddl_t3;
--enable_warnings

CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT,
@ -16,9 +17,13 @@ CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT,
CREATE TABLE is_ddl_t2 (x INT, y INT, z INT,
PRIMARY KEY (z, y) COMMENT 'zy_cf',
KEY (x)) ENGINE = ROCKSDB;
CREATE TABLE is_ddl_t3 (a INT, b INT, c INT, PRIMARY KEY (a)) ENGINE = ROCKSDB
COMMENT "ttl_duration=3600;";

--sorted_result
SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%';
SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF,TTL_DURATION,INDEX_FLAGS FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%';

# cleanup
DROP TABLE is_ddl_t1;
DROP TABLE is_ddl_t2;
DROP TABLE is_ddl_t3;
158 storage/rocksdb/mysql-test/rocksdb/t/i_s_deadlock.test Normal file
@ -0,0 +1,158 @@
--source include/have_rocksdb.inc

set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_deadlock_detect = @@rocksdb_deadlock_detect;
set @prior_max_latest_deadlocks = @@rocksdb_max_latest_deadlocks;
set global rocksdb_deadlock_detect = on;
set global rocksdb_lock_wait_timeout = 10000;
--echo # Clears deadlock buffer of any prior deadlocks.
set global rocksdb_max_latest_deadlocks = 0;
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;

# needed by simple_deadlock.inc
let $engine = rocksdb;

--source include/count_sessions.inc
connect (con1,localhost,root,,);
let $con1= `SELECT CONNECTION_ID()`;

connect (con2,localhost,root,,);
let $con2= `SELECT CONNECTION_ID()`;

connect (con3,localhost,root,,);
let $con3= `SELECT CONNECTION_ID()`;

connection default;
show create table information_schema.rocksdb_deadlock;

create table t (i int primary key) engine=rocksdb;
insert into t values (1), (2), (3);
select * from information_schema.rocksdb_deadlock;

echo Deadlock #1;
--source include/simple_deadlock.inc
connection default;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY
select * from information_schema.rocksdb_deadlock;

echo Deadlock #2;
--source include/simple_deadlock.inc
connection default;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY
select * from information_schema.rocksdb_deadlock;
set global rocksdb_max_latest_deadlocks = 10;

echo Deadlock #3;
--source include/simple_deadlock.inc
connection default;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY
select * from information_schema.rocksdb_deadlock;
set global rocksdb_max_latest_deadlocks = 1;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY
select * from information_schema.rocksdb_deadlock;

connection con3;
set rocksdb_deadlock_detect_depth = 2;

echo Deadlock #4;
connection con1;
begin;
select * from t where i=1 for update;

connection con2;
begin;
select * from t where i=2 for update;

connection con3;
begin;
select * from t where i=3 for update;

connection con1;
send select * from t where i=2 for update;

connection con2;
let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx
where thread_id = $con1 and waiting_key != "";
--source include/wait_condition.inc

send select * from t where i=3 for update;

connection con3;
let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx
where thread_id = $con2 and waiting_key != "";
--source include/wait_condition.inc

--error ER_LOCK_DEADLOCK
select * from t where i=1 for update;
rollback;

connection con2;
reap;
rollback;

connection con1;
reap;
rollback;

connection default;
set global rocksdb_max_latest_deadlocks = 5;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY
select * from information_schema.rocksdb_deadlock;

echo Deadlock #5;
connection con1;
begin;
select * from t where i=1 for update;

connection con2;
begin;
select * from t where i=2 for update;

connection con3;
begin;
select * from t where i=3 lock in share mode;

connection con1;
select * from t where i=100 for update;
select * from t where i=101 for update;
send select * from t where i=2 for update;

connection con2;
let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx
where thread_id = $con1 and waiting_key != "";
--source include/wait_condition.inc

select * from t where i=3 lock in share mode;
select * from t where i=200 for update;
select * from t where i=201 for update;

--error ER_LOCK_DEADLOCK
select * from t where i=1 lock in share mode;
rollback;

connection con1;
reap;
rollback;

connection con3;
rollback;

connection default;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY
select * from information_schema.rocksdb_deadlock;

disconnect con1;
disconnect con2;
disconnect con3;

set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
drop table t;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY 6 INDEX_NAME 7 TABLE_NAME
select * from information_schema.rocksdb_deadlock;
set global rocksdb_max_latest_deadlocks = 0;
--echo # Clears deadlock buffer of any existent deadlocks.
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;
--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY
select * from information_schema.rocksdb_deadlock;
--source include/wait_until_count_sessions.inc
@ -70,14 +70,15 @@ while ($cnt)

SELECT COUNT(*) FROM t1;

# flush the table first as statistics is calculated a bit differently for memtable and SST files
SET GLOBAL rocksdb_force_flush_memtable_now = 1;

-- disable_query_log
-- disable_result_log
ANALYZE TABLE t1;
-- enable_result_log
-- enable_query_log

SET GLOBAL rocksdb_force_flush_memtable_now = 1;

--replace_column 9 #
EXPLAIN UPDATE t1 SET filler1='to be deleted' WHERE key1=100 AND key2=100;
UPDATE t1 SET filler1='to be deleted' WHERE key1=100 and key2=100;
@ -95,8 +96,8 @@ while ($i <= 1000) {
eval $insert;
}
--enable_query_log
analyze table t1;
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;

--replace_column 9 #
explain select * from t1 where key1 = 1;

@ -5,7 +5,8 @@

# t/index_merge_innodb.test
#
# Index merge tests
# Index merge tests (the test is called 'index_merge_rocksdb2' because
# 'index_merge_rocksdb' has already existed before copying 'index_merge_innodb')
#
# Last update:
# 2006-08-07 ML test refactored (MySQL 5.1)
@ -61,6 +62,7 @@ INSERT INTO t1 SELECT id + 16, 7, 0 FROM t1;

-- disable_query_log
-- disable_result_log
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;
-- enable_result_log
-- enable_query_log

@ -19,8 +19,8 @@ drop table t1;

--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1)
--replace_result $max_index_id max_index_id
select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING';
select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING';
SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0;

select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn';

@ -1,4 +1,5 @@
--rocksdb_write_disable_wal=1
--rocksdb_flush_log_at_trx_commit=0
--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=256;level0_stop_writes_trigger=256;max_write_buffer_number=16;compression_per_level=kNoCompression;memtable=vector:1024
--rocksdb_override_cf_options=__system__={memtable=skip_list:16}
--rocksdb_compaction_sequential_deletes=0

@ -8,6 +8,38 @@ SHOW TABLE STATUS LIKE 't1';

INSERT INTO t1 VALUES ('538647864786478647864');
--replace_column 3 # 6 # 7 # 8 # 9 # 10 #
SELECT * FROM t1;
SHOW TABLE STATUS LIKE 't1';

--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ();
SELECT * FROM t1;
--replace_column 3 # 6 # 7 # 8 # 9 # 10 #
SHOW TABLE STATUS LIKE 't1';

--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ();
SELECT * FROM t1;
--replace_column 3 # 6 # 7 # 8 # 9 # 10 #
SHOW TABLE STATUS LIKE 't1';

DROP TABLE t1;

CREATE TABLE t1 (pk TINYINT NOT NULL PRIMARY KEY AUTO_INCREMENT);

INSERT INTO t1 VALUES (5);
--replace_column 3 # 6 # 7 # 8 # 9 # 10 #
SHOW TABLE STATUS LIKE 't1';

INSERT INTO t1 VALUES (1000);
--replace_column 3 # 6 # 7 # 8 # 9 # 10 #
SELECT * FROM t1;
SHOW TABLE STATUS LIKE 't1';

--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ();
SELECT * FROM t1;
--replace_column 3 # 6 # 7 # 8 # 9 # 10 #
SHOW TABLE STATUS LIKE 't1';

--error ER_DUP_ENTRY

@ -16,16 +16,20 @@ set @@rocksdb_lock_wait_timeout=1;
begin;

--connection con1
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';
--error ER_LOCK_WAIT_TIMEOUT
insert into t values(0);
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';

--connection con2
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';
--error ER_LOCK_WAIT_TIMEOUT
insert into t values(0);
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t";
select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts';

--disconnect con1
--connection default
storage/rocksdb/mysql-test/rocksdb/t/max_open_files.test (new file, 53 lines)
@ -0,0 +1,53 @@
--source include/have_rocksdb.inc

# Basic Sysbench run fails with a basic MyRocks install due to lack of open files

# test for over limit
CALL mtr.add_suppression("RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit*");

--let $over_rocksdb_max_open_files=`SELECT @@global.open_files_limit + 100`
--let $under_rocksdb_max_open_files=`SELECT @@global.open_files_limit -1`
--let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/rocksdb.max_open_files.err
--let SEARCH_PATTERN=RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit

--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR $over_rocksdb_max_open_files over_rocksdb_max_open_files
--let $_mysqld_option=--log-error=$SEARCH_FILE --rocksdb_max_open_files=$over_rocksdb_max_open_files
--source include/restart_mysqld_with_option.inc
--source include/search_pattern_in_file.inc

SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files;

# test for within limit
--let $_mysqld_option=--rocksdb_max_open_files=$under_rocksdb_max_open_files
--source include/restart_mysqld_with_option.inc

SELECT @@global.open_files_limit - 1 = @@global.rocksdb_max_open_files;

# test for minimal value
--let $_mysqld_option=--rocksdb_max_open_files=0
--source include/restart_mysqld_with_option.inc

SELECT @@global.rocksdb_max_open_files;

# verify that we can still do work with no descriptor cache
CREATE TABLE t1(a INT) ENGINE=ROCKSDB;
INSERT INTO t1 VALUES(0),(1),(2),(3),(4);
SET GLOBAL rocksdb_force_flush_memtable_and_lzero_now=1;
DROP TABLE t1;

# test for unlimited
--let $_mysqld_option=--rocksdb_max_open_files=-1
--source include/restart_mysqld_with_option.inc

SELECT @@global.rocksdb_max_open_files;

# test for auto-tune
--let $_mysqld_option=--rocksdb_max_open_files=-2
--source include/restart_mysqld_with_option.inc

SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files;

# cleanup
--let $_mysqld_option=
--source include/restart_mysqld.inc
--remove_file $SEARCH_FILE

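The assertions in this new test reduce to one relationship: when rocksdb_max_open_files is set above the server limit, or to -2 (auto-tune), MyRocks falls back to half of open_files_limit. A quick way to eyeball that on a running server, with purely illustrative numbers:

SELECT @@global.open_files_limit;           -- e.g. 1024 (assumed value, for illustration only)
SELECT @@global.rocksdb_max_open_files;     -- with auto-tune this would be FLOOR(1024 / 2) = 512
SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files AS tuned_to_half;
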
@ -32,7 +32,7 @@ BEGIN;
insert into r1 values (5,5,5,5,5,5,5,5);
update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1';

--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb --order-by-primary-desc --rocksdb_bulk_load test
--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb --order-by-primary-desc --rocksdb_bulk_load test

rollback;

@ -44,16 +44,16 @@ source include/search_pattern_in_file.inc;

set @save_default_storage_engine=@@global.default_storage_engine;
SET GLOBAL default_storage_engine=rocksdb;
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test
--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test
source include/search_pattern_in_file.inc;

# Sanity test mysqldump when the --innodb-stats-on-metadata is specified (no effect)
--echo ==== mysqldump with --innodb-stats-on-metadata ====
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test
--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test

# testing mysqldump work with statement based binary logging
SET GLOBAL binlog_format=statement;
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null
--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null
SET GLOBAL binlog_format=row;

drop table r1;

@ -29,7 +29,7 @@ let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;

select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add';

--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb test > /dev/null
--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb test > /dev/null

# verifying block cache was not filled
select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add';

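Several of these tests rely on the same snapshot-then-diff pattern for a status counter: capture the current value into a user variable, run the statement under test, then compare the delta against a threshold. A generic sketch of that pattern (the counter name and the 20-block threshold are simply the ones used above):

select variable_value into @before
  from information_schema.global_status
  where variable_name = 'rocksdb_block_cache_add';
# ... run the operation being measured ...
select case when variable_value - @before > 20 then 'true' else 'false' end as cache_grew
  from information_schema.global_status
  where variable_name = 'rocksdb_block_cache_add';
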
@ -574,7 +574,6 @@ insert into t30 values
('row3', 'row3-key', 'row3-data'),
('row4', 'row4-key', 'row4-data'),
('row5', 'row5-key', 'row5-data');
analyze table t30;

--replace_column 9 #
explain

@ -786,11 +785,15 @@ drop table t45;
--echo # Now it fails if there is data overlap with what
--echo # already exists
--echo #
# We exclude rocksdb_max_open_files here because its value is dependent on
# the value of the server's open_file_limit and is expected to be different
# across distros and installs

--replace_regex /[a-f0-9]{40}/#/
show variables
where
  variable_name like 'rocksdb%' and
  variable_name not like 'rocksdb_max_open_files' and
  variable_name not like 'rocksdb_supported_compression_types';

create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;

storage/rocksdb/mysql-test/rocksdb/t/rocksdb_debug.test (new file, 14 lines)
@ -0,0 +1,14 @@
--source include/have_rocksdb.inc
--source include/have_debug.inc

--echo #
--echo # Issue #728: Assertion `covers_key(b)' failed in int
--echo # myrocks::Rdb_key_def::cmp_full_keys(const rocksdb::Slice&,
--echo # const rocksdb::Slice&)
--echo #

CREATE TABLE t2(c1 TINYINT SIGNED KEY,c2 TINYINT UNSIGNED,c3 INT);
INSERT INTO t2(c1)VALUES(0);
SELECT * FROM t2 WHERE c1<=127 ORDER BY c1 DESC;
DROP TABLE t2;

@ -15,6 +15,7 @@ while ($i<10000)
--enable_query_log
analyze table t1;
select count(*) from t1;
--replace_column 9 #
explain select c1 from t1 where c1 > 5 limit 10;
drop table t1;

@ -1,39 +1,39 @@
--source include/have_rocksdb.inc
--source include/have_debug.inc

# Write file to make mysql-test-run.pl expect the "crash", but don't restart the
# server until it is told to
--let $_server_id= `SELECT @@server_id`
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect

CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB;
create table t1 (pk int primary key) engine=rocksdb;

# Create a .frm file without a matching table
--exec cp $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test/t1#sql-test.frm

# Restart the server with a .frm file present but the table not registered in RocksDB
--exec echo "wait" >$_expect_file_name
shutdown_server 10;
--exec echo "restart" >$_expect_file_name
--sleep 5
--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect
--source include/restart_mysqld.inc

show tables;

# MariaDB produces a warning:
call mtr.add_suppression('Invalid .old.. table or database name .t1#sql-test.');

# This will append '#sql-test' to the end of the new name
set session debug_dbug="+d,gen_sql_table_name";
rename table t1 to t2;
set session debug_dbug= "-d,gen_sql_table_name";

show tables;

# Remove the corresponding .frm files
--remove_files_wildcard $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test *t1*.frm
--remove_files_wildcard $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test *t2*.frm

# Restart the server with a table that is registered in RocksDB but has no .frm file
--exec echo "wait" >$_expect_file_name
shutdown_server 10;
--exec echo "restart" >$_expect_file_name
--sleep 5
--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect
--source include/restart_mysqld.inc

show tables;

# try to recreate a table with the same name
create table t2 (pk int primary key) engine=rocksdb;

show tables;

drop table t2;

@ -78,22 +78,28 @@ INSERT INTO t1 values (7);
set global rocksdb_debug_ttl_rec_ts = 0;

# should return nothing.
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
--sorted_result
SELECT * FROM t1;
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';

# disable filtering
set global rocksdb_enable_ttl_read_filtering=0;

# should return everything
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
--sorted_result
SELECT * FROM t1;
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';

# disable filtering
# enable filtering
set global rocksdb_enable_ttl_read_filtering=1;

# should return nothing.
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
--sorted_result
SELECT * FROM t1;
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';

DROP TABLE t1;

@ -286,28 +292,37 @@ SELECT * FROM t1; # <= shouldn't be filtered out here

--echo # Switching to connection 2
connection con2;
# compaction doesn't do anythign since con1 snapshot is still open
# compaction doesn't do anything since con1 snapshot is still open
set global rocksdb_force_flush_memtable_now=1;
set global rocksdb_compact_cf='default';
# read filtered out, because on a different connection, on
# this connection the records have 'expired' already so they are filtered out
# even though they have not yet been removed by compaction

select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
--sorted_result
SELECT * FROM t1;
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';

--echo # Switching to connection 1
connection con1;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
--sorted_result
SELECT * FROM t1; # <= shouldn't be filtered out here
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';

UPDATE t1 set a = a + 1;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
--sorted_result
SELECT * FROM t1; # <= shouldn't be filtered out here
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';

COMMIT;

select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered';
--sorted_result # <= filtered out here because time has passed.
SELECT * FROM t1;
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered';

DROP TABLE t1;
disconnect con1;

@ -1,9 +1,5 @@
--source include/have_rocksdb.inc

--disable_warnings
drop table if exists t1,t2;
--enable_warnings

#
# VARCHAR column types
#

@ -73,3 +69,14 @@ select 'email_i' as index_name, count(*) AS count from t force index(email_i);
drop table t;
set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct;
set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums;

# Issue #784 - Skip trailing space bytes for non-unpackable fields

drop table if exists t;
create table t (h varchar(31) character set utf8 collate utf8_bin not null, i varchar(19) collate latin1_bin not null, primary key(i), key(h)) engine=rocksdb;
insert into t(i,h) values('a','b');
check table t;
alter table t modify h varchar(31) character set cp1257 collate cp1257_bin not null;
check table t;
drop table t;

@ -11,37 +11,51 @@ select plugin_name, plugin_type from information_schema.plugins where plugin_nam
# caused an assertion in RocksDB. Now it should not be allowed and ROCKSDB
# plugin will not load in such configuration.
#
# We want the server to still start, so we specify default-storage-engine=myisam
--let LOG=$MYSQLTEST_VARDIR/tmp/use_direct_reads_writes.err
--let SEARCH_FILE=$LOG

--let $_mysqld_option=--rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1 --default-storage-engine=myisam
--echo Checking direct reads
--let $_mysqld_option=--log-error=$LOG --rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_invalid_option.inc

--let SEARCH_PATTERN=enable both use_direct_reads
--source include/search_pattern_in_file.inc
--remove_file $LOG


# Repeat with direct-writes
--echo Checking direct writes
--let $_mysqld_option=--log-error=$LOG --rocksdb_use_direct_io_for_flush_and_compaction=1 --rocksdb_allow_mmap_writes=1
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_invalid_option.inc

--let SEARCH_PATTERN=enable both use_direct_io_for_flush_and_compaction
--source include/search_pattern_in_file.inc
--remove_file $LOG


# Verify invalid direct-writes and --rocksdb_flush_log_at_trx_commit combination at startup fails
--echo Checking rocksdb_flush_log_at_trx_commit
--let $_mysqld_option=--log-error=$LOG --rocksdb_flush_log_at_trx_commit=1 --rocksdb_allow_mmap_writes=1
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_invalid_option.inc

--let SEARCH_PATTERN=rocksdb_flush_log_at_trx_commit needs to be
--source include/search_pattern_in_file.inc
--remove_file $LOG


# Verify rocksdb_flush_log_at_trx_commit cannot be changed if direct writes are used
--echo Validate flush_log settings when direct writes is enabled
--let $_mysqld_option=--rocksdb_flush_log_at_trx_commit=0 --rocksdb_allow_mmap_writes=1
--source include/restart_mysqld_with_option.inc

--echo # Check that ROCKSDB plugin is not loaded:
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';
set global rocksdb_flush_log_at_trx_commit=0;
--error ER_WRONG_VALUE_FOR_VAR
set global rocksdb_flush_log_at_trx_commit=1;
--error ER_WRONG_VALUE_FOR_VAR
set global rocksdb_flush_log_at_trx_commit=2;

--echo # Check that MyRocks has printed an error message into server error log:
let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err;
let SEARCH_PATTERN=enable both use_direct_reads;
source include/search_pattern_in_file.inc;

--echo # Now, restart the server back with regular settings
# Cleanup
--source include/restart_mysqld.inc
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';

--echo #
--echo # Now, repeat the same with another set of invalid arguments
--echo #
--let $_mysqld_option=--rocksdb_use_direct_io_for_flush_and_compaction=1 --rocksdb_allow_mmap_writes=1 --default-storage-engine=myisam
--source include/restart_mysqld_with_option.inc

--echo # Check that ROCKSDB plugin is not loaded:
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';

let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err;
let SEARCH_PATTERN=enable both use_direct_io_for_flush_and_compaction;
source include/search_pattern_in_file.inc;

--echo # Now, restart the server back with regular settings
--source include/restart_mysqld.inc
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';

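The rejected startup combinations above all pair direct I/O with mmap on the same path; the inverse settings are the ones the plugin should accept. A minimal sketch of a valid restart, reusing only the helper and option names this test already exercises:

# direct reads load fine as long as mmap reads stay off (and similarly for the write path)
--let $_mysqld_option=--rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=0
--source include/restart_mysqld_with_option.inc
select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB';
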
@ -7,7 +7,8 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
create table aaa (id int primary key, i int) engine rocksdb;
set @save_rocksdb_flush_log_at_trx_commit=@@global.rocksdb_flush_log_at_trx_commit;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
--exec sleep 5
insert aaa(id, i) values(0,1);

select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
insert aaa(id, i) values(1,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';

@ -16,18 +17,16 @@ select variable_value-@a from information_schema.global_status where variable_na
insert aaa(id, i) values(3,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';

SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
--exec sleep 5
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
insert aaa(id, i) values(4,1);

let $status_var=rocksdb_wal_synced;
let $status_var_value=`select @a+1`;
source include/wait_for_status_var.inc;

SET GLOBAL rocksdb_flush_log_at_trx_commit=2;
--exec sleep 5
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
SET GLOBAL rocksdb_flush_log_at_trx_commit=2;
insert aaa(id, i) values(5,1);

let $status_var=rocksdb_wal_synced;

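The change above swaps a fixed --exec sleep 5 for include/wait_for_status_var.inc, so the test waits for the rocksdb_wal_synced counter to reach a target instead of guessing a wall-clock delay. The target is simply the snapshot taken into @a plus one sync; the same condition can be checked with an ordinary query, shown here purely as an illustration:

# after the insert, one more WAL sync than the snapshot should eventually be visible
select variable_value >= @a + 1 as wal_synced_reached
  from information_schema.global_status
  where variable_name = 'rocksdb_wal_synced';
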
@ -30,6 +30,7 @@ INSERT INTO t1 VALUES(1, 1);
connection slave;
--let $slave_sql_errno= 1062
--let $not_switch_connection= 0
--let $slave_timeout= 120
--source include/wait_for_slave_sql_error_and_skip.inc
set global reset_seconds_behind_master=0;
--source include/stop_slave_io.inc

@ -10,6 +10,7 @@ insert into r1 values (1, 1000);
set global rocksdb_force_flush_memtable_now=1;
include/rpl_start_server.inc [server_number=2]
include/start_slave.inc
insert into r1 values (2,2000);
delete r1 from r1 force index (i) where id2=1000;
select id1,id2 from r1 force index (primary) where id1=1 and id2=1000;
id1 id2

@ -62,6 +62,7 @@ SET GLOBAL SYNC_BINLOG = 1;

insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush");

--error 0,2013
SET DEBUG_SYNC='now SIGNAL go';
--source include/wait_until_disconnected.inc
--enable_reconnect

@ -53,8 +53,14 @@ EOF

--source include/rpl_start_server.inc
--source include/start_slave.inc

# Due to the binlogs being truncated, the slave may still think it's processed up to
# the truncated binlog and select master_pos_wait() can return prematurely. Add
# a new transaction to the master to force master_pos_wait() to wait.
connection master;
insert into r1 values (2,2000);
sync_slave_with_master;

connection slave;
delete r1 from r1 force index (i) where id2=1000;
select id1,id2 from r1 force index (primary) where id1=1 and id2=1000;

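The comment explains why sync_slave_with_master (which relies on MASTER_POS_WAIT) needs one more transaction after the binlog truncation: without it the slave already reports the truncated position as applied and the wait returns immediately. For reference, the underlying function takes a binlog file name, a position, and an optional timeout in seconds; the file name and position below are placeholders only:

SELECT MASTER_POS_WAIT('master-bin.000002', 4, 30);
# returns the number of events waited for, -1 on timeout, or NULL if the slave SQL thread is not running
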
@ -9,6 +9,8 @@ zero_sum INT DEFAULT 0,
msg VARCHAR(1024),
msg_length int,
msg_checksum varchar(128),
auto_inc BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
KEY(auto_inc),
KEY msg_i(msg(255), zero_sum))
ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
stop slave;

@ -9,6 +9,8 @@ zero_sum INT DEFAULT 0,
msg VARCHAR(1024),
msg_length int,
msg_checksum varchar(128),
auto_inc BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
KEY(auto_inc),
KEY msg_i(msg(255), zero_sum))
ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
stop slave;

Some files were not shown because too many files have changed in this diff.