fixes #188 Renamed the last brt artifacts to ft_handle. Tried to keep comments etc. sane by not dropping a big sed bomb. Hopefully things make sense from here on out.
John Esmet 2014-03-02 18:04:37 -05:00
parent 34dbe5ecb2
commit d037524b70
78 changed files with 1268 additions and 1270 deletions
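
The diffs below are all instances of the same mechanical pattern: a handle-typed variable or parameter that was still called brt is renamed to ft_handle, and every access through it is updated to match. As a standalone illustration only (a minimal sketch; the struct definitions and function names here are made up for this example and are not code from the tree):

    // Illustrative sketch of the rename pattern; these types/functions are
    // hypothetical stand-ins, not the real TokuFT definitions.
    #include <cstdio>

    struct ft        { int height; };   // stand-in for the tree object
    struct ft_handle { ft *ft; };       // stand-in for what FT_HANDLE points to
    typedef ft_handle *FT_HANDLE;

    // Before: the handle parameter still carried the legacy name "brt".
    static int root_height_before(FT_HANDLE brt) { return brt->ft->height; }

    // After: same code, the parameter is named for what it actually is.
    static int root_height_after(FT_HANDLE ft_handle) { return ft_handle->ft->height; }

    int main(void) {
        ft tree = { 2 };
        ft_handle handle = { &tree };
        printf("%d %d\n", root_height_before(&handle), root_height_after(&handle));
        return 0;
    }

Nothing about behaviour changes; only the name of the handle and the identifiers and comments that mention it.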


@@ -427,7 +427,7 @@ toku_ft_unlock (FT ft) {
     unlock_for_blocktable(bt);
 }
-// Also used only in brt-serialize-test.
+// Also used only in ft-serialize-test.
 void
 toku_block_free(BLOCK_TABLE bt, uint64_t offset) {
     lock_for_blocktable(bt);


@@ -370,7 +370,7 @@ toku_cachetable_set_env_dir(CACHETABLE ct, const char *env_dir) {
 // What cachefile goes with particular iname (iname relative to env)?
 // The transaction that is adding the reference might not have a reference
-// to the brt, therefore the cachefile might be closing.
+// to the ft, therefore the cachefile might be closing.
 // If closing, we want to return that it is not there, but must wait till after
 // the close has finished.
 // Once the close has finished, there must not be a cachefile with that name
@@ -380,7 +380,7 @@ int toku_cachefile_of_iname_in_env (CACHETABLE ct, const char *iname_in_env, CAC
 }
 // What cachefile goes with particular fd?
-// This function can only be called if the brt is still open, so file must
+// This function can only be called if the ft is still open, so file must
 // still be open
 int toku_cachefile_of_filenum (CACHETABLE ct, FILENUM filenum, CACHEFILE *cf) {
     return ct->cf_list.cachefile_of_filenum(filenum, cf);
@@ -642,7 +642,7 @@ static void cachetable_free_pair(PAIR p) {
     cachetable_evictions++;
     PAIR_ATTR new_attr = p->attr;
     // Note that flush_callback is called with write_me false, so the only purpose of this
-    // call is to tell the brt layer to evict the node (keep_me is false).
+    // call is to tell the ft layer to evict the node (keep_me is false).
     // Also, because we have already removed the PAIR from the cachetable in
     // cachetable_remove_pair, we cannot pass in p->cachefile and p->cachefile->fd
     // for the first two parameters, as these may be invalid (#5171), so, we


@@ -110,7 +110,7 @@ struct __attribute__((__packed__)) fifo_entry {
     XIDS_S xids_s;
 };
-// get and set the brt message type for a fifo entry.
+// get and set the ft message type for a fifo entry.
 // it is internally stored as a single unsigned char.
 static inline enum ft_msg_type
 fifo_entry_get_msg_type(const struct fifo_entry * entry)


@@ -202,7 +202,7 @@ toku_create_new_ftnode (
 //
 int
 toku_pin_ftnode_for_query(
-    FT_HANDLE brt,
+    FT_HANDLE ft_handle,
     BLOCKNUM blocknum,
     uint32_t fullhash,
     UNLOCKERS unlockers,
@@ -226,12 +226,12 @@ toku_pin_ftnode_for_query(
     }
     int r = toku_cachetable_get_and_pin_nonblocking(
-        brt->ft->cf,
+        ft_handle->ft->cf,
         blocknum,
         fullhash,
         &node_v,
         NULL,
-        get_write_callbacks_for_node(brt->ft),
+        get_write_callbacks_for_node(ft_handle->ft),
         toku_ftnode_fetch_callback,
         toku_ftnode_pf_req_callback,
         toku_ftnode_pf_callback,
@@ -245,7 +245,7 @@ toku_pin_ftnode_for_query(
     node = static_cast<FTNODE>(node_v);
     if (apply_ancestor_messages && node->height == 0) {
         needs_ancestors_messages = toku_ft_leaf_needs_ancestors_messages(
-            brt->ft,
+            ft_handle->ft,
             node,
             ancestors,
             bounds,
@@ -255,20 +255,20 @@ toku_pin_ftnode_for_query(
         if (needs_ancestors_messages) {
             toku::context apply_messages_ctx(CTX_MESSAGE_APPLICATION);
-            toku_unpin_ftnode_read_only(brt->ft, node);
+            toku_unpin_ftnode_read_only(ft_handle->ft, node);
             int rr = toku_cachetable_get_and_pin_nonblocking(
-                brt->ft->cf,
+                ft_handle->ft->cf,
                 blocknum,
                 fullhash,
                 &node_v,
                 NULL,
-                get_write_callbacks_for_node(brt->ft),
+                get_write_callbacks_for_node(ft_handle->ft),
                 toku_ftnode_fetch_callback,
                 toku_ftnode_pf_req_callback,
                 toku_ftnode_pf_callback,
                 PL_WRITE_CHEAP,
                 bfe, //read_extraargs
                 unlockers);
             if (rr != 0) {
                 assert(rr == TOKUDB_TRY_AGAIN); // Any other error and we should bomb out ASAP.
                 r = TOKUDB_TRY_AGAIN;
@@ -276,7 +276,7 @@ toku_pin_ftnode_for_query(
         }
         node = static_cast<FTNODE>(node_v);
         toku_apply_ancestors_messages_to_node(
-            brt,
+            ft_handle,
             node,
             ancestors,
             bounds,


@@ -141,7 +141,7 @@ toku_create_new_ftnode (
 // This function returns a pinned ftnode to the caller.
 int
 toku_pin_ftnode_for_query(
-    FT_HANDLE brt,
+    FT_HANDLE ft_h,
     BLOCKNUM blocknum,
     uint32_t fullhash,
     UNLOCKERS unlockers,


@@ -104,7 +104,7 @@ PATENT RIGHTS GRANT:
 */
 static FT_FLUSHER_STATUS_S ft_flusher_status;
-#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_flusher_status, k, c, t, "brt flusher: " l, inc)
+#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_flusher_status, k, c, t, "ft flusher: " l, inc)
 #define STATUS_VALUE(x) ft_flusher_status.status[x].value.num
 void toku_ft_flusher_status_init(void) {
@@ -1343,7 +1343,7 @@ maybe_merge_pinned_nodes(
 // For nonleaf nodes, we distribute the children evenly. That may leave one or both of the nodes overfull, but that's OK.
 // If we distribute, we set *splitk to a malloced pivot key.
 // Parameters:
-// t The BRT.
+// t The FT.
 // parent The parent of the two nodes to be split.
 // parent_splitk The pivot key between a and b. This is either free()'d or returned in *splitk.
 // a The first node to merge.
@@ -1591,7 +1591,6 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa)
     bool may_child_be_reactive = may_node_be_reactive(ft, child);
     paranoid_invariant(child->thisnodename.b!=0);
-    //VERIFY_NODE(brt, child);
     // only do the following work if there is a flush to perform
     if (toku_bnc_n_entries(BNC(parent, childnum)) > 0 || parent->height == 1) {


@@ -124,7 +124,7 @@ typedef enum {
     FT_FLUSHER_SPLIT_NONLEAF, // number of nonleaf nodes split
     FT_FLUSHER_MERGE_LEAF, // number of times leaf nodes are merged
     FT_FLUSHER_MERGE_NONLEAF, // number of times nonleaf nodes are merged
-    FT_FLUSHER_BALANCE_LEAF, // number of times a leaf node is balanced inside brt
+    FT_FLUSHER_BALANCE_LEAF, // number of times a leaf node is balanced
     FT_FLUSHER_STATUS_NUM_ROWS
 } ft_flusher_status_entry;
@@ -230,7 +230,7 @@ void toku_ft_hot_get_status(FT_HOT_STATUS);
 * we go until the end of the FT.
 */
 int
-toku_ft_hot_optimize(FT_HANDLE brt, DBT* left, DBT* right,
+toku_ft_hot_optimize(FT_HANDLE ft_h, DBT* left, DBT* right,
                      int (*progress_callback)(void *extra, float progress),
                      void *progress_extra, uint64_t* loops_run);


@@ -298,9 +298,9 @@ hot_flusher_destroy(struct hot_flusher_extra *flusher)
 // Entry point for Hot Optimize Table (HOT). Note, this function is
 // not recursive. It iterates over root-to-leaf paths.
 int
-toku_ft_hot_optimize(FT_HANDLE brt, DBT* left, DBT* right,
+toku_ft_hot_optimize(FT_HANDLE ft_handle, DBT* left, DBT* right,
                      int (*progress_callback)(void *extra, float progress),
                      void *progress_extra, uint64_t* loops_run)
 {
     toku::context flush_ctx(CTX_FLUSH);
@@ -316,7 +316,7 @@ toku_ft_hot_optimize(FT_HANDLE brt, DBT* left, DBT* right,
     // start of HOT operation
     (void) toku_sync_fetch_and_add(&STATUS_VALUE(FT_HOT_NUM_STARTED), 1);
-    toku_ft_note_hot_begin(brt);
+    toku_ft_note_hot_begin(ft_handle);
     // Higher level logic prevents a dictionary from being deleted or
     // truncated during a hot optimize operation. Doing so would violate
@@ -329,10 +329,10 @@ toku_ft_hot_optimize(FT_HANDLE brt, DBT* left, DBT* right,
     {
         // Get root node (the first parent of each successive HOT
         // call.)
-        toku_calculate_root_offset_pointer(brt->ft, &root_key, &fullhash);
+        toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
         struct ftnode_fetch_extra bfe;
-        fill_bfe_for_full_read(&bfe, brt->ft);
-        toku_pin_ftnode(brt->ft,
+        fill_bfe_for_full_read(&bfe, ft_handle->ft);
+        toku_pin_ftnode(ft_handle->ft,
                         (BLOCKNUM) root_key,
                         fullhash,
                         &bfe,
@@ -364,12 +364,12 @@ toku_ft_hot_optimize(FT_HANDLE brt, DBT* left, DBT* right,
         // This should recurse to the bottom of the tree and then
         // return.
         if (root->height > 0) {
-            toku_ft_flush_some_child(brt->ft, root, &advice);
+            toku_ft_flush_some_child(ft_handle->ft, root, &advice);
         } else {
             // Since there are no children to flush, we should abort
             // the HOT call.
             flusher.rightmost_leaf_seen = 1;
-            toku_unpin_ftnode(brt->ft, root);
+            toku_unpin_ftnode(ft_handle->ft, root);
         }
         // Set the highest pivot key seen here, since the parent may
@@ -385,8 +385,8 @@ toku_ft_hot_optimize(FT_HANDLE brt, DBT* left, DBT* right,
         else if (right) {
             // if we have flushed past the bounds set for us,
             // set rightmost_leaf_seen so we exit
-            FAKE_DB(db, &brt->ft->cmp_descriptor);
-            int cmp = brt->ft->compare_fun(&db, &flusher.max_current_key, right);
+            FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
+            int cmp = ft_handle->ft->compare_fun(&db, &flusher.max_current_key, right);
             if (cmp > 0) {
                 flusher.rightmost_leaf_seen = 1;
             }
@@ -416,7 +416,7 @@ toku_ft_hot_optimize(FT_HANDLE brt, DBT* left, DBT* right,
     if (r == 0) { success = true; }
     {
-        toku_ft_note_hot_complete(brt, success, msn_at_start_of_hot);
+        toku_ft_note_hot_complete(ft_handle, success, msn_at_start_of_hot);
     }
     if (success) {


@@ -454,7 +454,7 @@ enum {
 uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum);
-// The brt_header is not managed by the cachetable. Instead, it hangs off the cachefile as userdata.
+// The ft_header is not managed by the cachetable. Instead, it hangs off the cachefile as userdata.
 enum ft_type {FT_CURRENT=1, FT_CHECKPOINT_INPROGRESS};
@@ -470,7 +470,7 @@ struct ft_header {
     // LSN of creation of "checkpoint-begin" record in log.
     LSN checkpoint_lsn;
-    // see brt_layout_version.h. maybe don't need this if we assume
+    // see ft_layout_version.h. maybe don't need this if we assume
     // it's always the current version after deserializing
     const int layout_version;
     // different (<) from layout_version if upgraded from a previous
@@ -504,7 +504,7 @@ struct ft_header {
     enum toku_compression_method compression_method;
     unsigned int fanout;
-    // Current Minimum MSN to be used when upgrading pre-MSN BRT's.
+    // Current Minimum MSN to be used when upgrading pre-MSN FT's.
     // This is decremented from our currnt MIN_MSN so as not to clash
     // with any existing 'normal' MSN's.
     MSN highest_unused_msn_for_upgrade;
@@ -526,7 +526,7 @@ struct ft_header {
     STAT64INFO_S on_disk_stats;
 };
-// brt_header is always the current version.
+// ft_header is always the current version.
 struct ft {
     FT_HEADER h;
     FT_HEADER checkpoint_header;
@@ -768,7 +768,7 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(FT h) {
 static const FTNODE null_ftnode=0;
-/* a brt cursor is represented as a kv pair in a tree */
+/* an ft cursor is represented as a kv pair in a tree */
 struct ft_cursor {
     struct toku_list cursors_link;
     FT_HANDLE ft_handle;
@@ -1018,12 +1018,12 @@ int toku_ftnode_hot_next_child(FTNODE node,
 /* Stuff for testing */
 // toku_testsetup_initialize() must be called before any other test_setup_xxx() functions are called.
 void toku_testsetup_initialize(void);
-int toku_testsetup_leaf(FT_HANDLE brt, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens);
-int toku_testsetup_nonleaf (FT_HANDLE brt, int height, BLOCKNUM *diskoff, int n_children, BLOCKNUM *children, char **keys, int *keylens);
-int toku_testsetup_root(FT_HANDLE brt, BLOCKNUM);
-int toku_testsetup_get_sersize(FT_HANDLE brt, BLOCKNUM); // Return the size on disk.
-int toku_testsetup_insert_to_leaf (FT_HANDLE brt, BLOCKNUM, const char *key, int keylen, const char *val, int vallen);
-int toku_testsetup_insert_to_nonleaf (FT_HANDLE brt, BLOCKNUM, enum ft_msg_type, const char *key, int keylen, const char *val, int vallen);
+int toku_testsetup_leaf(FT_HANDLE ft_h, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens);
+int toku_testsetup_nonleaf (FT_HANDLE ft_h, int height, BLOCKNUM *diskoff, int n_children, BLOCKNUM *children, char **keys, int *keylens);
+int toku_testsetup_root(FT_HANDLE ft_h, BLOCKNUM);
+int toku_testsetup_get_sersize(FT_HANDLE ft_h, BLOCKNUM); // Return the size on disk.
+int toku_testsetup_insert_to_leaf (FT_HANDLE ft_h, BLOCKNUM, const char *key, int keylen, const char *val, int vallen);
+int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_h, BLOCKNUM, enum ft_msg_type, const char *key, int keylen, const char *val, int vallen);
 void toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t);
 void toku_ft_root_put_msg(FT h, FT_MSG msg, txn_gc_info *gc_info);
@@ -1031,12 +1031,12 @@ void toku_ft_root_put_msg(FT h, FT_MSG msg, txn_gc_info *gc_info);
 void
 toku_get_node_for_verify(
     BLOCKNUM blocknum,
-    FT_HANDLE brt,
+    FT_HANDLE ft_h,
     FTNODE* nodep
 );
 int
-toku_verify_ftnode (FT_HANDLE brt,
+toku_verify_ftnode (FT_HANDLE ft_h,
                     MSN rootmsn, MSN parentmsn, bool messages_exist_above,
                     FTNODE node, int height,
                     const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)


@@ -235,7 +235,7 @@ static const uint32_t this_version = FT_LAYOUT_VERSION;
 */
 static FT_STATUS_S ft_status;
-#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_status, k, c, t, "brt: " l, inc)
+#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_status, k, c, t, "ft: " l, inc)
 static toku_mutex_t ft_open_close_lock;
@@ -731,14 +731,14 @@ toku_bfe_rightmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node)
 }
 static int
-ft_cursor_rightmost_child_wanted(FT_CURSOR cursor, FT_HANDLE brt, FTNODE node)
+ft_cursor_rightmost_child_wanted(FT_CURSOR cursor, FT_HANDLE ft_handle, FTNODE node)
 {
     if (cursor->right_is_pos_infty) {
         return node->n_children - 1;
     } else if (cursor->range_lock_right_key.data == nullptr) {
         return -1;
     } else {
-        return toku_ftnode_which_child(node, &cursor->range_lock_right_key, &brt->ft->cmp_descriptor, brt->ft->compare_fun);
+        return toku_ftnode_which_child(node, &cursor->range_lock_right_key, &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun);
     }
 }
@@ -1261,7 +1261,7 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
     FTNODE node = (FTNODE) ftnode_pv;
     struct ftnode_fetch_extra *bfe = (struct ftnode_fetch_extra *) read_extraargs;
     //
-    // The three types of fetches that the brt layer may request are:
+    // The three types of fetches that the ft layer may request are:
     // - ftnode_fetch_none: no partitions are necessary (example use: stat64)
     // - ftnode_fetch_subset: some subset is necessary (example use: toku_ft_search)
     // - ftnode_fetch_all: entire node is necessary (example use: flush, split, merge)
@@ -1803,7 +1803,7 @@ struct setval_extra_s {
 * If new_val == NULL, we send a delete message instead of an insert.
 * This happens here instead of in do_delete() for consistency.
 * setval_fun() is called from handlerton, passing in svextra_v
-* from setval_extra_s input arg to brt->update_fun().
+* from setval_extra_s input arg to ft->update_fun().
 */
 static void setval_fun (const DBT *new_val, void *svextra_v) {
     struct setval_extra_s *CAST_FROM_VOIDP(svextra, svextra_v);
@@ -1888,7 +1888,7 @@ static int do_update(ft_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE bn
     struct setval_extra_s setval_extra = {setval_tag, false, 0, bn, msg->msn, msg->xids,
                                           keyp, idx, le_for_update, gc_info,
                                           workdone, stats_to_update};
-    // call handlerton's brt->update_fun(), which passes setval_extra to setval_fun()
+    // call handlerton's ft->update_fun(), which passes setval_extra to setval_fun()
     FAKE_DB(db, desc);
     int r = update_fun(
         &db,
@@ -3218,9 +3218,9 @@ void toku_ft_root_put_msg(
     }
 }
-// Effect: Insert the key-val pair into brt.
-void toku_ft_insert (FT_HANDLE brt, DBT *key, DBT *val, TOKUTXN txn) {
-    toku_ft_maybe_insert(brt, key, val, txn, false, ZERO_LSN, true, FT_INSERT);
+// Effect: Insert the key-val pair into ft.
+void toku_ft_insert (FT_HANDLE ft_handle, DBT *key, DBT *val, TOKUTXN txn) {
+    toku_ft_maybe_insert(ft_handle, key, val, txn, false, ZERO_LSN, true, FT_INSERT);
 }
 void toku_ft_load_recovery(TOKUTXN txn, FILENUM old_filenum, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn) {
@@ -3291,31 +3291,31 @@ void toku_ft_optimize (FT_HANDLE ft_h) {
     }
 }
-void toku_ft_load(FT_HANDLE brt, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *load_lsn) {
-    FILENUM old_filenum = toku_cachefile_filenum(brt->ft->cf);
+void toku_ft_load(FT_HANDLE ft_handle, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *load_lsn) {
+    FILENUM old_filenum = toku_cachefile_filenum(ft_handle->ft->cf);
     int do_log = 1;
     toku_ft_load_recovery(txn, old_filenum, new_iname, do_fsync, do_log, load_lsn);
 }
 // ft actions for logging hot index filenums
-void toku_ft_hot_index(FT_HANDLE brt __attribute__ ((unused)), TOKUTXN txn, FILENUMS filenums, int do_fsync, LSN *lsn) {
+void toku_ft_hot_index(FT_HANDLE ft_handle __attribute__ ((unused)), TOKUTXN txn, FILENUMS filenums, int do_fsync, LSN *lsn) {
     int do_log = 1;
     toku_ft_hot_index_recovery(txn, filenums, do_fsync, do_log, lsn);
 }
 void
-toku_ft_log_put (TOKUTXN txn, FT_HANDLE brt, const DBT *key, const DBT *val) {
+toku_ft_log_put (TOKUTXN txn, FT_HANDLE ft_handle, const DBT *key, const DBT *val) {
     TOKULOGGER logger = toku_txn_logger(txn);
     if (logger) {
         BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
         BYTESTRING valbs = {.len=val->size, .data=(char *) val->data};
         TXNID_PAIR xid = toku_txn_get_txnid(txn);
-        toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(brt->ft->cf), xid, keybs, valbs);
+        toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_handle->ft->cf), xid, keybs, valbs);
     }
 }
 void
-toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, uint32_t num_fts, const DBT *key, const DBT *val) {
+toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val) {
     assert(txn);
     assert(num_fts > 0);
     TOKULOGGER logger = toku_txn_logger(txn);
@@ -3323,7 +3323,7 @@ toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, uint32
     FILENUM fnums[num_fts];
     uint32_t i;
     for (i = 0; i < num_fts; i++) {
-        fnums[i] = toku_cachefile_filenum(brts[i]->ft->cf);
+        fnums[i] = toku_cachefile_filenum(fts[i]->ft->cf);
     }
     FILENUMS filenums = {.num = num_fts, .filenums = fnums};
     BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
@@ -3467,33 +3467,33 @@ void toku_ft_maybe_update_broadcast(FT_HANDLE ft_h, const DBT *update_function_e
     }
 }
-void toku_ft_send_insert(FT_HANDLE brt, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info) {
+void toku_ft_send_insert(FT_HANDLE ft_handle, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info) {
     FT_MSG_S ftmsg = { type, ZERO_MSN, xids, .u = { .id = { key, val } } };
-    toku_ft_root_put_msg(brt->ft, &ftmsg, gc_info);
+    toku_ft_root_put_msg(ft_handle->ft, &ftmsg, gc_info);
 }
-void toku_ft_send_commit_any(FT_HANDLE brt, DBT *key, XIDS xids, txn_gc_info *gc_info) {
+void toku_ft_send_commit_any(FT_HANDLE ft_handle, DBT *key, XIDS xids, txn_gc_info *gc_info) {
     DBT val;
     FT_MSG_S ftmsg = { FT_COMMIT_ANY, ZERO_MSN, xids, .u = { .id = { key, toku_init_dbt(&val) } } };
-    toku_ft_root_put_msg(brt->ft, &ftmsg, gc_info);
+    toku_ft_root_put_msg(ft_handle->ft, &ftmsg, gc_info);
 }
-void toku_ft_delete(FT_HANDLE brt, DBT *key, TOKUTXN txn) {
-    toku_ft_maybe_delete(brt, key, txn, false, ZERO_LSN, true);
+void toku_ft_delete(FT_HANDLE ft_handle, DBT *key, TOKUTXN txn) {
+    toku_ft_maybe_delete(ft_handle, key, txn, false, ZERO_LSN, true);
 }
 void
-toku_ft_log_del(TOKUTXN txn, FT_HANDLE brt, const DBT *key) {
+toku_ft_log_del(TOKUTXN txn, FT_HANDLE ft_handle, const DBT *key) {
     TOKULOGGER logger = toku_txn_logger(txn);
     if (logger) {
         BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
         TXNID_PAIR xid = toku_txn_get_txnid(txn);
-        toku_log_enq_delete_any(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(brt->ft->cf), xid, keybs);
+        toku_log_enq_delete_any(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_handle->ft->cf), xid, keybs);
     }
 }
 void
-toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, uint32_t num_fts, const DBT *key, const DBT *val) {
+toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val) {
     assert(txn);
     assert(num_fts > 0);
     TOKULOGGER logger = toku_txn_logger(txn);
@@ -3501,7 +3501,7 @@ toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, uint32
     FILENUM fnums[num_fts];
     uint32_t i;
     for (i = 0; i < num_fts; i++) {
-        fnums[i] = toku_cachefile_filenum(brts[i]->ft->cf);
+        fnums[i] = toku_cachefile_filenum(fts[i]->ft->cf);
     }
     FILENUMS filenums = {.num = num_fts, .filenums = fnums};
     BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
@@ -3544,10 +3544,10 @@ void toku_ft_maybe_delete(FT_HANDLE ft_h, DBT *key, TOKUTXN txn, bool oplsn_vali
     }
 }
-void toku_ft_send_delete(FT_HANDLE brt, DBT *key, XIDS xids, txn_gc_info *gc_info) {
+void toku_ft_send_delete(FT_HANDLE ft_handle, DBT *key, XIDS xids, txn_gc_info *gc_info) {
     DBT val; toku_init_dbt(&val);
     FT_MSG_S ftmsg = { FT_DELETE_ANY, ZERO_MSN, xids, .u = { .id = { key, &val } } };
-    toku_ft_root_put_msg(brt->ft, &ftmsg, gc_info);
+    toku_ft_root_put_msg(ft_handle->ft, &ftmsg, gc_info);
 }
 /* ******************** open,close and create ********************** */
@@ -3558,22 +3558,22 @@ int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *ft_handle_
                          enum toku_compression_method compression_method,
                          CACHETABLE cachetable, TOKUTXN txn,
                          int (*compare_fun)(DB *, const DBT*,const DBT*)) {
-    FT_HANDLE brt;
+    FT_HANDLE ft_handle;
     const int only_create = 0;
-    toku_ft_handle_create(&brt);
-    toku_ft_handle_set_nodesize(brt, nodesize);
-    toku_ft_handle_set_basementnodesize(brt, basementnodesize);
-    toku_ft_handle_set_compression_method(brt, compression_method);
-    toku_ft_handle_set_fanout(brt, 16);
-    toku_ft_set_bt_compare(brt, compare_fun);
-    int r = toku_ft_handle_open(brt, fname, is_create, only_create, cachetable, txn);
+    toku_ft_handle_create(&ft_handle);
+    toku_ft_handle_set_nodesize(ft_handle, nodesize);
+    toku_ft_handle_set_basementnodesize(ft_handle, basementnodesize);
+    toku_ft_handle_set_compression_method(ft_handle, compression_method);
+    toku_ft_handle_set_fanout(ft_handle, 16);
+    toku_ft_set_bt_compare(ft_handle, compare_fun);
+    int r = toku_ft_handle_open(ft_handle, fname, is_create, only_create, cachetable, txn);
     if (r != 0) {
         return r;
     }
-    *ft_handle_p = brt;
+    *ft_handle_p = ft_handle;
     return r;
 }
@@ -3591,9 +3591,9 @@ static inline int ft_open_maybe_direct(const char *filename, int oflag, int mode
     }
 }
-// open a file for use by the brt
+// open a file for use by the ft
 // Requires: File does not exist.
-static int ft_create_file(FT_HANDLE UU(brt), const char *fname, int *fdp) {
+static int ft_create_file(FT_HANDLE UU(ft_handle), const char *fname, int *fdp) {
     mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO;
     int r;
     int fd;
@@ -3619,7 +3619,7 @@ static int ft_create_file(FT_HANDLE UU(brt), const char *fname, int *fdp) {
     return r;
 }
-// open a file for use by the brt. if the file does not exist, error
+// open a file for use by the ft. if the file does not exist, error
 static int ft_open_file(const char *fname, int *fdp) {
     mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO;
     int fd;
@@ -3854,7 +3854,7 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only
     // important note here,
     // after this point, where we associate the header
-    // with the brt, the function is not allowed to fail
+    // with the ft_handle, the function is not allowed to fail
     // Code that handles failure (located below "exit"),
     // depends on this
     toku_ft_note_ft_handle_open(ft, ft_h);
@@ -3863,7 +3863,7 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only
         toku_txn_maybe_note_ft(txn, ft);
     }
-    //Opening a brt may restore to previous checkpoint. Truncate if necessary.
+    //Opening an ft may restore to previous checkpoint. Truncate if necessary.
     {
         int fd = toku_cachefile_get_fd (ft->cf);
         toku_maybe_truncate_file_on_open(ft->blocktable, fd);
@@ -3879,9 +3879,9 @@ exit:
         // we only call toku_ft_note_ft_handle_open
         // when the function succeeds, so if we are here,
        // then that means we have a reference to the header
-        // but we have not linked it to this brt. So,
+        // but we have not linked it to this ft. So,
         // we can simply try to remove the header.
-        // We don't need to unlink this brt from the header
+        // We don't need to unlink this ft from the header
         toku_ft_grab_reflock(ft);
         bool needed = toku_ft_needed_unlocked(ft);
         toku_ft_release_reflock(ft);
@@ -3898,7 +3898,7 @@ exit:
     return r;
 }
-// Open a brt for the purpose of recovery, which requires that the brt be open to a pre-determined FILENUM
+// Open an ft for the purpose of recovery, which requires that the ft be open to a pre-determined FILENUM
 // and may require a specific checkpointed version of the file.
 // (dict_id is assigned by the ft_handle_open() function.)
 int
@@ -3910,7 +3910,7 @@ toku_ft_handle_open_recovery(FT_HANDLE t, const char *fname_in_env, int is_creat
     return r;
 }
-// Open a brt in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function.
+// Open an ft in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function.
 // Requires: The multi-operation client lock must be held to prevent a checkpoint from occuring.
 int
 toku_ft_handle_open(FT_HANDLE t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn) {
@@ -3945,7 +3945,7 @@ toku_ft_handle_clone(FT_HANDLE *cloned_ft_handle, FT_HANDLE ft_handle, TOKUTXN t
     return r;
 }
-// Open a brt in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function.
+// Open an ft in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function.
 int
 toku_ft_handle_open_with_dict_id(
     FT_HANDLE t,
@@ -3973,8 +3973,8 @@ toku_ft_handle_open_with_dict_id(
 }
 DICTIONARY_ID
-toku_ft_get_dictionary_id(FT_HANDLE brt) {
-    FT h = brt->ft;
+toku_ft_get_dictionary_id(FT_HANDLE ft_handle) {
+    FT h = ft_handle->ft;
     DICTIONARY_ID dict_id = h->dict_id;
     return dict_id;
 }
@@ -3989,7 +3989,7 @@ void toku_ft_get_flags(FT_HANDLE ft_handle, unsigned int *flags) {
 }
 void toku_ft_get_maximum_advised_key_value_lengths (unsigned int *max_key_len, unsigned int *max_val_len)
-// return the maximum advisable key value lengths. The brt doesn't enforce these.
+// return the maximum advisable key value lengths. The ft doesn't enforce these.
 {
     *max_key_len = 32*1024;
     *max_val_len = 32*1024*1024;
@@ -4032,21 +4032,21 @@ void toku_ft_handle_get_basementnodesize(FT_HANDLE ft_handle, unsigned int *base
     }
 }
-void toku_ft_set_bt_compare(FT_HANDLE brt, int (*bt_compare)(DB*, const DBT*, const DBT*)) {
-    brt->options.compare_fun = bt_compare;
+void toku_ft_set_bt_compare(FT_HANDLE ft_handle, int (*bt_compare)(DB*, const DBT*, const DBT*)) {
+    ft_handle->options.compare_fun = bt_compare;
 }
-void toku_ft_set_redirect_callback(FT_HANDLE brt, on_redirect_callback redir_cb, void* extra) {
-    brt->redirect_callback = redir_cb;
-    brt->redirect_callback_extra = extra;
+void toku_ft_set_redirect_callback(FT_HANDLE ft_handle, on_redirect_callback redir_cb, void* extra) {
+    ft_handle->redirect_callback = redir_cb;
+    ft_handle->redirect_callback_extra = extra;
 }
-void toku_ft_set_update(FT_HANDLE brt, ft_update_func update_fun) {
-    brt->options.update_fun = update_fun;
+void toku_ft_set_update(FT_HANDLE ft_handle, ft_update_func update_fun) {
+    ft_handle->options.update_fun = update_fun;
 }
-ft_compare_func toku_ft_get_bt_compare (FT_HANDLE brt) {
-    return brt->options.compare_fun;
+ft_compare_func toku_ft_get_bt_compare (FT_HANDLE ft_handle) {
+    return ft_handle->options.compare_fun;
 }
 static void
@@ -4088,18 +4088,18 @@ toku_close_ft_handle_nolsn (FT_HANDLE ft_handle, char** UU(error_string)) {
 }
 void toku_ft_handle_create(FT_HANDLE *ft_handle_ptr) {
-    FT_HANDLE XMALLOC(brt);
-    memset(brt, 0, sizeof *brt);
-    toku_list_init(&brt->live_ft_handle_link);
-    brt->options.flags = 0;
-    brt->did_set_flags = false;
-    brt->options.nodesize = FT_DEFAULT_NODE_SIZE;
-    brt->options.basementnodesize = FT_DEFAULT_BASEMENT_NODE_SIZE;
-    brt->options.compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
-    brt->options.fanout = FT_DEFAULT_FANOUT;
-    brt->options.compare_fun = toku_builtin_compare_fun;
-    brt->options.update_fun = NULL;
-    *ft_handle_ptr = brt;
+    FT_HANDLE XMALLOC(ft_handle);
+    memset(ft_handle, 0, sizeof *ft_handle);
+    toku_list_init(&ft_handle->live_ft_handle_link);
+    ft_handle->options.flags = 0;
+    ft_handle->did_set_flags = false;
+    ft_handle->options.nodesize = FT_DEFAULT_NODE_SIZE;
+    ft_handle->options.basementnodesize = FT_DEFAULT_BASEMENT_NODE_SIZE;
+    ft_handle->options.compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
+    ft_handle->options.fanout = FT_DEFAULT_FANOUT;
+    ft_handle->options.compare_fun = toku_builtin_compare_fun;
+    ft_handle->options.update_fun = NULL;
+    *ft_handle_ptr = ft_handle;
 }
 /* ************* CURSORS ********************* */
@@ -4163,7 +4163,7 @@ ft_cursor_extract_val(LEAFENTRY le,
 }
 int toku_ft_cursor (
-    FT_HANDLE brt,
+    FT_HANDLE ft_handle,
     FT_CURSOR *cursorptr,
     TOKUTXN ttxn,
     bool is_snapshot_read,
@@ -4172,14 +4172,14 @@ int toku_ft_cursor (
 {
     if (is_snapshot_read) {
         invariant(ttxn != NULL);
-        int accepted = does_txn_read_entry(brt->ft->h->root_xid_that_created, ttxn);
+        int accepted = does_txn_read_entry(ft_handle->ft->h->root_xid_that_created, ttxn);
         if (accepted!=TOKUDB_ACCEPT) {
             invariant(accepted==0);
             return TOKUDB_MVCC_DICTIONARY_TOO_NEW;
         }
     }
     FT_CURSOR XCALLOC(cursor);
-    cursor->ft_handle = brt;
+    cursor->ft_handle = ft_handle;
     cursor->prefetching = false;
     toku_init_dbt(&cursor->range_lock_left_key);
     toku_init_dbt(&cursor->range_lock_right_key);
@@ -5051,7 +5051,7 @@ got_a_good_value:
 static int
 ft_search_node (
-    FT_HANDLE brt,
+    FT_HANDLE ft_handle,
     FTNODE node,
     ft_search_t *search,
     int child_to_search,
@@ -5086,25 +5086,25 @@ ftnode_pf_callback_and_free_bfe(void *ftnode_pv, void* disk_data, void *read_ext
 }
 static void
-ft_node_maybe_prefetch(FT_HANDLE brt, FTNODE node, int childnum, FT_CURSOR ftcursor, bool *doprefetch) {
+ft_node_maybe_prefetch(FT_HANDLE ft_handle, FTNODE node, int childnum, FT_CURSOR ftcursor, bool *doprefetch) {
     // the number of nodes to prefetch
     const int num_nodes_to_prefetch = 1;
     // if we want to prefetch in the tree
     // then prefetch the next children if there are any
     if (*doprefetch && ft_cursor_prefetching(ftcursor) && !ftcursor->disable_prefetching) {
-        int rc = ft_cursor_rightmost_child_wanted(ftcursor, brt, node);
+        int rc = ft_cursor_rightmost_child_wanted(ftcursor, ft_handle, node);
         for (int i = childnum + 1; (i <= childnum + num_nodes_to_prefetch) && (i <= rc); i++) {
             BLOCKNUM nextchildblocknum = BP_BLOCKNUM(node, i);
-            uint32_t nextfullhash = compute_child_fullhash(brt->ft->cf, node, i);
+            uint32_t nextfullhash = compute_child_fullhash(ft_handle->ft->cf, node, i);
             struct ftnode_fetch_extra *MALLOC(bfe);
-            fill_bfe_for_prefetch(bfe, brt->ft, ftcursor);
+            fill_bfe_for_prefetch(bfe, ft_handle->ft, ftcursor);
             bool doing_prefetch = false;
             toku_cachefile_prefetch(
-                brt->ft->cf,
+                ft_handle->ft->cf,
                 nextchildblocknum,
                 nextfullhash,
-                get_write_callbacks_for_node(brt->ft),
+                get_write_callbacks_for_node(ft_handle->ft),
                 ftnode_fetch_callback_and_free_bfe,
                 toku_ftnode_pf_req_callback,
                 ftnode_pf_callback_and_free_bfe,
@@ -5130,11 +5130,11 @@ static void
 unlock_ftnode_fun (void *v) {
     struct unlock_ftnode_extra *x = NULL;
     CAST_FROM_VOIDP(x, v);
-    FT_HANDLE brt = x->ft_handle;
+    FT_HANDLE ft_handle = x->ft_handle;
     FTNODE node = x->node;
     // CT lock is held
     int r = toku_cachetable_unpin_ct_prelocked_no_flush(
-        brt->ft->cf,
+        ft_handle->ft->cf,
         node->ct_pair,
         (enum cachetable_dirty) node->dirty,
         x->msgs_applied ? make_ftnode_pair_attr(node) : make_invalid_pair_attr()
@@ -5144,14 +5144,14 @@ unlock_ftnode_fun (void *v) {
 /* search in a node's child */
 static int
-ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool *doprefetch, FT_CURSOR ftcursor, UNLOCKERS unlockers,
+ft_search_child(FT_HANDLE ft_handle, FTNODE node, int childnum, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool *doprefetch, FT_CURSOR ftcursor, UNLOCKERS unlockers,
                 ANCESTORS ancestors, struct pivot_bounds const * const bounds, bool can_bulk_fetch)
 // Effect: Search in a node's child. Searches are read-only now (at least as far as the hardcopy is concerned).
 {
     struct ancestors next_ancestors = {node, childnum, ancestors};
     BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum);
-    uint32_t fullhash = compute_child_fullhash(brt->ft->cf, node, childnum);
+    uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, childnum);
     FTNODE childnode = nullptr;
     // If the current node's height is greater than 1, then its child is an internal node.
@@ -5160,7 +5160,7 @@ ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, F
     struct ftnode_fetch_extra bfe;
     fill_bfe_for_subset_read(
         &bfe,
-        brt->ft,
+        ft_handle->ft,
         search,
         &ftcursor->range_lock_left_key,
         &ftcursor->range_lock_right_key,
@@ -5171,7 +5171,7 @@ ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, F
     );
     bool msgs_applied = false;
     {
-        int rr = toku_pin_ftnode_for_query(brt, childblocknum, fullhash,
+        int rr = toku_pin_ftnode_for_query(ft_handle, childblocknum, fullhash,
                                            unlockers,
                                            &next_ancestors, bounds,
                                            &bfe,
@@ -5184,22 +5184,21 @@ ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, F
         invariant_zero(rr);
     }
-    struct unlock_ftnode_extra unlock_extra = {brt,childnode,msgs_applied};
-    struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
-    int r = ft_search_node(brt, childnode, search, bfe.child_to_read, getf, getf_v, doprefetch, ftcursor, &next_unlockers, &next_ancestors, bounds, can_bulk_fetch);
+    struct unlock_ftnode_extra unlock_extra = { ft_handle, childnode, msgs_applied };
+    struct unlockers next_unlockers = { true, unlock_ftnode_fun, (void *) &unlock_extra, unlockers };
+    int r = ft_search_node(ft_handle, childnode, search, bfe.child_to_read, getf, getf_v, doprefetch, ftcursor, &next_unlockers, &next_ancestors, bounds, can_bulk_fetch);
     if (r!=TOKUDB_TRY_AGAIN) {
         // maybe prefetch the next child
         if (r == 0 && node->height == 1) {
-            ft_node_maybe_prefetch(brt, node, childnum, ftcursor, doprefetch);
+            ft_node_maybe_prefetch(ft_handle, node, childnum, ftcursor, doprefetch);
         }
         assert(next_unlockers.locked);
         if (msgs_applied) {
-            toku_unpin_ftnode(brt->ft, childnode);
+            toku_unpin_ftnode(ft_handle->ft, childnode);
         }
         else {
-            toku_unpin_ftnode_read_only(brt->ft, childnode);
+            toku_unpin_ftnode_read_only(ft_handle->ft, childnode);
         }
     } else {
         // try again.
@@ -5212,10 +5211,10 @@ ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, F
         // the node was not unpinned, so we unpin it here
         if (next_unlockers.locked) {
             if (msgs_applied) {
-                toku_unpin_ftnode(brt->ft, childnode);
+                toku_unpin_ftnode(ft_handle->ft, childnode);
             }
             else {
-                toku_unpin_ftnode_read_only(brt->ft, childnode);
+                toku_unpin_ftnode_read_only(ft_handle->ft, childnode);
             }
         }
     }
@@ -5309,7 +5308,7 @@ maybe_search_save_bound(
 static int
 ft_search_node(
-    FT_HANDLE brt,
+    FT_HANDLE ft_handle,
     FTNODE node,
     ft_search_t *search,
     int child_to_search,
@@ -5334,7 +5333,7 @@ ft_search_node(
     const struct pivot_bounds next_bounds = next_pivot_keys(node, child_to_search, bounds);
     if (node->height > 0) {
         r = ft_search_child(
-            brt,
+            ft_handle,
             node,
             child_to_search,
             search,
@@ -5407,13 +5406,13 @@ ft_search_node(
 }
 static int
-toku_ft_search (FT_HANDLE brt, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, FT_CURSOR ftcursor, bool can_bulk_fetch)
+toku_ft_search (FT_HANDLE ft_handle, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, FT_CURSOR ftcursor, bool can_bulk_fetch)
 // Effect: Perform a search. Associate cursor with a leaf if possible.
 // All searches are performed through this function.
 {
     int r;
     uint trycount = 0; // How many tries did it take to get the result?
-    FT ft = brt->ft;
+    FT ft = ft_handle->ft;
     toku::context search_ctx(CTX_SEARCH);
@@ -5478,13 +5477,13 @@ try_again:
     uint tree_height = node->height + 1; // How high is the tree? This is the height of the root node plus one (leaf is at height 0).
-    struct unlock_ftnode_extra unlock_extra = {brt,node,false};
+    struct unlock_ftnode_extra unlock_extra = {ft_handle,node,false};
     struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
     {
         bool doprefetch = false;
         //static int counter = 0; counter++;
-        r = ft_search_node(brt, node, search, bfe.child_to_read, getf, getf_v, &doprefetch, ftcursor, &unlockers, (ANCESTORS)NULL, &infinite_bounds, can_bulk_fetch);
+        r = ft_search_node(ft_handle, node, search, bfe.child_to_read, getf, getf_v, &doprefetch, ftcursor, &unlockers, (ANCESTORS)NULL, &infinite_bounds, can_bulk_fetch);
         if (r==TOKUDB_TRY_AGAIN) {
             // there are two cases where we get TOKUDB_TRY_AGAIN
             // case 1 is when some later call to toku_pin_ftnode returned
@@ -5493,7 +5492,7 @@ try_again:
             // some piece of a node that it needed was not in memory.
             // In this case, the node was not unpinned, so we unpin it here
             if (unlockers.locked) {
-                toku_unpin_ftnode_read_only(brt->ft, node);
+                toku_unpin_ftnode_read_only(ft_handle->ft, node);
             }
             goto try_again;
         } else {
@@ -5502,7 +5501,7 @@ try_again:
     }
     assert(unlockers.locked);
-    toku_unpin_ftnode_read_only(brt->ft, node);
+    toku_unpin_ftnode_read_only(ft_handle->ft, node);
     //Heaviside function (+direction) queries define only a lower or upper
@@ -5553,9 +5552,9 @@ ft_cursor_search(FT_CURSOR cursor, ft_search_t *search, FT_GET_CALLBACK_FUNCTION
     return r;
 }
-static inline int compare_k_x(FT_HANDLE brt, const DBT *k, const DBT *x) {
-    FAKE_DB(db, &brt->ft->cmp_descriptor);
-    return brt->ft->compare_fun(&db, k, x);
+static inline int compare_k_x(FT_HANDLE ft_handle, const DBT *k, const DBT *x) {
+    FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
+    return ft_handle->ft->compare_fun(&db, k, x);
 }
 static int
@@ -5565,8 +5564,8 @@ ft_cursor_compare_one(const ft_search_t &search __attribute__((__unused__)), con
 }
 static int ft_cursor_compare_set(const ft_search_t &search, const DBT *x) {
-    FT_HANDLE CAST_FROM_VOIDP(brt, search.context);
-    return compare_k_x(brt, search.k, x) <= 0; /* return min xy: kv <= xy */
+    FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+    return compare_k_x(ft_handle, search.k, x) <= 0; /* return min xy: kv <= xy */
 }
 static int
@@ -5628,8 +5627,8 @@ toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_
 }
 static int ft_cursor_compare_next(const ft_search_t &search, const DBT *x) {
-    FT_HANDLE CAST_FROM_VOIDP(brt, search.context);
-    return compare_k_x(brt, search.k, x) < 0; /* return min xy: kv < xy */
+    FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+    return compare_k_x(ft_handle, search.k, x) < 0; /* return min xy: kv < xy */
 }
 static int
@@ -5736,8 +5735,8 @@ ft_cursor_search_eq_k_x(FT_CURSOR cursor, ft_search_t *search, FT_GET_CALLBACK_F
 }
 static int ft_cursor_compare_prev(const ft_search_t &search, const DBT *x) {
-    FT_HANDLE CAST_FROM_VOIDP(brt, search.context);
-    return compare_k_x(brt, search.k, x) > 0; /* return max xy: kv > xy */
+    FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+    return compare_k_x(ft_handle, search.k, x) > 0; /* return max xy: kv > xy */
 }
 int
@@ -5751,8 +5750,8 @@ toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_
 }
 static int ft_cursor_compare_set_range(const ft_search_t &search, const DBT *x) {
-    FT_HANDLE CAST_FROM_VOIDP(brt, search.context);
-    return compare_k_x(brt, search.k, x) <= 0; /* return kv <= xy */
+    FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+    return compare_k_x(ft_handle, search.k, x) <= 0; /* return kv <= xy */
 }
 int
@@ -5776,8 +5775,8 @@ toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION ge
 }
 static int ft_cursor_compare_set_range_reverse(const ft_search_t &search, const DBT *x) {
-    FT_HANDLE CAST_FROM_VOIDP(brt, search.context);
-    return compare_k_x(brt, search.k, x) >= 0; /* return kv >= xy */
+    FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+    return compare_k_x(ft_handle, search.k, x) >= 0; /* return kv >= xy */
 }
 int
@@ -5856,12 +5855,12 @@ bool toku_ft_cursor_uninitialized(FT_CURSOR c) {
 /* ********************************* lookup **************************************/
 int
-toku_ft_lookup (FT_HANDLE brt, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
+toku_ft_lookup (FT_HANDLE ft_handle, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
 {
     int r, rr;
     FT_CURSOR cursor;
-    rr = toku_ft_cursor(brt, &cursor, NULL, false, false);
+    rr = toku_ft_cursor(ft_handle, &cursor, NULL, false, false);
     if (rr != 0) return rr;
     int op = DB_SET;
@@ -5915,7 +5914,7 @@ keyrange_compare (DBT const &kdbt, const struct keyrange_compare_s &s) {
 }
 static void
-keysrange_in_leaf_partition (FT_HANDLE brt, FTNODE node,
+keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node,
                              DBT* key_left, DBT* key_right,
                              int left_child_number, int right_child_number, uint64_t estimated_num_rows,
                              uint64_t *less, uint64_t* equal_left, uint64_t* middle,
@@ -5932,7 +5931,7 @@ keysrange_in_leaf_partition (FT_HANDLE brt, FTNODE node,
     if (BP_STATE(node, left_child_number) == PT_AVAIL) {
         int r;
         // The partition is in main memory then get an exact count.
-        struct keyrange_compare_s s_left = {brt->ft, key_left};
+        struct keyrange_compare_s s_left = {ft_handle->ft, key_left};
         BASEMENTNODE bn = BLB(node, left_child_number);
         uint32_t idx_left = 0;
         // if key_left is NULL then set r==-1 and idx==0.
@@ -5944,7 +5943,7 @@ keysrange_in_leaf_partition (FT_HANDLE brt, FTNODE node,
         uint32_t idx_right = size;
         r = -1;
         if (single_basement && key_right) {
-            struct keyrange_compare_s s_right = {brt->ft, key_right};
+            struct keyrange_compare_s s_right = {ft_handle->ft, key_right};
             r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>(s_right, nullptr, nullptr, nullptr, &idx_right);
         }
         *middle = idx_right - idx_left - *equal_left;
@@ -5969,7 +5968,7 @@ keysrange_in_leaf_partition (FT_HANDLE brt, FTNODE node,
 }
 static int
-toku_ft_keysrange_internal (FT_HANDLE brt, FTNODE node,
+toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node,
                             DBT* key_left, DBT* key_right, bool may_find_right,
                             uint64_t* less, uint64_t* equal_left, uint64_t* middle,
                             uint64_t* equal_right, uint64_t* greater, bool* single_basement_node,
@@ -5981,15 +5980,15 @@ toku_ft_keysrange_internal (FT_HANDLE brt, FTNODE node,
 {
     int r = 0;
     // if KEY is NULL then use the leftmost key.
-    int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, &brt->ft->cmp_descriptor, brt->ft->compare_fun) : 0;
+    int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun) : 0;
     int right_child_number = node->n_children; // Sentinel that does not equal left_child_number.
     if (may_find_right) {
-        right_child_number = key_right ? toku_ftnode_which_child (node, key_right, &brt->ft->cmp_descriptor, brt->ft->compare_fun) : node->n_children - 1;
+        right_child_number = key_right ? toku_ftnode_which_child (node, key_right, &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun) : node->n_children - 1;
     }
     uint64_t rows_per_child = estimated_num_rows / node->n_children;
     if (node->height == 0) {
-        keysrange_in_leaf_partition(brt, node, key_left, key_right, left_child_number, right_child_number,
+        keysrange_in_leaf_partition(ft_handle, node, key_left, key_right, left_child_number, right_child_number,
                                     rows_per_child, less, equal_left, middle, equal_right, greater, single_basement_node);
         *less += rows_per_child * left_child_number;
@@ -6002,12 +6001,12 @@ toku_ft_keysrange_internal (FT_HANDLE brt, FTNODE node,
         // do the child.
         struct ancestors next_ancestors = {node, left_child_number, ancestors};
         BLOCKNUM childblocknum = BP_BLOCKNUM(node, left_child_number);
-        uint32_t fullhash = compute_child_fullhash(brt->ft->cf, node, left_child_number);
+        uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, left_child_number);
         FTNODE childnode;
         bool msgs_applied = false;
         bool child_may_find_right = may_find_right && left_child_number == right_child_number;
         r = toku_pin_ftnode_for_query(
-            brt,
+            ft_handle,
             childblocknum,
             fullhash,
             unlockers,
@@ -6022,11 +6021,11 @@ toku_ft_keysrange_internal (FT_HANDLE brt, FTNODE node,
         if (r != TOKUDB_TRY_AGAIN) {
             assert_zero(r);
-            struct unlock_ftnode_extra unlock_extra = {brt,childnode,false};
+            struct unlock_ftnode_extra unlock_extra = {ft_handle,childnode,false};
             struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
const struct pivot_bounds next_bounds = next_pivot_keys(node, left_child_number, bounds); const struct pivot_bounds next_bounds = next_pivot_keys(node, left_child_number, bounds);
r = toku_ft_keysrange_internal(brt, childnode, key_left, key_right, child_may_find_right, r = toku_ft_keysrange_internal(ft_handle, childnode, key_left, key_right, child_may_find_right,
less, equal_left, middle, equal_right, greater, single_basement_node, less, equal_left, middle, equal_right, greater, single_basement_node,
rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, &next_bounds); rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, &next_bounds);
if (r != TOKUDB_TRY_AGAIN) { if (r != TOKUDB_TRY_AGAIN) {
@ -6040,14 +6039,14 @@ toku_ft_keysrange_internal (FT_HANDLE brt, FTNODE node,
} }
assert(unlockers->locked); assert(unlockers->locked);
toku_unpin_ftnode_read_only(brt->ft, childnode); toku_unpin_ftnode_read_only(ft_handle->ft, childnode);
} }
} }
} }
return r; return r;
} }
void toku_ft_keysrange(FT_HANDLE brt, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p) void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p)
// Effect: Return an estimate of the number of keys to the left, the number equal (to left key), number between keys, number equal to right key, and the number to the right of both keys. // Effect: Return an estimate of the number of keys to the left, the number equal (to left key), number between keys, number equal to right key, and the number to the right of both keys.
// The values are an estimate. // The values are an estimate.
// If you perform a keyrange on two keys that are in the same basement, equal_left, middle, and equal_right will be exact. // If you perform a keyrange on two keys that are in the same basement, equal_left, middle, and equal_right will be exact.
@ -6061,7 +6060,7 @@ void toku_ft_keysrange(FT_HANDLE brt, DBT* key_left, DBT* key_right, uint64_t *l
// Simplify internals by only supporting key_right != null when key_left != null // Simplify internals by only supporting key_right != null when key_left != null
// If key_right != null and key_left == null, then swap them and fix up numbers. // If key_right != null and key_left == null, then swap them and fix up numbers.
uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
toku_ft_keysrange(brt, key_right, nullptr, &less, &equal_left, &middle, &equal_right, &greater, middle_3_exact_p); toku_ft_keysrange(ft_handle, key_right, nullptr, &less, &equal_left, &middle, &equal_right, &greater, middle_3_exact_p);
*less_p = 0; *less_p = 0;
*equal_left_p = 0; *equal_left_p = 0;
*middle_p = less; *middle_p = less;
@ -6074,8 +6073,8 @@ void toku_ft_keysrange(FT_HANDLE brt, DBT* key_left, DBT* key_right, uint64_t *l
paranoid_invariant(!(!key_left && key_right)); paranoid_invariant(!(!key_left && key_right));
struct ftnode_fetch_extra min_bfe; struct ftnode_fetch_extra min_bfe;
struct ftnode_fetch_extra match_bfe; struct ftnode_fetch_extra match_bfe;
fill_bfe_for_min_read(&min_bfe, brt->ft); // read pivot keys but not message buffers fill_bfe_for_min_read(&min_bfe, ft_handle->ft); // read pivot keys but not message buffers
fill_bfe_for_keymatch(&match_bfe, brt->ft, key_left, key_right, false, false); // read basement node only if both keys in it. fill_bfe_for_keymatch(&match_bfe, ft_handle->ft, key_left, key_right, false, false); // read basement node only if both keys in it.
try_again: try_again:
{ {
uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
@ -6084,9 +6083,9 @@ try_again:
{ {
uint32_t fullhash; uint32_t fullhash;
CACHEKEY root_key; CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &fullhash); toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft_handle->ft,
root_key, root_key,
fullhash, fullhash,
&match_bfe, &match_bfe,
@ -6096,15 +6095,15 @@ try_again:
); );
} }
struct unlock_ftnode_extra unlock_extra = {brt,node,false}; struct unlock_ftnode_extra unlock_extra = {ft_handle,node,false};
struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL}; struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
{ {
int r; int r;
int64_t numrows = brt->ft->in_memory_stats.numrows; int64_t numrows = ft_handle->ft->in_memory_stats.numrows;
if (numrows < 0) if (numrows < 0)
numrows = 0; // prevent appearance of a negative number numrows = 0; // prevent appearance of a negative number
r = toku_ft_keysrange_internal (brt, node, key_left, key_right, true, r = toku_ft_keysrange_internal (ft_handle, node, key_left, key_right, true,
&less, &equal_left, &middle, &equal_right, &greater, &less, &equal_left, &middle, &equal_right, &greater,
&single_basement_node, numrows, &single_basement_node, numrows,
&min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, &infinite_bounds); &min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, &infinite_bounds);
@ -6120,7 +6119,7 @@ try_again:
invariant_zero(greater); invariant_zero(greater);
uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, greater2 = 0; uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, greater2 = 0;
bool ignore; bool ignore;
r = toku_ft_keysrange_internal (brt, node, key_right, nullptr, false, r = toku_ft_keysrange_internal (ft_handle, node, key_right, nullptr, false,
&less2, &equal_left2, &middle2, &equal_right2, &greater2, &less2, &equal_left2, &middle2, &equal_right2, &greater2,
&ignore, numrows, &ignore, numrows,
&min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, &infinite_bounds); &min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, &infinite_bounds);
@ -6150,7 +6149,7 @@ try_again:
} }
} }
assert(unlockers.locked); assert(unlockers.locked);
toku_unpin_ftnode_read_only(brt->ft, node); toku_unpin_ftnode_read_only(ft_handle->ft, node);
if (!key_right) { if (!key_right) {
paranoid_invariant_zero(equal_right); paranoid_invariant_zero(equal_right);
paranoid_invariant_zero(greater); paranoid_invariant_zero(greater);
@ -6330,16 +6329,16 @@ int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t s
} }
//Test-only wrapper for the old one-key range function //Test-only wrapper for the old one-key range function
void toku_ft_keyrange(FT_HANDLE brt, DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater) { void toku_ft_keyrange(FT_HANDLE ft_handle, DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater) {
uint64_t zero_equal_right, zero_greater; uint64_t zero_equal_right, zero_greater;
bool ignore; bool ignore;
toku_ft_keysrange(brt, key, nullptr, less, equal, greater, &zero_equal_right, &zero_greater, &ignore); toku_ft_keysrange(ft_handle, key, nullptr, less, equal, greater, &zero_equal_right, &zero_greater, &ignore);
invariant_zero(zero_equal_right); invariant_zero(zero_equal_right);
invariant_zero(zero_greater); invariant_zero(zero_greater);
} }
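// A hedged, caller-side sketch of the two-key estimate above (not part of this commit).
// It assumes an open FT_HANDLE and uses toku_fill_dbt the same way the rest of this file does.
static uint64_t example_estimate_rows_between(FT_HANDLE ft_h,
                                              const void *lo, uint32_t lolen,
                                              const void *hi, uint32_t hilen) {
    DBT key_left, key_right;
    toku_fill_dbt(&key_left, lo, lolen);
    toku_fill_dbt(&key_right, hi, hilen);
    uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
    bool middle_3_exact = false;
    toku_ft_keysrange(ft_h, &key_left, &key_right,
                      &less, &equal_left, &middle, &equal_right, &greater,
                      &middle_3_exact);
    // less/equal_left/middle/equal_right/greater partition the estimated row count;
    // middle_3_exact is true only when both keys land in the same basement node.
    return middle;
}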
void toku_ft_handle_stat64 (FT_HANDLE brt, TOKUTXN UU(txn), struct ftstat64_s *s) { void toku_ft_handle_stat64 (FT_HANDLE ft_handle, TOKUTXN UU(txn), struct ftstat64_s *s) {
toku_ft_stat64(brt->ft, s); toku_ft_stat64(ft_handle->ft, s);
} }
void toku_ft_handle_get_fractal_tree_info64(FT_HANDLE ft_h, struct ftinfo64 *s) { void toku_ft_handle_get_fractal_tree_info64(FT_HANDLE ft_h, struct ftinfo64 *s) {
@ -6352,16 +6351,16 @@ int toku_ft_handle_iterate_fractal_tree_block_map(FT_HANDLE ft_h, int (*iter)(ui
/* ********************* debugging dump ************************ */ /* ********************* debugging dump ************************ */
static int static int
toku_dump_ftnode (FILE *file, FT_HANDLE brt, BLOCKNUM blocknum, int depth, const DBT *lorange, const DBT *hirange) { toku_dump_ftnode (FILE *file, FT_HANDLE ft_handle, BLOCKNUM blocknum, int depth, const DBT *lorange, const DBT *hirange) {
int result=0; int result=0;
FTNODE node; FTNODE node;
toku_get_node_for_verify(blocknum, brt, &node); toku_get_node_for_verify(blocknum, ft_handle, &node);
result=toku_verify_ftnode(brt, brt->ft->h->max_msn_in_ft, brt->ft->h->max_msn_in_ft, false, node, -1, lorange, hirange, NULL, NULL, 0, 1, 0); result=toku_verify_ftnode(ft_handle, ft_handle->ft->h->max_msn_in_ft, ft_handle->ft->h->max_msn_in_ft, false, node, -1, lorange, hirange, NULL, NULL, 0, 1, 0);
uint32_t fullhash = toku_cachetable_hash(brt->ft->cf, blocknum); uint32_t fullhash = toku_cachetable_hash(ft_handle->ft->cf, blocknum);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft_handle->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft_handle->ft,
blocknum, blocknum,
fullhash, fullhash,
&bfe, &bfe,
@ -6416,25 +6415,25 @@ toku_dump_ftnode (FILE *file, FT_HANDLE brt, BLOCKNUM blocknum, int depth, const
char *CAST_FROM_VOIDP(key, node->childkeys[i-1].data); char *CAST_FROM_VOIDP(key, node->childkeys[i-1].data);
fprintf(file, "%*spivot %d len=%u %u\n", depth+1, "", i-1, node->childkeys[i-1].size, (unsigned)toku_dtoh32(*(int*)key)); fprintf(file, "%*spivot %d len=%u %u\n", depth+1, "", i-1, node->childkeys[i-1].size, (unsigned)toku_dtoh32(*(int*)key));
} }
toku_dump_ftnode(file, brt, BP_BLOCKNUM(node, i), depth+4, toku_dump_ftnode(file, ft_handle, BP_BLOCKNUM(node, i), depth+4,
(i==0) ? lorange : &node->childkeys[i-1], (i==0) ? lorange : &node->childkeys[i-1],
(i==node->n_children-1) ? hirange : &node->childkeys[i]); (i==node->n_children-1) ? hirange : &node->childkeys[i]);
} }
} }
} }
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
return result; return result;
} }
int toku_dump_ft (FILE *f, FT_HANDLE brt) { int toku_dump_ft (FILE *f, FT_HANDLE ft_handle) {
int r; int r;
assert(brt->ft); assert(ft_handle->ft);
toku_dump_translation_table(f, brt->ft->blocktable); toku_dump_translation_table(f, ft_handle->ft->blocktable);
{ {
uint32_t fullhash = 0; uint32_t fullhash = 0;
CACHEKEY root_key; CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &fullhash); toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
r = toku_dump_ftnode(f, brt, root_key, 0, 0, 0); r = toku_dump_ftnode(f, ft_handle, root_key, 0, 0, 0);
} }
return r; return r;
} }
@ -6529,23 +6528,23 @@ void toku_ft_unlink(FT_HANDLE handle) {
} }
int int
toku_ft_get_fragmentation(FT_HANDLE brt, TOKU_DB_FRAGMENTATION report) { toku_ft_get_fragmentation(FT_HANDLE ft_handle, TOKU_DB_FRAGMENTATION report) {
int r; int r;
int fd = toku_cachefile_get_fd(brt->ft->cf); int fd = toku_cachefile_get_fd(ft_handle->ft->cf);
toku_ft_lock(brt->ft); toku_ft_lock(ft_handle->ft);
int64_t file_size; int64_t file_size;
r = toku_os_get_file_size(fd, &file_size); r = toku_os_get_file_size(fd, &file_size);
if (r==0) { if (r==0) {
report->file_size_bytes = file_size; report->file_size_bytes = file_size;
toku_block_table_get_fragmentation_unlocked(brt->ft->blocktable, report); toku_block_table_get_fragmentation_unlocked(ft_handle->ft->blocktable, report);
} }
toku_ft_unlock(brt->ft); toku_ft_unlock(ft_handle->ft);
return r; return r;
} }
static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) { static bool is_empty_fast_iter (FT_HANDLE ft_handle, FTNODE node) {
if (node->height > 0) { if (node->height > 0) {
for (int childnum=0; childnum<node->n_children; childnum++) { for (int childnum=0; childnum<node->n_children; childnum++) {
if (toku_bnc_nbytesinbuf(BNC(node, childnum)) != 0) { if (toku_bnc_nbytesinbuf(BNC(node, childnum)) != 0) {
@ -6554,13 +6553,13 @@ static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
FTNODE childnode; FTNODE childnode;
{ {
BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum); BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum);
uint32_t fullhash = compute_child_fullhash(brt->ft->cf, node, childnum); uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, childnum);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft_handle->ft);
// don't need to pass in dependent nodes as we are not // don't need to pass in dependent nodes as we are not
// modifying nodes we are pinning // modifying nodes we are pinning
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft_handle->ft,
childblocknum, childblocknum,
fullhash, fullhash,
&bfe, &bfe,
@ -6569,8 +6568,8 @@ static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
true true
); );
} }
int child_is_empty = is_empty_fast_iter(brt, childnode); int child_is_empty = is_empty_fast_iter(ft_handle, childnode);
toku_unpin_ftnode(brt->ft, childnode); toku_unpin_ftnode(ft_handle->ft, childnode);
if (!child_is_empty) return 0; if (!child_is_empty) return 0;
} }
return 1; return 1;
@ -6585,7 +6584,7 @@ static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
} }
} }
bool toku_ft_is_empty_fast (FT_HANDLE brt) bool toku_ft_is_empty_fast (FT_HANDLE ft_handle)
// A fast check to see if the tree is empty. If there are any messages or leafentries, we consider the tree to be nonempty. It's possible that those // A fast check to see if the tree is empty. If there are any messages or leafentries, we consider the tree to be nonempty. It's possible that those
// messages and leafentries would all optimize away and that the tree is empty, but we'll say it is nonempty. // messages and leafentries would all optimize away and that the tree is empty, but we'll say it is nonempty.
{ {
@ -6593,11 +6592,11 @@ bool toku_ft_is_empty_fast (FT_HANDLE brt)
FTNODE node; FTNODE node;
{ {
CACHEKEY root_key; CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &fullhash); toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft_handle->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft_handle->ft,
root_key, root_key,
fullhash, fullhash,
&bfe, &bfe,
@ -6606,8 +6605,8 @@ bool toku_ft_is_empty_fast (FT_HANDLE brt)
true true
); );
} }
bool r = is_empty_fast_iter(brt, node); bool r = is_empty_fast_iter(ft_handle, node);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
return r; return r;
} }


@ -103,7 +103,7 @@ PATENT RIGHTS GRANT:
// A callback function is invoked with the key and the data. // A callback function is invoked with the key and the data.
// The pointers (to the bytevecs) must not be modified. The data must be copied out before the callback function returns. // The pointers (to the bytevecs) must not be modified. The data must be copied out before the callback function returns.
// Note: In the thread-safe version, the brt node remains locked while the callback function runs. So return soon, and don't call the BRT code from the callback function. // Note: In the thread-safe version, the ftnode remains locked while the callback function runs. So return soon, and don't call the ft code from the callback function.
// If the callback function returns a nonzero value (an error code), then that error code is returned from the get function itself. // If the callback function returns a nonzero value (an error code), then that error code is returned from the get function itself.
// The cursor object will have been updated (so that if result==0 the current value is the value being passed) // The cursor object will have been updated (so that if result==0 the current value is the value being passed)
// (If r!=0 then the cursor won't have been updated.) // (If r!=0 then the cursor won't have been updated.)
@ -141,9 +141,9 @@ void toku_ft_handle_set_fanout(FT_HANDLE, unsigned int fanout);
void toku_ft_handle_get_fanout(FT_HANDLE, unsigned int *fanout); void toku_ft_handle_get_fanout(FT_HANDLE, unsigned int *fanout);
void toku_ft_set_bt_compare(FT_HANDLE, ft_compare_func); void toku_ft_set_bt_compare(FT_HANDLE, ft_compare_func);
ft_compare_func toku_ft_get_bt_compare (FT_HANDLE brt); ft_compare_func toku_ft_get_bt_compare (FT_HANDLE ft_h);
void toku_ft_set_redirect_callback(FT_HANDLE brt, on_redirect_callback redir_cb, void* extra); void toku_ft_set_redirect_callback(FT_HANDLE ft_h, on_redirect_callback redir_cb, void* extra);
// How updates (update/insert/deletes) work: // How updates (update/insert/deletes) work:
// There are two flavors of upsertdels: Singleton and broadcast. // There are two flavors of upsertdels: Singleton and broadcast.
@ -181,7 +181,7 @@ void toku_ft_set_redirect_callback(FT_HANDLE brt, on_redirect_callback redir_cb,
// Implementation note: Acquires a write lock on the entire database. // Implementation note: Acquires a write lock on the entire database.
// This function works by sending a BROADCAST-UPDATE message containing // This function works by sending a BROADCAST-UPDATE message containing
// the key and the extra. // the key and the extra.
void toku_ft_set_update(FT_HANDLE brt, ft_update_func update_fun); void toku_ft_set_update(FT_HANDLE ft_h, ft_update_func update_fun);
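// A hedged sketch of wiring up the update path described above (not part of this commit).
// The exact ft_update_func signature is an assumption here -- it is not shown in this diff --
// so treat the callback shape and the set_val protocol as illustration only.
static int example_update_fun(DB *db, const DBT *key, const DBT *old_val, const DBT *extra,
                              void (*set_val)(const DBT *new_val, void *set_extra),
                              void *set_extra) {
    (void) db; (void) key; (void) old_val;
    // Treat `extra` as the replacement value and hand it back through set_val.
    set_val(extra, set_extra);
    return 0;
}
// Registration then needs only the declaration above:
//   toku_ft_set_update(ft_h, example_update_fun);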
int toku_ft_handle_open(FT_HANDLE, const char *fname_in_env, int toku_ft_handle_open(FT_HANDLE, const char *fname_in_env,
int is_create, int only_create, CACHETABLE ct, TOKUTXN txn) __attribute__ ((warn_unused_result)); int is_create, int only_create, CACHETABLE ct, TOKUTXN txn) __attribute__ ((warn_unused_result));
@ -208,60 +208,60 @@ toku_ft_handle_open_with_dict_id(
DICTIONARY_ID use_dictionary_id DICTIONARY_ID use_dictionary_id
) __attribute__ ((warn_unused_result)); ) __attribute__ ((warn_unused_result));
int toku_ft_lookup (FT_HANDLE brt, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); int toku_ft_lookup (FT_HANDLE ft_h, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
// Effect: Insert a key and data pair into a brt // Effect: Insert a key and data pair into an ft
void toku_ft_insert (FT_HANDLE brt, DBT *k, DBT *v, TOKUTXN txn); void toku_ft_insert (FT_HANDLE ft_h, DBT *k, DBT *v, TOKUTXN txn);
// Effect: Optimize the ft // Effect: Optimize the ft
void toku_ft_optimize (FT_HANDLE brt); void toku_ft_optimize (FT_HANDLE ft_h);
// Effect: Insert a key and data pair into a brt if the oplsn is newer than the brt lsn. This function is called during recovery. // Effect: Insert a key and data pair into an ft if the oplsn is newer than the ft's lsn. This function is called during recovery.
void toku_ft_maybe_insert (FT_HANDLE brt, DBT *k, DBT *v, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type); void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *k, DBT *v, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type);
// Effect: Send an update message into a brt. This function is called // Effect: Send an update message into an ft. This function is called
// during recovery. // during recovery.
void toku_ft_maybe_update(FT_HANDLE brt, const DBT *key, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging); void toku_ft_maybe_update(FT_HANDLE ft_h, const DBT *key, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging);
// Effect: Send a broadcasting update message into a brt. This function // Effect: Send a broadcasting update message into an ft. This function
// is called during recovery. // is called during recovery.
void toku_ft_maybe_update_broadcast(FT_HANDLE brt, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, bool is_resetting_op); void toku_ft_maybe_update_broadcast(FT_HANDLE ft_h, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, bool is_resetting_op);
void toku_ft_load_recovery(TOKUTXN txn, FILENUM old_filenum, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn); void toku_ft_load_recovery(TOKUTXN txn, FILENUM old_filenum, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn);
void toku_ft_load(FT_HANDLE brt, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *get_lsn); void toku_ft_load(FT_HANDLE ft_h, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *get_lsn);
void toku_ft_hot_index_recovery(TOKUTXN txn, FILENUMS filenums, int do_fsync, int do_log, LSN *hot_index_lsn); void toku_ft_hot_index_recovery(TOKUTXN txn, FILENUMS filenums, int do_fsync, int do_log, LSN *hot_index_lsn);
void toku_ft_hot_index(FT_HANDLE brt, TOKUTXN txn, FILENUMS filenums, int do_fsync, LSN *lsn); void toku_ft_hot_index(FT_HANDLE ft_h, TOKUTXN txn, FILENUMS filenums, int do_fsync, LSN *lsn);
void toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, uint32_t num_fts, const DBT *key, const DBT *val); void toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val);
void toku_ft_log_put (TOKUTXN txn, FT_HANDLE brt, const DBT *key, const DBT *val); void toku_ft_log_put (TOKUTXN txn, FT_HANDLE ft_h, const DBT *key, const DBT *val);
void toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, uint32_t num_fts, const DBT *key, const DBT *val); void toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val);
void toku_ft_log_del (TOKUTXN txn, FT_HANDLE brt, const DBT *key); void toku_ft_log_del (TOKUTXN txn, FT_HANDLE ft_h, const DBT *key);
// Effect: Delete a key from a brt // Effect: Delete a key from an ft
void toku_ft_delete (FT_HANDLE brt, DBT *k, TOKUTXN txn); void toku_ft_delete (FT_HANDLE ft_h, DBT *k, TOKUTXN txn);
// Effect: Delete a key from a brt if the oplsn is newer than the brt lsn. This function is called during recovery. // Effect: Delete a key from an ft if the oplsn is newer than the ft lsn. This function is called during recovery.
void toku_ft_maybe_delete (FT_HANDLE brt, DBT *k, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging); void toku_ft_maybe_delete (FT_HANDLE ft_h, DBT *k, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging);
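// A hedged sketch of the non-recovery insert/delete calls declared above (not part of this
// commit). The handle and transaction come from the caller; toku_fill_dbt is assumed to be
// available as in ft-ops.cc.
static void example_put_then_del(FT_HANDLE ft_h, TOKUTXN txn) {
    DBT k, v;
    toku_fill_dbt(&k, "key0", 4);
    toku_fill_dbt(&v, "val0", 4);
    toku_ft_insert(ft_h, &k, &v, txn);   // enqueue an insert message for (k, v)
    toku_ft_delete(ft_h, &k, txn);       // enqueue a delete message for the same key
}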
TXNID toku_ft_get_oldest_referenced_xid_estimate(FT_HANDLE ft_h); TXNID toku_ft_get_oldest_referenced_xid_estimate(FT_HANDLE ft_h);
TXN_MANAGER toku_ft_get_txn_manager(FT_HANDLE ft_h); TXN_MANAGER toku_ft_get_txn_manager(FT_HANDLE ft_h);
void toku_ft_send_insert(FT_HANDLE brt, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info); void toku_ft_send_insert(FT_HANDLE ft_h, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info);
void toku_ft_send_delete(FT_HANDLE brt, DBT *key, XIDS xids, txn_gc_info *gc_info); void toku_ft_send_delete(FT_HANDLE ft_h, DBT *key, XIDS xids, txn_gc_info *gc_info);
void toku_ft_send_commit_any(FT_HANDLE brt, DBT *key, XIDS xids, txn_gc_info *gc_info); void toku_ft_send_commit_any(FT_HANDLE ft_h, DBT *key, XIDS xids, txn_gc_info *gc_info);
int toku_close_ft_handle_nolsn (FT_HANDLE, char **error_string) __attribute__ ((warn_unused_result)); int toku_close_ft_handle_nolsn (FT_HANDLE, char **error_string) __attribute__ ((warn_unused_result));
int toku_dump_ft (FILE *,FT_HANDLE brt) __attribute__ ((warn_unused_result)); int toku_dump_ft (FILE *,FT_HANDLE ft_h) __attribute__ ((warn_unused_result));
extern int toku_ft_debug_mode; extern int toku_ft_debug_mode;
int toku_verify_ft (FT_HANDLE brt) __attribute__ ((warn_unused_result)); int toku_verify_ft (FT_HANDLE ft_h) __attribute__ ((warn_unused_result));
int toku_verify_ft_with_progress (FT_HANDLE brt, int (*progress_callback)(void *extra, float progress), void *extra, int verbose, int keep_going) __attribute__ ((warn_unused_result)); int toku_verify_ft_with_progress (FT_HANDLE ft_h, int (*progress_callback)(void *extra, float progress), void *extra, int verbose, int keep_going) __attribute__ ((warn_unused_result));
typedef struct ft_cursor *FT_CURSOR; typedef struct ft_cursor *FT_CURSOR;
int toku_ft_cursor (FT_HANDLE, FT_CURSOR*, TOKUTXN, bool, bool) __attribute__ ((warn_unused_result)); int toku_ft_cursor (FT_HANDLE, FT_CURSOR*, TOKUTXN, bool, bool) __attribute__ ((warn_unused_result));
void toku_ft_cursor_set_leaf_mode(FT_CURSOR); void toku_ft_cursor_set_leaf_mode(FT_CURSOR);
// Sets a boolean on the brt cursor that prevents unnecessary copying of // Sets a boolean on the ft cursor that prevents unnecessary copying of
// the cursor during a single query. // the cursor during a single query.
void toku_ft_cursor_set_temporary(FT_CURSOR); void toku_ft_cursor_set_temporary(FT_CURSOR);
void toku_ft_cursor_remove_restriction(FT_CURSOR); void toku_ft_cursor_remove_restriction(FT_CURSOR);
@ -298,8 +298,8 @@ enum ft_flags {
TOKU_DB_VALCMP_BUILTIN_13 = (1<<3), TOKU_DB_VALCMP_BUILTIN_13 = (1<<3),
}; };
void toku_ft_keyrange(FT_HANDLE brt, DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater); void toku_ft_keyrange(FT_HANDLE ft_h, DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater);
void toku_ft_keysrange(FT_HANDLE brt, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p); void toku_ft_keysrange(FT_HANDLE ft_h, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p);
int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *end_key, uint64_t actually_skipped, void *extra), void *cb_extra); int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *end_key, uint64_t actually_skipped, void *extra), void *cb_extra);
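// A hedged sketch of driving toku_ft_get_key_after_bytes as declared above (not part of this
// commit). The start key and skip length are arbitrary; the callback just records how far
// the tree actually skipped.
static void example_after_bytes_cb(const DBT *end_key, uint64_t actually_skipped, void *extra) {
    (void) end_key;
    *(uint64_t *) extra = actually_skipped;
}
static uint64_t example_skip_64k(FT_HANDLE ft_h, const DBT *start_key) {
    uint64_t skipped = 0;
    int r = toku_ft_get_key_after_bytes(ft_h, start_key, 1 << 16,
                                        example_after_bytes_cb, &skipped);
    return r == 0 ? skipped : 0;
}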
@ -341,9 +341,9 @@ void toku_maybe_preallocate_in_file (int fd, int64_t size, int64_t expected_size
// Effect: make the file bigger by either doubling it or growing it by 16MiB, whichever is less, until it is at least size // Effect: make the file bigger by either doubling it or growing it by 16MiB, whichever is less, until it is at least size
// Return 0 on success, otherwise an error number. // Return 0 on success, otherwise an error number.
int toku_ft_get_fragmentation(FT_HANDLE brt, TOKU_DB_FRAGMENTATION report) __attribute__ ((warn_unused_result)); int toku_ft_get_fragmentation(FT_HANDLE ft_h, TOKU_DB_FRAGMENTATION report) __attribute__ ((warn_unused_result));
bool toku_ft_is_empty_fast (FT_HANDLE brt) __attribute__ ((warn_unused_result)); bool toku_ft_is_empty_fast (FT_HANDLE ft_h) __attribute__ ((warn_unused_result));
// Effect: Return true if there are no messages or leaf entries in the tree. If so, it's empty. If there are messages or leaf entries, we say it's not empty // Effect: Return true if there are no messages or leaf entries in the tree. If so, it's empty. If there are messages or leaf entries, we say it's not empty
// even though if we were to optimize the tree it might turn out that they are empty. // even though if we were to optimize the tree it might turn out that they are empty.


@ -210,7 +210,7 @@ exit:
int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
// Effect: Deserialize the ft header. // Effect: Deserialize the ft header.
// We deserialize brt header only once and then share everything with all the brts. // We deserialize ft_header only once and then share everything with all the FTs.
{ {
int r; int r;
FT ft = NULL; FT ft = NULL;


@ -119,10 +119,10 @@ next_dummymsn(void) {
bool ignore_if_was_already_open; bool ignore_if_was_already_open;
int toku_testsetup_leaf(FT_HANDLE brt, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens) { int toku_testsetup_leaf(FT_HANDLE ft_handle, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens) {
FTNODE node; FTNODE node;
assert(testsetup_initialized); assert(testsetup_initialized);
toku_create_new_ftnode(brt, &node, 0, n_children); toku_create_new_ftnode(ft_handle, &node, 0, n_children);
int i; int i;
for (i=0; i<n_children; i++) { for (i=0; i<n_children; i++) {
BP_STATE(node,i) = PT_AVAIL; BP_STATE(node,i) = PT_AVAIL;
@ -134,15 +134,15 @@ int toku_testsetup_leaf(FT_HANDLE brt, BLOCKNUM *blocknum, int n_children, char
} }
*blocknum = node->thisnodename; *blocknum = node->thisnodename;
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
return 0; return 0;
} }
// Don't bother to clean up carefully if something goes wrong. (E.g., it's OK to have malloced stuff that hasn't been freed.) // Don't bother to clean up carefully if something goes wrong. (E.g., it's OK to have malloced stuff that hasn't been freed.)
int toku_testsetup_nonleaf (FT_HANDLE brt, int height, BLOCKNUM *blocknum, int n_children, BLOCKNUM *children, char **keys, int *keylens) { int toku_testsetup_nonleaf (FT_HANDLE ft_handle, int height, BLOCKNUM *blocknum, int n_children, BLOCKNUM *children, char **keys, int *keylens) {
FTNODE node; FTNODE node;
assert(testsetup_initialized); assert(testsetup_initialized);
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft_handle, &node, height, n_children);
int i; int i;
for (i=0; i<n_children; i++) { for (i=0; i<n_children; i++) {
BP_BLOCKNUM(node, i) = children[i]; BP_BLOCKNUM(node, i) = children[i];
@ -153,28 +153,28 @@ int toku_testsetup_nonleaf (FT_HANDLE brt, int height, BLOCKNUM *blocknum, int n
node->totalchildkeylens += keylens[i]; node->totalchildkeylens += keylens[i];
} }
*blocknum = node->thisnodename; *blocknum = node->thisnodename;
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
return 0; return 0;
} }
int toku_testsetup_root(FT_HANDLE brt, BLOCKNUM blocknum) { int toku_testsetup_root(FT_HANDLE ft_handle, BLOCKNUM blocknum) {
assert(testsetup_initialized); assert(testsetup_initialized);
brt->ft->h->root_blocknum = blocknum; ft_handle->ft->h->root_blocknum = blocknum;
return 0; return 0;
} }
int toku_testsetup_get_sersize(FT_HANDLE brt, BLOCKNUM diskoff) // Return the size on disk int toku_testsetup_get_sersize(FT_HANDLE ft_handle, BLOCKNUM diskoff) // Return the size on disk
{ {
assert(testsetup_initialized); assert(testsetup_initialized);
void *node_v; void *node_v;
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft_handle->ft);
int r = toku_cachetable_get_and_pin( int r = toku_cachetable_get_and_pin(
brt->ft->cf, diskoff, ft_handle->ft->cf, diskoff,
toku_cachetable_hash(brt->ft->cf, diskoff), toku_cachetable_hash(ft_handle->ft->cf, diskoff),
&node_v, &node_v,
NULL, NULL,
get_write_callbacks_for_node(brt->ft), get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback, toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback, toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback, toku_ftnode_pf_callback,
@ -184,25 +184,25 @@ int toku_testsetup_get_sersize(FT_HANDLE brt, BLOCKNUM diskoff) // Return the si
assert(r==0); assert(r==0);
FTNODE CAST_FROM_VOIDP(node, node_v); FTNODE CAST_FROM_VOIDP(node, node_v);
int size = toku_serialize_ftnode_size(node); int size = toku_serialize_ftnode_size(node);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
return size; return size;
} }
int toku_testsetup_insert_to_leaf (FT_HANDLE brt, BLOCKNUM blocknum, const char *key, int keylen, const char *val, int vallen) { int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const char *key, int keylen, const char *val, int vallen) {
void *node_v; void *node_v;
int r; int r;
assert(testsetup_initialized); assert(testsetup_initialized);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft_handle->ft);
r = toku_cachetable_get_and_pin( r = toku_cachetable_get_and_pin(
brt->ft->cf, ft_handle->ft->cf,
blocknum, blocknum,
toku_cachetable_hash(brt->ft->cf, blocknum), toku_cachetable_hash(ft_handle->ft->cf, blocknum),
&node_v, &node_v,
NULL, NULL,
get_write_callbacks_for_node(brt->ft), get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback, toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback, toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback, toku_ftnode_pf_callback,
@ -223,9 +223,9 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE brt, BLOCKNUM blocknum, const char
static size_t zero_flow_deltas[] = { 0, 0 }; static size_t zero_flow_deltas[] = { 0, 0 };
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true);
toku_ft_node_put_msg ( toku_ft_node_put_msg (
brt->ft->compare_fun, ft_handle->ft->compare_fun,
brt->ft->update_fun, ft_handle->ft->update_fun,
&brt->ft->cmp_descriptor, &ft_handle->ft->cmp_descriptor,
node, node,
-1, -1,
&msg, &msg,
@ -237,7 +237,7 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE brt, BLOCKNUM blocknum, const char
toku_verify_or_set_counts(node); toku_verify_or_set_counts(node);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
return 0; return 0;
} }
@ -265,21 +265,21 @@ toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t)
); );
} }
int toku_testsetup_insert_to_nonleaf (FT_HANDLE brt, BLOCKNUM blocknum, enum ft_msg_type msgtype, const char *key, int keylen, const char *val, int vallen) { int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, enum ft_msg_type msgtype, const char *key, int keylen, const char *val, int vallen) {
void *node_v; void *node_v;
int r; int r;
assert(testsetup_initialized); assert(testsetup_initialized);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft_handle->ft);
r = toku_cachetable_get_and_pin( r = toku_cachetable_get_and_pin(
brt->ft->cf, ft_handle->ft->cf,
blocknum, blocknum,
toku_cachetable_hash(brt->ft->cf, blocknum), toku_cachetable_hash(ft_handle->ft->cf, blocknum),
&node_v, &node_v,
NULL, NULL,
get_write_callbacks_for_node(brt->ft), get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback, toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback, toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback, toku_ftnode_pf_callback,
@ -293,19 +293,19 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE brt, BLOCKNUM blocknum, enum ft_
DBT k; DBT k;
int childnum = toku_ftnode_which_child(node, int childnum = toku_ftnode_which_child(node,
toku_fill_dbt(&k, key, keylen), toku_fill_dbt(&k, key, keylen),
&brt->ft->cmp_descriptor, brt->ft->compare_fun); &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun);
XIDS xids_0 = xids_get_root_xids(); XIDS xids_0 = xids_get_root_xids();
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
toku_bnc_insert_msg(BNC(node, childnum), key, keylen, val, vallen, msgtype, msn, xids_0, true, NULL, testhelper_string_key_cmp); toku_bnc_insert_msg(BNC(node, childnum), key, keylen, val, vallen, msgtype, msn, xids_0, true, NULL, testhelper_string_key_cmp);
// Hack to get the test working. The problem is that this test // Hack to get the test working. The problem is that this test
// is directly queueing something in a FIFO instead of // is directly queueing something in a FIFO instead of
// using brt APIs. // using ft APIs.
node->max_msn_applied_to_node_on_disk = msn; node->max_msn_applied_to_node_on_disk = msn;
node->dirty = 1; node->dirty = 1;
// Also hack max_msn_in_ft // Also hack max_msn_in_ft
brt->ft->h->max_msn_in_ft = msn; ft_handle->ft->h->max_msn_in_ft = msn;
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
return 0; return 0;
} }


@ -89,7 +89,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
/* Verify a BRT. */ /* Verify an FT. */
/* Check: /* Check:
* The tree is of uniform depth (and the height is correct at every node) * The tree is of uniform depth (and the height is correct at every node)
* For each pivot key: the max of the stuff to the left is <= the pivot key < the min of the stuff to the right. * For each pivot key: the max of the stuff to the left is <= the pivot key < the min of the stuff to the right.
@ -102,26 +102,26 @@ PATENT RIGHTS GRANT:
#include "ft.h" #include "ft.h"
static int static int
compare_pairs (FT_HANDLE brt, const DBT *a, const DBT *b) { compare_pairs (FT_HANDLE ft_handle, const DBT *a, const DBT *b) {
FAKE_DB(db, &brt->ft->cmp_descriptor); FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
int cmp = brt->ft->compare_fun(&db, a, b); int cmp = ft_handle->ft->compare_fun(&db, a, b);
return cmp; return cmp;
} }
static int static int
compare_pair_to_key (FT_HANDLE brt, const DBT *a, bytevec key, ITEMLEN keylen) { compare_pair_to_key (FT_HANDLE ft_handle, const DBT *a, bytevec key, ITEMLEN keylen) {
DBT y; DBT y;
FAKE_DB(db, &brt->ft->cmp_descriptor); FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
int cmp = brt->ft->compare_fun(&db, a, toku_fill_dbt(&y, key, keylen)); int cmp = ft_handle->ft->compare_fun(&db, a, toku_fill_dbt(&y, key, keylen));
return cmp; return cmp;
} }
static int static int
verify_msg_in_child_buffer(FT_HANDLE brt, enum ft_msg_type type, MSN msn, bytevec key, ITEMLEN keylen, bytevec UU(data), ITEMLEN UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, bytevec key, ITEMLEN keylen, bytevec UU(data), ITEMLEN UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot)
__attribute__((warn_unused_result)); __attribute__((warn_unused_result));
static int static int
verify_msg_in_child_buffer(FT_HANDLE brt, enum ft_msg_type type, MSN msn, bytevec key, ITEMLEN keylen, bytevec UU(data), ITEMLEN UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) { verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, bytevec key, ITEMLEN keylen, bytevec UU(data), ITEMLEN UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) {
int result = 0; int result = 0;
if (msn.msn == ZERO_MSN.msn) if (msn.msn == ZERO_MSN.msn)
result = EINVAL; result = EINVAL;
@ -135,12 +135,12 @@ verify_msg_in_child_buffer(FT_HANDLE brt, enum ft_msg_type type, MSN msn, byteve
case FT_COMMIT_ANY: case FT_COMMIT_ANY:
// verify key in bounds // verify key in bounds
if (lesser_pivot) { if (lesser_pivot) {
int compare = compare_pair_to_key(brt, lesser_pivot, key, keylen); int compare = compare_pair_to_key(ft_handle, lesser_pivot, key, keylen);
if (compare >= 0) if (compare >= 0)
result = EINVAL; result = EINVAL;
} }
if (result == 0 && greatereq_pivot) { if (result == 0 && greatereq_pivot) {
int compare = compare_pair_to_key(brt, greatereq_pivot, key, keylen); int compare = compare_pair_to_key(ft_handle, greatereq_pivot, key, keylen);
if (compare < 0) if (compare < 0)
result = EINVAL; result = EINVAL;
} }
@ -243,7 +243,7 @@ int verify_marked_messages(const int32_t &offset, const uint32_t UU(idx), struct
template<typename verify_omt_t> template<typename verify_omt_t>
static int static int
verify_sorted_by_key_msn(FT_HANDLE brt, FIFO fifo, const verify_omt_t &mt) { verify_sorted_by_key_msn(FT_HANDLE ft_handle, FIFO fifo, const verify_omt_t &mt) {
int result = 0; int result = 0;
size_t last_offset = 0; size_t last_offset = 0;
for (uint32_t i = 0; i < mt.size(); i++) { for (uint32_t i = 0; i < mt.size(); i++) {
@ -253,8 +253,8 @@ verify_sorted_by_key_msn(FT_HANDLE brt, FIFO fifo, const verify_omt_t &mt) {
if (i > 0) { if (i > 0) {
struct toku_fifo_entry_key_msn_cmp_extra extra; struct toku_fifo_entry_key_msn_cmp_extra extra;
ZERO_STRUCT(extra); ZERO_STRUCT(extra);
extra.desc = &brt->ft->cmp_descriptor; extra.desc = &ft_handle->ft->cmp_descriptor;
extra.cmp = brt->ft->compare_fun; extra.cmp = ft_handle->ft->compare_fun;
extra.fifo = fifo; extra.fifo = fifo;
if (toku_fifo_entry_key_msn_cmp(extra, last_offset, offset) >= 0) { if (toku_fifo_entry_key_msn_cmp(extra, last_offset, offset) >= 0) {
result = TOKUDB_NEEDS_REPAIR; result = TOKUDB_NEEDS_REPAIR;
@ -268,11 +268,11 @@ verify_sorted_by_key_msn(FT_HANDLE brt, FIFO fifo, const verify_omt_t &mt) {
template<typename count_omt_t> template<typename count_omt_t>
static int static int
count_eq_key_msn(FT_HANDLE brt, FIFO fifo, const count_omt_t &mt, const DBT *key, MSN msn) { count_eq_key_msn(FT_HANDLE ft_handle, FIFO fifo, const count_omt_t &mt, const DBT *key, MSN msn) {
struct toku_fifo_entry_key_msn_heaviside_extra extra; struct toku_fifo_entry_key_msn_heaviside_extra extra;
ZERO_STRUCT(extra); ZERO_STRUCT(extra);
extra.desc = &brt->ft->cmp_descriptor; extra.desc = &ft_handle->ft->cmp_descriptor;
extra.cmp = brt->ft->compare_fun; extra.cmp = ft_handle->ft->compare_fun;
extra.fifo = fifo; extra.fifo = fifo;
extra.key = key; extra.key = key;
extra.msn = msn; extra.msn = msn;
@ -290,15 +290,15 @@ count_eq_key_msn(FT_HANDLE brt, FIFO fifo, const count_omt_t &mt, const DBT *key
void void
toku_get_node_for_verify( toku_get_node_for_verify(
BLOCKNUM blocknum, BLOCKNUM blocknum,
FT_HANDLE brt, FT_HANDLE ft_handle,
FTNODE* nodep FTNODE* nodep
) )
{ {
uint32_t fullhash = toku_cachetable_hash(brt->ft->cf, blocknum); uint32_t fullhash = toku_cachetable_hash(ft_handle->ft->cf, blocknum);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft_handle->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft_handle->ft,
blocknum, blocknum,
fullhash, fullhash,
&bfe, &bfe,
@ -309,7 +309,7 @@ toku_get_node_for_verify(
} }
static int static int
toku_verify_ftnode_internal(FT_HANDLE brt, toku_verify_ftnode_internal(FT_HANDLE ft_handle,
MSN rootmsn, MSN parentmsn, bool messages_exist_above, MSN rootmsn, MSN parentmsn, bool messages_exist_above,
FTNODE node, int height, FTNODE node, int height,
const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
@ -332,17 +332,17 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
} }
// Verify that all the pivot keys are in order. // Verify that all the pivot keys are in order.
for (int i = 0; i < node->n_children-2; i++) { for (int i = 0; i < node->n_children-2; i++) {
int compare = compare_pairs(brt, &node->childkeys[i], &node->childkeys[i+1]); int compare = compare_pairs(ft_handle, &node->childkeys[i], &node->childkeys[i+1]);
VERIFY_ASSERTION(compare < 0, i, "Value is >= the next value"); VERIFY_ASSERTION(compare < 0, i, "Value is >= the next value");
} }
// Verify that all the pivot keys are lesser_pivot < pivot <= greatereq_pivot // Verify that all the pivot keys are lesser_pivot < pivot <= greatereq_pivot
for (int i = 0; i < node->n_children-1; i++) { for (int i = 0; i < node->n_children-1; i++) {
if (lesser_pivot) { if (lesser_pivot) {
int compare = compare_pairs(brt, lesser_pivot, &node->childkeys[i]); int compare = compare_pairs(ft_handle, lesser_pivot, &node->childkeys[i]);
VERIFY_ASSERTION(compare < 0, i, "Pivot is >= the lower-bound pivot"); VERIFY_ASSERTION(compare < 0, i, "Pivot is >= the lower-bound pivot");
} }
if (greatereq_pivot) { if (greatereq_pivot) {
int compare = compare_pairs(brt, greatereq_pivot, &node->childkeys[i]); int compare = compare_pairs(ft_handle, greatereq_pivot, &node->childkeys[i]);
VERIFY_ASSERTION(compare >= 0, i, "Pivot is < the upper-bound pivot"); VERIFY_ASSERTION(compare >= 0, i, "Pivot is < the upper-bound pivot");
} }
} }
@ -354,12 +354,12 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
MSN last_msn = ZERO_MSN; MSN last_msn = ZERO_MSN;
// Verify that messages in the buffers are in the right place. // Verify that messages in the buffers are in the right place.
NONLEAF_CHILDINFO bnc = BNC(node, i); NONLEAF_CHILDINFO bnc = BNC(node, i);
VERIFY_ASSERTION(verify_sorted_by_key_msn(brt, bnc->buffer, bnc->fresh_message_tree) == 0, i, "fresh_message_tree"); VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, bnc->buffer, bnc->fresh_message_tree) == 0, i, "fresh_message_tree");
VERIFY_ASSERTION(verify_sorted_by_key_msn(brt, bnc->buffer, bnc->stale_message_tree) == 0, i, "stale_message_tree"); VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, bnc->buffer, bnc->stale_message_tree) == 0, i, "stale_message_tree");
FIFO_ITERATE(bnc->buffer, key, keylen, data, datalen, itype, msn, xid, is_fresh, FIFO_ITERATE(bnc->buffer, key, keylen, data, datalen, itype, msn, xid, is_fresh,
({ ({
enum ft_msg_type type = (enum ft_msg_type) itype; enum ft_msg_type type = (enum ft_msg_type) itype;
int r = verify_msg_in_child_buffer(brt, type, msn, key, keylen, data, datalen, xid, int r = verify_msg_in_child_buffer(ft_handle, type, msn, key, keylen, data, datalen, xid,
curr_less_pivot, curr_less_pivot,
curr_geq_pivot); curr_geq_pivot);
VERIFY_ASSERTION(r==0, i, "A message in the buffer is out of place"); VERIFY_ASSERTION(r==0, i, "A message in the buffer is out of place");
@ -370,7 +370,7 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
DBT keydbt; DBT keydbt;
toku_fill_dbt(&keydbt, key, keylen); toku_fill_dbt(&keydbt, key, keylen);
int total_count = 0; int total_count = 0;
count = count_eq_key_msn(brt, bnc->buffer, bnc->fresh_message_tree, toku_fill_dbt(&keydbt, key, keylen), msn); count = count_eq_key_msn(ft_handle, bnc->buffer, bnc->fresh_message_tree, toku_fill_dbt(&keydbt, key, keylen), msn);
total_count += count; total_count += count;
if (is_fresh) { if (is_fresh) {
VERIFY_ASSERTION(count == 1, i, "a fresh message was not found in the fresh message tree"); VERIFY_ASSERTION(count == 1, i, "a fresh message was not found in the fresh message tree");
@ -378,7 +378,7 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
VERIFY_ASSERTION(count == 0, i, "a stale message was found in the fresh message tree"); VERIFY_ASSERTION(count == 0, i, "a stale message was found in the fresh message tree");
} }
VERIFY_ASSERTION(count <= 1, i, "a message was found multiple times in the fresh message tree"); VERIFY_ASSERTION(count <= 1, i, "a message was found multiple times in the fresh message tree");
count = count_eq_key_msn(brt, bnc->buffer, bnc->stale_message_tree, &keydbt, msn); count = count_eq_key_msn(ft_handle, bnc->buffer, bnc->stale_message_tree, &keydbt, msn);
total_count += count; total_count += count;
if (is_fresh) { if (is_fresh) {
@ -426,16 +426,16 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
VERIFY_ASSERTION((rootmsn.msn >= this_msn.msn), 0, "leaf may have latest msn, but cannot be greater than root msn"); VERIFY_ASSERTION((rootmsn.msn >= this_msn.msn), 0, "leaf may have latest msn, but cannot be greater than root msn");
DBT kdbt = get_ith_key_dbt(bn, j); DBT kdbt = get_ith_key_dbt(bn, j);
if (curr_less_pivot) { if (curr_less_pivot) {
int compare = compare_pairs(brt, curr_less_pivot, &kdbt); int compare = compare_pairs(ft_handle, curr_less_pivot, &kdbt);
VERIFY_ASSERTION(compare < 0, j, "The leafentry is >= the lower-bound pivot"); VERIFY_ASSERTION(compare < 0, j, "The leafentry is >= the lower-bound pivot");
} }
if (curr_geq_pivot) { if (curr_geq_pivot) {
int compare = compare_pairs(brt, curr_geq_pivot, &kdbt); int compare = compare_pairs(ft_handle, curr_geq_pivot, &kdbt);
VERIFY_ASSERTION(compare >= 0, j, "The leafentry is < the upper-bound pivot"); VERIFY_ASSERTION(compare >= 0, j, "The leafentry is < the upper-bound pivot");
} }
if (0 < j) { if (0 < j) {
DBT prev_key_dbt = get_ith_key_dbt(bn, j-1); DBT prev_key_dbt = get_ith_key_dbt(bn, j-1);
int compare = compare_pairs(brt, &prev_key_dbt, &kdbt); int compare = compare_pairs(ft_handle, &prev_key_dbt, &kdbt);
VERIFY_ASSERTION(compare < 0, j, "Adjacent leafentries are out of order"); VERIFY_ASSERTION(compare < 0, j, "Adjacent leafentries are out of order");
} }
} }
@ -449,7 +449,7 @@ done:
// input is a pinned node; on exit, the node is unpinned // input is a pinned node; on exit, the node is unpinned
int int
toku_verify_ftnode (FT_HANDLE brt, toku_verify_ftnode (FT_HANDLE ft_handle,
MSN rootmsn, MSN parentmsn, bool messages_exist_above, MSN rootmsn, MSN parentmsn, bool messages_exist_above,
FTNODE node, int height, FTNODE node, int height,
const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
@ -469,15 +469,15 @@ toku_verify_ftnode (FT_HANDLE brt,
// Otherwise we'll just do the next call // Otherwise we'll just do the next call
result = toku_verify_ftnode_internal( result = toku_verify_ftnode_internal(
brt, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, ft_handle, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot,
verbose, keep_going_on_failure, false); verbose, keep_going_on_failure, false);
if (result != 0 && (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR)) goto done; if (result != 0 && (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR)) goto done;
} }
if (node->height > 0) { if (node->height > 0) {
toku_move_ftnode_messages_to_stale(brt->ft, node); toku_move_ftnode_messages_to_stale(ft_handle->ft, node);
} }
result2 = toku_verify_ftnode_internal( result2 = toku_verify_ftnode_internal(
brt, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, ft_handle, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot,
verbose, keep_going_on_failure, true); verbose, keep_going_on_failure, true);
if (result == 0) { if (result == 0) {
result = result2; result = result2;
@ -488,8 +488,8 @@ toku_verify_ftnode (FT_HANDLE brt,
if (recurse && node->height > 0) { if (recurse && node->height > 0) {
for (int i = 0; i < node->n_children; i++) { for (int i = 0; i < node->n_children; i++) {
FTNODE child_node; FTNODE child_node;
toku_get_node_for_verify(BP_BLOCKNUM(node, i), brt, &child_node); toku_get_node_for_verify(BP_BLOCKNUM(node, i), ft_handle, &child_node);
int r = toku_verify_ftnode(brt, rootmsn, this_msn, messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0, int r = toku_verify_ftnode(ft_handle, rootmsn, this_msn, messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0,
child_node, node->height-1, child_node, node->height-1,
(i==0) ? lesser_pivot : &node->childkeys[i-1], (i==0) ? lesser_pivot : &node->childkeys[i-1],
(i==node->n_children-1) ? greatereq_pivot : &node->childkeys[i], (i==node->n_children-1) ? greatereq_pivot : &node->childkeys[i],
@ -502,7 +502,7 @@ toku_verify_ftnode (FT_HANDLE brt,
} }
} }
done: done:
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft_handle->ft, node);
if (result == 0 && progress_callback) if (result == 0 && progress_callback)
result = progress_callback(progress_extra, 0.0); result = progress_callback(progress_extra, 0.0);
@ -511,26 +511,26 @@ done:
} }
int int
toku_verify_ft_with_progress (FT_HANDLE brt, int (*progress_callback)(void *extra, float progress), void *progress_extra, int verbose, int keep_on_going) { toku_verify_ft_with_progress (FT_HANDLE ft_handle, int (*progress_callback)(void *extra, float progress), void *progress_extra, int verbose, int keep_on_going) {
assert(brt->ft); assert(ft_handle->ft);
FTNODE root_node = NULL; FTNODE root_node = NULL;
{ {
uint32_t root_hash; uint32_t root_hash;
CACHEKEY root_key; CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &root_hash); toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &root_hash);
toku_get_node_for_verify(root_key, brt, &root_node); toku_get_node_for_verify(root_key, ft_handle, &root_node);
} }
int r = toku_verify_ftnode(brt, brt->ft->h->max_msn_in_ft, brt->ft->h->max_msn_in_ft, false, root_node, -1, NULL, NULL, progress_callback, progress_extra, 1, verbose, keep_on_going); int r = toku_verify_ftnode(ft_handle, ft_handle->ft->h->max_msn_in_ft, ft_handle->ft->h->max_msn_in_ft, false, root_node, -1, NULL, NULL, progress_callback, progress_extra, 1, verbose, keep_on_going);
if (r == 0) { if (r == 0) {
toku_ft_lock(brt->ft); toku_ft_lock(ft_handle->ft);
brt->ft->h->time_of_last_verification = time(NULL); ft_handle->ft->h->time_of_last_verification = time(NULL);
brt->ft->h->dirty = 1; ft_handle->ft->h->dirty = 1;
toku_ft_unlock(brt->ft); toku_ft_unlock(ft_handle->ft);
} }
return r; return r;
} }
int int
toku_verify_ft (FT_HANDLE brt) { toku_verify_ft (FT_HANDLE ft_handle) {
return toku_verify_ft_with_progress(brt, NULL, NULL, 0, 0); return toku_verify_ft_with_progress(ft_handle, NULL, NULL, 0, 0);
} }
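The verify entry points above take a caller-supplied progress callback and a keep_on_going flag. As a reading aid (not part of this commit), a minimal caller sketch, assuming the usual ft-internal headers and an already-open handle:

// Illustrative caller-supplied progress callback for toku_verify_ft_with_progress.
// Its nonzero return value is propagated as the verify result (see the call above).
static int example_verify_progress(void *extra, float progress) {
    (void) extra;
    fprintf(stderr, "verify progress: %f\n", progress);
    return 0;
}

// Hypothetical caller of the two entry points defined above.
static void example_verify(FT_HANDLE ft_handle) {
    // Quick check: no progress reporting, not verbose, stop on the first failure.
    int r = toku_verify_ft(ft_handle);
    assert(r == 0);
    // Verbose check that keeps going on TOKUDB_NEEDS_REPAIR and reports progress.
    r = toku_verify_ft_with_progress(ft_handle, example_verify_progress, NULL,
                                     1 /*verbose*/, 1 /*keep_on_going*/);
    assert(r == 0);
}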

View file

@ -465,8 +465,8 @@ void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn) {
*ftp = ft; *ftp = ft;
} }
// TODO: (Zardosht) get rid of brt parameter // TODO: (Zardosht) get rid of ft parameter
int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_acceptable_lsn, FT *header) int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN max_acceptable_lsn, FT *header)
// If the cachefile already has the header, then just get it. // If the cachefile already has the header, then just get it.
// If the cachefile has not been initialized, then don't modify anything. // If the cachefile has not been initialized, then don't modify anything.
// max_acceptable_lsn is the latest acceptable checkpointed version of the file. // max_acceptable_lsn is the latest acceptable checkpointed version of the file.
@ -475,8 +475,8 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_ac
FT h; FT h;
if ((h = (FT) toku_cachefile_get_userdata(cf))!=0) { if ((h = (FT) toku_cachefile_get_userdata(cf))!=0) {
*header = h; *header = h;
assert(brt->options.update_fun == h->update_fun); assert(ft_handle->options.update_fun == h->update_fun);
assert(brt->options.compare_fun == h->compare_fun); assert(ft_handle->options.compare_fun == h->compare_fun);
return 0; return 0;
} }
} }
@ -494,8 +494,8 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_ac
// GCC 4.8 seems to get confused by the gotos in the deserialize code and thinks h may be uninitialized. // GCC 4.8 seems to get confused by the gotos in the deserialize code and thinks h may be uninitialized.
invariant_notnull(h); invariant_notnull(h);
h->cf = cf; h->cf = cf;
h->compare_fun = brt->options.compare_fun; h->compare_fun = ft_handle->options.compare_fun;
h->update_fun = brt->options.update_fun; h->update_fun = ft_handle->options.update_fun;
toku_cachefile_set_userdata(cf, toku_cachefile_set_userdata(cf,
(void*)h, (void*)h,
ft_log_fassociate_during_checkpoint, ft_log_fassociate_during_checkpoint,
@ -557,13 +557,13 @@ FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h) {
return ft_handle_ret; return ft_handle_ret;
} }
// Purpose: set fields in brt_header to capture accountability info for start of HOT optimize. // Purpose: set fields in ft_header to capture accountability info for start of HOT optimize.
// Note: HOT accountability variables in header are modified only while holding header lock. // Note: HOT accountability variables in header are modified only while holding header lock.
// (Header lock is really needed for touching the dirty bit, but it's useful and // (Header lock is really needed for touching the dirty bit, but it's useful and
// convenient here for keeping the HOT variables threadsafe.) // convenient here for keeping the HOT variables threadsafe.)
void void
toku_ft_note_hot_begin(FT_HANDLE brt) { toku_ft_note_hot_begin(FT_HANDLE ft_handle) {
FT ft = brt->ft; FT ft = ft_handle->ft;
time_t now = time(NULL); time_t now = time(NULL);
// hold lock around setting and clearing of dirty bit // hold lock around setting and clearing of dirty bit
@ -576,11 +576,11 @@ toku_ft_note_hot_begin(FT_HANDLE brt) {
} }
// Purpose: set fields in brt_header to capture accountability info for end of HOT optimize. // Purpose: set fields in ft_header to capture accountability info for end of HOT optimize.
// Note: See note for toku_ft_note_hot_begin(). // Note: See note for toku_ft_note_hot_begin().
void void
toku_ft_note_hot_complete(FT_HANDLE brt, bool success, MSN msn_at_start_of_hot) { toku_ft_note_hot_complete(FT_HANDLE ft_handle, bool success, MSN msn_at_start_of_hot) {
FT ft = brt->ft; FT ft = ft_handle->ft;
time_t now = time(NULL); time_t now = time(NULL);
toku_ft_lock(ft); toku_ft_lock(ft);
@ -626,7 +626,7 @@ toku_ft_init(FT ft,
ft->h->checkpoint_lsn = checkpoint_lsn; ft->h->checkpoint_lsn = checkpoint_lsn;
} }
// Open a brt for use by redirect. The new brt must have the same dict_id as the old_ft passed in. (FILENUM is assigned by the ft_handle_open() function.) // Open an ft for use by redirect. The new ft must have the same dict_id as the old_ft passed in. (FILENUM is assigned by the ft_handle_open() function.)
static int static int
ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_h) { ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_h) {
FT_HANDLE t; FT_HANDLE t;
@ -664,9 +664,9 @@ dictionary_redirect_internal(const char *dst_fname_in_env, FT src_h, TOKUTXN txn
FT dst_h = NULL; FT dst_h = NULL;
struct toku_list *list; struct toku_list *list;
// open a dummy brt based on // open a dummy ft based on
// dst_fname_in_env to get the header // dst_fname_in_env to get the header
// then we will change all the brts to have // then we will change all the fts to have
// their headers point to dst_h instead of src_h // their headers point to dst_h instead of src_h
FT_HANDLE tmp_dst_ft = NULL; FT_HANDLE tmp_dst_ft = NULL;
r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_h); r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_h);
@ -680,7 +680,7 @@ dictionary_redirect_internal(const char *dst_fname_in_env, FT src_h, TOKUTXN txn
assert(dst_filenum.fileid!=FILENUM_NONE.fileid); assert(dst_filenum.fileid!=FILENUM_NONE.fileid);
assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file. assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file.
// for each live brt, brt->ft is currently src_h // for each live ft_handle, ft_handle->ft is currently src_h
// we want to change it to dummy_dst // we want to change it to dummy_dst
toku_ft_grab_reflock(src_h); toku_ft_grab_reflock(src_h);
while (!toku_list_empty(&src_h->live_ft_handles)) { while (!toku_list_empty(&src_h->live_ft_handles)) {
@ -720,7 +720,7 @@ toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) {
FILENUM new_filenum = toku_cachefile_filenum(new_h->cf); FILENUM new_filenum = toku_cachefile_filenum(new_h->cf);
assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file. assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file.
//No living brts in old header. //No living fts in old header.
toku_ft_grab_reflock(old_h); toku_ft_grab_reflock(old_h);
assert(toku_list_empty(&old_h->live_ft_handles)); assert(toku_list_empty(&old_h->live_ft_handles));
toku_ft_release_reflock(old_h); toku_ft_release_reflock(old_h);
@ -738,13 +738,13 @@ toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) {
/**** /****
* on redirect or abort: * on redirect or abort:
* if redirect txn_note_doing_work(txn) * if redirect txn_note_doing_work(txn)
* if redirect connect src brt to txn (txn modified this brt) * if redirect connect src ft to txn (txn modified this ft)
* for each src brt * for each src ft
* open brt to dst file (create new brt struct) * open ft to dst file (create new ft struct)
* if redirect connect dst brt to txn * if redirect connect dst ft to txn
* redirect db to new brt * redirect db to new ft
* redirect cursors to new brt * redirect cursors to new ft
* close all src brts * close all src fts
* if redirect make rollback log entry * if redirect make rollback log entry
* *
* on commit: * on commit:
@ -756,21 +756,21 @@ int
toku_dictionary_redirect (const char *dst_fname_in_env, FT_HANDLE old_ft_h, TOKUTXN txn) { toku_dictionary_redirect (const char *dst_fname_in_env, FT_HANDLE old_ft_h, TOKUTXN txn) {
// Input args: // Input args:
// new file name for dictionary (relative to env) // new file name for dictionary (relative to env)
// old_ft_h is a live, open brt handle (part of a {DB, BRT} pair) that currently refers to the old dictionary file. // old_ft_h is a live, open ft handle (part of a {DB, FT_HANDLE} pair) that currently refers to the old dictionary file.
// (old_ft_h may be one of many handles to the dictionary.) // (old_ft_h may be one of many handles to the dictionary.)
// txn that created the loader // txn that created the loader
// Requires: // Requires:
// multi operation lock is held. // multi operation lock is held.
// The brt is open (which implies there can be no zombies). // The ft is open (which implies there can be no zombies).
// The new file must be a valid dictionary. // The new file must be a valid dictionary.
// The block size and flags in the new file must match the existing BRT. // The block size and flags in the new file must match the existing FT.
// The new file must already have its descriptor in it (and it must match the existing descriptor). // The new file must already have its descriptor in it (and it must match the existing descriptor).
// Effect: // Effect:
// Open new FTs (and related header and cachefile) to the new dictionary file with a new FILENUM. // Open new FTs (and related header and cachefile) to the new dictionary file with a new FILENUM.
// Redirect all DBs that point to brts that point to the old file to point to brts that point to the new file. // Redirect all DBs that point to fts that point to the old file to point to fts that point to the new file.
// Copy the dictionary id (dict_id) from the header of the original file to the header of the new file. // Copy the dictionary id (dict_id) from the header of the original file to the header of the new file.
// Create a rollback log entry. // Create a rollback log entry.
// The original BRT, header, cachefile and file remain unchanged. They will be cleaned up on commit. // The original FT, header, cachefile and file remain unchanged. They will be cleaned up on commit.
// If the txn aborts, then this operation will be undone // If the txn aborts, then this operation will be undone
int r; int r;
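To make the redirect contract above concrete, a hypothetical caller sketch (not from this commit), assuming the multi operation lock is already held and that dst_fname_in_env names a valid dictionary with a matching descriptor, block size, and flags:

// Hypothetical caller: redirect an open handle to a freshly loaded file.
// Preconditions are exactly the Requires clause above; nothing extra is assumed here.
static int example_redirect_after_load(const char *dst_fname_in_env,
                                        FT_HANDLE old_ft_h,
                                        TOKUTXN txn) {
    int r = toku_dictionary_redirect(dst_fname_in_env, old_ft_h, txn);
    // On success, every handle over the old file now points at the new file and a
    // rollback log entry exists; if txn later aborts, the redirect is undone.
    return r;
}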

View file

@ -113,7 +113,7 @@ void toku_ft_release_reflock(FT ft);
void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn); void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn);
void toku_ft_free (FT h); void toku_ft_free (FT h);
int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_acceptable_lsn, FT *header); int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_h, CACHEFILE cf, LSN max_acceptable_lsn, FT *header);
void toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live); void toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live);
bool toku_ft_needed_unlocked(FT ft); bool toku_ft_needed_unlocked(FT ft);
@ -125,8 +125,8 @@ void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn);
FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h); FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h);
void toku_ft_note_hot_begin(FT_HANDLE brt); void toku_ft_note_hot_begin(FT_HANDLE ft_h);
void toku_ft_note_hot_complete(FT_HANDLE brt, bool success, MSN msn_at_start_of_hot); void toku_ft_note_hot_complete(FT_HANDLE ft_h, bool success, MSN msn_at_start_of_hot);
void void
toku_ft_init( toku_ft_init(
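The two HOT hooks declared above are meant to bracket a HOT optimize pass. A sketch of that bracketing (not part of this commit); do_hot_optimize_pass() is a made-up stand-in for the real optimizer, and using max_msn_in_ft as the starting MSN is only an illustrative choice:

// Hypothetical stand-in for the actual HOT optimizer pass.
static bool do_hot_optimize_pass(FT_HANDLE ft_h);

// Illustrative bracketing of a HOT pass with the accountability hooks above.
static void example_run_hot_optimize(FT_HANDLE ft_h) {
    toku_ft_note_hot_begin(ft_h);                          // record start info, dirty the header
    MSN msn_at_start_of_hot = ft_h->ft->h->max_msn_in_ft;  // illustrative snapshot
    bool success = do_hot_optimize_pass(ft_h);
    toku_ft_note_hot_complete(ft_h, success, msn_at_start_of_hot);
}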

View file

@ -99,7 +99,7 @@ enum ft_layout_version_e {
FT_LAYOUT_VERSION_7 = 7, // Diff from 6 to 7: Add exact-bit to leafentry_estimate #818, add magic to header #22, add per-subdatabase flags #333 FT_LAYOUT_VERSION_7 = 7, // Diff from 6 to 7: Add exact-bit to leafentry_estimate #818, add magic to header #22, add per-subdatabase flags #333
FT_LAYOUT_VERSION_8 = 8, // Diff from 7 to 8: Use murmur instead of crc32. We are going to make a simplification and stop supporting version 7 and before. Current As of Beta 1.0.6 FT_LAYOUT_VERSION_8 = 8, // Diff from 7 to 8: Use murmur instead of crc32. We are going to make a simplification and stop supporting version 7 and before. Current As of Beta 1.0.6
FT_LAYOUT_VERSION_9 = 9, // Diff from 8 to 9: Variable-sized blocks and compression. FT_LAYOUT_VERSION_9 = 9, // Diff from 8 to 9: Variable-sized blocks and compression.
FT_LAYOUT_VERSION_10 = 10, // Diff from 9 to 10: Variable number of compressed sub-blocks per block, disk byte order == intel byte order, Subtree estimates instead of just leafentry estimates, translation table, dictionary descriptors, checksum in header, subdb support removed from brt layer FT_LAYOUT_VERSION_10 = 10, // Diff from 9 to 10: Variable number of compressed sub-blocks per block, disk byte order == intel byte order, Subtree estimates instead of just leafentry estimates, translation table, dictionary descriptors, checksum in header, subdb support removed from ft layer
FT_LAYOUT_VERSION_11 = 11, // Diff from 10 to 11: Nested transaction leafentries (completely redesigned). FT_CMDs on disk now support XIDS (multiple txnids) instead of exactly one. FT_LAYOUT_VERSION_11 = 11, // Diff from 10 to 11: Nested transaction leafentries (completely redesigned). FT_CMDs on disk now support XIDS (multiple txnids) instead of exactly one.
FT_LAYOUT_VERSION_12 = 12, // Diff from 11 to 12: Added FT_CMD 'FT_INSERT_NO_OVERWRITE', compressed block format, num old blocks FT_LAYOUT_VERSION_12 = 12, // Diff from 11 to 12: Added FT_CMD 'FT_INSERT_NO_OVERWRITE', compressed block format, num old blocks
FT_LAYOUT_VERSION_13 = 13, // Diff from 12 to 13: Fixed loader pivot bug, added build_id to every node, timestamps to ft FT_LAYOUT_VERSION_13 = 13, // Diff from 12 to 13: Fixed loader pivot bug, added build_id to every node, timestamps to ft
@ -107,8 +107,8 @@ enum ft_layout_version_e {
FT_LAYOUT_VERSION_15 = 15, // Diff from 14 to 15: basement nodes, last verification time FT_LAYOUT_VERSION_15 = 15, // Diff from 14 to 15: basement nodes, last verification time
FT_LAYOUT_VERSION_16 = 16, // Dr. No: No subtree estimates, partition layout information represented more transparently. FT_LAYOUT_VERSION_16 = 16, // Dr. No: No subtree estimates, partition layout information represented more transparently.
// ALERT ALERT ALERT: version 16 never released to customers, internal and beta use only // ALERT ALERT ALERT: version 16 never released to customers, internal and beta use only
FT_LAYOUT_VERSION_17 = 17, // Dr. No: Add STAT64INFO_S to brt_header FT_LAYOUT_VERSION_17 = 17, // Dr. No: Add STAT64INFO_S to ft header
FT_LAYOUT_VERSION_18 = 18, // Dr. No: Add HOT info to brt_header FT_LAYOUT_VERSION_18 = 18, // Dr. No: Add HOT info to ft header
FT_LAYOUT_VERSION_19 = 19, // Doofenshmirtz: Add compression method, highest_unused_msn_for_upgrade FT_LAYOUT_VERSION_19 = 19, // Doofenshmirtz: Add compression method, highest_unused_msn_for_upgrade
FT_LAYOUT_VERSION_20 = 20, // Deadshot: Add compression method to log_fcreate, FT_LAYOUT_VERSION_20 = 20, // Deadshot: Add compression method to log_fcreate,
// mgr_last_xid after begin checkpoint, // mgr_last_xid after begin checkpoint,

View file

@ -101,7 +101,7 @@ PATENT RIGHTS GRANT:
static FT_UPGRADE_STATUS_S ft_upgrade_status; static FT_UPGRADE_STATUS_S ft_upgrade_status;
#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_upgrade_status, k, c, t, "brt upgrade: " l, inc) #define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_upgrade_status, k, c, t, "ft upgrade: " l, inc)
static void static void
status_init(void) status_init(void)
@ -1412,7 +1412,7 @@ setup_available_ftnode_partition(FTNODE node, int i) {
} }
} }
// Assign the child_to_read member of the bfe from the given brt node // Assign the child_to_read member of the bfe from the given ftnode
// that has been brought into memory. // that has been brought into memory.
static void static void
update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe) update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe)
@ -1447,7 +1447,7 @@ update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe)
} }
// Using the search parameters in the bfe, this function will // Using the search parameters in the bfe, this function will
// initialize all of the given brt node's partitions. // initialize all of the given ftnode's partitions.
static void static void
setup_partitions_using_bfe(FTNODE node, setup_partitions_using_bfe(FTNODE node,
struct ftnode_fetch_extra *bfe, struct ftnode_fetch_extra *bfe,
@ -1792,7 +1792,7 @@ cleanup:
// also creates MSN's for older messages created in older versions // also creates MSN's for older messages created in older versions
// that did not generate MSN's for messages. These new MSN's are // that did not generate MSN's for messages. These new MSN's are
// generated from the root downwards, counting backwards from MIN_MSN // generated from the root downwards, counting backwards from MIN_MSN
// and persisted in the brt header. // and persisted in the ft header.
static int static int
deserialize_and_upgrade_internal_node(FTNODE node, deserialize_and_upgrade_internal_node(FTNODE node,
struct rbuf *rb, struct rbuf *rb,
@ -2050,7 +2050,7 @@ deserialize_and_upgrade_leaf_node(FTNODE node,
setup_partitions_using_bfe(node, &temp_bfe, true); setup_partitions_using_bfe(node, &temp_bfe, true);
// 11. Deserialize the partition maps, though they are not used in the // 11. Deserialize the partition maps, though they are not used in the
// newer versions of brt nodes. // newer versions of ftnodes.
struct sub_block_map part_map[npartitions]; struct sub_block_map part_map[npartitions];
for (int i = 0; i < npartitions; ++i) { for (int i = 0; i < npartitions; ++i) {
sub_block_map_deserialize(&part_map[i], rb); sub_block_map_deserialize(&part_map[i], rb);
@ -2420,7 +2420,7 @@ cleanup:
// NOTE: Right now, callers higher in the stack will assert on // NOTE: Right now, callers higher in the stack will assert on
// failure, so this is OK for production. However, if we // failure, so this is OK for production. However, if we
// create tools that use this function to search for errors in // create tools that use this function to search for errors in
// the BRT, then we will leak memory. // the FT, then we will leak memory.
if (node) { if (node) {
toku_free(node); toku_free(node);
} }
@ -2579,7 +2579,7 @@ deserialize_ftnode_from_fd(int fd,
return r; return r;
} }
// Read brt node from file into struct. Perform version upgrade if necessary. // Read ftnode from file into struct. Perform version upgrade if necessary.
int int
toku_deserialize_ftnode_from (int fd, toku_deserialize_ftnode_from (int fd,
BLOCKNUM blocknum, BLOCKNUM blocknum,
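For orientation, a short sketch of driving toku_deserialize_ftnode_from with a full-read fetch-extra, mirroring what the serialize tests later in this commit do; the caller is assumed to have the blocktable and compare function already set up on ft_h:

// Illustrative: read one node back with a full-read bfe, as the tests below do.
static void example_read_whole_node(int fd, FT ft_h, BLOCKNUM blocknum,
                                    FTNODE *node, FTNODE_DISK_DATA *ndd) {
    struct ftnode_fetch_extra bfe;
    fill_bfe_for_full_read(&bfe, ft_h);   // request every partition (PT_AVAIL)
    *node = NULL;
    *ndd = NULL;
    int r = toku_deserialize_ftnode_from(fd, blocknum, 0 /*pass zero for hash*/, node, ndd, &bfe);
    assert(r == 0);                       // caller owns *node and *ndd afterwards
}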

View file

@ -256,7 +256,7 @@ struct ft_loader_s {
int progress_callback_result; // initially zero; if any call to the poll function callback returns nonzero, we save the result here (and don't call the poll callback function again). int progress_callback_result; // initially zero; if any call to the poll function callback returns nonzero, we save the result here (and don't call the poll callback function again).
LSN load_lsn; //LSN of the fsynced 'load' log entry. Write this LSN (as checkpoint_lsn) in brt headers made by this loader. LSN load_lsn; //LSN of the fsynced 'load' log entry. Write this LSN (as checkpoint_lsn) in ft headers made by this loader.
TXNID load_root_xid; //(Root) transaction that performed the load. TXNID load_root_xid; //(Root) transaction that performed the load.
QUEUE *fractal_queues; // an array of work queues, one for each secondary index. QUEUE *fractal_queues; // an array of work queues, one for each secondary index.
@ -279,7 +279,7 @@ uint64_t toku_ft_loader_get_n_rows(FTLOADER bl);
struct fractal_thread_args { struct fractal_thread_args {
FTLOADER bl; FTLOADER bl;
const DESCRIPTOR descriptor; const DESCRIPTOR descriptor;
int fd; // write the brt into tfd. int fd; // write the ft into fd.
int progress_allocation; int progress_allocation;
QUEUE q; QUEUE q;
uint64_t total_disksize_estimate; uint64_t total_disksize_estimate;
@ -311,17 +311,17 @@ int toku_merge_some_files_using_dbufio (const bool to_q, FIDX dest_data, QUEUE q
int ft_loader_sort_and_write_rows (struct rowset *rows, struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func); int ft_loader_sort_and_write_rows (struct rowset *rows, struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func);
// This is probably only for testing. // This is probably only for testing.
int toku_loader_write_brt_from_q_in_C (FTLOADER bl, int toku_loader_write_ft_from_q_in_C (FTLOADER bl,
const DESCRIPTOR descriptor, const DESCRIPTOR descriptor,
int fd, // write to here int fd, // write to here
int progress_allocation, int progress_allocation,
QUEUE q, QUEUE q,
uint64_t total_disksize_estimate, uint64_t total_disksize_estimate,
int which_db, int which_db,
uint32_t target_nodesize, uint32_t target_nodesize,
uint32_t target_basementnodesize, uint32_t target_basementnodesize,
enum toku_compression_method target_compression_method, enum toku_compression_method target_compression_method,
uint32_t fanout); uint32_t fanout);
int ft_loader_mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func, FTLOADER, struct rowset *); int ft_loader_mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func, FTLOADER, struct rowset *);
@ -338,7 +338,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
CACHETABLE cachetable, CACHETABLE cachetable,
generate_row_for_put_func g, generate_row_for_put_func g,
DB *src_db, DB *src_db,
int N, FT_HANDLE brts[/*N*/], DB* dbs[/*N*/], int N, FT_HANDLE ft_hs[/*N*/], DB* dbs[/*N*/],
const char *new_fnames_in_env[/*N*/], const char *new_fnames_in_env[/*N*/],
ft_compare_func bt_compare_functions[/*N*/], ft_compare_func bt_compare_functions[/*N*/],
const char *temp_file_template, const char *temp_file_template,

View file

@ -535,7 +535,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
CACHETABLE cachetable, CACHETABLE cachetable,
generate_row_for_put_func g, generate_row_for_put_func g,
DB *src_db, DB *src_db,
int N, FT_HANDLE brts[/*N*/], DB* dbs[/*N*/], int N, FT_HANDLE fts[/*N*/], DB* dbs[/*N*/],
const char *new_fnames_in_env[/*N*/], const char *new_fnames_in_env[/*N*/],
ft_compare_func bt_compare_functions[/*N*/], ft_compare_func bt_compare_functions[/*N*/],
const char *temp_file_template, const char *temp_file_template,
@ -581,11 +581,11 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
#define SET_TO_MY_STRDUP(lval, s) do { char *v = toku_strdup(s); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, true); return r; } lval = v; } while (0) #define SET_TO_MY_STRDUP(lval, s) do { char *v = toku_strdup(s); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, true); return r; } lval = v; } while (0)
MY_CALLOC_N(N, bl->root_xids_that_created); MY_CALLOC_N(N, bl->root_xids_that_created);
for (int i=0; i<N; i++) if (brts[i]) bl->root_xids_that_created[i]=brts[i]->ft->h->root_xid_that_created; for (int i=0; i<N; i++) if (fts[i]) bl->root_xids_that_created[i]=fts[i]->ft->h->root_xid_that_created;
MY_CALLOC_N(N, bl->dbs); MY_CALLOC_N(N, bl->dbs);
for (int i=0; i<N; i++) if (brts[i]) bl->dbs[i]=dbs[i]; for (int i=0; i<N; i++) if (fts[i]) bl->dbs[i]=dbs[i];
MY_CALLOC_N(N, bl->descriptors); MY_CALLOC_N(N, bl->descriptors);
for (int i=0; i<N; i++) if (brts[i]) bl->descriptors[i]=&brts[i]->ft->descriptor; for (int i=0; i<N; i++) if (fts[i]) bl->descriptors[i]=&fts[i]->ft->descriptor;
MY_CALLOC_N(N, bl->new_fnames_in_env); MY_CALLOC_N(N, bl->new_fnames_in_env);
for (int i=0; i<N; i++) SET_TO_MY_STRDUP(bl->new_fnames_in_env[i], new_fnames_in_env[i]); for (int i=0; i<N; i++) SET_TO_MY_STRDUP(bl->new_fnames_in_env[i], new_fnames_in_env[i]);
MY_CALLOC_N(N, bl->extracted_datasizes); // the calloc_n zeroed everything, which is what we want MY_CALLOC_N(N, bl->extracted_datasizes); // the calloc_n zeroed everything, which is what we want
@ -642,7 +642,7 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp,
CACHETABLE cachetable, CACHETABLE cachetable,
generate_row_for_put_func g, generate_row_for_put_func g,
DB *src_db, DB *src_db,
int N, FT_HANDLE brts[/*N*/], DB* dbs[/*N*/], int N, FT_HANDLE fts[/*N*/], DB* dbs[/*N*/],
const char *new_fnames_in_env[/*N*/], const char *new_fnames_in_env[/*N*/],
ft_compare_func bt_compare_functions[/*N*/], ft_compare_func bt_compare_functions[/*N*/],
const char *temp_file_template, const char *temp_file_template,
@ -651,9 +651,9 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp,
bool reserve_memory, bool reserve_memory,
uint64_t reserve_memory_size, uint64_t reserve_memory_size,
bool compress_intermediates) bool compress_intermediates)
/* Effect: called by DB_ENV->create_loader to create a brt loader. /* Effect: called by DB_ENV->create_loader to create an ft loader.
* Arguments: * Arguments:
* blp Return the brt loader here. * blp Return the ft loader here.
* g The function for generating a row * g The function for generating a row
* src_db The source database. Needed by g. May be NULL if that's ok with g. * src_db The source database. Needed by g. May be NULL if that's ok with g.
* N The number of dbs to create. * N The number of dbs to create.
@ -666,15 +666,15 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp,
int result = 0; int result = 0;
{ {
int r = toku_ft_loader_internal_init(blp, cachetable, g, src_db, int r = toku_ft_loader_internal_init(blp, cachetable, g, src_db,
N, brts, dbs, N, fts, dbs,
new_fnames_in_env, new_fnames_in_env,
bt_compare_functions, bt_compare_functions,
temp_file_template, temp_file_template,
load_lsn, load_lsn,
txn, txn,
reserve_memory, reserve_memory,
reserve_memory_size, reserve_memory_size,
compress_intermediates); compress_intermediates);
if (r!=0) result = r; if (r!=0) result = r;
} }
if (result==0) { if (result==0) {
@ -1370,7 +1370,7 @@ static int process_primary_rows (FTLOADER bl, struct rowset *primary_rowset) {
} }
int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val) int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val)
/* Effect: Put a key-value pair into the brt loader. Called by DB_LOADER->put(). /* Effect: Put a key-value pair into the ft loader. Called by DB_LOADER->put().
* Return value: 0 on success, an error number otherwise. * Return value: 0 on success, an error number otherwise.
*/ */
{ {
@ -2672,17 +2672,17 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
return result; return result;
} }
int toku_loader_write_brt_from_q_in_C (FTLOADER bl, int toku_loader_write_ft_from_q_in_C (FTLOADER bl,
const DESCRIPTOR descriptor, const DESCRIPTOR descriptor,
int fd, // write to here int fd, // write to here
int progress_allocation, int progress_allocation,
QUEUE q, QUEUE q,
uint64_t total_disksize_estimate, uint64_t total_disksize_estimate,
int which_db, int which_db,
uint32_t target_nodesize, uint32_t target_nodesize,
uint32_t target_basementnodesize, uint32_t target_basementnodesize,
enum toku_compression_method target_compression_method, enum toku_compression_method target_compression_method,
uint32_t target_fanout) uint32_t target_fanout)
// This is probably only for testing. // This is probably only for testing.
{ {
target_nodesize = target_nodesize == 0 ? default_loader_nodesize : target_nodesize; target_nodesize = target_nodesize == 0 ? default_loader_nodesize : target_nodesize;
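To make the loader contract above concrete, a hedged sketch of the put loop a caller (normally the ydb DB_LOADER layer) would run against an already-opened FTLOADER; the key/value payloads and row count below are made up for illustration:

// Illustrative feed loop against an FTLOADER created by toku_ft_loader_open.
static int example_feed_rows(FTLOADER bl, uint64_t nrows) {
    for (uint64_t i = 0; i < nrows; i++) {
        uint64_t k = i;          // made-up key/value payloads
        uint64_t v = 2 * i;
        DBT key, val;
        toku_fill_dbt(&key, &k, sizeof k);
        toku_fill_dbt(&val, &v, sizeof v);
        int r = toku_ft_loader_put(bl, &key, &val);
        if (r != 0) {
            return r;            // 0 on success, an error number otherwise
        }
    }
    return 0;
}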

View file

@ -105,7 +105,7 @@ int toku_ft_loader_open (FTLOADER *bl,
generate_row_for_put_func g, generate_row_for_put_func g,
DB *src_db, DB *src_db,
int N, int N,
FT_HANDLE brts[/*N*/], DB* dbs[/*N*/], FT_HANDLE ft_hs[/*N*/], DB* dbs[/*N*/],
const char * new_fnames_in_env[/*N*/], const char * new_fnames_in_env[/*N*/],
ft_compare_func bt_compare_functions[/*N*/], ft_compare_func bt_compare_functions[/*N*/],
const char *temp_file_template, const char *temp_file_template,

View file

@ -198,7 +198,7 @@ typedef struct {
static const STAT64INFO_S ZEROSTATS = {0,0}; static const STAT64INFO_S ZEROSTATS = {0,0};
/* At the brt layer, a FILENUM uniquely identifies an open file. /* At the ft layer, a FILENUM uniquely identifies an open file.
* At the ydb layer, a DICTIONARY_ID uniquely identifies an open dictionary. * At the ydb layer, a DICTIONARY_ID uniquely identifies an open dictionary.
* With the introduction of the loader (ticket 2216), it is possible for the file that holds * With the introduction of the loader (ticket 2216), it is possible for the file that holds
* an open dictionary to change, so these are now separate and independent unique identifiers. * an open dictionary to change, so these are now separate and independent unique identifiers.
@ -329,8 +329,8 @@ struct ft_msg {
} id; } id;
} u; } u;
}; };
// Message sent into brt to implement command (insert, delete, etc.)
// This structure supports nested transactions, and obsoletes ft_msg. // Message sent into the ft to implement insert, delete, update, etc
typedef struct ft_msg FT_MSG_S; typedef struct ft_msg FT_MSG_S;
typedef struct ft_msg *FT_MSG; typedef struct ft_msg *FT_MSG;
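As context for the comment above, a hedged sketch of building one such message. The field names (type, msn, xids, u.id.key, u.id.val) are assumed from the full struct ft_msg definition, which is not shown in this hunk:

// Illustrative: wrap a key/value pair as an FT_INSERT message.
// Field names are assumptions; check the full struct ft_msg definition.
static FT_MSG_S example_make_insert_msg(const DBT *key, const DBT *val, MSN msn, XIDS xids) {
    FT_MSG_S msg;
    msg.type = FT_INSERT;
    msg.msn = msn;
    msg.xids = xids;
    msg.u.id.key = key;
    msg.u.id.val = val;
    return msg;
}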

View file

@ -104,10 +104,10 @@ PATENT RIGHTS GRANT:
typedef struct le_cursor *LE_CURSOR; typedef struct le_cursor *LE_CURSOR;
// Create a leaf cursor for a tree (brt) within a transaction (txn) // Create a leaf cursor for a tree (ft_h) within a transaction (txn)
// Success: returns 0, stores the LE_CURSOR in the le_cursor_result // Success: returns 0, stores the LE_CURSOR in the le_cursor_result
// Failure: returns a non-zero error number // Failure: returns a non-zero error number
int toku_le_cursor_create(LE_CURSOR *le_cursor_result, FT_HANDLE brt, TOKUTXN txn); int toku_le_cursor_create(LE_CURSOR *le_cursor_result, FT_HANDLE ft_h, TOKUTXN txn);
// Close and free the LE_CURSOR // Close and free the LE_CURSOR
void toku_le_cursor_close(LE_CURSOR le_cursor); void toku_le_cursor_close(LE_CURSOR le_cursor);
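A minimal lifecycle sketch for the leaf-cursor API declared above (not part of this commit); the handle and transaction are assumed to be open already, and the iteration call between create and close is omitted because it is not shown in this hunk:

// Illustrative create/close pairing for LE_CURSOR.
static void example_le_cursor_lifecycle(FT_HANDLE ft_h, TOKUTXN txn) {
    LE_CURSOR le_cursor = NULL;
    int r = toku_le_cursor_create(&le_cursor, ft_h, txn);  // returns 0 on success
    assert(r == 0);
    // ... walk the leaf entries here ...
    toku_le_cursor_close(le_cursor);
}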

View file

@ -182,7 +182,7 @@ int toku_logger_create (TOKULOGGER *resultp) {
result->last_completed_checkpoint_lsn = ZERO_LSN; result->last_completed_checkpoint_lsn = ZERO_LSN;
// next_log_file_number is uninitialized // next_log_file_number is uninitialized
// n_in_file is uninitialized // n_in_file is uninitialized
result->write_block_size = FT_DEFAULT_NODE_SIZE; // default logging size is the same as the default brt block size result->write_block_size = FT_DEFAULT_NODE_SIZE; // default logging size is the same as the default ft block size
toku_logfilemgr_create(&result->logfilemgr); toku_logfilemgr_create(&result->logfilemgr);
*resultp=result; *resultp=result;
ml_init(&result->input_lock); ml_init(&result->input_lock);
@ -280,7 +280,7 @@ toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create)
assert(logger->is_open); assert(logger->is_open);
assert(!logger->rollback_cachefile); assert(!logger->rollback_cachefile);
FT_HANDLE t = NULL; // Note, there is no DB associated with this BRT. FT_HANDLE t = NULL; // Note, there is no DB associated with this FT.
toku_ft_handle_create(&t); toku_ft_handle_create(&t);
int r = toku_ft_handle_open(t, toku_product_name_strings.rollback_cachefile, create, create, cachetable, NULL_TXN); int r = toku_ft_handle_open(t, toku_product_name_strings.rollback_cachefile, create, create, cachetable, NULL_TXN);
if (r == 0) { if (r == 0) {
@ -308,7 +308,7 @@ void toku_logger_close_rollback(TOKULOGGER logger) {
CACHEFILE cf = logger->rollback_cachefile; // stored in logger at rollback cachefile open CACHEFILE cf = logger->rollback_cachefile; // stored in logger at rollback cachefile open
if (cf) { if (cf) {
FT_HANDLE ft_to_close; FT_HANDLE ft_to_close;
{ //Find "brt" { //Find "ft_to_close"
logger->rollback_cache.destroy(); logger->rollback_cache.destroy();
FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf)); FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf));
//Verify it is safe to close it. //Verify it is safe to close it.

View file

@ -154,9 +154,9 @@ struct file_map_tuple {
struct __toku_db fake_db; struct __toku_db fake_db;
}; };
static void file_map_tuple_init(struct file_map_tuple *tuple, FILENUM filenum, FT_HANDLE brt, char *iname) { static void file_map_tuple_init(struct file_map_tuple *tuple, FILENUM filenum, FT_HANDLE ft_handle, char *iname) {
tuple->filenum = filenum; tuple->filenum = filenum;
tuple->ft_handle = brt; tuple->ft_handle = ft_handle;
tuple->iname = iname; tuple->iname = iname;
// use a fake DB for comparisons, using the ft's cmp descriptor // use a fake DB for comparisons, using the ft's cmp descriptor
memset(&tuple->fake_db, 0, sizeof(tuple->fake_db)); memset(&tuple->fake_db, 0, sizeof(tuple->fake_db));
@ -171,7 +171,7 @@ static void file_map_tuple_destroy(struct file_map_tuple *tuple) {
} }
} }
// Map filenum to brt // Map filenum to ft_handle
struct file_map { struct file_map {
OMT filenums; OMT filenums;
}; };
@ -242,9 +242,9 @@ static int file_map_h(OMTVALUE omtv, void *v) {
return 0; return 0;
} }
static int file_map_insert (struct file_map *fmap, FILENUM fnum, FT_HANDLE brt, char *iname) { static int file_map_insert (struct file_map *fmap, FILENUM fnum, FT_HANDLE ft_handle, char *iname) {
struct file_map_tuple *XMALLOC(tuple); struct file_map_tuple *XMALLOC(tuple);
file_map_tuple_init(tuple, fnum, brt, iname); file_map_tuple_init(tuple, fnum, ft_handle, iname);
int r = toku_omt_insert(fmap->filenums, tuple, file_map_h, &fnum, NULL); int r = toku_omt_insert(fmap->filenums, tuple, file_map_h, &fnum, NULL);
return r; return r;
} }
@ -350,48 +350,48 @@ static const char *recover_state(RECOVER_ENV renv) {
static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, bool must_create, int UU(mode), BYTESTRING *bs_iname, FILENUM filenum, uint32_t treeflags, static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, bool must_create, int UU(mode), BYTESTRING *bs_iname, FILENUM filenum, uint32_t treeflags,
TOKUTXN txn, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method, LSN max_acceptable_lsn) { TOKUTXN txn, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method, LSN max_acceptable_lsn) {
int r = 0; int r = 0;
FT_HANDLE brt = NULL; FT_HANDLE ft_handle = NULL;
char *iname = fixup_fname(bs_iname); char *iname = fixup_fname(bs_iname);
toku_ft_handle_create(&brt); toku_ft_handle_create(&ft_handle);
toku_ft_set_flags(brt, treeflags); toku_ft_set_flags(ft_handle, treeflags);
if (nodesize != 0) { if (nodesize != 0) {
toku_ft_handle_set_nodesize(brt, nodesize); toku_ft_handle_set_nodesize(ft_handle, nodesize);
} }
if (basementnodesize != 0) { if (basementnodesize != 0) {
toku_ft_handle_set_basementnodesize(brt, basementnodesize); toku_ft_handle_set_basementnodesize(ft_handle, basementnodesize);
} }
if (compression_method != TOKU_DEFAULT_COMPRESSION_METHOD) { if (compression_method != TOKU_DEFAULT_COMPRESSION_METHOD) {
toku_ft_handle_set_compression_method(brt, compression_method); toku_ft_handle_set_compression_method(ft_handle, compression_method);
} }
// set the key compare functions // set the key compare functions
if (!(treeflags & TOKU_DB_KEYCMP_BUILTIN) && renv->bt_compare) { if (!(treeflags & TOKU_DB_KEYCMP_BUILTIN) && renv->bt_compare) {
toku_ft_set_bt_compare(brt, renv->bt_compare); toku_ft_set_bt_compare(ft_handle, renv->bt_compare);
} }
if (renv->update_function) { if (renv->update_function) {
toku_ft_set_update(brt, renv->update_function); toku_ft_set_update(ft_handle, renv->update_function);
} }
// TODO mode (FUTURE FEATURE) // TODO mode (FUTURE FEATURE)
//mode = mode; //mode = mode;
r = toku_ft_handle_open_recovery(brt, iname, must_create, must_create, renv->ct, txn, filenum, max_acceptable_lsn); r = toku_ft_handle_open_recovery(ft_handle, iname, must_create, must_create, renv->ct, txn, filenum, max_acceptable_lsn);
if (r != 0) { if (r != 0) {
//Note: If ft_handle_open fails, then close_ft will NOT write a header to disk. //Note: If ft_handle_open fails, then close_ft will NOT write a header to disk.
//No need to provide lsn, so use the regular toku_ft_handle_close function //No need to provide lsn, so use the regular toku_ft_handle_close function
toku_ft_handle_close(brt); toku_ft_handle_close(ft_handle);
toku_free(iname); toku_free(iname);
if (r == ENOENT) //Not an error to simply be missing. if (r == ENOENT) //Not an error to simply be missing.
r = 0; r = 0;
return r; return r;
} }
file_map_insert(&renv->fmap, filenum, brt, iname); file_map_insert(&renv->fmap, filenum, ft_handle, iname);
return 0; return 0;
} }

View file

@ -281,7 +281,7 @@ int toku_rollback_commit(TOKUTXN txn, LSN lsn) {
toku_txn_unlock(txn->parent); toku_txn_unlock(txn->parent);
} }
// Note the open brts; the omts must be merged // Note the open FTs; the omts must be merged
r = txn->open_fts.iterate<struct tokutxn, note_ft_used_in_txns_parent>(txn); r = txn->open_fts.iterate<struct tokutxn, note_ft_used_in_txns_parent>(txn);
assert(r==0); assert(r==0);

View file

@ -267,7 +267,7 @@ int find_filenum (const FT &h, const FT &hfind) {
return 0; return 0;
} }
//Notify a transaction that it has touched a brt. //Notify a transaction that it has touched an ft.
void toku_txn_maybe_note_ft (TOKUTXN txn, FT ft) { void toku_txn_maybe_note_ft (TOKUTXN txn, FT ft) {
toku_txn_lock(txn); toku_txn_lock(txn);
FT ftv; FT ftv;

View file

@ -101,9 +101,9 @@ int64_key_cmp (DB *db UU(), const DBT *a, const DBT *b) {
} }
static void static void
test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) { test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
int r; int r;
brt_h->compare_fun = int64_key_cmp; ft_h->compare_fun = int64_key_cmp;
FT_CURSOR XMALLOC(cursor); FT_CURSOR XMALLOC(cursor);
FTNODE dn = NULL; FTNODE dn = NULL;
PAIR_ATTR attr; PAIR_ATTR attr;
@ -120,7 +120,7 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
// quick test to see that we have the right behavior when we set // quick test to see that we have the right behavior when we set
// disable_prefetching to true // disable_prefetching to true
cursor->disable_prefetching = true; cursor->disable_prefetching = true;
fill_bfe_for_prefetch(&bfe, brt_h, cursor); fill_bfe_for_prefetch(&bfe, ft_h, cursor);
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0); assert(r==0);
@ -139,14 +139,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
// now enable prefetching again // now enable prefetching again
cursor->disable_prefetching = false; cursor->disable_prefetching = false;
fill_bfe_for_prefetch(&bfe, brt_h, cursor); fill_bfe_for_prefetch(&bfe, ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0); assert(r==0);
assert(dn->n_children == 3); assert(dn->n_children == 3);
assert(BP_STATE(dn,0) == PT_AVAIL); assert(BP_STATE(dn,0) == PT_AVAIL);
assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_COMPRESSED); assert(BP_STATE(dn,0) == PT_COMPRESSED);
assert(BP_STATE(dn,1) == PT_COMPRESSED); assert(BP_STATE(dn,1) == PT_COMPRESSED);
assert(BP_STATE(dn,2) == PT_COMPRESSED); assert(BP_STATE(dn,2) == PT_COMPRESSED);
@ -161,14 +161,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
uint64_t left_key = 150; uint64_t left_key = 150;
toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(uint64_t)); toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(uint64_t));
cursor->left_is_neg_infty = false; cursor->left_is_neg_infty = false;
fill_bfe_for_prefetch(&bfe, brt_h, cursor); fill_bfe_for_prefetch(&bfe, ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0); assert(r==0);
assert(dn->n_children == 3); assert(dn->n_children == 3);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_COMPRESSED); assert(BP_STATE(dn,1) == PT_COMPRESSED);
assert(BP_STATE(dn,2) == PT_COMPRESSED); assert(BP_STATE(dn,2) == PT_COMPRESSED);
@ -183,14 +183,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
uint64_t right_key = 151; uint64_t right_key = 151;
toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(uint64_t)); toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(uint64_t));
cursor->right_is_pos_infty = false; cursor->right_is_pos_infty = false;
fill_bfe_for_prefetch(&bfe, brt_h, cursor); fill_bfe_for_prefetch(&bfe, ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0); assert(r==0);
assert(dn->n_children == 3); assert(dn->n_children == 3);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_COMPRESSED); assert(BP_STATE(dn,1) == PT_COMPRESSED);
assert(BP_STATE(dn,2) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK);
@ -204,14 +204,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
left_key = 100000; left_key = 100000;
right_key = 100000; right_key = 100000;
fill_bfe_for_prefetch(&bfe, brt_h, cursor); fill_bfe_for_prefetch(&bfe, ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0); assert(r==0);
assert(dn->n_children == 3); assert(dn->n_children == 3);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_COMPRESSED); assert(BP_STATE(dn,2) == PT_COMPRESSED);
@ -225,14 +225,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
left_key = 100; left_key = 100;
right_key = 100; right_key = 100;
fill_bfe_for_prefetch(&bfe, brt_h, cursor); fill_bfe_for_prefetch(&bfe, ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0); assert(r==0);
assert(dn->n_children == 3); assert(dn->n_children == 3);
assert(BP_STATE(dn,0) == PT_AVAIL); assert(BP_STATE(dn,0) == PT_AVAIL);
assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_COMPRESSED); assert(BP_STATE(dn,0) == PT_COMPRESSED);
assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK);
@ -248,9 +248,9 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
} }
static void static void
test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) { test_subset_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
int r; int r;
brt_h->compare_fun = int64_key_cmp; ft_h->compare_fun = int64_key_cmp;
FT_CURSOR XMALLOC(cursor); FT_CURSOR XMALLOC(cursor);
FTNODE dn = NULL; FTNODE dn = NULL;
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
@ -271,7 +271,7 @@ test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
toku_fill_dbt(&right, &right_key, sizeof(right_key)); toku_fill_dbt(&right, &right_key, sizeof(right_key));
fill_bfe_for_subset_read( fill_bfe_for_subset_read(
&bfe, &bfe,
brt_h, ft_h,
NULL, NULL,
&left, &left,
&right, &right,
@ -292,11 +292,11 @@ test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL);
// need to call this twice because we had a subset read before, that touched the clock // need to call this twice because we had a subset read before, that touched the clock
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_COMPRESSED); assert(BP_STATE(dn,2) == PT_COMPRESSED);
@ -317,11 +317,11 @@ test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL);
// need to call this twice because we had a subset read before, that touched the clock // need to call this twice because we had a subset read before, that touched the clock
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_COMPRESSED); assert(BP_STATE(dn,1) == PT_COMPRESSED);
assert(BP_STATE(dn,2) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_COMPRESSED); assert(BP_STATE(dn,1) == PT_COMPRESSED);
assert(BP_STATE(dn,2) == PT_COMPRESSED); assert(BP_STATE(dn,2) == PT_COMPRESSED);
@ -341,11 +341,11 @@ test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK);
// need to call this twice because we had a subset read before, that touched the clock // need to call this twice because we had a subset read before, that touched the clock
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_AVAIL); assert(BP_STATE(dn,0) == PT_AVAIL);
assert(BP_STATE(dn,1) == PT_COMPRESSED); assert(BP_STATE(dn,1) == PT_COMPRESSED);
assert(BP_STATE(dn,2) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK);
toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(dn,0) == PT_COMPRESSED); assert(BP_STATE(dn,0) == PT_COMPRESSED);
assert(BP_STATE(dn,1) == PT_COMPRESSED); assert(BP_STATE(dn,1) == PT_COMPRESSED);
assert(BP_STATE(dn,2) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK);
@ -412,9 +412,9 @@ test_prefetching(void) {
xids_destroy(&xids_123); xids_destroy(&xids_123);
xids_destroy(&xids_234); xids_destroy(&xids_234);
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@ -422,32 +422,32 @@ test_prefetching(void) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false); r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
assert(r==0); assert(r==0);
test_prefetch_read(fd, brt, brt_h); test_prefetch_read(fd, ft, ft_h);
test_subset_read(fd, brt, brt_h); test_subset_read(fd, ft, ft_h);
toku_free(sn.childkeys[0].data); toku_free(sn.childkeys[0].data);
toku_free(sn.childkeys[1].data); toku_free(sn.childkeys[1].data);
@ -457,11 +457,11 @@ test_prefetching(void) {
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(ndd); toku_free(ndd);
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);

View file

@ -139,11 +139,11 @@ le_malloc(bn_data* bn, uint32_t idx, const char *key, const char *val)
static void static void
test1(int fd, FT brt_h, FTNODE *dn) { test1(int fd, FT ft_h, FTNODE *dn) {
int r; int r;
struct ftnode_fetch_extra bfe_all; struct ftnode_fetch_extra bfe_all;
brt_h->compare_fun = string_key_cmp; ft_h->compare_fun = string_key_cmp;
fill_bfe_for_full_read(&bfe_all, brt_h); fill_bfe_for_full_read(&bfe_all, ft_h);
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all);
bool is_leaf = ((*dn)->height == 0); bool is_leaf = ((*dn)->height == 0);
@ -154,12 +154,12 @@ test1(int fd, FT brt_h, FTNODE *dn) {
// should sweep and NOT get rid of anything // should sweep and NOT get rid of anything
PAIR_ATTR attr; PAIR_ATTR attr;
memset(&attr,0,sizeof(attr)); memset(&attr,0,sizeof(attr));
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
} }
// should sweep and get compress all // should sweep and get compress all
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
if (!is_leaf) { if (!is_leaf) {
assert(BP_STATE(*dn,i) == PT_COMPRESSED); assert(BP_STATE(*dn,i) == PT_COMPRESSED);
@ -172,12 +172,12 @@ test1(int fd, FT brt_h, FTNODE *dn) {
bool req = toku_ftnode_pf_req_callback(*dn, &bfe_all); bool req = toku_ftnode_pf_req_callback(*dn, &bfe_all);
assert(req); assert(req);
toku_ftnode_pf_callback(*dn, ndd, &bfe_all, fd, &size); toku_ftnode_pf_callback(*dn, ndd, &bfe_all, fd, &size);
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
} }
// should sweep and get compress all // should sweep and get compress all
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
if (!is_leaf) { if (!is_leaf) {
assert(BP_STATE(*dn,i) == PT_COMPRESSED); assert(BP_STATE(*dn,i) == PT_COMPRESSED);
@ -190,15 +190,15 @@ test1(int fd, FT brt_h, FTNODE *dn) {
req = toku_ftnode_pf_req_callback(*dn, &bfe_all); req = toku_ftnode_pf_req_callback(*dn, &bfe_all);
assert(req); assert(req);
toku_ftnode_pf_callback(*dn, ndd, &bfe_all, fd, &size); toku_ftnode_pf_callback(*dn, ndd, &bfe_all, fd, &size);
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
} }
(*dn)->dirty = 1; (*dn)->dirty = 1;
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
} }
@ -212,7 +212,7 @@ static int search_cmp(const struct ft_search& UU(so), const DBT* UU(key)) {
} }
static void static void
test2(int fd, FT brt_h, FTNODE *dn) { test2(int fd, FT ft_h, FTNODE *dn) {
struct ftnode_fetch_extra bfe_subset; struct ftnode_fetch_extra bfe_subset;
DBT left, right; DBT left, right;
DB dummy_db; DB dummy_db;
@ -221,10 +221,10 @@ test2(int fd, FT brt_h, FTNODE *dn) {
memset(&right, 0, sizeof(right)); memset(&right, 0, sizeof(right));
ft_search_t search_t; ft_search_t search_t;
brt_h->compare_fun = string_key_cmp; ft_h->compare_fun = string_key_cmp;
fill_bfe_for_subset_read( fill_bfe_for_subset_read(
&bfe_subset, &bfe_subset,
brt_h, ft_h,
ft_search_init( ft_search_init(
&search_t, &search_t,
search_cmp, search_cmp,
@@ -252,11 +252,11 @@ test2(int fd, FT brt_h, FTNODE *dn) {
assert(!BP_SHOULD_EVICT(*dn, 1)); assert(!BP_SHOULD_EVICT(*dn, 1));
PAIR_ATTR attr; PAIR_ATTR attr;
memset(&attr,0,sizeof(attr)); memset(&attr,0,sizeof(attr));
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(*dn, 0) == (is_leaf) ? PT_ON_DISK : PT_COMPRESSED); assert(BP_STATE(*dn, 0) == (is_leaf) ? PT_ON_DISK : PT_COMPRESSED);
assert(BP_STATE(*dn, 1) == PT_AVAIL); assert(BP_STATE(*dn, 1) == PT_AVAIL);
assert(BP_SHOULD_EVICT(*dn, 1)); assert(BP_SHOULD_EVICT(*dn, 1));
toku_ftnode_pe_callback(*dn, attr, brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
assert(BP_STATE(*dn, 1) == (is_leaf) ? PT_ON_DISK : PT_COMPRESSED); assert(BP_STATE(*dn, 1) == (is_leaf) ? PT_ON_DISK : PT_COMPRESSED);
bool req = toku_ftnode_pf_req_callback(*dn, &bfe_subset); bool req = toku_ftnode_pf_req_callback(*dn, &bfe_subset);
@@ -272,7 +272,7 @@ test2(int fd, FT brt_h, FTNODE *dn) {
} }
static void static void
test3_leaf(int fd, FT brt_h, FTNODE *dn) { test3_leaf(int fd, FT ft_h, FTNODE *dn) {
struct ftnode_fetch_extra bfe_min; struct ftnode_fetch_extra bfe_min;
DBT left, right; DBT left, right;
DB dummy_db; DB dummy_db;
@@ -280,10 +280,10 @@ test3_leaf(int fd, FT brt_h, FTNODE *dn) {
memset(&left, 0, sizeof(left)); memset(&left, 0, sizeof(left));
memset(&right, 0, sizeof(right)); memset(&right, 0, sizeof(right));
brt_h->compare_fun = string_key_cmp; ft_h->compare_fun = string_key_cmp;
fill_bfe_for_min_read( fill_bfe_for_min_read(
&bfe_min, &bfe_min,
brt_h ft_h
); );
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_min); int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_min);
@@ -347,9 +347,9 @@ test_serialize_nonleaf(void) {
xids_destroy(&xids_123); xids_destroy(&xids_123);
xids_destroy(&xids_234); xids_destroy(&xids_234);
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -357,33 +357,33 @@ test_serialize_nonleaf(void) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false); r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
assert(r==0); assert(r==0);
test1(fd, brt_h, &dn); test1(fd, ft_h, &dn);
test2(fd, brt_h, &dn); test2(fd, ft_h, &dn);
toku_free(hello_string); toku_free(hello_string);
destroy_nonleaf_childinfo(BNC(&sn, 0)); destroy_nonleaf_childinfo(BNC(&sn, 0));
@@ -392,11 +392,11 @@ test_serialize_nonleaf(void) {
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_free(ndd); toku_free(ndd);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }
@@ -431,9 +431,9 @@ test_serialize_leaf(void) {
le_malloc(BLB_DATA(&sn, 0), 1, "b", "bval"); le_malloc(BLB_DATA(&sn, 0), 1, "b", "bval");
le_malloc(BLB_DATA(&sn, 1), 0, "x", "xval"); le_malloc(BLB_DATA(&sn, 1), 0, "x", "xval");
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -441,33 +441,33 @@ test_serialize_leaf(void) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false); r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
assert(r==0); assert(r==0);
test1(fd, brt_h, &dn); test1(fd, ft_h, &dn);
test3_leaf(fd, brt_h,&dn); test3_leaf(fd, ft_h,&dn);
for (int i = 0; i < sn.n_children-1; ++i) { for (int i = 0; i < sn.n_children-1; ++i) {
toku_free(sn.childkeys[i].data); toku_free(sn.childkeys[i].data);
@@ -478,11 +478,11 @@ test_serialize_leaf(void) {
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(ndd); toku_free(ndd);
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }

View file

@@ -181,9 +181,9 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
} }
} }
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -191,25 +191,25 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
brt_h->compare_fun = long_key_cmp; ft_h->compare_fun = long_key_cmp;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
@@ -217,7 +217,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
struct timeval t[2]; struct timeval t[2];
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, &ndd, true, brt->ft, false); r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, &ndd, true, ft->ft, false);
assert(r==0); assert(r==0);
gettimeofday(&t[1], NULL); gettimeofday(&t[1], NULL);
double dt; double dt;
@@ -225,7 +225,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
printf("serialize leaf: %0.05lf\n", dt); printf("serialize leaf: %0.05lf\n", dt);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt_h); fill_bfe_for_full_read(&bfe, ft_h);
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd2 = NULL; FTNODE_DISK_DATA ndd2 = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe);
@@ -242,11 +242,11 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
toku_ftnode_free(&dn); toku_ftnode_free(&dn);
toku_ftnode_free(&sn); toku_ftnode_free(&sn);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(ndd); toku_free(ndd);
toku_free(ndd2); toku_free(ndd2);
@@ -312,9 +312,9 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
xids_destroy(&xids_0); xids_destroy(&xids_0);
xids_destroy(&xids_123); xids_destroy(&xids_123);
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -322,25 +322,25 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
brt_h->compare_fun = long_key_cmp; ft_h->compare_fun = long_key_cmp;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
@@ -348,7 +348,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
struct timeval t[2]; struct timeval t[2];
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false); r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
assert(r==0); assert(r==0);
gettimeofday(&t[1], NULL); gettimeofday(&t[1], NULL);
double dt; double dt;
@@ -356,7 +356,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
printf("serialize nonleaf: %0.05lf\n", dt); printf("serialize nonleaf: %0.05lf\n", dt);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt_h); fill_bfe_for_full_read(&bfe, ft_h);
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd2 = NULL; FTNODE_DISK_DATA ndd2 = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe);
@@ -381,11 +381,11 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(ndd); toku_free(ndd);
toku_free(ndd2); toku_free(ndd2);

View file
@@ -92,9 +92,9 @@ PATENT RIGHTS GRANT:
#include "test.h" #include "test.h"
// create a brt and put n rows into it // create a ft and put n rows into it
// write the brt to the file // write the ft to the file
// verify the rows in the brt // verify the rows in the ft
static void test_sub_block(int n) { static void test_sub_block(int n) {
if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n); if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
@@ -107,14 +107,14 @@ static void test_sub_block(int n) {
int error; int error;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int i; int i;
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
error = toku_open_ft_handle(fname, true, &brt, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun); error = toku_open_ft_handle(fname, true, &ft, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
assert(error == 0); assert(error == 0);
// insert keys 0, 1, 2, .. (n-1) // insert keys 0, 1, 2, .. (n-1)
@@ -124,20 +124,20 @@ static void test_sub_block(int n) {
DBT key, val; DBT key, val;
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, 0); toku_ft_insert(ft, &key, &val, 0);
assert(error == 0); assert(error == 0);
} }
// write to the file // write to the file
error = toku_close_ft_handle_nolsn(brt, 0); error = toku_close_ft_handle_nolsn(ft, 0);
assert(error == 0); assert(error == 0);
// verify the brt by walking a cursor through the rows // verify the ft by walking a cursor through the rows
error = toku_open_ft_handle(fname, false, &brt, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun); error = toku_open_ft_handle(fname, false, &ft, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
assert(error == 0); assert(error == 0);
FT_CURSOR cursor; FT_CURSOR cursor;
error = toku_ft_cursor(brt, &cursor, NULL, false, false); error = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(error == 0); assert(error == 0);
for (i=0; ; i++) { for (i=0; ; i++) {
@@ -155,7 +155,7 @@ static void test_sub_block(int n) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
error = toku_close_ft_handle_nolsn(brt, 0); error = toku_close_ft_handle_nolsn(ft, 0);
assert(error == 0); assert(error == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);

View file
@@ -166,18 +166,18 @@ string_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
} }
static void static void
setup_dn(enum ftnode_verify_type bft, int fd, FT brt_h, FTNODE *dn, FTNODE_DISK_DATA* ndd) { setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_DATA* ndd) {
int r; int r;
brt_h->compare_fun = string_key_cmp; ft_h->compare_fun = string_key_cmp;
if (bft == read_all) { if (bft == read_all) {
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt_h); fill_bfe_for_full_read(&bfe, ft_h);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe);
assert(r==0); assert(r==0);
} }
else if (bft == read_compressed || bft == read_none) { else if (bft == read_compressed || bft == read_none) {
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, brt_h); fill_bfe_for_min_read(&bfe, ft_h);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe);
assert(r==0); assert(r==0);
// assert all bp's are compressed or on disk. // assert all bp's are compressed or on disk.
@@ -187,7 +187,7 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT brt_h, FTNODE *dn, FTNODE_DISK_
// if read_none, get rid of the compressed bp's // if read_none, get rid of the compressed bp's
if (bft == read_none) { if (bft == read_none) {
if ((*dn)->height == 0) { if ((*dn)->height == 0) {
toku_ftnode_pe_callback(*dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
// assert all bp's are on disk // assert all bp's are on disk
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
if ((*dn)->height == 0) { if ((*dn)->height == 0) {
@@ -204,7 +204,7 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT brt_h, FTNODE *dn, FTNODE_DISK_
// that it is available // that it is available
// then run partial eviction to get it compressed // then run partial eviction to get it compressed
PAIR_ATTR attr; PAIR_ATTR attr;
fill_bfe_for_full_read(&bfe, brt_h); fill_bfe_for_full_read(&bfe, ft_h);
assert(toku_ftnode_pf_req_callback(*dn, &bfe)); assert(toku_ftnode_pf_req_callback(*dn, &bfe));
r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr); r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr);
assert(r==0); assert(r==0);
@@ -212,21 +212,21 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT brt_h, FTNODE *dn, FTNODE_DISK_
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
} }
toku_ftnode_pe_callback(*dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
// assert all bp's are still available, because we touched the clock // assert all bp's are still available, because we touched the clock
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
// now assert all should be evicted // now assert all should be evicted
assert(BP_SHOULD_EVICT(*dn, i)); assert(BP_SHOULD_EVICT(*dn, i));
} }
toku_ftnode_pe_callback(*dn, make_pair_attr(0xffffffff), brt_h, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(*dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_COMPRESSED); assert(BP_STATE(*dn,i) == PT_COMPRESSED);
} }
} }
} }
// now decompress them // now decompress them
fill_bfe_for_full_read(&bfe, brt_h); fill_bfe_for_full_read(&bfe, ft_h);
assert(toku_ftnode_pf_req_callback(*dn, &bfe)); assert(toku_ftnode_pf_req_callback(*dn, &bfe));
PAIR_ATTR attr; PAIR_ATTR attr;
r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr); r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr);
@@ -243,20 +243,20 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT brt_h, FTNODE *dn, FTNODE_DISK_
} }
} }
static void write_sn_to_disk(int fd, FT_HANDLE brt, FTNODE sn, FTNODE_DISK_DATA* src_ndd, bool do_clone) { static void write_sn_to_disk(int fd, FT_HANDLE ft, FTNODE sn, FTNODE_DISK_DATA* src_ndd, bool do_clone) {
int r; int r;
if (do_clone) { if (do_clone) {
void* cloned_node_v = NULL; void* cloned_node_v = NULL;
PAIR_ATTR attr; PAIR_ATTR attr;
long clone_size; long clone_size;
toku_ftnode_clone_callback(sn, &cloned_node_v, &clone_size, &attr, false, brt->ft); toku_ftnode_clone_callback(sn, &cloned_node_v, &clone_size, &attr, false, ft->ft);
FTNODE CAST_FROM_VOIDP(cloned_node, cloned_node_v); FTNODE CAST_FROM_VOIDP(cloned_node, cloned_node_v);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), cloned_node, src_ndd, false, brt->ft, false); r = toku_serialize_ftnode_to(fd, make_blocknum(20), cloned_node, src_ndd, false, ft->ft, false);
assert(r==0); assert(r==0);
toku_ftnode_free(&cloned_node); toku_ftnode_free(&cloned_node);
} }
else { else {
r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, src_ndd, true, brt->ft, false); r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, src_ndd, true, ft->ft, false);
assert(r==0); assert(r==0);
} }
} }
@@ -297,9 +297,9 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
BLB_MAX_MSN_APPLIED(&sn, 0) = ((MSN) { MIN_MSN.msn + 73 }); BLB_MAX_MSN_APPLIED(&sn, 0) = ((MSN) { MIN_MSN.msn + 73 });
BLB_MAX_MSN_APPLIED(&sn, 1) = POSTSERIALIZE_MSN_ON_DISK; BLB_MAX_MSN_APPLIED(&sn, 1) = POSTSERIALIZE_MSN_ON_DISK;
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -307,33 +307,33 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA src_ndd = NULL; FTNODE_DISK_DATA src_ndd = NULL;
FTNODE_DISK_DATA dest_ndd = NULL; FTNODE_DISK_DATA dest_ndd = NULL;
write_sn_to_disk(fd, brt, &sn, &src_ndd, do_clone); write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
setup_dn(bft, fd, brt_h, &dn, &dest_ndd); setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -390,11 +390,11 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);
@@ -441,9 +441,9 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
} }
} }
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -451,32 +451,32 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA src_ndd = NULL; FTNODE_DISK_DATA src_ndd = NULL;
FTNODE_DISK_DATA dest_ndd = NULL; FTNODE_DISK_DATA dest_ndd = NULL;
write_sn_to_disk(fd, brt, &sn, &src_ndd, do_clone); write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
setup_dn(bft, fd, brt_h, &dn, &dest_ndd); setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -537,11 +537,11 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
} }
toku_free(sn.bp); toku_free(sn.bp);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);
@@ -579,9 +579,9 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
le_add_to_bn(BLB_DATA(&sn, 0), i, (char *) &key, sizeof(key), (char *) &val, sizeof(val)); le_add_to_bn(BLB_DATA(&sn, 0), i, (char *) &key, sizeof(key), (char *) &val, sizeof(val));
} }
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -589,33 +589,33 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA src_ndd = NULL; FTNODE_DISK_DATA src_ndd = NULL;
FTNODE_DISK_DATA dest_ndd = NULL; FTNODE_DISK_DATA dest_ndd = NULL;
write_sn_to_disk(fd, brt, &sn, &src_ndd, do_clone); write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
setup_dn(bft, fd, brt_h, &dn, &dest_ndd); setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -678,11 +678,11 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);
@@ -727,9 +727,9 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
le_add_to_bn(BLB_DATA(&sn, 0), i,key, 8, val, val_size); le_add_to_bn(BLB_DATA(&sn, 0), i,key, 8, val, val_size);
} }
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -737,33 +737,33 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA src_ndd = NULL; FTNODE_DISK_DATA src_ndd = NULL;
FTNODE_DISK_DATA dest_ndd = NULL; FTNODE_DISK_DATA dest_ndd = NULL;
write_sn_to_disk(fd, brt, &sn, &src_ndd, do_clone); write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
setup_dn(bft, fd, brt_h, &dn, &dest_ndd); setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -828,11 +828,11 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);
@@ -876,9 +876,9 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
le_add_to_bn(BLB_DATA(&sn, 3), 0, "b", 2, "bval", 5); le_add_to_bn(BLB_DATA(&sn, 3), 0, "b", 2, "bval", 5);
le_add_to_bn(BLB_DATA(&sn, 5), 0, "x", 2, "xval", 5); le_add_to_bn(BLB_DATA(&sn, 5), 0, "x", 2, "xval", 5);
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -886,32 +886,32 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA src_ndd = NULL; FTNODE_DISK_DATA src_ndd = NULL;
FTNODE_DISK_DATA dest_ndd = NULL; FTNODE_DISK_DATA dest_ndd = NULL;
write_sn_to_disk(fd, brt, &sn, &src_ndd, do_clone); write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
setup_dn(bft, fd, brt_h, &dn, &dest_ndd); setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -966,11 +966,11 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);
@@ -1005,9 +1005,9 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
set_BLB(&sn, i, toku_create_empty_bn()); set_BLB(&sn, i, toku_create_empty_bn());
} }
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -1015,33 +1015,33 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA src_ndd = NULL; FTNODE_DISK_DATA src_ndd = NULL;
FTNODE_DISK_DATA dest_ndd = NULL; FTNODE_DISK_DATA dest_ndd = NULL;
write_sn_to_disk(fd, brt, &sn, &src_ndd, do_clone); write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
setup_dn(bft, fd, brt_h, &dn, &dest_ndd); setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -1073,11 +1073,11 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);
@@ -1131,9 +1131,9 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
xids_destroy(&xids_123); xids_destroy(&xids_123);
xids_destroy(&xids_234); xids_destroy(&xids_234);
FT_HANDLE XMALLOC(brt); FT_HANDLE XMALLOC(ft);
FT XCALLOC(brt_h); FT XCALLOC(ft_h);
toku_ft_init(brt_h, toku_ft_init(ft_h,
make_blocknum(0), make_blocknum(0),
ZERO_LSN, ZERO_LSN,
TXNID_NONE, TXNID_NONE,
@@ -1141,32 +1141,32 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
brt->ft = brt_h; ft->ft = ft_h;
toku_blocktable_create_new(&brt_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
BLOCKNUM b = make_blocknum(0); BLOCKNUM b = make_blocknum(0);
while (b.b < 20) { while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h); toku_allocate_blocknum(ft_h->blocktable, &b, ft_h);
} }
assert(b.b == 20); assert(b.b == 20);
{ {
DISKOFF offset; DISKOFF offset;
DISKOFF size; DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false); toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size); toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
FTNODE_DISK_DATA src_ndd = NULL; FTNODE_DISK_DATA src_ndd = NULL;
FTNODE_DISK_DATA dest_ndd = NULL; FTNODE_DISK_DATA dest_ndd = NULL;
write_sn_to_disk(fd, brt, &sn, &src_ndd, do_clone); write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
setup_dn(bft, fd, brt_h, &dn, &dest_ndd); setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -1197,11 +1197,11 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(brt_h->h); toku_free(ft_h->h);
toku_free(brt_h); toku_free(ft_h);
toku_free(brt); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);

View file
@@ -113,14 +113,14 @@ static void test_multiple_ft_cursor_dbts(int n) {
int r; int r;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursors[n]; FT_CURSOR cursors[n];
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r==0); assert(r==0);
int i; int i;
@@ -129,14 +129,14 @@ static void test_multiple_ft_cursor_dbts(int n) {
char key[10],val[10]; char key[10],val[10];
snprintf(key, sizeof key, "k%04d", i); snprintf(key, sizeof key, "k%04d", i);
snprintf(val, sizeof val, "v%04d", i); snprintf(val, sizeof val, "v%04d", i);
toku_ft_insert(brt, toku_ft_insert(ft,
toku_fill_dbt(&kbt, key, 1+strlen(key)), toku_fill_dbt(&kbt, key, 1+strlen(key)),
toku_fill_dbt(&vbt, val, 1+strlen(val)), toku_fill_dbt(&vbt, val, 1+strlen(val)),
0); 0);
} }
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
r = toku_ft_cursor(brt, &cursors[i], NULL, false, false); r = toku_ft_cursor(ft, &cursors[i], NULL, false, false);
assert(r == 0); assert(r == 0);
} }
@@ -166,7 +166,7 @@ static void test_multiple_ft_cursor_dbts(int n) {
toku_free(ptrs[i]); toku_free(ptrs[i]);
} }
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);

View file
@@ -102,11 +102,11 @@ static int test_ft_cursor_keycompare(DB *desc __attribute__((unused)), const DBT
return toku_keycompare(a->data, a->size, b->data, b->size); return toku_keycompare(a->data, a->size, b->data, b->size);
} }
static void assert_cursor_notfound(FT_HANDLE brt, int position) { static void assert_cursor_notfound(FT_HANDLE ft, int position) {
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int r; int r;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
struct check_pair pair = {0,0,0,0,0}; struct check_pair pair = {0,0,0,0,0};
@@ -117,11 +117,11 @@ static void assert_cursor_notfound(FT_HANDLE brt, int position) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
} }
static void assert_cursor_value(FT_HANDLE brt, int position, long long value) { static void assert_cursor_value(FT_HANDLE ft, int position, long long value) {
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int r; int r;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
if (test_cursor_debug && verbose) printf("key: "); if (test_cursor_debug && verbose) printf("key: ");
@@ -133,11 +133,11 @@ static void assert_cursor_value(FT_HANDLE brt, int position, long long value) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
} }
static void assert_cursor_first_last(FT_HANDLE brt, long long firstv, long long lastv) { static void assert_cursor_first_last(FT_HANDLE ft, long long firstv, long long lastv) {
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int r; int r;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
if (test_cursor_debug && verbose) printf("first key: "); if (test_cursor_debug && verbose) printf("first key: ");
@@ -162,7 +162,7 @@ static void assert_cursor_first_last(FT_HANDLE brt, long long firstv, long long
static void test_ft_cursor_first(int n) { static void test_ft_cursor_first(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
int i; int i;
@@ -172,7 +172,7 @@ static void test_ft_cursor_first(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert a bunch of kv pairs */ /* insert a bunch of kv pairs */
@@ -184,15 +184,15 @@ static void test_ft_cursor_first(int n) {
toku_fill_dbt(&kbt, key, strlen(key)+1); toku_fill_dbt(&kbt, key, strlen(key)+1);
v = i; v = i;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
} }
if (n == 0) if (n == 0)
assert_cursor_notfound(brt, DB_FIRST); assert_cursor_notfound(ft, DB_FIRST);
else else
assert_cursor_value(brt, DB_FIRST, 0); assert_cursor_value(ft, DB_FIRST, 0);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@@ -200,7 +200,7 @@ static void test_ft_cursor_first(int n) {
static void test_ft_cursor_last(int n) { static void test_ft_cursor_last(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
int i; int i;
@@ -210,7 +210,7 @@ static void test_ft_cursor_last(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert keys 0, 1, .. (n-1) */ /* insert keys 0, 1, .. (n-1) */
@@ -222,16 +222,16 @@ static void test_ft_cursor_last(int n) {
toku_fill_dbt(&kbt, key, strlen(key)+1); toku_fill_dbt(&kbt, key, strlen(key)+1);
v = i; v = i;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
assert(r==0); assert(r==0);
} }
if (n == 0) if (n == 0)
assert_cursor_notfound(brt, DB_LAST); assert_cursor_notfound(ft, DB_LAST);
else else
assert_cursor_value(brt, DB_LAST, n-1); assert_cursor_value(ft, DB_LAST, n-1);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@@ -239,7 +239,7 @@ static void test_ft_cursor_last(int n) {
static void test_ft_cursor_first_last(int n) { static void test_ft_cursor_first_last(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
int i; int i;
@@ -249,7 +249,7 @@ static void test_ft_cursor_first_last(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert a bunch of kv pairs */ /* insert a bunch of kv pairs */
@@ -262,16 +262,16 @@ static void test_ft_cursor_first_last(int n) {
v = i; v = i;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
} }
if (n == 0) { if (n == 0) {
assert_cursor_notfound(brt, DB_FIRST); assert_cursor_notfound(ft, DB_FIRST);
assert_cursor_notfound(brt, DB_LAST); assert_cursor_notfound(ft, DB_LAST);
} else } else
assert_cursor_first_last(brt, 0, n-1); assert_cursor_first_last(ft, 0, n-1);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@@ -281,7 +281,7 @@ static void test_ft_cursor_first_last(int n) {
static void test_ft_cursor_rfirst(int n) { static void test_ft_cursor_rfirst(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
int i; int i;
@@ -291,7 +291,7 @@ static void test_ft_cursor_rfirst(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert keys n-1, n-2, ... , 0 */ /* insert keys n-1, n-2, ... , 0 */
@@ -304,26 +304,26 @@ static void test_ft_cursor_rfirst(int n) {
toku_fill_dbt(&kbt, key, strlen(key)+1); toku_fill_dbt(&kbt, key, strlen(key)+1);
v = i; v = i;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
} }
if (n == 0) if (n == 0)
assert_cursor_notfound(brt, DB_FIRST); assert_cursor_notfound(ft, DB_FIRST);
else else
assert_cursor_value(brt, DB_FIRST, 0); assert_cursor_value(ft, DB_FIRST, 0);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
} }
static void assert_cursor_walk(FT_HANDLE brt, int n) { static void assert_cursor_walk(FT_HANDLE ft, int n) {
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int i; int i;
int r; int r;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
if (test_cursor_debug && verbose) printf("key: "); if (test_cursor_debug && verbose) printf("key: ");
@@ -345,7 +345,7 @@ static void assert_cursor_walk(FT_HANDLE brt, int n) {
static void test_ft_cursor_walk(int n) { static void test_ft_cursor_walk(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
int i; int i;
@@ -355,7 +355,7 @@ static void test_ft_cursor_walk(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert a bunch of kv pairs */ /* insert a bunch of kv pairs */
@@ -367,25 +367,25 @@ static void test_ft_cursor_walk(int n) {
toku_fill_dbt(&kbt, key, strlen(key)+1); toku_fill_dbt(&kbt, key, strlen(key)+1);
v = i; v = i;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
} }
/* walk the tree */ /* walk the tree */
assert_cursor_walk(brt, n); assert_cursor_walk(ft, n);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
} }
static void assert_cursor_rwalk(FT_HANDLE brt, int n) { static void assert_cursor_rwalk(FT_HANDLE ft, int n) {
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int i; int i;
int r; int r;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
if (test_cursor_debug && verbose) printf("key: "); if (test_cursor_debug && verbose) printf("key: ");
@@ -407,7 +407,7 @@ static void assert_cursor_rwalk(FT_HANDLE brt, int n) {
static void test_ft_cursor_rwalk(int n) { static void test_ft_cursor_rwalk(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
int i; int i;
@@ -417,7 +417,7 @@ static void test_ft_cursor_rwalk(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert a bunch of kv pairs */ /* insert a bunch of kv pairs */
@@ -429,13 +429,13 @@ static void test_ft_cursor_rwalk(int n) {
toku_fill_dbt(&kbt, &k, sizeof k); toku_fill_dbt(&kbt, &k, sizeof k);
v = i; v = i;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
} }
/* walk the tree */ /* walk the tree */
assert_cursor_rwalk(brt, n); assert_cursor_rwalk(ft, n);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@@ -462,13 +462,13 @@ ascending_key_string_checkf (ITEMLEN keylen, bytevec key, ITEMLEN UU(vallen), by
} }
// The keys are strings (null terminated) // The keys are strings (null terminated)
static void assert_cursor_walk_inorder(FT_HANDLE brt, int n) { static void assert_cursor_walk_inorder(FT_HANDLE ft, int n) {
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int i; int i;
int r; int r;
char *prevkey = 0; char *prevkey = 0;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
if (test_cursor_debug && verbose) printf("key: "); if (test_cursor_debug && verbose) printf("key: ");
@@ -488,7 +488,7 @@ static void assert_cursor_walk_inorder(FT_HANDLE brt, int n) {
static void test_ft_cursor_rand(int n) { static void test_ft_cursor_rand(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
int i; int i;
@@ -498,7 +498,7 @@ static void test_ft_cursor_rand(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert a bunch of kv pairs */ /* insert a bunch of kv pairs */
@@ -513,22 +513,22 @@ static void test_ft_cursor_rand(int n) {
v = i; v = i;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
struct check_pair pair = {kbt.size, key, len_ignore, 0, 0}; struct check_pair pair = {kbt.size, key, len_ignore, 0, 0};
r = toku_ft_lookup(brt, &kbt, lookup_checkf, &pair); r = toku_ft_lookup(ft, &kbt, lookup_checkf, &pair);
if (r == 0) { if (r == 0) {
assert(pair.call_count==1); assert(pair.call_count==1);
if (verbose) printf("dup"); if (verbose) printf("dup");
continue; continue;
} }
assert(pair.call_count==0); assert(pair.call_count==0);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
break; break;
} }
} }
/* walk the tree */ /* walk the tree */
assert_cursor_walk_inorder(brt, n); assert_cursor_walk_inorder(ft, n);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -536,7 +536,7 @@ static void test_ft_cursor_rand(int n) {
static void test_ft_cursor_split(int n) { static void test_ft_cursor_split(int n) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int r; int r;
int keyseqnum; int keyseqnum;
@ -548,7 +548,7 @@ static void test_ft_cursor_split(int n) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
/* insert a bunch of kv pairs */ /* insert a bunch of kv pairs */
@ -560,10 +560,10 @@ static void test_ft_cursor_split(int n) {
toku_fill_dbt(&kbt, key, strlen(key)+1); toku_fill_dbt(&kbt, key, strlen(key)+1);
v = keyseqnum; v = keyseqnum;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
} }
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
if (test_cursor_debug && verbose) printf("key: "); if (test_cursor_debug && verbose) printf("key: ");
@ -583,7 +583,7 @@ static void test_ft_cursor_split(int n) {
toku_fill_dbt(&kbt, key, strlen(key)+1); toku_fill_dbt(&kbt, key, strlen(key)+1);
v = keyseqnum; v = keyseqnum;
toku_fill_dbt(&vbt, &v, sizeof v); toku_fill_dbt(&vbt, &v, sizeof v);
toku_ft_insert(brt, &kbt, &vbt, 0); toku_ft_insert(ft, &kbt, &vbt, 0);
} }
if (test_cursor_debug && verbose) printf("key: "); if (test_cursor_debug && verbose) printf("key: ");
@ -601,7 +601,7 @@ static void test_ft_cursor_split(int n) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -612,19 +612,19 @@ static void test_multiple_ft_cursors(int n) {
int r; int r;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursors[n]; FT_CURSOR cursors[n];
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
int i; int i;
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
r = toku_ft_cursor(brt, &cursors[i], NULL, false, false); r = toku_ft_cursor(ft, &cursors[i], NULL, false, false);
assert(r == 0); assert(r == 0);
} }
@ -632,7 +632,7 @@ static void test_multiple_ft_cursors(int n) {
toku_ft_cursor_close(cursors[i]); toku_ft_cursor_close(cursors[i]);
} }
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -653,7 +653,7 @@ static void test_multiple_ft_cursor_walk(int n) {
int r; int r;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
const int cursor_gap = 1000; const int cursor_gap = 1000;
const int ncursors = n/cursor_gap; const int ncursors = n/cursor_gap;
FT_CURSOR cursors[ncursors]; FT_CURSOR cursors[ncursors];
@ -665,13 +665,13 @@ static void test_multiple_ft_cursor_walk(int n) {
int cachesize = 2 * h * ncursors * nodesize; int cachesize = 2 * h * ncursors * nodesize;
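// Worked example of the sizing above (illustrative numbers, not taken from this test):
// with n = 10000 and cursor_gap = 1000, ncursors = 10, so the cache is sized to hold
// roughly two root-to-leaf paths (h nodes of nodesize bytes each) for every cursor.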
toku_cachetable_create(&ct, cachesize, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, cachesize, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
int c; int c;
/* create the cursors */ /* create the cursors */
for (c=0; c<ncursors; c++) { for (c=0; c<ncursors; c++) {
r = toku_ft_cursor(brt, &cursors[c], NULL, false, false); r = toku_ft_cursor(ft, &cursors[c], NULL, false, false);
assert(r == 0); assert(r == 0);
} }
@ -686,7 +686,7 @@ static void test_multiple_ft_cursor_walk(int n) {
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, 0); toku_ft_insert(ft, &key, &val, 0);
} }
/* point cursor i / cursor_gap to the current last key i */ /* point cursor i / cursor_gap to the current last key i */
@ -720,7 +720,7 @@ static void test_multiple_ft_cursor_walk(int n) {
toku_ft_cursor_close(cursors[i]); toku_ft_cursor_close(cursors[i]);
} }
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -731,14 +731,14 @@ static void test_ft_cursor_set(int n, int cursor_op) {
int r; int r;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
int i; int i;
@ -750,10 +750,10 @@ static void test_ft_cursor_set(int n, int cursor_op) {
DBT key,val; DBT key,val;
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, 0); toku_ft_insert(ft, &key, &val, 0);
} }
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
/* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */ /* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */
@ -788,7 +788,7 @@ static void test_ft_cursor_set(int n, int cursor_op) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -799,14 +799,14 @@ static void test_ft_cursor_set_range(int n) {
int r; int r;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(r==0); assert(r==0);
int i; int i;
@ -819,10 +819,10 @@ static void test_ft_cursor_set_range(int n) {
DBT key, val; DBT key, val;
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, 0); toku_ft_insert(ft, &key, &val, 0);
} }
r = toku_ft_cursor(brt, &cursor, NULL, false, false); r = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(r==0); assert(r==0);
/* pick random keys v in 0 <= v < 10*n, the cursor should point /* pick random keys v in 0 <= v < 10*n, the cursor should point
@ -848,7 +848,7 @@ static void test_ft_cursor_set_range(int n) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -859,17 +859,17 @@ static void test_ft_cursor_delete(int n) {
int error; int error;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(error == 0); assert(error == 0);
error = toku_ft_cursor(brt, &cursor, NULL, false, false); error = toku_ft_cursor(ft, &cursor, NULL, false, false);
assert(error == 0); assert(error == 0);
DBT key, val; DBT key, val;
@ -882,7 +882,7 @@ static void test_ft_cursor_delete(int n) {
v = i; v = i;
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, 0); toku_ft_insert(ft, &key, &val, 0);
} }
/* walk the tree and delete under the cursor */ /* walk the tree and delete under the cursor */
@ -905,7 +905,7 @@ static void test_ft_cursor_delete(int n) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
error = toku_close_ft_handle_nolsn(brt, 0); error = toku_close_ft_handle_nolsn(ft, 0);
assert(error == 0); assert(error == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
View file
@ -92,7 +92,7 @@ PATENT RIGHTS GRANT:
#include "test.h" #include "test.h"
// The purpose of this test is to verify that certain information in the // The purpose of this test is to verify that certain information in the
// brt_header is properly serialized and deserialized. // ft_header is properly serialized and deserialized.
static TOKUTXN const null_txn = 0; static TOKUTXN const null_txn = 0;
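To make that comment concrete, a header round-trip check generally follows the pattern below. This is a hedged sketch using only calls that appear elsewhere in this diff; the nodesize arguments and the field comparison are placeholders rather than the committed test body.

    // Sketch: closing the handle serializes the header; reopening deserializes it,
    // so any header field set at create time should survive the round trip.
    static void check_header_roundtrip(CACHETABLE ct, const char *fname) {
        FT_HANDLE ft = NULL;
        int r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9,
                                    TOKU_DEFAULT_COMPRESSION_METHOD, ct,
                                    null_txn, toku_builtin_compare_fun);
        assert(r == 0);
        r = toku_close_ft_handle_nolsn(ft, 0);   // header written out here
        assert(r == 0);
        r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9,
                                TOKU_DEFAULT_COMPRESSION_METHOD, ct,
                                null_txn, toku_builtin_compare_fun);
        assert(r == 0);
        // ... assert on the header fields under test here ...
        r = toku_close_ft_handle_nolsn(ft, 0);
        assert(r == 0);
    }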
View file
@ -214,7 +214,7 @@ static void test_multiple_ft_handles_one_db_one_file (void) {
/* Check to see if data can be read that was written. */ /* Check to see if data can be read that was written. */
static void test_read_what_was_written (void) { static void test_read_what_was_written (void) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
const int NVALS=10000; const int NVALS=10000;
@ -224,34 +224,34 @@ static void test_read_what_was_written (void) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
/* Now see if we can read an empty tree in. */ /* Now see if we can read an empty tree in. */
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 0, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
/* See if we can put something in it. */ /* See if we can put something in it. */
{ {
DBT k,v; DBT k,v;
toku_ft_insert(brt, toku_fill_dbt(&k, "hello", 6), toku_fill_dbt(&v, "there", 6), null_txn); toku_ft_insert(ft, toku_fill_dbt(&k, "hello", 6), toku_fill_dbt(&v, "there", 6), null_txn);
} }
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
/* Now see if we can read it in and get the value. */ /* Now see if we can read it in and get the value. */
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 0, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
ft_lookup_and_check_nodup(brt, "hello", "there"); ft_lookup_and_check_nodup(ft, "hello", "there");
assert(toku_verify_ft(brt)==0); assert(toku_verify_ft(ft)==0);
/* Now put a bunch (NVALS) of things in. */ /* Now put a bunch (NVALS) of things in. */
{ {
@ -262,14 +262,14 @@ static void test_read_what_was_written (void) {
snprintf(key, 100, "key%d", i); snprintf(key, 100, "key%d", i);
snprintf(val, 100, "val%d", i); snprintf(val, 100, "val%d", i);
if (i<600) { if (i<600) {
int verify_result=toku_verify_ft(brt);; int verify_result=toku_verify_ft(ft);;
assert(verify_result==0); assert(verify_result==0);
} }
toku_ft_insert(brt, toku_fill_dbt(&k, key, strlen(key)+1), toku_fill_dbt(&v, val, strlen(val)+1), null_txn); toku_ft_insert(ft, toku_fill_dbt(&k, key, strlen(key)+1), toku_fill_dbt(&v, val, strlen(val)+1), null_txn);
if (i<600) { if (i<600) {
int verify_result=toku_verify_ft(brt); int verify_result=toku_verify_ft(ft);
if (verify_result) { if (verify_result) {
r = toku_dump_ft(stdout, brt); r = toku_dump_ft(stdout, ft);
assert(r==0); assert(r==0);
assert(0); assert(0);
} }
@ -279,7 +279,7 @@ static void test_read_what_was_written (void) {
char expectedval[100]; char expectedval[100];
snprintf(key, 100, "key%d", j); snprintf(key, 100, "key%d", j);
snprintf(expectedval, 100, "val%d", j); snprintf(expectedval, 100, "val%d", j);
ft_lookup_and_check_nodup(brt, key, expectedval); ft_lookup_and_check_nodup(ft, key, expectedval);
} }
} }
} }
@ -287,9 +287,9 @@ static void test_read_what_was_written (void) {
} }
if (verbose) printf("Now read them out\n"); if (verbose) printf("Now read them out\n");
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r==0); assert(r==0);
//dump_ft(brt); //dump_ft(ft);
/* See if we can read them all out again. */ /* See if we can read them all out again. */
{ {
@ -298,31 +298,31 @@ static void test_read_what_was_written (void) {
char key[100],expectedval[100]; char key[100],expectedval[100];
snprintf(key, 100, "key%d", i); snprintf(key, 100, "key%d", i);
snprintf(expectedval, 100, "val%d", i); snprintf(expectedval, 100, "val%d", i);
ft_lookup_and_check_nodup(brt, key, expectedval); ft_lookup_and_check_nodup(ft, key, expectedval);
} }
} }
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
if (verbose) printf("%s:%d About to close %p\n", __FILE__, __LINE__, ct); if (verbose) printf("%s:%d About to close %p\n", __FILE__, __LINE__, ct);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 0, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
ft_lookup_and_check_nodup(brt, "hello", "there"); ft_lookup_and_check_nodup(ft, "hello", "there");
{ {
int i; int i;
for (i=0; i<NVALS; i++) { for (i=0; i<NVALS; i++) {
char key[100],expectedval[100]; char key[100],expectedval[100];
snprintf(key, 100, "key%d", i); snprintf(key, 100, "key%d", i);
snprintf(expectedval, 100, "val%d", i); snprintf(expectedval, 100, "val%d", i);
ft_lookup_and_check_nodup(brt, key, expectedval); ft_lookup_and_check_nodup(ft, key, expectedval);
} }
} }
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -334,7 +334,7 @@ static void test_read_what_was_written (void) {
/* Test c_get(DB_LAST) on an empty tree */ /* Test c_get(DB_LAST) on an empty tree */
static void test_cursor_last_empty(void) { static void test_cursor_last_empty(void) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int r; int r;
if (verbose) printf("%s", __FUNCTION__); if (verbose) printf("%s", __FUNCTION__);
@ -343,9 +343,9 @@ static void test_cursor_last_empty(void) {
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
{ {
struct check_pair pair = {0,0,0,0,0}; struct check_pair pair = {0,0,0,0,0};
r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_LAST); r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_LAST);
@ -359,7 +359,7 @@ static void test_cursor_last_empty(void) {
assert(r==DB_NOTFOUND); assert(r==DB_NOTFOUND);
} }
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
@ -368,7 +368,7 @@ static void test_cursor_last_empty(void) {
static void test_cursor_next (void) { static void test_cursor_next (void) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
int r; int r;
DBT kbt, vbt; DBT kbt, vbt;
@ -377,12 +377,12 @@ static void test_cursor_next (void) {
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
toku_ft_insert(brt, toku_fill_dbt(&kbt, "hello", 6), toku_fill_dbt(&vbt, "there", 6), null_txn); toku_ft_insert(ft, toku_fill_dbt(&kbt, "hello", 6), toku_fill_dbt(&vbt, "there", 6), null_txn);
toku_ft_insert(brt, toku_fill_dbt(&kbt, "byebye", 7), toku_fill_dbt(&vbt, "byenow", 7), null_txn); toku_ft_insert(ft, toku_fill_dbt(&kbt, "byebye", 7), toku_fill_dbt(&vbt, "byenow", 7), null_txn);
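// Note (illustrative, not part of the commit): toku_builtin_compare_fun orders keys
// byte-wise, so "byebye" sorts before "hello"; the DB_NEXT walk below should therefore
// return the byebye pair first even though it was inserted second.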
if (verbose) printf("%s:%d calling toku_ft_cursor(...)\n", __FILE__, __LINE__); if (verbose) printf("%s:%d calling toku_ft_cursor(...)\n", __FILE__, __LINE__);
r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
toku_init_dbt(&kbt); toku_init_dbt(&kbt);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
toku_init_dbt(&vbt); toku_init_dbt(&vbt);
@ -411,7 +411,7 @@ static void test_cursor_next (void) {
} }
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
@ -436,7 +436,7 @@ static int wrong_compare_fun(DB* UU(desc), const DBT *a, const DBT *b) {
static void test_wrongendian_compare (int wrong_p, unsigned int N) { static void test_wrongendian_compare (int wrong_p, unsigned int N) {
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
unsigned int i; unsigned int i;
@ -455,7 +455,7 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
//printf("%s:%d WRONG=%d\n", __FILE__, __LINE__, wrong_p); //printf("%s:%d WRONG=%d\n", __FILE__, __LINE__, wrong_p);
if (0) { // ???? Why is this commented out? if (0) { // ???? Why is this commented out?
r = toku_open_ft_handle(fname, 1, &brt, 1<<20, 1<<17, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, wrong_p ? wrong_compare_fun : toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, 1<<20, 1<<17, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, wrong_p ? wrong_compare_fun : toku_builtin_compare_fun); assert(r==0);
for (i=1; i<257; i+=255) { for (i=1; i<257; i+=255) {
unsigned char a[4],b[4]; unsigned char a[4],b[4];
b[3] = a[0] = (unsigned char)(i&255); b[3] = a[0] = (unsigned char)(i&255);
@ -470,11 +470,11 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
printf("%s:%d insert: %02x%02x%02x%02x -> %02x%02x%02x%02x\n", __FILE__, __LINE__, printf("%s:%d insert: %02x%02x%02x%02x -> %02x%02x%02x%02x\n", __FILE__, __LINE__,
((char*)kbt.data)[0], ((char*)kbt.data)[1], ((char*)kbt.data)[2], ((char*)kbt.data)[3], ((char*)kbt.data)[0], ((char*)kbt.data)[1], ((char*)kbt.data)[2], ((char*)kbt.data)[3],
((char*)vbt.data)[0], ((char*)vbt.data)[1], ((char*)vbt.data)[2], ((char*)vbt.data)[3]); ((char*)vbt.data)[0], ((char*)vbt.data)[1], ((char*)vbt.data)[2], ((char*)vbt.data)[3]);
toku_ft_insert(brt, &kbt, &vbt, null_txn); toku_ft_insert(ft, &kbt, &vbt, null_txn);
} }
{ {
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
for (i=0; i<2; i++) { for (i=0; i<2; i++) {
unsigned char a[4],b[4]; unsigned char a[4],b[4];
@ -489,13 +489,13 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
} }
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
} }
} }
{ {
toku_cachetable_verify(ct); toku_cachetable_verify(ct);
r = toku_open_ft_handle(fname, 1, &brt, 1<<20, 1<<17, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, wrong_p ? wrong_compare_fun : toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, 1<<20, 1<<17, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, wrong_p ? wrong_compare_fun : toku_builtin_compare_fun); assert(r==0);
toku_cachetable_verify(ct); toku_cachetable_verify(ct);
for (i=0; i<N; i++) { for (i=0; i<N; i++) {
@ -511,11 +511,11 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
if (0) printf("%s:%d insert: %02x%02x%02x%02x -> %02x%02x%02x%02x\n", __FILE__, __LINE__, if (0) printf("%s:%d insert: %02x%02x%02x%02x -> %02x%02x%02x%02x\n", __FILE__, __LINE__,
((unsigned char*)kbt.data)[0], ((unsigned char*)kbt.data)[1], ((unsigned char*)kbt.data)[2], ((unsigned char*)kbt.data)[3], ((unsigned char*)kbt.data)[0], ((unsigned char*)kbt.data)[1], ((unsigned char*)kbt.data)[2], ((unsigned char*)kbt.data)[3],
((unsigned char*)vbt.data)[0], ((unsigned char*)vbt.data)[1], ((unsigned char*)vbt.data)[2], ((unsigned char*)vbt.data)[3]); ((unsigned char*)vbt.data)[0], ((unsigned char*)vbt.data)[1], ((unsigned char*)vbt.data)[2], ((unsigned char*)vbt.data)[3]);
toku_ft_insert(brt, &kbt, &vbt, null_txn); toku_ft_insert(ft, &kbt, &vbt, null_txn);
toku_cachetable_verify(ct); toku_cachetable_verify(ct);
} }
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
for (i=0; i<N; i++) { for (i=0; i<N; i++) {
unsigned char a[4],b[4]; unsigned char a[4],b[4];
@ -530,7 +530,7 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
toku_cachetable_verify(ct); toku_cachetable_verify(ct);
} }
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r==0); assert(r==0);
} }
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -881,22 +881,22 @@ static void test_ft_delete(void) {
static void test_new_ft_cursor_create_close (void) { static void test_new_ft_cursor_create_close (void) {
int r; int r;
FT_HANDLE brt=0; FT_HANDLE ft=0;
int n = 8; int n = 8;
FT_CURSOR cursors[n]; FT_CURSOR cursors[n];
toku_ft_handle_create(&brt); toku_ft_handle_create(&ft);
int i; int i;
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
r = toku_ft_cursor(brt, &cursors[i], NULL, false, false); assert(r == 0); r = toku_ft_cursor(ft, &cursors[i], NULL, false, false); assert(r == 0);
} }
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
toku_ft_cursor_close(cursors[i]); toku_ft_cursor_close(cursors[i]);
} }
r = toku_close_ft_handle_nolsn(brt, 0); assert(r == 0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r == 0);
} }
static void test_new_ft_cursor_first(int n) { static void test_new_ft_cursor_first(int n) {
@ -1177,14 +1177,14 @@ static void test_new_ft_cursor_set_range(int n) {
int r; int r;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt=0; FT_HANDLE ft=0;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
unlink(fname); unlink(fname);
toku_ft_handle_create(&brt); toku_ft_handle_create(&ft);
toku_ft_handle_set_nodesize(brt, 4096); toku_ft_handle_set_nodesize(ft, 4096);
r = toku_ft_handle_open(brt, fname, 1, 1, ct, null_txn); assert(r==0); r = toku_ft_handle_open(ft, fname, 1, 1, ct, null_txn); assert(r==0);
int i; int i;
@ -1194,10 +1194,10 @@ static void test_new_ft_cursor_set_range(int n) {
DBT key, val; DBT key, val;
int k = toku_htonl(10*i); int k = toku_htonl(10*i);
int v = 10*i; int v = 10*i;
toku_ft_insert(brt, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0); toku_ft_insert(ft, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
} }
r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
/* pick random keys v in 0 <= v < 10*n, the cursor should point /* pick random keys v in 0 <= v < 10*n, the cursor should point
to the smallest key in the tree that is >= v */ to the smallest key in the tree that is >= v */
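A concrete reading of that comment (an illustration, not part of the committed test): the keys are stored through toku_htonl, so the default byte-wise comparator sees them in numeric order, and the answer for a probe v is simply v rounded up to the next multiple of 10.

    // Illustrative helper: expected DB_SET_RANGE result when the tree holds
    // exactly the keys { 0, 10, 20, ..., 10*(n-1) }.
    static int expected_set_range_key(int v, int n) {
        int k = 10 * ((v + 9) / 10);           // round v up to a multiple of 10
        return (k <= 10 * (n - 1)) ? k : -1;   // -1 stands in for DB_NOTFOUND past the end
    }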
@ -1226,7 +1226,7 @@ static void test_new_ft_cursor_set_range(int n) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
} }
@ -1236,14 +1236,14 @@ static void test_new_ft_cursor_set(int n, int cursor_op, DB *db) {
int r; int r;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor=0; FT_CURSOR cursor=0;
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0);
int i; int i;
@ -1252,10 +1252,10 @@ static void test_new_ft_cursor_set(int n, int cursor_op, DB *db) {
DBT key, val; DBT key, val;
int k = toku_htonl(10*i); int k = toku_htonl(10*i);
int v = 10*i; int v = 10*i;
toku_ft_insert(brt, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0); toku_ft_insert(ft, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
} }
r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
/* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */ /* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
@ -1287,7 +1287,7 @@ static void test_new_ft_cursor_set(int n, int cursor_op, DB *db) {
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
} }
View file
@ -158,19 +158,19 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail) {
// open the ft_loader. this runs the extractor. // open the ft_loader. this runs the extractor.
const int N = 1; const int N = 1;
FT_HANDLE brts[N]; FT_HANDLE fts[N];
DB* dbs[N]; DB* dbs[N];
const char *fnames[N]; const char *fnames[N];
ft_compare_func compares[N]; ft_compare_func compares[N];
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {
brts[i] = NULL; fts[i] = NULL;
dbs[i] = NULL; dbs[i] = NULL;
fnames[i] = ""; fnames[i] = "";
compares[i] = compare_int; compares[i] = compare_int;
} }
FTLOADER loader; FTLOADER loader;
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false); r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false);
assert(r == 0); assert(r == 0);
struct rowset *rowset[nrowsets]; struct rowset *rowset[nrowsets];
View file
@ -165,12 +165,12 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char
// open the ft_loader. this runs the extractor. // open the ft_loader. this runs the extractor.
const int N = 1; const int N = 1;
FT_HANDLE brts[N]; FT_HANDLE fts[N];
DB* dbs[N]; DB* dbs[N];
const char *fnames[N]; const char *fnames[N];
ft_compare_func compares[N]; ft_compare_func compares[N];
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {
brts[i] = NULL; fts[i] = NULL;
dbs[i] = NULL; dbs[i] = NULL;
fnames[i] = ""; fnames[i] = "";
compares[i] = compare_int; compares[i] = compare_int;
@ -180,7 +180,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char
sprintf(temp, "%s/%s", testdir, "tempXXXXXX"); sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
FTLOADER loader; FTLOADER loader;
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false); r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false);
assert(r == 0); assert(r == 0);
struct rowset *rowset[nrowsets]; struct rowset *rowset[nrowsets];
View file
@ -89,7 +89,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
// The purpose of this test is to test the extractor component of the brt loader. We insert rowsets into the extractor queue and verify temp files // The purpose of this test is to test the extractor component of the ft loader. We insert rowsets into the extractor queue and verify temp files
// after the extractor is finished. // after the extractor is finished.
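In outline, and with the step descriptions below standing in for helpers this file actually defines, the flow under test is:

    // Hedged outline of the extractor test (step names are placeholders):
    //   1. toku_ft_loader_open(...) starts the loader, which spawns the extractor.
    //   2. Build nrowsets rowsets of generated rows and hand each one to the
    //      extractor's input queue.
    //   3. After the last rowset, wait for the extractor to drain the queue.
    //   4. Verify that the temp files it produced are sorted runs containing
    //      exactly the rows that were inserted.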
#define DONT_DEPRECATE_MALLOC #define DONT_DEPRECATE_MALLOC
@ -387,12 +387,12 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) {
// open the ft_loader. this runs the extractor. // open the ft_loader. this runs the extractor.
const int N = 1; const int N = 1;
FT_HANDLE brts[N]; FT_HANDLE fts[N];
DB* dbs[N]; DB* dbs[N];
const char *fnames[N]; const char *fnames[N];
ft_compare_func compares[N]; ft_compare_func compares[N];
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {
brts[i] = NULL; fts[i] = NULL;
dbs[i] = NULL; dbs[i] = NULL;
fnames[i] = ""; fnames[i] = "";
compares[i] = compare_int; compares[i] = compare_int;
@ -402,7 +402,7 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) {
sprintf(temp, "%s/%s", testdir, "tempXXXXXX"); sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
FTLOADER loader; FTLOADER loader;
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, temp, ZERO_LSN, nullptr, true, 0, false); r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, temp, ZERO_LSN, nullptr, true, 0, false);
assert(r == 0); assert(r == 0);
struct rowset *rowset[nrowsets]; struct rowset *rowset[nrowsets];
View file
@ -385,7 +385,7 @@ static void test (const char *directory, bool is_error) {
} }
FTLOADER bl; FTLOADER bl;
FT_HANDLE *XCALLOC_N(N_DEST_DBS, brts); FT_HANDLE *XCALLOC_N(N_DEST_DBS, fts);
DB* *XCALLOC_N(N_DEST_DBS, dbs); DB* *XCALLOC_N(N_DEST_DBS, dbs);
const char **XMALLOC_N(N_DEST_DBS, new_fnames_in_env); const char **XMALLOC_N(N_DEST_DBS, new_fnames_in_env);
for (int i=0; i<N_DEST_DBS; i++) { for (int i=0; i<N_DEST_DBS; i++) {
@ -407,7 +407,7 @@ static void test (const char *directory, bool is_error) {
ct, ct,
(generate_row_for_put_func)NULL, (generate_row_for_put_func)NULL,
(DB*)NULL, (DB*)NULL,
N_DEST_DBS, brts, dbs, N_DEST_DBS, fts, dbs,
new_fnames_in_env, new_fnames_in_env,
bt_compare_functions, bt_compare_functions,
"tempxxxxxx", "tempxxxxxx",
@ -522,7 +522,7 @@ static void test (const char *directory, bool is_error) {
destroy_dbufio_fileset(bfs); destroy_dbufio_fileset(bfs);
toku_free(fnames); toku_free(fnames);
toku_free(fds); toku_free(fds);
toku_free(brts); toku_free(fts);
toku_free(dbs); toku_free(dbs);
toku_free(new_fnames_in_env); toku_free(new_fnames_in_env);
toku_free(bt_compare_functions); toku_free(bt_compare_functions);
View file
@ -126,12 +126,12 @@ static void test_loader_open(int ndbs) {
FTLOADER loader; FTLOADER loader;
// open the ft_loader. this runs the extractor. // open the ft_loader. this runs the extractor.
FT_HANDLE brts[ndbs]; FT_HANDLE fts[ndbs];
DB* dbs[ndbs]; DB* dbs[ndbs];
const char *fnames[ndbs]; const char *fnames[ndbs];
ft_compare_func compares[ndbs]; ft_compare_func compares[ndbs];
for (int i = 0; i < ndbs; i++) { for (int i = 0; i < ndbs; i++) {
brts[i] = NULL; fts[i] = NULL;
dbs[i] = NULL; dbs[i] = NULL;
fnames[i] = ""; fnames[i] = "";
compares[i] = my_compare; compares[i] = my_compare;
@ -143,7 +143,7 @@ static void test_loader_open(int ndbs) {
for (i = 0; ; i++) { for (i = 0; ; i++) {
set_my_malloc_trigger(i+1); set_my_malloc_trigger(i+1);
r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, brts, dbs, fnames, compares, "", ZERO_LSN, nullptr, true, 0, false); r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, fts, dbs, fnames, compares, "", ZERO_LSN, nullptr, true, 0, false);
if (r == 0) if (r == 0)
break; break;
} }
View file
@ -213,7 +213,7 @@ static int write_dbfile (char *tf_template, int n, char *output_name, bool expec
ft_loader_set_error_function(&bl.error_callback, NULL, NULL); ft_loader_set_error_function(&bl.error_callback, NULL, NULL);
ft_loader_set_poll_function(&bl.poll_callback, loader_poll_callback, NULL); ft_loader_set_poll_function(&bl.poll_callback, loader_poll_callback, NULL);
result = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16); result = toku_loader_write_ft_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
toku_set_func_malloc_only(NULL); toku_set_func_malloc_only(NULL);
toku_set_func_realloc_only(NULL); toku_set_func_realloc_only(NULL);
View file
@ -137,9 +137,9 @@ static void verify_dbfile(int n, const char *name) {
toku_ft_set_bt_compare(t, compare_ints); toku_ft_set_bt_compare(t, compare_ints);
r = toku_ft_handle_open(t, name, 0, 0, ct, null_txn); assert(r==0); r = toku_ft_handle_open(t, name, 0, 0, ct, null_txn); assert(r==0);
if (verbose) traceit("Verifying brt internals"); if (verbose) traceit("Verifying ft internals");
r = toku_verify_ft(t); r = toku_verify_ft(t);
if (verbose) traceit("Verified brt internals"); if (verbose) traceit("Verified ft internals");
FT_CURSOR cursor = NULL; FT_CURSOR cursor = NULL;
r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0); r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
@ -262,7 +262,7 @@ static void test_write_dbfile (char *tf_template, int n, char *output_name, TXNI
assert(fd>=0); assert(fd>=0);
if (verbose) traceit("write to file"); if (verbose) traceit("write to file");
r = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16); r = toku_loader_write_ft_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
assert(r==0); assert(r==0);
r = queue_destroy(q2); r = queue_destroy(q2);
View file
@ -425,7 +425,7 @@ static void test_merge_files (const char *tf_template, const char *output_name)
int fd = open(output_name, O_RDWR | O_CREAT | O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); int fd = open(output_name, O_RDWR | O_CREAT | O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO);
assert(fd>=0); assert(fd>=0);
r = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16); r = toku_loader_write_ft_from_q_in_C(&bl, &desc, fd, 1000, q, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
assert(r==0); assert(r==0);
destroy_merge_fileset(&fs); destroy_merge_fileset(&fs);
View file
@ -98,7 +98,7 @@ PATENT RIGHTS GRANT:
#define FILENAME "test0.ft" #define FILENAME "test0.ft"
static void test_it (int N) { static void test_it (int N) {
FT_HANDLE brt; FT_HANDLE ft;
int r; int r;
toku_os_recursive_delete(TOKU_TEST_FILENAME); toku_os_recursive_delete(TOKU_TEST_FILENAME);
r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r); r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r);
@ -119,18 +119,18 @@ static void test_it (int N) {
TOKUTXN txn; TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct); CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r); r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
r = toku_close_ft_handle_nolsn(brt, NULL); CKERR(r); r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
unsigned int rands[N]; unsigned int rands[N];
for (int i=0; i<N; i++) { for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 0, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
@ -141,19 +141,19 @@ static void test_it (int N) {
snprintf(key, sizeof(key), "key%x.%x", rands[i], i); snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
memset(val, 'v', sizeof(val)); memset(val, 'v', sizeof(val));
val[sizeof(val)-1]=0; val[sizeof(val)-1]=0;
toku_ft_insert(brt, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), txn); toku_ft_insert(ft, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), txn);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r); r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
r = toku_close_ft_handle_nolsn(brt, NULL); CKERR(r); r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
if (verbose) printf("i=%d\n", i); if (verbose) printf("i=%d\n", i);
} }
for (int i=0; i<N; i++) { for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 0, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
@ -161,11 +161,11 @@ static void test_it (int N) {
char key[100]; char key[100];
DBT k; DBT k;
snprintf(key, sizeof(key), "key%x.%x", rands[i], i); snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
toku_ft_delete(brt, toku_fill_dbt(&k, key, 1+strlen(key)), txn); toku_ft_delete(ft, toku_fill_dbt(&k, key, 1+strlen(key)), txn);
if (0) { if (0) {
bool is_empty; bool is_empty;
is_empty = toku_ft_is_empty_fast(brt); is_empty = toku_ft_is_empty_fast(ft);
assert(!is_empty); assert(!is_empty);
} }
@ -173,23 +173,23 @@ static void test_it (int N) {
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r); r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
r = toku_close_ft_handle_nolsn(brt, NULL); CKERR(r); r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
if (verbose) printf("d=%d\n", i); if (verbose) printf("d=%d\n", i);
} }
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 0, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
if (0) { if (0) {
bool is_empty; bool is_empty;
is_empty = toku_ft_is_empty_fast(brt); is_empty = toku_ft_is_empty_fast(ft);
assert(is_empty); assert(is_empty);
} }
r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r); r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
r = toku_close_ft_handle_nolsn(brt, NULL); CKERR(r); r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r); r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
toku_logger_close_rollback(logger); toku_logger_close_rollback(logger);
View file
@ -132,8 +132,8 @@ static void reload (uint64_t limit) {
enum memory_state { enum memory_state {
LEAVE_IN_MEMORY, // leave the state in main memory LEAVE_IN_MEMORY, // leave the state in main memory
CLOSE_AND_RELOAD, // close the brts and reload them into main memory (that will cause >1 partitio in many leaves.) CLOSE_AND_RELOAD, // close the fts and reload them into main memory (that will cause >1 partition in many leaves.)
CLOSE_AND_REOPEN_LEAVE_ON_DISK // close the brts, reopen them, but leave the state on disk. CLOSE_AND_REOPEN_LEAVE_ON_DISK // close the fts, reopen them, but leave the state on disk.
}; };
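For orientation only, since the body of maybe_reopen is not shown in this hunk: dispatching on that enum plausibly looks like the sketch below, under the assumption that reload() is the helper declared above.

    // Hedged sketch, not the committed implementation.
    static void maybe_reopen_sketch(enum memory_state ms, uint64_t limit) {
        switch (ms) {
        case LEAVE_IN_MEMORY:
            break;                 // keep everything cached in main memory
        case CLOSE_AND_RELOAD:
            reload(limit);         // close the trees and read them back in
            break;
        case CLOSE_AND_REOPEN_LEAVE_ON_DISK:
            // close and reopen the handles but leave node partitions on disk
            // until they are first touched
            break;
        }
    }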
static void maybe_reopen (enum memory_state ms, uint64_t limit) { static void maybe_reopen (enum memory_state ms, uint64_t limit) {
View file
@ -138,8 +138,8 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
assert(error == 0); assert(error == 0);
error = toku_txn_commit_txn(txn, true, NULL, NULL); error = toku_txn_commit_txn(txn, true, NULL, NULL);
@ -158,7 +158,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
DBT val; DBT val;
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, txn); toku_ft_insert(ft, &key, &val, txn);
assert(error == 0); assert(error == 0);
} }
@ -166,7 +166,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0); assert(error == 0);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
error = toku_close_ft_handle_nolsn(brt, NULL); error = toku_close_ft_handle_nolsn(ft, NULL);
assert(error == 0); assert(error == 0);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct); CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
@ -208,8 +208,8 @@ test_provdel(const char *logdir, const char *fname, int n) {
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
assert(error == 0); assert(error == 0);
error = toku_txn_commit_txn(txn, true, NULL, NULL); error = toku_txn_commit_txn(txn, true, NULL, NULL);
@ -225,7 +225,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
int k = toku_htonl(i); int k = toku_htonl(i);
DBT key; DBT key;
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
toku_ft_delete(brt, &key, txn); toku_ft_delete(ft, &key, txn);
assert(error == 0); assert(error == 0);
} }
@ -234,7 +234,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
assert(error == 0); assert(error == 0);
LE_CURSOR cursor = NULL; LE_CURSOR cursor = NULL;
error = toku_le_cursor_create(&cursor, brt, cursortxn); error = toku_le_cursor_create(&cursor, ft, cursortxn);
assert(error == 0); assert(error == 0);
DBT key; DBT key;
@ -267,7 +267,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
assert(error == 0); assert(error == 0);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
error = toku_close_ft_handle_nolsn(brt, NULL); error = toku_close_ft_handle_nolsn(ft, NULL);
assert(error == 0); assert(error == 0);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct); CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
View file
@ -142,8 +142,8 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_keycompare);
assert(error == 0); assert(error == 0);
error = toku_txn_commit_txn(txn, true, NULL, NULL); error = toku_txn_commit_txn(txn, true, NULL, NULL);
@ -162,14 +162,14 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
DBT val; DBT val;
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, txn); toku_ft_insert(ft, &key, &val, txn);
} }
error = toku_txn_commit_txn(txn, true, NULL, NULL); error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0); assert(error == 0);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
error = toku_close_ft_handle_nolsn(brt, NULL); error = toku_close_ft_handle_nolsn(ft, NULL);
assert(error == 0); assert(error == 0);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct); CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
@ -198,13 +198,13 @@ test_pos_infinity(const char *fname, int n) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare);
assert(error == 0); assert(error == 0);
// position the cursor at -infinity // position the cursor at -infinity
LE_CURSOR cursor = NULL; LE_CURSOR cursor = NULL;
error = toku_le_cursor_create(&cursor, brt, NULL); error = toku_le_cursor_create(&cursor, ft, NULL);
assert(error == 0); assert(error == 0);
for (int i = 0; i < 2*n; i++) { for (int i = 0; i < 2*n; i++) {
@ -217,7 +217,7 @@ test_pos_infinity(const char *fname, int n) {
toku_le_cursor_close(cursor); toku_le_cursor_close(cursor);
error = toku_close_ft_handle_nolsn(brt, 0); error = toku_close_ft_handle_nolsn(ft, 0);
assert(error == 0); assert(error == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -232,13 +232,13 @@ test_neg_infinity(const char *fname, int n) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare);
assert(error == 0); assert(error == 0);
// position the LE_CURSOR at +infinity // position the LE_CURSOR at +infinity
LE_CURSOR cursor = NULL; LE_CURSOR cursor = NULL;
error = toku_le_cursor_create(&cursor, brt, NULL); error = toku_le_cursor_create(&cursor, ft, NULL);
assert(error == 0); assert(error == 0);
DBT key; DBT key;
@ -271,7 +271,7 @@ test_neg_infinity(const char *fname, int n) {
toku_le_cursor_close(cursor); toku_le_cursor_close(cursor);
error = toku_close_ft_handle_nolsn(brt, 0); error = toku_close_ft_handle_nolsn(ft, 0);
assert(error == 0); assert(error == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
@ -286,13 +286,13 @@ test_between(const char *fname, int n) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare);
assert(error == 0); assert(error == 0);
// position the LE_CURSOR at +infinity // position the LE_CURSOR at +infinity
LE_CURSOR cursor = NULL; LE_CURSOR cursor = NULL;
error = toku_le_cursor_create(&cursor, brt, NULL); error = toku_le_cursor_create(&cursor, ft, NULL);
assert(error == 0); assert(error == 0);
DBT key; DBT key;
@ -337,7 +337,7 @@ test_between(const char *fname, int n) {
toku_le_cursor_close(cursor); toku_le_cursor_close(cursor);
error = toku_close_ft_handle_nolsn(brt, 0); error = toku_close_ft_handle_nolsn(ft, 0);
assert(error == 0); assert(error == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
View file
@ -139,8 +139,8 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
assert(error == 0); assert(error == 0);
error = toku_txn_commit_txn(txn, true, NULL, NULL); error = toku_txn_commit_txn(txn, true, NULL, NULL);
@ -159,14 +159,14 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_fill_dbt(&key, &k, sizeof k); toku_fill_dbt(&key, &k, sizeof k);
DBT val; DBT val;
toku_fill_dbt(&val, &v, sizeof v); toku_fill_dbt(&val, &v, sizeof v);
toku_ft_insert(brt, &key, &val, txn); toku_ft_insert(ft, &key, &val, txn);
} }
error = toku_txn_commit_txn(txn, true, NULL, NULL); error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0); assert(error == 0);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
error = toku_close_ft_handle_nolsn(brt, NULL); error = toku_close_ft_handle_nolsn(ft, NULL);
assert(error == 0); assert(error == 0);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct); CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
@ -194,12 +194,12 @@ walk_tree(const char *fname, int n) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(error == 0); assert(error == 0);
LE_CURSOR cursor = NULL; LE_CURSOR cursor = NULL;
error = toku_le_cursor_create(&cursor, brt, NULL); error = toku_le_cursor_create(&cursor, ft, NULL);
assert(error == 0); assert(error == 0);
DBT key; DBT key;
@ -224,7 +224,7 @@ walk_tree(const char *fname, int n) {
toku_le_cursor_close(cursor); toku_le_cursor_close(cursor);
error = toku_close_ft_handle_nolsn(brt, 0); error = toku_close_ft_handle_nolsn(ft, 0);
assert(error == 0); assert(error == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
View file
@ -103,10 +103,10 @@ PATENT RIGHTS GRANT:
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@ -146,29 +146,29 @@ populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
} }
static void static void
insert_into_child_buffer(FT_HANDLE brt, FTNODE node, int childnum, int minkey, int maxkey) { insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, int maxkey) {
for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) { for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) {
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
unsigned int key = htonl(val); unsigned int key = htonl(val);
DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
DBT theval; toku_fill_dbt(&theval, &val, sizeof val); DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
toku_ft_append_to_child_buffer(brt->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval);
node->max_msn_applied_to_node_on_disk = msn; node->max_msn_applied_to_node_on_disk = msn;
} }
} }
static FTNODE static FTNODE
make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) { make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
FTNODE node; FTNODE node;
if (height == 0) { if (height == 0) {
node = make_node(brt, 0); node = make_node(ft, 0);
populate_leaf(node, *seq, nperleaf, minkey, maxkey); populate_leaf(node, *seq, nperleaf, minkey, maxkey);
*seq += nperleaf; *seq += nperleaf;
} else { } else {
node = make_node(brt, height); node = make_node(ft, height);
int minkeys[fanout], maxkeys[fanout]; int minkeys[fanout], maxkeys[fanout];
for (int childnum = 0; childnum < fanout; childnum++) { for (int childnum = 0; childnum < fanout; childnum++) {
FTNODE child = make_tree(brt, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]); FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
if (childnum == 0) { if (childnum == 0) {
toku_ft_nonleaf_append_child(node, child, NULL); toku_ft_nonleaf_append_child(node, child, NULL);
} else { } else {
@ -176,8 +176,8 @@ make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *mi
DBT pivotkey; DBT pivotkey;
toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k)); toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
} }
toku_unpin_ftnode(brt->ft, child); toku_unpin_ftnode(ft->ft, child);
insert_into_child_buffer(brt, node, childnum, minkeys[childnum], maxkeys[childnum]); insert_into_child_buffer(ft, node, childnum, minkeys[childnum], maxkeys[childnum]);
} }
*minkey = minkeys[0]; *minkey = minkeys[0];
*maxkey = maxkeys[0]; *maxkey = maxkeys[0];
@ -211,31 +211,31 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// make a tree // make a tree
int seq = 0, minkey, maxkey; int seq = 0, minkey, maxkey;
FTNODE newroot = make_tree(brt, height, fanout, nperleaf, &seq, &minkey, &maxkey); FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
brt->ft->h->max_msn_in_ft = last_dummymsn(); // capture msn of last message injected into tree ft->ft->h->max_msn_in_ft = last_dummymsn(); // capture msn of last message injected into tree
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r == 0); assert(r == 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable
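Distilled from the test above, a hedged sketch of the root-swap step; it assumes `ft` is an open FT_HANDLE and `newroot` is the still-pinned FTNODE returned by make_tree().

#include <ft-cachetable-wrappers.h>
#include "test.h"

// Sketch only: install a hand-built subtree as the new root, then verify.
static void install_and_verify_root_sketch(FT_HANDLE ft, FTNODE newroot) {
    // point the header at the hand-built subtree
    toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
    // capture the msn of the last message injected into the tree
    ft->ft->h->max_msn_in_ft = last_dummymsn();
    // unpin the new root before verifying and closing
    toku_unpin_ftnode(ft->ft, newroot);
    int r = toku_verify_ft(ft);
    assert(r == 0);
}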
View file
@ -108,16 +108,16 @@ PATENT RIGHTS GRANT:
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
static void static void
append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *val, uint32_t vallen) { append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val, uint32_t vallen) {
assert(leafnode->height == 0); assert(leafnode->height == 0);
DBT thekey; toku_fill_dbt(&thekey, key, keylen); DBT thekey; toku_fill_dbt(&thekey, key, keylen);
@ -130,36 +130,36 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
brt->ft->h->max_msn_in_ft = msn; ft->ft->h->max_msn_in_ft = msn;
FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} };
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
toku_ft_leaf_apply_msg(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &msg, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg, &gc_info, nullptr, nullptr);
{ {
int r = toku_ft_lookup(brt, &thekey, lookup_checkf, &pair); int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
assert(r==0); assert(r==0);
assert(pair.call_count==1); assert(pair.call_count==1);
} }
FT_MSG_S badmsg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval }} }; FT_MSG_S badmsg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval }} };
toku_ft_leaf_apply_msg(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &badmsg, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &badmsg, &gc_info, nullptr, nullptr);
// message should be rejected for duplicate msn, row should still have original val // message should be rejected for duplicate msn, row should still have original val
{ {
int r = toku_ft_lookup(brt, &thekey, lookup_checkf, &pair); int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
assert(r==0); assert(r==0);
assert(pair.call_count==2); assert(pair.call_count==2);
} }
// now verify that message with proper msn gets through // now verify that message with proper msn gets through
msn = next_dummymsn(); msn = next_dummymsn();
brt->ft->h->max_msn_in_ft = msn; ft->ft->h->max_msn_in_ft = msn;
FT_MSG_S msg2 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &val2 }} }; FT_MSG_S msg2 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &val2 }} };
toku_ft_leaf_apply_msg(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &msg2, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg2, &gc_info, nullptr, nullptr);
// message should be accepted, val should have new value // message should be accepted, val should have new value
{ {
int r = toku_ft_lookup(brt, &thekey, lookup_checkf, &pair2); int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
assert(r==0); assert(r==0);
assert(pair2.call_count==1); assert(pair2.call_count==1);
} }
@ -167,11 +167,11 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
// now verify that message with lesser (older) msn is rejected // now verify that message with lesser (older) msn is rejected
msn.msn = msn.msn - 10; msn.msn = msn.msn - 10;
FT_MSG_S msg3 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval } }}; FT_MSG_S msg3 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval } }};
toku_ft_leaf_apply_msg(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &msg3, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg3, &gc_info, nullptr, nullptr);
// message should be rejected, val should still have value in pair2 // message should be rejected, val should still have value in pair2
{ {
int r = toku_ft_lookup(brt, &thekey, lookup_checkf, &pair2); int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
assert(r==0); assert(r==0);
assert(pair2.call_count==2); assert(pair2.call_count==2);
} }
@ -181,11 +181,11 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
} }
static void static void
populate_leaf(FT_HANDLE brt, FTNODE leafnode, int k, int v) { populate_leaf(FT_HANDLE ft, FTNODE leafnode, int k, int v) {
char vbuf[32]; // store v in a buffer large enough to dereference unaligned int's char vbuf[32]; // store v in a buffer large enough to dereference unaligned int's
memset(vbuf, 0, sizeof vbuf); memset(vbuf, 0, sizeof vbuf);
memcpy(vbuf, &v, sizeof v); memcpy(vbuf, &v, sizeof v);
append_leaf(brt, leafnode, &k, sizeof k, vbuf, sizeof v); append_leaf(ft, leafnode, &k, sizeof k, vbuf, sizeof v);
} }
static void static void
@ -204,16 +204,16 @@ test_msnfilter(int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
FTNODE newroot = make_node(brt, 0); FTNODE newroot = make_node(ft, 0);
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// KLUDGE: Unpin the new root so toku_ft_lookup() can pin it. (Pin lock is no longer a recursive // KLUDGE: Unpin the new root so toku_ft_lookup() can pin it. (Pin lock is no longer a recursive
// mutex.) Just leaving it unpinned for this test program works because it is the only // mutex.) Just leaving it unpinned for this test program works because it is the only
@ -221,17 +221,17 @@ test_msnfilter(int do_verify) {
// node and unlock it again before and after each message injection, but that requires more // node and unlock it again before and after each message injection, but that requires more
// work than it's worth (setting up dummy callbacks, etc.) // work than it's worth (setting up dummy callbacks, etc.)
// //
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
populate_leaf(brt, newroot, htonl(2), 1); populate_leaf(ft, newroot, htonl(2), 1);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r == 0); assert(r == 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable
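The assertions in append_leaf() above amount to a single acceptance rule for leaf message application; the helper below (hypothetical, not taken from the tree code) states that rule directly.

#include <cstdint>
#include <cassert>

// A leaf applies a message only when its MSN is strictly greater than the
// largest MSN already applied to that leaf; duplicate and older MSNs are dropped.
static bool leaf_accepts_msn(uint64_t max_msn_applied, uint64_t msg_msn) {
    return msg_msn > max_msn_applied;
}

int main(void) {
    assert(leaf_accepts_msn(100, 101));   // newer message: applied
    assert(!leaf_accepts_msn(100, 100));  // duplicate MSN: rejected, original val survives
    assert(!leaf_accepts_msn(100, 90));   // older MSN: rejected
    return 0;
}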
View file
@ -96,7 +96,7 @@ static const char *fname = TOKU_TEST_FILENAME;
static TOKUTXN const null_txn = 0; static TOKUTXN const null_txn = 0;
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
FT_CURSOR cursor; FT_CURSOR cursor;
static int test_ft_cursor_keycompare(DB *db __attribute__((unused)), const DBT *a, const DBT *b) { static int test_ft_cursor_keycompare(DB *db __attribute__((unused)), const DBT *a, const DBT *b) {
@ -109,15 +109,15 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute
unlink(fname); unlink(fname);
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0);
r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
int i; int i;
for (i=0; i<1000; i++) { for (i=0; i<1000; i++) {
char string[100]; char string[100];
snprintf(string, sizeof(string), "%04d", i); snprintf(string, sizeof(string), "%04d", i);
DBT key,val; DBT key,val;
toku_ft_insert(brt, toku_fill_dbt(&key, string, 5), toku_fill_dbt(&val, string, 5), 0); toku_ft_insert(ft, toku_fill_dbt(&key, string, 5), toku_fill_dbt(&val, string, 5), 0);
} }
{ {
@ -132,7 +132,7 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute
// This will invalidate due to the root counter bumping, but the OMT itself will still be valid. // This will invalidate due to the root counter bumping, but the OMT itself will still be valid.
{ {
DBT key, val; DBT key, val;
toku_ft_insert(brt, toku_fill_dbt(&key, "d", 2), toku_fill_dbt(&val, "w", 2), 0); toku_ft_insert(ft, toku_fill_dbt(&key, "d", 2), toku_fill_dbt(&val, "w", 2), 0);
} }
{ {
@ -141,7 +141,7 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute
} }
toku_ft_cursor_close(cursor); toku_ft_cursor_close(cursor);
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
return 0; return 0;
} }
View file
@ -103,7 +103,7 @@ static DB * const null_db = 0;
enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
const char *fname = TOKU_TEST_FILENAME; const char *fname = TOKU_TEST_FILENAME;
static int update_func( static int update_func(
@ -134,11 +134,11 @@ doit (void) {
toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER);
unlink(fname); unlink(fname);
r = toku_open_ft_handle(fname, 1, &brt, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r==0); assert(r==0);
brt->ft->update_fun = update_func; ft->ft->update_fun = update_func;
brt->ft->update_fun = update_func; ft->ft->update_fun = update_func;
toku_testsetup_initialize(); // must precede any other toku_testsetup calls toku_testsetup_initialize(); // must precede any other toku_testsetup calls
@ -146,16 +146,16 @@ doit (void) {
pivots[0] = toku_strdup("kkkkk"); pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6; int pivot_len = 6;
r = toku_testsetup_leaf(brt, &node_leaf, 2, pivots, &pivot_len); r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(brt, 1, &node_internal, 1, &node_leaf, 0, 0); r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(brt, 2, &node_root, 1, &node_internal, 0, 0); r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
assert(r==0); assert(r==0);
r = toku_testsetup_root(brt, node_root); r = toku_testsetup_root(ft, node_root);
assert(r==0); assert(r==0);
// //
@ -165,7 +165,7 @@ doit (void) {
// now we insert a row into each leaf node // now we insert a row into each leaf node
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"a", // key "a", // key
2, // keylen 2, // keylen
@ -174,7 +174,7 @@ doit (void) {
); );
assert(r==0); assert(r==0);
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"z", // key "z", // key
2, // keylen 2, // keylen
@ -187,7 +187,7 @@ doit (void) {
// now we insert filler data so that the rebalance // now we insert filler data so that the rebalance
// keeps it at two nodes // keeps it at two nodes
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"b", // key "b", // key
2, // keylen 2, // keylen
@ -196,7 +196,7 @@ doit (void) {
); );
assert(r==0); assert(r==0);
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"y", // key "y", // key
2, // keylen 2, // keylen
@ -211,7 +211,7 @@ doit (void) {
// //
for (int i = 0; i < 100000; i++) { for (int i = 0; i < 100000; i++) {
r = toku_testsetup_insert_to_nonleaf ( r = toku_testsetup_insert_to_nonleaf (
brt, ft,
node_internal, node_internal,
FT_DELETE_ANY, FT_DELETE_ANY,
"jj", // this key does not exist, so its message application should be a no-op "jj", // this key does not exist, so its message application should be a no-op
@ -226,7 +226,7 @@ doit (void) {
// now insert a broadcast message into the root // now insert a broadcast message into the root
// //
r = toku_testsetup_insert_to_nonleaf ( r = toku_testsetup_insert_to_nonleaf (
brt, ft,
node_root, node_root,
FT_UPDATE_BROADCAST_ALL, FT_UPDATE_BROADCAST_ALL,
NULL, NULL,
@ -239,11 +239,11 @@ doit (void) {
// now lock and release the leaf node to make sure it is what we expect it to be. // now lock and release the leaf node to make sure it is what we expect it to be.
FTNODE node = NULL; FTNODE node = NULL;
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode_with_dep_nodes( toku_pin_ftnode_with_dep_nodes(
brt->ft, ft->ft,
node_leaf, node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf), toku_cachetable_hash(ft->ft->cf, node_leaf),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
0, 0,
@ -255,12 +255,12 @@ doit (void) {
assert(node->n_children == 2); assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL); assert(BP_STATE(node,0) == PT_AVAIL);
assert(BP_STATE(node,1) == PT_AVAIL); assert(BP_STATE(node,1) == PT_AVAIL);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// now do a lookup on one of the keys, this should bring a leaf node up to date // now do a lookup on one of the keys, this should bring a leaf node up to date
DBT k; DBT k;
struct check_pair pair = {2, "a", 0, NULL, 0}; struct check_pair pair = {2, "a", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r==0); assert(r==0);
// //
@ -269,11 +269,11 @@ doit (void) {
// node is in memory and another is // node is in memory and another is
// on disk // on disk
// //
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode_with_dep_nodes( toku_pin_ftnode_with_dep_nodes(
brt->ft, ft->ft,
node_leaf, node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf), toku_cachetable_hash(ft->ft->cf, node_leaf),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
0, 0,
@ -285,16 +285,16 @@ doit (void) {
assert(node->n_children == 2); assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL); assert(BP_STATE(node,0) == PT_AVAIL);
assert(BP_STATE(node,1) == PT_AVAIL); assert(BP_STATE(node,1) == PT_AVAIL);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// //
// now let us induce a clean on the internal node // now let us induce a clean on the internal node
// //
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode_with_dep_nodes( toku_pin_ftnode_with_dep_nodes(
brt->ft, ft->ft,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
0, 0,
@ -310,16 +310,16 @@ doit (void) {
r = toku_ftnode_cleaner_callback( r = toku_ftnode_cleaner_callback(
node, node,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
brt->ft ft->ft
); );
// verify that node_internal's buffer is empty // verify that node_internal's buffer is empty
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode_with_dep_nodes( toku_pin_ftnode_with_dep_nodes(
brt->ft, ft->ft,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
0, 0,
@ -329,7 +329,7 @@ doit (void) {
); );
// check that buffers are empty // check that buffers are empty
assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0); assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// //
// now run a checkpoint to get everything clean, // now run a checkpoint to get everything clean,
@ -341,14 +341,14 @@ doit (void) {
// check that lookups on the two keys are still good // check that lookups on the two keys are still good
struct check_pair pair1 = {2, "a", 0, NULL, 0}; struct check_pair pair1 = {2, "a", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
assert(r==0); assert(r==0);
struct check_pair pair2 = {2, "z", 0, NULL, 0}; struct check_pair pair2 = {2, "z", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
assert(r==0); assert(r==0);
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
toku_free(pivots[0]); toku_free(pivots[0]);
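As a hedged sketch of the cleaner step the test above drives by hand (assumptions: `node` is the internal node already pinned for write, as the test does with toku_pin_ftnode_with_dep_nodes, and `node_internal` is its blocknum):

#include "test.h"

// Sketch only: invoke the cleaner callback on a pinned internal node.
static void run_cleaner_on_internal_node_sketch(FT_HANDLE ft, FTNODE node, BLOCKNUM node_internal) {
    int r = toku_ftnode_cleaner_callback(
        node,
        node_internal,
        toku_cachetable_hash(ft->ft->cf, node_internal),
        ft->ft);
    (void) r;
    // the test then re-pins node_internal and checks that its buffer was flushed:
    //   assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
}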
View file
@ -103,7 +103,7 @@ static DB * const null_db = 0;
enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
const char *fname = TOKU_TEST_FILENAME; const char *fname = TOKU_TEST_FILENAME;
static int update_func( static int update_func(
@ -134,11 +134,11 @@ doit (bool keep_other_bn_in_memory) {
toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER);
unlink(fname); unlink(fname);
r = toku_open_ft_handle(fname, 1, &brt, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r==0); assert(r==0);
brt->options.update_fun = update_func; ft->options.update_fun = update_func;
brt->ft->update_fun = update_func; ft->ft->update_fun = update_func;
toku_testsetup_initialize(); // must precede any other toku_testsetup calls toku_testsetup_initialize(); // must precede any other toku_testsetup calls
@ -146,16 +146,16 @@ doit (bool keep_other_bn_in_memory) {
pivots[0] = toku_strdup("kkkkk"); pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6; int pivot_len = 6;
r = toku_testsetup_leaf(brt, &node_leaf, 2, pivots, &pivot_len); r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(brt, 1, &node_internal, 1, &node_leaf, 0, 0); r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(brt, 2, &node_root, 1, &node_internal, 0, 0); r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
assert(r==0); assert(r==0);
r = toku_testsetup_root(brt, node_root); r = toku_testsetup_root(ft, node_root);
assert(r==0); assert(r==0);
// //
@ -165,7 +165,7 @@ doit (bool keep_other_bn_in_memory) {
// now we insert a row into each leaf node // now we insert a row into each leaf node
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"a", // key "a", // key
2, // keylen 2, // keylen
@ -174,7 +174,7 @@ doit (bool keep_other_bn_in_memory) {
); );
assert(r==0); assert(r==0);
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"z", // key "z", // key
2, // keylen 2, // keylen
@ -187,7 +187,7 @@ doit (bool keep_other_bn_in_memory) {
// now we insert filler data so that the rebalance // now we insert filler data so that the rebalance
// keeps it at two nodes // keeps it at two nodes
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"b", // key "b", // key
2, // keylen 2, // keylen
@ -196,7 +196,7 @@ doit (bool keep_other_bn_in_memory) {
); );
assert(r==0); assert(r==0);
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf, node_leaf,
"y", // key "y", // key
2, // keylen 2, // keylen
@ -211,7 +211,7 @@ doit (bool keep_other_bn_in_memory) {
// //
for (int i = 0; i < 100000; i++) { for (int i = 0; i < 100000; i++) {
r = toku_testsetup_insert_to_nonleaf ( r = toku_testsetup_insert_to_nonleaf (
brt, ft,
node_internal, node_internal,
FT_DELETE_ANY, FT_DELETE_ANY,
"jj", // this key does not exist, so its message application should be a no-op "jj", // this key does not exist, so its message application should be a no-op
@ -226,7 +226,7 @@ doit (bool keep_other_bn_in_memory) {
// now insert a broadcast message into the root // now insert a broadcast message into the root
// //
r = toku_testsetup_insert_to_nonleaf ( r = toku_testsetup_insert_to_nonleaf (
brt, ft,
node_root, node_root,
FT_UPDATE_BROADCAST_ALL, FT_UPDATE_BROADCAST_ALL,
NULL, NULL,
@ -245,11 +245,11 @@ doit (bool keep_other_bn_in_memory) {
// now lock and release the leaf node to make sure it is what we expect it to be. // now lock and release the leaf node to make sure it is what we expect it to be.
FTNODE node = NULL; FTNODE node = NULL;
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft->ft,
node_leaf, node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf), toku_cachetable_hash(ft->ft->cf, node_leaf),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
&node, &node,
@ -259,18 +259,18 @@ doit (bool keep_other_bn_in_memory) {
assert(node->n_children == 2); assert(node->n_children == 2);
// a hack to get the basement nodes evicted // a hack to get the basement nodes evicted
for (int i = 0; i < 20; i++) { for (int i = 0; i < 20; i++) {
toku_ftnode_pe_callback(node, make_pair_attr(0xffffffff), brt->ft, def_pe_finalize_impl, nullptr); toku_ftnode_pe_callback(node, make_pair_attr(0xffffffff), ft->ft, def_pe_finalize_impl, nullptr);
} }
// this ensures that when we do the lookups below, // this ensures that when we do the lookups below,
// the data is read off disk // the data is read off disk
assert(BP_STATE(node,0) == PT_ON_DISK); assert(BP_STATE(node,0) == PT_ON_DISK);
assert(BP_STATE(node,1) == PT_ON_DISK); assert(BP_STATE(node,1) == PT_ON_DISK);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// now do a lookup on one of the keys, this should bring a leaf node up to date // now do a lookup on one of the keys, this should bring a leaf node up to date
DBT k; DBT k;
struct check_pair pair = {2, "a", 0, NULL, 0}; struct check_pair pair = {2, "a", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r==0); assert(r==0);
if (keep_other_bn_in_memory) { if (keep_other_bn_in_memory) {
@ -281,7 +281,7 @@ doit (bool keep_other_bn_in_memory) {
// but only one should have broadcast message // but only one should have broadcast message
// applied. // applied.
// //
fill_bfe_for_full_read(&bfe, brt->ft); fill_bfe_for_full_read(&bfe, ft->ft);
} }
else { else {
// //
@ -290,12 +290,12 @@ doit (bool keep_other_bn_in_memory) {
// node is in memory and another is // node is in memory and another is
// on disk // on disk
// //
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
} }
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft->ft,
node_leaf, node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf), toku_cachetable_hash(ft->ft->cf, node_leaf),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
&node, &node,
@ -310,16 +310,16 @@ doit (bool keep_other_bn_in_memory) {
else { else {
assert(BP_STATE(node,1) == PT_ON_DISK); assert(BP_STATE(node,1) == PT_ON_DISK);
} }
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// //
// now let us induce a clean on the internal node // now let us induce a clean on the internal node
// //
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft->ft,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
&node, &node,
@ -333,16 +333,16 @@ doit (bool keep_other_bn_in_memory) {
r = toku_ftnode_cleaner_callback( r = toku_ftnode_cleaner_callback(
node, node,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
brt->ft ft->ft
); );
// verify that node_internal's buffer is empty // verify that node_internal's buffer is empty
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft->ft,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
&node, &node,
@ -350,7 +350,7 @@ doit (bool keep_other_bn_in_memory) {
); );
// check that buffers are empty // check that buffers are empty
assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0); assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// //
// now run a checkpoint to get everything clean, // now run a checkpoint to get everything clean,
@ -361,14 +361,14 @@ doit (bool keep_other_bn_in_memory) {
// check that lookups on the two keys are still good // check that lookups on the two keys are still good
struct check_pair pair1 = {2, "a", 0, NULL, 0}; struct check_pair pair1 = {2, "a", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
assert(r==0); assert(r==0);
struct check_pair pair2 = {2, "z", 0, NULL, 0}; struct check_pair pair2 = {2, "z", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
assert(r==0); assert(r==0);
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
toku_free(pivots[0]); toku_free(pivots[0]);
View file
@ -102,7 +102,7 @@ static DB * const null_db = 0;
enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
const char *fname = TOKU_TEST_FILENAME; const char *fname = TOKU_TEST_FILENAME;
static int update_func( static int update_func(
@ -133,30 +133,30 @@ doit (void) {
toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER);
unlink(fname); unlink(fname);
r = toku_open_ft_handle(fname, 1, &brt, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r==0); assert(r==0);
brt->options.update_fun = update_func; ft->options.update_fun = update_func;
brt->ft->update_fun = update_func; ft->ft->update_fun = update_func;
toku_testsetup_initialize(); // must precede any other toku_testsetup calls toku_testsetup_initialize(); // must precede any other toku_testsetup calls
r = toku_testsetup_leaf(brt, &node_leaf[0], 1, NULL, NULL); r = toku_testsetup_leaf(ft, &node_leaf[0], 1, NULL, NULL);
assert(r==0); assert(r==0);
r = toku_testsetup_leaf(brt, &node_leaf[1], 1, NULL, NULL); r = toku_testsetup_leaf(ft, &node_leaf[1], 1, NULL, NULL);
assert(r==0); assert(r==0);
char* pivots[1]; char* pivots[1];
pivots[0] = toku_strdup("kkkkk"); pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6; int pivot_len = 6;
r = toku_testsetup_nonleaf(brt, 1, &node_internal, 2, node_leaf, pivots, &pivot_len); r = toku_testsetup_nonleaf(ft, 1, &node_internal, 2, node_leaf, pivots, &pivot_len);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(brt, 2, &node_root, 1, &node_internal, 0, 0); r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
assert(r==0); assert(r==0);
r = toku_testsetup_root(brt, node_root); r = toku_testsetup_root(ft, node_root);
assert(r==0); assert(r==0);
// //
@ -166,7 +166,7 @@ doit (void) {
// now we insert a row into each leaf node // now we insert a row into each leaf node
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf[0], node_leaf[0],
"a", // key "a", // key
2, // keylen 2, // keylen
@ -175,7 +175,7 @@ doit (void) {
); );
assert(r==0); assert(r==0);
r = toku_testsetup_insert_to_leaf ( r = toku_testsetup_insert_to_leaf (
brt, ft,
node_leaf[1], node_leaf[1],
"z", // key "z", // key
2, // keylen 2, // keylen
@ -190,7 +190,7 @@ doit (void) {
// //
for (int i = 0; i < 100000; i++) { for (int i = 0; i < 100000; i++) {
r = toku_testsetup_insert_to_nonleaf ( r = toku_testsetup_insert_to_nonleaf (
brt, ft,
node_internal, node_internal,
FT_DELETE_ANY, FT_DELETE_ANY,
"jj", // this key does not exist, so its message application should be a no-op "jj", // this key does not exist, so its message application should be a no-op
@ -205,7 +205,7 @@ doit (void) {
// now insert a broadcast message into the root // now insert a broadcast message into the root
// //
r = toku_testsetup_insert_to_nonleaf ( r = toku_testsetup_insert_to_nonleaf (
brt, ft,
node_root, node_root,
FT_UPDATE_BROADCAST_ALL, FT_UPDATE_BROADCAST_ALL,
NULL, NULL,
@ -219,23 +219,23 @@ doit (void) {
// now let us induce a clean on the internal node // now let us induce a clean on the internal node
// //
FTNODE node; FTNODE node;
toku_pin_node_with_min_bfe(&node, node_leaf[1], brt); toku_pin_node_with_min_bfe(&node, node_leaf[1], ft);
// hack to get merge going // hack to get merge going
BLB_SEQINSERT(node, node->n_children-1) = false; BLB_SEQINSERT(node, node->n_children-1) = false;
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// now do a lookup on one of the keys, this should bring a leaf node up to date // now do a lookup on one of the keys, this should bring a leaf node up to date
DBT k; DBT k;
struct check_pair pair = {2, "a", 0, NULL, 0}; struct check_pair pair = {2, "a", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r==0); assert(r==0);
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft->ft,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
&node, &node,
@ -248,16 +248,16 @@ doit (void) {
r = toku_ftnode_cleaner_callback( r = toku_ftnode_cleaner_callback(
node, node,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
brt->ft ft->ft
); );
// verify that node_internal's buffer is empty // verify that node_internal's buffer is empty
fill_bfe_for_min_read(&bfe, brt->ft); fill_bfe_for_min_read(&bfe, ft->ft);
toku_pin_ftnode( toku_pin_ftnode(
brt->ft, ft->ft,
node_internal, node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal), toku_cachetable_hash(ft->ft->cf, node_internal),
&bfe, &bfe,
PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE,
&node, &node,
@ -267,7 +267,7 @@ doit (void) {
assert(node->n_children == 1); assert(node->n_children == 1);
// check that buffers are empty // check that buffers are empty
assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0); assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
toku_unpin_ftnode(brt->ft, node); toku_unpin_ftnode(ft->ft, node);
// //
// now run a checkpoint to get everything clean, // now run a checkpoint to get everything clean,
@ -279,14 +279,14 @@ doit (void) {
// check that lookups on the two keys are still good // check that lookups on the two keys are still good
struct check_pair pair1 = {2, "a", 0, NULL, 0}; struct check_pair pair1 = {2, "a", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
assert(r==0); assert(r==0);
struct check_pair pair2 = {2, "z", 0, NULL, 0}; struct check_pair pair2 = {2, "z", 0, NULL, 0};
r = toku_ft_lookup(brt, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2); r = toku_ft_lookup(ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
assert(r==0); assert(r==0);
r = toku_close_ft_handle_nolsn(brt, 0); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
toku_free(pivots[0]); toku_free(pivots[0]);
View file
@ -1,6 +1,6 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
// Test the first case for the bug in #1308 (brt-serialize.c:33 does the cast wrong) // Test the first case for the bug in #1308 (ft-serialize.c:33 does the cast wrong)
#ident "$Id$" #ident "$Id$"
/* /*
COPYING CONDITIONS NOTICE: COPYING CONDITIONS NOTICE:
View file
@ -212,20 +212,20 @@ test_split_on_boundary(void)
unlink(fname); unlink(fname);
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL); ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884); verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884); verify_basement_node_msns(nodeb, dummy_msn_3884);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(ft->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
if (splitk.data) { if (splitk.data) {
@ -282,17 +282,17 @@ test_split_with_everything_on_the_left(void)
unlink(fname); unlink(fname);
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL); ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(ft->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
if (splitk.data) { if (splitk.data) {
@ -354,17 +354,17 @@ test_split_on_boundary_of_last_node(void)
unlink(fname); unlink(fname);
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL); ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(ft->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
if (splitk.data) { if (splitk.data) {
@ -418,17 +418,17 @@ test_split_at_begin(void)
unlink(fname); unlink(fname);
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL); ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(ft->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
if (splitk.data) { if (splitk.data) {
@ -478,17 +478,17 @@ test_split_at_end(void)
unlink(fname); unlink(fname);
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL); ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(ft->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
if (splitk.data) { if (splitk.data) {
@ -532,20 +532,20 @@ test_split_odd_nodes(void)
unlink(fname); unlink(fname);
CACHETABLE ct; CACHETABLE ct;
FT_HANDLE brt; FT_HANDLE ft;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
r = toku_open_ft_handle(fname, 1, &brt, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL); ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884); verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884); verify_basement_node_msns(nodeb, dummy_msn_3884);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(ft->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
toku_cachetable_close(&ct); toku_cachetable_close(&ct);
if (splitk.data) { if (splitk.data) {
View file
@ -106,10 +106,10 @@ PATENT RIGHTS GRANT:
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@ -150,13 +150,13 @@ populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
} }
static void static void
insert_into_child_buffer(FT_HANDLE brt, FTNODE node, int childnum, int minkey, int maxkey) { insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, int maxkey) {
for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) { for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) {
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
unsigned int key = htonl(val); unsigned int key = htonl(val);
DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
DBT theval; toku_fill_dbt(&theval, &val, sizeof val); DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
toku_ft_append_to_child_buffer(brt->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval);
// Create bad tree (don't do following): // Create bad tree (don't do following):
// node->max_msn_applied_to_node = msn; // node->max_msn_applied_to_node = msn;
@ -164,17 +164,17 @@ insert_into_child_buffer(FT_HANDLE brt, FTNODE node, int childnum, int minkey, i
} }
static FTNODE static FTNODE
make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) { make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
FTNODE node; FTNODE node;
if (height == 0) { if (height == 0) {
node = make_node(brt, 0); node = make_node(ft, 0);
populate_leaf(node, *seq, nperleaf, minkey, maxkey); populate_leaf(node, *seq, nperleaf, minkey, maxkey);
*seq += nperleaf; *seq += nperleaf;
} else { } else {
node = make_node(brt, height); node = make_node(ft, height);
int minkeys[fanout], maxkeys[fanout]; int minkeys[fanout], maxkeys[fanout];
for (int childnum = 0; childnum < fanout; childnum++) { for (int childnum = 0; childnum < fanout; childnum++) {
FTNODE child = make_tree(brt, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]); FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
if (childnum == 0) { if (childnum == 0) {
toku_ft_nonleaf_append_child(node, child, NULL); toku_ft_nonleaf_append_child(node, child, NULL);
} else { } else {
@ -182,8 +182,8 @@ make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *mi
DBT pivotkey; DBT pivotkey;
toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k)); toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
} }
toku_unpin_ftnode(brt->ft, child); toku_unpin_ftnode(ft->ft, child);
insert_into_child_buffer(brt, node, childnum, minkeys[childnum], maxkeys[childnum]); insert_into_child_buffer(ft, node, childnum, minkeys[childnum], maxkeys[childnum]);
} }
*minkey = minkeys[0]; *minkey = minkeys[0];
*maxkey = maxkeys[0]; *maxkey = maxkeys[0];
@ -214,32 +214,32 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// make a tree // make a tree
int seq = 0, minkey, maxkey; int seq = 0, minkey, maxkey;
FTNODE newroot = make_tree(brt, height, fanout, nperleaf, &seq, &minkey, &maxkey); FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// Create bad tree (don't do following): // Create bad tree (don't do following):
// newroot->max_msn_applied_to_node = last_dummymsn(); // capture msn of last message injected into tree // newroot->max_msn_applied_to_node = last_dummymsn(); // capture msn of last message injected into tree
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r != 0); assert(r != 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable
View file
@ -88,17 +88,17 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved."
// generate a tree with bad pivots and check that brt->verify finds them // generate a tree with bad pivots and check that ft->verify finds them
#include <ft-cachetable-wrappers.h> #include <ft-cachetable-wrappers.h>
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@ -135,17 +135,17 @@ populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
} }
static FTNODE static FTNODE
make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) { make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
FTNODE node; FTNODE node;
if (height == 0) { if (height == 0) {
node = make_node(brt, 0); node = make_node(ft, 0);
populate_leaf(node, *seq, nperleaf, minkey, maxkey); populate_leaf(node, *seq, nperleaf, minkey, maxkey);
*seq += nperleaf; *seq += nperleaf;
} else { } else {
node = make_node(brt, height); node = make_node(ft, height);
int minkeys[fanout], maxkeys[fanout]; int minkeys[fanout], maxkeys[fanout];
for (int childnum = 0; childnum < fanout; childnum++) { for (int childnum = 0; childnum < fanout; childnum++) {
FTNODE child = make_tree(brt, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]); FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
if (childnum == 0) { if (childnum == 0) {
toku_ft_nonleaf_append_child(node, child, NULL); toku_ft_nonleaf_append_child(node, child, NULL);
} else { } else {
@ -153,7 +153,7 @@ make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *mi
DBT pivotkey; DBT pivotkey;
toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k)); toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
} }
toku_unpin_ftnode(brt->ft, child); toku_unpin_ftnode(ft->ft, child);
} }
*minkey = minkeys[0]; *minkey = minkeys[0];
*maxkey = maxkeys[0]; *maxkey = maxkeys[0];
@@ -184,29 +184,29 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// make a tree // make a tree
int seq = 0, minkey, maxkey; int seq = 0, minkey, maxkey;
FTNODE newroot = make_tree(brt, height, fanout, nperleaf, &seq, &minkey, &maxkey); FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
// discard the old root block // discard the old root block
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r != 0); assert(r != 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable

View file

@@ -89,17 +89,17 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved."
// generate a tree with a single leaf node containing duplicate keys // generate a tree with a single leaf node containing duplicate keys
// check that brt verify finds them // check that ft verify finds them
#include <ft-cachetable-wrappers.h> #include <ft-cachetable-wrappers.h>
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@@ -142,31 +142,31 @@ test_dup_in_leaf(int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// discard the old root block // discard the old root block
FTNODE newroot = make_node(brt, 0); FTNODE newroot = make_node(ft, 0);
populate_leaf(newroot, htonl(2), 1); populate_leaf(newroot, htonl(2), 1);
populate_leaf(newroot, htonl(2), 2); populate_leaf(newroot, htonl(2), 2);
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r != 0); assert(r != 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable

View file

@@ -88,17 +88,17 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved."
// generate a tree with duplicate pivots and check that brt->verify finds them // generate a tree with duplicate pivots and check that ft->verify finds them
#include <ft-cachetable-wrappers.h> #include <ft-cachetable-wrappers.h>
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@@ -135,17 +135,17 @@ populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
} }
static FTNODE static FTNODE
make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) { make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
FTNODE node; FTNODE node;
if (height == 0) { if (height == 0) {
node = make_node(brt, 0); node = make_node(ft, 0);
populate_leaf(node, *seq, nperleaf, minkey, maxkey); populate_leaf(node, *seq, nperleaf, minkey, maxkey);
*seq += nperleaf; *seq += nperleaf;
} else { } else {
node = make_node(brt, height); node = make_node(ft, height);
int minkeys[fanout], maxkeys[fanout]; int minkeys[fanout], maxkeys[fanout];
for (int childnum = 0; childnum < fanout; childnum++) { for (int childnum = 0; childnum < fanout; childnum++) {
FTNODE child = make_tree(brt, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]); FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
if (childnum == 0) { if (childnum == 0) {
toku_ft_nonleaf_append_child(node, child, NULL); toku_ft_nonleaf_append_child(node, child, NULL);
} else { } else {
@@ -153,7 +153,7 @@ make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *mi
DBT pivotkey; DBT pivotkey;
toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k)); toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
} }
toku_unpin_ftnode(brt->ft, child); toku_unpin_ftnode(ft->ft, child);
} }
*minkey = minkeys[0]; *minkey = minkeys[0];
*maxkey = maxkeys[0]; *maxkey = maxkeys[0];
@@ -187,30 +187,30 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// make a tree // make a tree
int seq = 0, minkey, maxkey; int seq = 0, minkey, maxkey;
FTNODE newroot = make_tree(brt, height, fanout, nperleaf, &seq, &minkey, &maxkey); FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
// discard the old root block // discard the old root block
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r != 0); assert(r != 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable

View file

@@ -89,17 +89,17 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved."
// generate a tree with misrouted messages in the child buffers. // generate a tree with misrouted messages in the child buffers.
// check that brt verify finds them. // check that ft verify finds them.
#include <ft-cachetable-wrappers.h> #include <ft-cachetable-wrappers.h>
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@@ -136,7 +136,7 @@ populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
} }
static void static void
insert_into_child_buffer(FT_HANDLE brt, FTNODE node, int childnum, int minkey, int maxkey) { insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, int maxkey) {
int k = htonl(maxkey); int k = htonl(maxkey);
maxkey = htonl(k+1); maxkey = htonl(k+1);
for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) { for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) {
@@ -144,22 +144,22 @@ insert_into_child_buffer(FT_HANDLE brt, FTNODE node, int childnum, int minkey, i
DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
DBT theval; toku_fill_dbt(&theval, &val, sizeof val); DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
toku_ft_append_to_child_buffer(brt->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval);
} }
} }
static FTNODE static FTNODE
make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) { make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
FTNODE node; FTNODE node;
if (height == 0) { if (height == 0) {
node = make_node(brt, 0); node = make_node(ft, 0);
populate_leaf(node, *seq, nperleaf, minkey, maxkey); populate_leaf(node, *seq, nperleaf, minkey, maxkey);
*seq += nperleaf; *seq += nperleaf;
} else { } else {
node = make_node(brt, height); node = make_node(ft, height);
int minkeys[fanout], maxkeys[fanout]; int minkeys[fanout], maxkeys[fanout];
for (int childnum = 0; childnum < fanout; childnum++) { for (int childnum = 0; childnum < fanout; childnum++) {
FTNODE child = make_tree(brt, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]); FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
if (childnum == 0) { if (childnum == 0) {
toku_ft_nonleaf_append_child(node, child, NULL); toku_ft_nonleaf_append_child(node, child, NULL);
} else { } else {
@@ -167,8 +167,8 @@ make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *mi
DBT pivotkey; DBT pivotkey;
toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k)); toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
} }
toku_unpin_ftnode(brt->ft, child); toku_unpin_ftnode(ft->ft, child);
insert_into_child_buffer(brt, node, childnum, minkeys[childnum], maxkeys[childnum]); insert_into_child_buffer(ft, node, childnum, minkeys[childnum], maxkeys[childnum]);
} }
*minkey = minkeys[0]; *minkey = minkeys[0];
*maxkey = maxkeys[0]; *maxkey = maxkeys[0];
@@ -199,30 +199,30 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// make a tree // make a tree
int seq = 0, minkey, maxkey; int seq = 0, minkey, maxkey;
FTNODE newroot = make_tree(brt, height, fanout, nperleaf, &seq, &minkey, &maxkey); FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
// discard the old root block // discard the old root block
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r != 0); assert(r != 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable

View file

@@ -89,17 +89,17 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved."
// generate a tree with a single leaf node containing unsorted keys // generate a tree with a single leaf node containing unsorted keys
// check that brt verify finds them // check that ft verify finds them
#include <ft-cachetable-wrappers.h> #include <ft-cachetable-wrappers.h>
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@@ -144,30 +144,30 @@ test_dup_in_leaf(int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// discard the old root block // discard the old root block
FTNODE newroot = make_node(brt, 0); FTNODE newroot = make_node(ft, 0);
populate_leaf(newroot, htonl(2), 1); populate_leaf(newroot, htonl(2), 1);
populate_leaf(newroot, htonl(1), 2); populate_leaf(newroot, htonl(1), 2);
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r != 0); assert(r != 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable

View file

@@ -88,17 +88,17 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved."
// generate a tree with unsorted pivots and check that brt->verify finds them // generate a tree with unsorted pivots and check that ft->verify finds them
#include <ft-cachetable-wrappers.h> #include <ft-cachetable-wrappers.h>
#include "test.h" #include "test.h"
static FTNODE static FTNODE
make_node(FT_HANDLE brt, int height) { make_node(FT_HANDLE ft, int height) {
FTNODE node = NULL; FTNODE node = NULL;
int n_children = (height == 0) ? 1 : 0; int n_children = (height == 0) ? 1 : 0;
toku_create_new_ftnode(brt, &node, height, n_children); toku_create_new_ftnode(ft, &node, height, n_children);
if (n_children) BP_STATE(node,0) = PT_AVAIL; if (n_children) BP_STATE(node,0) = PT_AVAIL;
return node; return node;
} }
@@ -135,17 +135,17 @@ populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
} }
static FTNODE static FTNODE
make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) { make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
FTNODE node; FTNODE node;
if (height == 0) { if (height == 0) {
node = make_node(brt, 0); node = make_node(ft, 0);
populate_leaf(node, *seq, nperleaf, minkey, maxkey); populate_leaf(node, *seq, nperleaf, minkey, maxkey);
*seq += nperleaf; *seq += nperleaf;
} else { } else {
node = make_node(brt, height); node = make_node(ft, height);
int minkeys[fanout], maxkeys[fanout]; int minkeys[fanout], maxkeys[fanout];
for (int childnum = 0; childnum < fanout; childnum++) { for (int childnum = 0; childnum < fanout; childnum++) {
FTNODE child = make_tree(brt, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]); FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
if (childnum == 0) { if (childnum == 0) {
toku_ft_nonleaf_append_child(node, child, NULL); toku_ft_nonleaf_append_child(node, child, NULL);
} else { } else {
@@ -153,7 +153,7 @@ make_tree(FT_HANDLE brt, int height, int fanout, int nperleaf, int *seq, int *mi
DBT pivotkey; DBT pivotkey;
toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k)); toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
} }
toku_unpin_ftnode(brt->ft, child); toku_unpin_ftnode(ft->ft, child);
} }
*minkey = minkeys[0]; *minkey = minkeys[0];
*maxkey = maxkeys[0]; *maxkey = maxkeys[0];
@@ -184,29 +184,29 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
CACHETABLE ct = NULL; CACHETABLE ct = NULL;
toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER);
// create the brt // create the ft
TOKUTXN null_txn = NULL; TOKUTXN null_txn = NULL;
FT_HANDLE brt = NULL; FT_HANDLE ft = NULL;
r = toku_open_ft_handle(fname, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
assert(r == 0); assert(r == 0);
// make a tree // make a tree
int seq = 0, minkey, maxkey; int seq = 0, minkey, maxkey;
FTNODE newroot = make_tree(brt, height, fanout, nperleaf, &seq, &minkey, &maxkey); FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
// discard the old root block // discard the old root block
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename);
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(ft->ft, newroot);
if (do_verify) { if (do_verify) {
r = toku_verify_ft(brt); r = toku_verify_ft(ft);
assert(r != 0); assert(r != 0);
} }
// flush to the file system // flush to the file system
r = toku_close_ft_handle_nolsn(brt, 0); r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0); assert(r == 0);
// shutdown the cachetable // shutdown the cachetable

View file

@@ -119,13 +119,13 @@ static void test_xid_lsn_independent(int N) {
test_setup(TOKU_TEST_FILENAME, &logger, &ct); test_setup(TOKU_TEST_FILENAME, &logger, &ct);
FT_HANDLE brt; FT_HANDLE ft;
TOKUTXN txn; TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
CKERR(r); CKERR(r);
r = toku_open_ft_handle("ftfile", 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); r = toku_open_ft_handle("ftfile", 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun);
CKERR(r); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); r = toku_txn_commit_txn(txn, false, NULL, NULL);
@@ -143,7 +143,7 @@ static void test_xid_lsn_independent(int N) {
snprintf(key, sizeof(key), "key%x.%x", rands[i], i); snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
memset(val, 'v', sizeof(val)); memset(val, 'v', sizeof(val));
val[sizeof(val)-1]=0; val[sizeof(val)-1]=0;
toku_ft_insert(brt, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), txn); toku_ft_insert(ft, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), txn);
} }
{ {
TOKUTXN txn2; TOKUTXN txn2;
@@ -172,7 +172,7 @@ static void test_xid_lsn_independent(int N) {
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct); CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
CKERR(r); CKERR(r);
r = toku_close_ft_handle_nolsn(brt, NULL); r = toku_close_ft_handle_nolsn(ft, NULL);
CKERR(r); CKERR(r);
clean_shutdown(&logger, &ct); clean_shutdown(&logger, &ct);

View file

@@ -89,7 +89,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
/* Tell me the diff between two brt files. */ /* Tell me the diff between two FT files. */
#include "cachetable.h" #include "cachetable.h"
#include "ft.h" #include "ft.h"

View file

@@ -123,7 +123,7 @@ toku_assert_init(void)
malloc_stats_f = (malloc_stats_fun_t) dlsym(RTLD_DEFAULT, "malloc_stats"); malloc_stats_f = (malloc_stats_fun_t) dlsym(RTLD_DEFAULT, "malloc_stats");
} }
// Function pointers are zero by default so asserts can be used by brt-layer tests without an environment. // Function pointers are zero by default so asserts can be used by ft-layer tests without an environment.
static int (*toku_maybe_get_engine_status_text_p)(char* buff, int buffsize) = 0; static int (*toku_maybe_get_engine_status_text_p)(char* buff, int buffsize) = 0;
static void (*toku_maybe_set_env_panic_p)(int code, const char* msg) = 0; static void (*toku_maybe_set_env_panic_p)(int code, const char* msg) = 0;

View file

@@ -581,8 +581,8 @@ indexer_find_prev_xr(DB_INDEXER *UU(indexer), ULEHANDLE ule, uint64_t xrindex, u
return prev_found; return prev_found;
} }
// inject "delete" message into brt with logging in recovery and rollback logs, // inject "delete" message into ft with logging in recovery and rollback logs,
// and making an association between txn and brt // and making an association between txn and ft
static int static int
indexer_ft_delete_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids, TOKUTXN txn) { indexer_ft_delete_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids, TOKUTXN txn) {
int result = 0; int result = 0;
@@ -630,8 +630,8 @@ indexer_ft_delete_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xi
return result; return result;
} }
// inject "insert" message into brt with logging in recovery and rollback logs, // inject "insert" message into ft with logging in recovery and rollback logs,
// and making an association between txn and brt // and making an association between txn and ft
static int static int
indexer_ft_insert_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids, TOKUTXN txn) { indexer_ft_insert_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids, TOKUTXN txn) {
int result = 0; int result = 0;
@@ -650,7 +650,7 @@ indexer_ft_insert_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *
} }
// send an insert message into the tree without rollback or recovery logging // send an insert message into the tree without rollback or recovery logging
// and without associating the txn and the brt // and without associating the txn and the ft
static int static int
indexer_ft_insert_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids) { indexer_ft_insert_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids) {
int result = 0; int result = 0;

View file

@@ -306,15 +306,15 @@ toku_loader_create_loader(DB_ENV *env,
// time to open the big kahuna // time to open the big kahuna
char **XMALLOC_N(N, new_inames_in_env); char **XMALLOC_N(N, new_inames_in_env);
FT_HANDLE *XMALLOC_N(N, brts); FT_HANDLE *XMALLOC_N(N, fts);
for (int i=0; i<N; i++) { for (int i=0; i<N; i++) {
brts[i] = dbs[i]->i->ft_handle; fts[i] = dbs[i]->i->ft_handle;
} }
LSN load_lsn; LSN load_lsn;
rval = locked_load_inames(env, txn, N, dbs, new_inames_in_env, &load_lsn, puts_allowed); rval = locked_load_inames(env, txn, N, dbs, new_inames_in_env, &load_lsn, puts_allowed);
if ( rval!=0 ) { if ( rval!=0 ) {
toku_free(new_inames_in_env); toku_free(new_inames_in_env);
toku_free(brts); toku_free(fts);
goto create_exit; goto create_exit;
} }
TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL; TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL;
@@ -323,7 +323,7 @@ toku_loader_create_loader(DB_ENV *env,
env->i->generate_row_for_put, env->i->generate_row_for_put,
src_db, src_db,
N, N,
brts, dbs, fts, dbs,
(const char **)new_inames_in_env, (const char **)new_inames_in_env,
compare_functions, compare_functions,
loader->i->temp_file_template, loader->i->temp_file_template,
@@ -334,11 +334,11 @@ toku_loader_create_loader(DB_ENV *env,
compress_intermediates); compress_intermediates);
if ( rval!=0 ) { if ( rval!=0 ) {
toku_free(new_inames_in_env); toku_free(new_inames_in_env);
toku_free(brts); toku_free(fts);
goto create_exit; goto create_exit;
} }
loader->i->inames_in_env = new_inames_in_env; loader->i->inames_in_env = new_inames_in_env;
toku_free(brts); toku_free(fts);
if (!puts_allowed) { if (!puts_allowed) {
rval = ft_loader_close_and_redirect(loader); rval = ft_loader_close_and_redirect(loader);
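The hunk above repeats a small but easy-to-get-wrong pattern: borrow one ft handle per DB into a scratch array, hand the array to the multi-DB call, and free the scratch array on every exit path. A self-contained sketch of just that pattern follows; fake_db and open_loader_stub are hypothetical stand-ins for DB (with its i->ft_handle) and toku_ft_loader_open.

#include <errno.h>
#include <stdlib.h>

typedef struct { void *ft_handle; } fake_db;      // hypothetical stand-in for DB

static int open_loader_stub(void **fts, int n) {  // hypothetical stand-in for toku_ft_loader_open
    (void) fts; (void) n;
    return 0;
}

static int create_loader_sketch(fake_db **dbs, int n) {
    void **fts = (void **) malloc((size_t) n * sizeof *fts);
    if (fts == NULL) {
        return ENOMEM;
    }
    for (int i = 0; i < n; i++) {
        fts[i] = dbs[i]->ft_handle;   // borrowed references, not owned
    }
    int r = open_loader_stub(fts, n);
    free(fts);                        // freed on success and on failure alike
    return r;
}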

View file

@@ -92,7 +92,7 @@ PATENT RIGHTS GRANT:
// This test, when run under helgrind, should detect the race problem documented in #3219. // This test, when run under helgrind, should detect the race problem documented in #3219.
// The test: // The test:
// checkpointing runs (in one thread) // checkpointing runs (in one thread)
// another thread does a brt lookup. // another thread does an ft lookup.
// We expect to see a lock-acquisition error. // We expect to see a lock-acquisition error.
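As a rough illustration of the test's shape only (not the real harness), the sketch below runs the two activities on separate threads; run_checkpoint_once() and do_one_ft_lookup() are hypothetical stubs for the real toku_checkpoint call and ft cursor lookup.

#include <pthread.h>

static void run_checkpoint_once(void) { /* stand-in for toku_checkpoint(...) */ }
static void do_one_ft_lookup(void)    { /* stand-in for an ft cursor lookup  */ }

static void *checkpoint_thread(void *arg) {
    (void) arg;
    run_checkpoint_once();
    return NULL;
}

static void *lookup_thread(void *arg) {
    (void) arg;
    do_one_ft_lookup();
    return NULL;
}

int main(void) {
    pthread_t chk, get;
    pthread_create(&chk, NULL, checkpoint_thread, NULL);
    pthread_create(&get, NULL, lookup_thread, NULL);
    pthread_join(chk, NULL);
    pthread_join(get, NULL);
    return 0;
}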

View file

@@ -106,7 +106,7 @@ test_cursor (void) {
DB_ENV * env; DB_ENV * env;
DB *db; DB *db;
DB_TXN * const null_txn = 0; DB_TXN * const null_txn = 0;
const char * const fname = "test.cursor.brt"; const char * const fname = "test.cursor.ft";
int r; int r;
/* create the dup database file */ /* create the dup database file */

View file

@@ -117,7 +117,7 @@ char *db_v4_dir = OLDDATADIR "env_preload.4.2.0.cleanshutdown";
char *db_v4_dir_node4k = OLDDATADIR "env_preload.4.2.0.node4k.cleanshutdown"; char *db_v4_dir_node4k = OLDDATADIR "env_preload.4.2.0.node4k.cleanshutdown";
char *db_v4_dir_flat = OLDDATADIR "env_preload.4.2.0.flat.cleanshutdown"; char *db_v4_dir_flat = OLDDATADIR "env_preload.4.2.0.flat.cleanshutdown";
// HACK: Newer versions of the database/brt to use with this old // HACK: Newer versions of the database/ft to use with this old
// upgrade test code. // upgrade test code.
char *db_v6_dir = OLDDATADIR "env_preload.5.0.8.cleanshutdown"; char *db_v6_dir = OLDDATADIR "env_preload.5.0.8.cleanshutdown";
char *db_v6_dir_node4k = OLDDATADIR "env_preload.5.0.8.node4k.cleanshutdown"; char *db_v6_dir_node4k = OLDDATADIR "env_preload.5.0.8.node4k.cleanshutdown";

View file

@@ -476,7 +476,7 @@ needs_recovery (DB_ENV *env) {
static int toku_env_txn_checkpoint(DB_ENV * env, uint32_t kbyte, uint32_t min, uint32_t flags); static int toku_env_txn_checkpoint(DB_ENV * env, uint32_t kbyte, uint32_t min, uint32_t flags);
// Instruct db to use the default (built-in) key comparison function // Instruct db to use the default (built-in) key comparison function
// by setting the flag bits in the db and brt structs // by setting the flag bits in the db and ft structs
static int static int
db_use_builtin_key_cmp(DB *db) { db_use_builtin_key_cmp(DB *db) {
HANDLE_PANICKED_DB(db); HANDLE_PANICKED_DB(db);
@@ -3035,7 +3035,7 @@ env_get_iname(DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) {
// TODO 2216: Patch out this (dangerous) function when loader is working and // TODO 2216: Patch out this (dangerous) function when loader is working and
// we don't need to test the low-level redirect anymore. // we don't need to test the low-level redirect anymore.
// for use by test programs only, just a wrapper around brt call: // for use by test programs only, just a wrapper around ft call:
int int
toku_test_db_redirect_dictionary(DB * db, const char * dname_of_new_file, DB_TXN *dbtxn) { toku_test_db_redirect_dictionary(DB * db, const char * dname_of_new_file, DB_TXN *dbtxn) {
int r; int r;
@@ -3043,7 +3043,7 @@ toku_test_db_redirect_dictionary(DB * db, const char * dname_of_new_file, DB_TXN
DBT iname_dbt; DBT iname_dbt;
char * new_iname_in_env; char * new_iname_in_env;
FT_HANDLE brt = db->i->ft_handle; FT_HANDLE ft_handle = db->i->ft_handle;
TOKUTXN tokutxn = db_txn_struct_i(dbtxn)->tokutxn; TOKUTXN tokutxn = db_txn_struct_i(dbtxn)->tokutxn;
toku_fill_dbt(&dname_dbt, dname_of_new_file, strlen(dname_of_new_file)+1); toku_fill_dbt(&dname_dbt, dname_of_new_file, strlen(dname_of_new_file)+1);
@@ -3053,7 +3053,7 @@ toku_test_db_redirect_dictionary(DB * db, const char * dname_of_new_file, DB_TXN
new_iname_in_env = (char *) iname_dbt.data; new_iname_in_env = (char *) iname_dbt.data;
toku_multi_operation_client_lock(); //Must hold MO lock for dictionary_redirect. toku_multi_operation_client_lock(); //Must hold MO lock for dictionary_redirect.
r = toku_dictionary_redirect(new_iname_in_env, brt, tokutxn); r = toku_dictionary_redirect(new_iname_in_env, ft_handle, tokutxn);
toku_multi_operation_client_unlock(); toku_multi_operation_client_unlock();
toku_free(new_iname_in_env); toku_free(new_iname_in_env);

View file

@@ -313,7 +313,7 @@ c_getf_first_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val,
r = context->r_user_callback; r = context->r_user_callback;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_first //Give ft-layer an error (if any) to return from toku_ft_cursor_first
return r; return r;
} }
@@ -364,7 +364,7 @@ c_getf_last_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, v
r = context->r_user_callback; r = context->r_user_callback;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_last //Give ft-layer an error (if any) to return from toku_ft_cursor_last
return r; return r;
} }
@@ -423,7 +423,7 @@ c_getf_next_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, v
r = context->r_user_callback; r = context->r_user_callback;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_next //Give ft-layer an error (if any) to return from toku_ft_cursor_next
return r; return r;
} }
@@ -481,7 +481,7 @@ c_getf_prev_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, v
r = context->r_user_callback; r = context->r_user_callback;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_prev //Give ft-layer an error (if any) to return from toku_ft_cursor_prev
return r; return r;
} }
@@ -518,7 +518,7 @@ c_getf_current_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val
r = 0; r = 0;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_current //Give ft-layer an error (if any) to return from toku_ft_cursor_current
return r; return r;
} }
@@ -571,7 +571,7 @@ c_getf_set_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, vo
r = context->r_user_callback; r = context->r_user_callback;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_set //Give ft-layer an error (if any) to return from toku_ft_cursor_set
return r; return r;
} }
@@ -627,7 +627,7 @@ c_getf_set_range_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec v
r = context->r_user_callback; r = context->r_user_callback;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_set_range //Give ft-layer an error (if any) to return from toku_ft_cursor_set_range
return r; return r;
} }
@@ -683,7 +683,7 @@ c_getf_set_range_reverse_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, b
r = context->r_user_callback; r = context->r_user_callback;
} }
//Give brt-layer an error (if any) to return from toku_ft_cursor_set_range_reverse //Give ft-layer an error (if any) to return from toku_ft_cursor_set_range_reverse
return r; return r;
} }
@@ -876,7 +876,7 @@ toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC ** c, uint32_t flags, int is_
); );
assert(r == 0 || r == TOKUDB_MVCC_DICTIONARY_TOO_NEW); assert(r == 0 || r == TOKUDB_MVCC_DICTIONARY_TOO_NEW);
if (r == 0) { if (r == 0) {
// Set the is_temporary_cursor boolean inside the brt node so // Set the is_temporary_cursor boolean inside the ftnode so
// that a query only needing one cursor will not perform // that a query only needing one cursor will not perform
// unnecessary malloc calls. // unnecessary malloc calls.
if (is_temporary_cursor) { if (is_temporary_cursor) {
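The c_getf_*_callback functions above all rely on the same context trick: the context records the user's callback and whatever it returned, and the low-level callback's own return value is what the cursor call ultimately reports back through the ft layer. A self-contained sketch with hypothetical names (the real ydb code uses ITEMLEN/bytevec and a per-operation context struct):

#include <stddef.h>

typedef int (*user_cb_t)(const void *key, size_t keylen,
                         const void *val, size_t vallen, void *extra);

struct getf_context {
    user_cb_t user_cb;
    void     *user_extra;
    int       r_user_callback;   // what the user's callback returned
};

static int lowlevel_getf_callback(const void *key, size_t keylen,
                                  const void *val, size_t vallen, void *ctx_v) {
    struct getf_context *ctx = (struct getf_context *) ctx_v;
    int r = 0;
    if (key != NULL) {
        ctx->r_user_callback = ctx->user_cb(key, keylen, val, vallen, ctx->user_extra);
        if (ctx->r_user_callback != 0) {
            r = ctx->r_user_callback;
        }
    }
    // Give the ft layer an error (if any) to return from the cursor call.
    return r;
}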

View file

@@ -1048,7 +1048,7 @@ toku_db_verify_with_progress(DB *db, int (*progress_callback)(void *extra, float
return r; return r;
} }
int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE brt, bool is_open) { int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE ft_handle, bool is_open) {
if (flags || env == NULL) if (flags || env == NULL)
return EINVAL; return EINVAL;
@@ -1067,7 +1067,7 @@ int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE brt
return ENOMEM; return ENOMEM;
} }
memset(result->i, 0, sizeof *result->i); memset(result->i, 0, sizeof *result->i);
result->i->ft_handle = brt; result->i->ft_handle = ft_handle;
result->i->opened = is_open; result->i->opened = is_open;
*dbp = result; *dbp = result;
return 0; return 0;
@@ -1082,10 +1082,10 @@ toku_db_create(DB ** db, DB_ENV * env, uint32_t flags) {
return EINVAL; return EINVAL;
FT_HANDLE brt; FT_HANDLE ft_handle;
toku_ft_handle_create(&brt); toku_ft_handle_create(&ft_handle);
int r = toku_setup_db_internal(db, env, flags, brt, false); int r = toku_setup_db_internal(db, env, flags, ft_handle, false);
if (r != 0) return r; if (r != 0) return r;
DB *result=*db; DB *result=*db;
@@ -1162,7 +1162,7 @@ toku_db_create(DB ** db, DB_ENV * env, uint32_t flags) {
// The new inames are returned to the caller. // The new inames are returned to the caller.
// It is the caller's responsibility to free them. // It is the caller's responsibility to free them.
// If "mark_as_loader" is true, then include a mark in the iname // If "mark_as_loader" is true, then include a mark in the iname
// to indicate that the file is created by the brt loader. // to indicate that the file is created by the ft loader.
// Return 0 on success (could fail if write lock not available). // Return 0 on success (could fail if write lock not available).
static int static int
load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) { load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) {
@@ -1207,13 +1207,13 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new
int do_fsync = 0; int do_fsync = 0;
LSN *get_lsn = NULL; LSN *get_lsn = NULL;
for (i = 0; i < N; i++) { for (i = 0; i < N; i++) {
FT_HANDLE brt = dbs[i]->i->ft_handle; FT_HANDLE ft_handle = dbs[i]->i->ft_handle;
//Fsync is necessary for the last one only. //Fsync is necessary for the last one only.
if (i==N-1) { if (i==N-1) {
do_fsync = 1; //We only need a single fsync of logs. do_fsync = 1; //We only need a single fsync of logs.
get_lsn = load_lsn; //Set pointer to capture the last lsn. get_lsn = load_lsn; //Set pointer to capture the last lsn.
} }
toku_ft_load(brt, ttxn, new_inames_in_env[i], do_fsync, get_lsn); toku_ft_load(ft_handle, ttxn, new_inames_in_env[i], do_fsync, get_lsn);
} }
} }
return rval; return rval;

View file

@@ -139,7 +139,7 @@ int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn);
int toku_db_get (DB * db, DB_TXN * txn, DBT * key, DBT * data, uint32_t flags); int toku_db_get (DB * db, DB_TXN * txn, DBT * key, DBT * data, uint32_t flags);
int toku_db_create(DB ** db, DB_ENV * env, uint32_t flags); int toku_db_create(DB ** db, DB_ENV * env, uint32_t flags);
int toku_db_close(DB * db); int toku_db_close(DB * db);
int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE brt, bool is_open); int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE ft_handle, bool is_open);
int db_getf_set(DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra); int db_getf_set(DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra);
int autotxn_db_get(DB* db, DB_TXN* txn, DBT* key, DBT* data, uint32_t flags); int autotxn_db_get(DB* db, DB_TXN* txn, DBT* key, DBT* data, uint32_t flags);

View file

@@ -104,7 +104,7 @@ PATENT RIGHTS GRANT:
// The new inames are returned to the caller. // The new inames are returned to the caller.
// It is the caller's responsibility to free them. // It is the caller's responsibility to free them.
// If "mark_as_loader" is true, then include a mark in the iname // If "mark_as_loader" is true, then include a mark in the iname
// to indicate that the file is created by the brt loader. // to indicate that the file is created by the ft loader.
// Return 0 on success (could fail if write lock not available). // Return 0 on success (could fail if write lock not available).
int locked_load_inames(DB_ENV * env, int locked_load_inames(DB_ENV * env,
DB_TXN * txn, DB_TXN * txn,

View file

@@ -276,7 +276,7 @@ toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_
r = toku_db_get_point_write_lock(db, txn, key); r = toku_db_get_point_write_lock(db, txn, key);
} }
if (r == 0) { if (r == 0) {
//Insert into the brt. //Insert into the ft.
TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL; TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL;
enum ft_msg_type type = FT_INSERT; enum ft_msg_type type = FT_INSERT;
if (flags==DB_NOOVERWRITE_NO_ERROR) { if (flags==DB_NOOVERWRITE_NO_ERROR) {
@@ -396,9 +396,9 @@ cleanup:
} }
static void static void
log_del_single(DB_TXN *txn, FT_HANDLE brt, const DBT *key) { log_del_single(DB_TXN *txn, FT_HANDLE ft_handle, const DBT *key) {
TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn; TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
toku_ft_log_del(ttxn, brt, key); toku_ft_log_del(ttxn, ft_handle, key);
} }
static uint32_t static uint32_t
@@ -413,7 +413,7 @@ sum_size(uint32_t num_arrays, DBT_ARRAY keys[], uint32_t overhead) {
} }
static void static void
log_del_multiple(DB_TXN *txn, DB *src_db, const DBT *key, const DBT *val, uint32_t num_dbs, FT_HANDLE brts[], DBT_ARRAY keys[]) { log_del_multiple(DB_TXN *txn, DB *src_db, const DBT *key, const DBT *val, uint32_t num_dbs, FT_HANDLE fts[], DBT_ARRAY keys[]) {
if (num_dbs > 0) { if (num_dbs > 0) {
TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn; TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
FT_HANDLE src_ft = src_db ? src_db->i->ft_handle : NULL; FT_HANDLE src_ft = src_db ? src_db->i->ft_handle : NULL;
@@ -422,11 +422,11 @@ log_del_multiple(DB_TXN *txn, DB *src_db, const DBT *key, const DBT *val, uint32
if (del_single_sizes < del_multiple_size) { if (del_single_sizes < del_multiple_size) {
for (uint32_t i = 0; i < num_dbs; i++) { for (uint32_t i = 0; i < num_dbs; i++) {
for (uint32_t j = 0; j < keys[i].size; j++) { for (uint32_t j = 0; j < keys[i].size; j++) {
log_del_single(txn, brts[i], &keys[i].dbts[j]); log_del_single(txn, fts[i], &keys[i].dbts[j]);
} }
} }
} else { } else {
toku_ft_log_del_multiple(ttxn, src_ft, brts, num_dbs, key, val); toku_ft_log_del_multiple(ttxn, src_ft, fts, num_dbs, key, val);
} }
} }
} }
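log_del_multiple() above chooses between two log encodings: if logging each delete individually would be smaller than one combined delete_multiple record, it logs the singles, otherwise the combined record. Below is a self-contained sketch of that size comparison; the overhead parameters are made up, where the real code derives them from sum_size() and the log entry layout.

#include <stddef.h>
#include <stdint.h>

// Estimate the bytes needed to log every delete individually.
static uint32_t single_del_bytes(const size_t *key_sizes, uint32_t n,
                                 uint32_t per_record_overhead) {
    uint32_t total = 0;
    for (uint32_t i = 0; i < n; i++) {
        total += (uint32_t) key_sizes[i] + per_record_overhead;
    }
    return total;
}

// Prefer per-key delete records only when they are cheaper than one combined record.
static int prefer_single_deletes(const size_t *key_sizes, uint32_t n,
                                 uint32_t per_record_overhead,
                                 uint32_t multi_record_bytes) {
    return single_del_bytes(key_sizes, n, per_record_overhead) < multi_record_bytes;
}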
@@ -539,7 +539,7 @@ env_del_multiple(
uint32_t lock_flags[num_dbs]; uint32_t lock_flags[num_dbs];
uint32_t remaining_flags[num_dbs]; uint32_t remaining_flags[num_dbs];
FT_HANDLE brts[num_dbs]; FT_HANDLE fts[num_dbs];
bool indexer_lock_taken = false; bool indexer_lock_taken = false;
bool src_same = false; bool src_same = false;
bool indexer_shortcut = false; bool indexer_shortcut = false;
@@ -594,7 +594,7 @@ env_del_multiple(
if (r != 0) goto cleanup; if (r != 0) goto cleanup;
} }
} }
brts[which_db] = db->i->ft_handle; fts[which_db] = db->i->ft_handle;
} }
if (indexer) { if (indexer) {
@@ -611,7 +611,7 @@ env_del_multiple(
} }
} }
toku_multi_operation_client_lock(); toku_multi_operation_client_lock();
log_del_multiple(txn, src_db, src_key, src_val, num_dbs, brts, del_keys); log_del_multiple(txn, src_db, src_key, src_val, num_dbs, fts, del_keys);
r = do_del_multiple(txn, num_dbs, db_array, del_keys, src_db, src_key, indexer_shortcut); r = do_del_multiple(txn, num_dbs, db_array, del_keys, src_db, src_key, indexer_shortcut);
toku_multi_operation_client_unlock(); toku_multi_operation_client_unlock();
if (indexer_lock_taken) { if (indexer_lock_taken) {
@@ -627,11 +627,11 @@ cleanup:
} }
static void static void
log_put_multiple(DB_TXN *txn, DB *src_db, const DBT *src_key, const DBT *src_val, uint32_t num_dbs, FT_HANDLE brts[]) { log_put_multiple(DB_TXN *txn, DB *src_db, const DBT *src_key, const DBT *src_val, uint32_t num_dbs, FT_HANDLE fts[]) {
if (num_dbs > 0) { if (num_dbs > 0) {
TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn; TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
FT_HANDLE src_ft = src_db ? src_db->i->ft_handle : NULL; FT_HANDLE src_ft = src_db ? src_db->i->ft_handle : NULL;
toku_ft_log_put_multiple(ttxn, src_ft, brts, num_dbs, src_key, src_val); toku_ft_log_put_multiple(ttxn, src_ft, fts, num_dbs, src_key, src_val);
} }
} }
@@ -701,7 +701,7 @@ env_put_multiple_internal(
uint32_t lock_flags[num_dbs]; uint32_t lock_flags[num_dbs];
uint32_t remaining_flags[num_dbs]; uint32_t remaining_flags[num_dbs];
FT_HANDLE brts[num_dbs]; FT_HANDLE fts[num_dbs];
bool indexer_shortcut = false; bool indexer_shortcut = false;
bool indexer_lock_taken = false; bool indexer_lock_taken = false;
bool src_same = false; bool src_same = false;
@@ -773,7 +773,7 @@ env_put_multiple_internal(
if (r != 0) goto cleanup; if (r != 0) goto cleanup;
} }
} }
brts[which_db] = db->i->ft_handle; fts[which_db] = db->i->ft_handle;
} }
if (indexer) { if (indexer) {
@@ -790,7 +790,7 @@ env_put_multiple_internal(
} }
} }
toku_multi_operation_client_lock(); toku_multi_operation_client_lock();
log_put_multiple(txn, src_db, src_key, src_val, num_dbs, brts); log_put_multiple(txn, src_db, src_key, src_val, num_dbs, fts);
r = do_put_multiple(txn, num_dbs, db_array, put_keys, put_vals, src_db, src_key, indexer_shortcut); r = do_put_multiple(txn, num_dbs, db_array, put_keys, put_vals, src_db, src_key, indexer_shortcut);
toku_multi_operation_client_unlock(); toku_multi_operation_client_unlock();
if (indexer_lock_taken) { if (indexer_lock_taken) {