refs #5779 merge new accounting to main
git-svn-id: file:///svn/toku/tokudb@51141 c7de825b-a66e-492c-adef-691d508d4ae1
parent ad7254879d
commit 0062aabc73
2 changed files with 40 additions and 14 deletions
ft/ft-internal.h

@@ -513,8 +513,6 @@ struct ft_handle {
     struct ft_options options;
 };
 
-// FIXME needs toku prefix
-long ftnode_memory_size (FTNODE node);
 PAIR_ATTR make_ftnode_pair_attr(FTNODE node);
 PAIR_ATTR make_invalid_pair_attr(void);
 
@@ -973,12 +971,8 @@ typedef enum {
     FT_UPDATES = 0,
     FT_UPDATES_BROADCAST,
     FT_DESCRIPTOR_SET,
-    FT_PARTIAL_EVICTIONS_NONLEAF,            // number of nonleaf node partial evictions
-    FT_PARTIAL_EVICTIONS_LEAF,               // number of leaf node partial evictions
     FT_MSN_DISCARDS,                         // how many messages were ignored by leaf because of msn
-    //FT_MAX_WORKDONE,                       // max workdone value of any buffer
     FT_TOTAL_RETRIES,                        // total number of search retries due to TRY_AGAIN
-    //FT_MAX_SEARCH_EXCESS_RETRIES,          // max number of excess search retries (retries - treeheight) due to TRY_AGAIN
     FT_SEARCH_TRIES_GT_HEIGHT,               // number of searches that required more tries than the height of the tree
     FT_SEARCH_TRIES_GT_HEIGHTPLUS3,          // number of searches that required more tries than the height of the tree plus three
     FT_DISK_FLUSH_LEAF,                      // number of leaf nodes flushed to disk, not for checkpoint
@@ -997,6 +991,14 @@ typedef enum {
     FT_DISK_FLUSH_NONLEAF_BYTES_FOR_CHECKPOINT,// number of nonleaf nodes flushed to disk for checkpoint
     FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT,// number of nonleaf nodes flushed to disk for checkpoint
     FT_DISK_FLUSH_NONLEAF_TOKUTIME_FOR_CHECKPOINT,// number of nonleaf nodes flushed to disk for checkpoint
+    FT_PARTIAL_EVICTIONS_NONLEAF,            // number of nonleaf node partial evictions
+    FT_PARTIAL_EVICTIONS_NONLEAF_BYTES,      // number of nonleaf node partial evictions
+    FT_PARTIAL_EVICTIONS_LEAF,               // number of leaf node partial evictions
+    FT_PARTIAL_EVICTIONS_LEAF_BYTES,         // number of leaf node partial evictions
+    FT_FULL_EVICTIONS_LEAF,                  // number of full cachetable evictions on leaf nodes
+    FT_FULL_EVICTIONS_LEAF_BYTES,            // number of full cachetable evictions on leaf nodes (bytes)
+    FT_FULL_EVICTIONS_NONLEAF,               // number of full cachetable evictions on nonleaf nodes
+    FT_FULL_EVICTIONS_NONLEAF_BYTES,         // number of full cachetable evictions on nonleaf nodes (bytes)
     FT_CREATE_LEAF,                          // number of leaf nodes created
     FT_CREATE_NONLEAF,                       // number of nonleaf nodes created
     FT_DESTROY_LEAF,                         // number of leaf nodes destroyed
38  ft/ft-ops.cc

@@ -175,8 +175,6 @@ status_init(void)
     STATUS_INIT(FT_UPDATES, PARCOUNT, "dictionary updates");
     STATUS_INIT(FT_UPDATES_BROADCAST, PARCOUNT, "dictionary broadcast updates");
     STATUS_INIT(FT_DESCRIPTOR_SET, PARCOUNT, "descriptor set");
-    STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF, PARCOUNT, "nonleaf node partial evictions");
-    STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF, PARCOUNT, "leaf node partial evictions");
     STATUS_INIT(FT_MSN_DISCARDS, PARCOUNT, "messages ignored by leaf due to msn");
     STATUS_INIT(FT_TOTAL_RETRIES, PARCOUNT, "total search retries due to TRY_AGAIN");
     STATUS_INIT(FT_SEARCH_TRIES_GT_HEIGHT, PARCOUNT, "searches requiring more tries than the height of the tree");
@@ -200,6 +198,16 @@ status_init(void)
     STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_PREFETCH, PARCOUNT, "buffers decompressed for prefetch");
     STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_WRITE, PARCOUNT, "buffers decompressed for write");
+
+    // Eviction statistics:
+    STATUS_INIT(FT_FULL_EVICTIONS_LEAF, PARCOUNT, "leaf node full evictions");
+    STATUS_INIT(FT_FULL_EVICTIONS_LEAF_BYTES, PARCOUNT, "leaf node full evictions (bytes)");
+    STATUS_INIT(FT_FULL_EVICTIONS_NONLEAF, PARCOUNT, "nonleaf node full evictions");
+    STATUS_INIT(FT_FULL_EVICTIONS_NONLEAF_BYTES, PARCOUNT, "nonleaf node full evictions (bytes)");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF, PARCOUNT, "leaf node partial evictions");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF_BYTES, PARCOUNT, "leaf node partial evictions (bytes)");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF, PARCOUNT, "nonleaf node partial evictions");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, PARCOUNT, "nonleaf node partial evictions (bytes)");
 
     // Disk read statistics:
     //
     // Pivots: For queries, prefetching, or writing.
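For orientation: STATUS_INIT registers a counter under a human-readable legend and STATUS_INC bumps it, and the new block above groups every eviction counter in one place. The sketch below shows the general shape of such an enum-indexed status table; the names are hypothetical and the real STATUS_INIT/STATUS_INC macros in ft-ops.cc differ in detail.

    /* Illustrative sketch only -- hypothetical names, not the TokuFT macros. */
    #include <stdint.h>
    #include <stdio.h>

    typedef enum { EX_EVICTIONS, EX_EVICTION_BYTES, EX_STATUS_NUM_ROWS } ex_status_entry;

    struct ex_status_row {
        const char *legend;  /* human-readable description of the counter */
        uint64_t    value;   /* monotonically increasing count */
    };

    static struct ex_status_row ex_status[EX_STATUS_NUM_ROWS];

    #define EX_STATUS_INIT(k, legend_str) (ex_status[k].legend = (legend_str))
    #define EX_STATUS_INC(k, delta)       (ex_status[k].value += (uint64_t)(delta))

    static void ex_status_print(void) {
        for (int i = 0; i < EX_STATUS_NUM_ROWS; i++) {
            if (ex_status[i].legend) {
                printf("%s: %llu\n", ex_status[i].legend,
                       (unsigned long long)ex_status[i].value);
            }
        }
    }

Grouping the initialization by topic, as the hunk does with "// Eviction statistics:", keeps each legend string next to the counters it describes.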
@@ -498,7 +506,7 @@ exit:
     return retval;
 }
 
-long
+static long
 ftnode_memory_size (FTNODE node)
 // Effect: Estimate how much main memory a node requires.
 {
@@ -827,6 +835,14 @@ void toku_ftnode_flush_callback (
     }
     if (!keep_me) {
         if (!is_clone) {
+            long node_size = ftnode_memory_size(ftnode);
+            if (ftnode->height == 0) {
+                STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
+                STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
+            } else {
+                STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
+                STATUS_INC(FT_FULL_EVICTIONS_NONLEAF_BYTES, node_size);
+            }
             toku_free(*disk_data);
         }
         else {
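Pairing each eviction counter with a byte counter, split by node height into leaf and nonleaf, makes derived metrics cheap to compute later. A small example of consuming such a pair (illustrative only, not code from this commit):

    /* Example: average size of fully evicted leaf nodes, derived
       from a count/bytes counter pair. */
    #include <stdint.h>
    #include <stdio.h>

    static void report_full_leaf_evictions(uint64_t count, uint64_t bytes) {
        double avg = (count != 0) ? (double)bytes / (double)count : 0.0;
        printf("leaf full evictions: %llu (%llu bytes, avg %.0f bytes/node)\n",
               (unsigned long long)count, (unsigned long long)bytes, avg);
    }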
@@ -993,8 +1009,11 @@ int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR UU(old_attr), PAIR_ATTR*
         for (int i = 0; i < node->n_children; i++) {
             if (BP_STATE(node,i) == PT_AVAIL) {
                 if (BP_SHOULD_EVICT(node,i)) {
+                    long size_before = ftnode_memory_size(node);
+                    compress_internal_node_partition(node, i, ft->h->compression_method);
+                    long delta = size_before - ftnode_memory_size(node);
                     STATUS_INC(FT_PARTIAL_EVICTIONS_NONLEAF, 1);
-                    cilk_spawn compress_internal_node_partition(node, i, ft->h->compression_method);
+                    STATUS_INC(FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, delta);
                 }
                 else {
                     BP_SWEEP_CLOCK(node,i);
@@ -1004,7 +1023,6 @@ int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR UU(old_attr), PAIR_ATTR*
                     continue;
                 }
             }
-            cilk_sync;
         }
         //
         // partial eviction strategy for basement nodes:
@@ -1015,17 +1033,23 @@ int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR UU(old_attr), PAIR_ATTR*
         for (int i = 0; i < node->n_children; i++) {
             // Get rid of compressed stuff no matter what.
             if (BP_STATE(node,i) == PT_COMPRESSED) {
-                STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+                long size_before = ftnode_memory_size(node);
                 SUB_BLOCK sb = BSB(node, i);
                 toku_free(sb->compressed_ptr);
                 toku_free(sb);
                 set_BNULL(node, i);
                 BP_STATE(node,i) = PT_ON_DISK;
+                long delta = size_before - ftnode_memory_size(node);
+                STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+                STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF_BYTES, delta);
             }
             else if (BP_STATE(node,i) == PT_AVAIL) {
                 if (BP_SHOULD_EVICT(node,i)) {
-                    STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+                    long size_before = ftnode_memory_size(node);
                     toku_evict_bn_from_memory(node, i, ft);
+                    long delta = size_before - ftnode_memory_size(node);
+                    STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+                    STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF_BYTES, delta);
                 }
                 else {
                     BP_SWEEP_CLOCK(node,i);
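Every partial-eviction path in the hunks above follows the same pattern: measure the node's in-memory footprint, evict, and charge the difference to a *_BYTES counter alongside the event counter. A generic sketch of that pattern, using placeholder names rather than TokuFT types or functions:

    /* Sketch of the measure-before/measure-after accounting pattern.
       measure() and evict() are caller-supplied placeholders. */
    #include <stdint.h>

    struct evict_counters { uint64_t count; uint64_t bytes; };

    static void evict_and_account(void *node, int childnum,
                                  long (*measure)(void *node),
                                  void (*evict)(void *node, int childnum),
                                  struct evict_counters *c) {
        long size_before = measure(node);   /* footprint before the eviction */
        evict(node, childnum);              /* release one partition */
        long delta = size_before - measure(node);
        c->count += 1;                      /* one more partial eviction */
        c->bytes += (uint64_t)delta;        /* bytes the eviction released */
    }

Taking the delta from whole-node measurements (rather than trusting the evicted partition's nominal size) keeps the byte counters consistent with what the cachetable actually got back.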