Mirror of https://github.com/MariaDB/server.git (synced 2025-02-01 11:31:51 +01:00)
Refs Tokutek/ft-index#46 Unify toku_mempool_*_(size|space) to be toku_mempool_*_size
parent 5c9a1a4ca1
commit 4d3451acd0
5 changed files with 22 additions and 26 deletions
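The change itself is mechanical: the mempool accessors named *_space become *_size. Reconstructed from the hunks below, the renamed API after this commit reads:

    size_t toku_mempool_get_used_size(const struct mempool *mp);      // was toku_mempool_get_used_space
    size_t toku_mempool_get_free_size(const struct mempool *mp);      // was toku_mempool_get_free_space
    size_t toku_mempool_get_allocated_size(const struct mempool *mp); // was toku_mempool_get_allocated_space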
ft/bndata.cc (16 changed lines)
@@ -205,7 +205,7 @@ void bn_data::serialize_header(struct wbuf *wb) const {
     //key_data_size
     wbuf_nocrc_uint(wb, m_disksize_of_keys);
     //val_data_size
-    wbuf_nocrc_uint(wb, toku_mempool_get_used_space(&m_buffer_mempool));
+    wbuf_nocrc_uint(wb, toku_mempool_get_used_size(&m_buffer_mempool));
     //fixed_klpair_length
     wbuf_nocrc_uint(wb, m_buffer.get_fixed_length());
     // all_keys_same_length
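Per the comments in this hunk, serialize_header writes four uint32 fields in order: key_data_size, val_data_size, fixed_klpair_length, and all_keys_same_length. A minimal reader sketch, assuming rbuf_int is the usual counterpart of wbuf_nocrc_uint (the real path is bn_data::deserialize_from_rbuf, hunked below):

    // Hypothetical reader, field order only -- not actual ft code:
    uint32_t key_data_size        = rbuf_int(rb); // serialized from m_disksize_of_keys
    uint32_t val_data_size        = rbuf_int(rb); // toku_mempool_get_used_size() at write time
    uint32_t fixed_klpair_length  = rbuf_int(rb); // m_buffer.get_fixed_length()
    uint32_t all_keys_same_length = rbuf_int(rb); // flag written just past this hunk's context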
@@ -222,7 +222,7 @@ void bn_data::serialize_rest(struct wbuf *wb) const {
     //Write leafentries
     //Just ran dmt_compress_kvspace so there is no fragmentation and also leafentries are in sorted order.
     paranoid_invariant(toku_mempool_get_frag_size(&m_buffer_mempool) == 0);
-    uint32_t val_data_size = toku_mempool_get_used_space(&m_buffer_mempool);
+    uint32_t val_data_size = toku_mempool_get_used_size(&m_buffer_mempool);
     wbuf_nocrc_literal_bytes(wb, toku_mempool_get_base(&m_buffer_mempool), val_data_size);
 }

@@ -347,7 +347,7 @@ void bn_data::deserialize_from_rbuf(uint32_t num_entries, struct rbuf *rb, uint3
         // Unnecessary after version 26
         // Reallocate smaller mempool to save memory
         invariant_zero(toku_mempool_get_frag_size(&m_buffer_mempool));
-        toku_mempool_realloc_larger(&m_buffer_mempool, toku_mempool_get_used_space(&m_buffer_mempool));
+        toku_mempool_realloc_larger(&m_buffer_mempool, toku_mempool_get_used_size(&m_buffer_mempool));
     }
 }

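The shrink-to-fit pattern here recurs in split_klpairs below: once fragmentation is known to be zero, the pool is resized down to exactly its used bytes. Note that toku_mempool_realloc_larger is, despite its name, the call the code uses for this "reallocate smaller" step. Condensed, with comments added:

    invariant_zero(toku_mempool_get_frag_size(&m_buffer_mempool)); // no holes, so used bytes are contiguous
    toku_mempool_realloc_larger(&m_buffer_mempool,
                                toku_mempool_get_used_size(&m_buffer_mempool)); // resize pool to bytes in use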
@@ -396,7 +396,7 @@ static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU
 // Compress things, and grow or shrink the mempool if needed.
 // May (always if force_compress) have a side effect of putting contents of mempool in sorted order.
 void bn_data::dmt_compress_kvspace(size_t added_size, void **maybe_free, bool force_compress) {
-    uint32_t total_size_needed = toku_mempool_get_used_space(&m_buffer_mempool) + added_size;
+    uint32_t total_size_needed = toku_mempool_get_used_size(&m_buffer_mempool) + added_size;
     // set the new mempool size to be twice of the space we actually need.
     // On top of the 25% that is padded within toku_mempool_construct (which we
     // should consider getting rid of), that should be good enough.
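The sizing comments above imply an effective growth factor of 2.5x, matching the TODO(leif) note in util/dmt.cc further down: the caller asks for twice the bytes it needs, and toku_mempool_construct pads that request by another 25%. As arithmetic (illustration only, not patch code):

    size_t needed    = toku_mempool_get_used_size(&m_buffer_mempool) + added_size;
    size_t requested = 2 * needed;                // "twice of the space we actually need"
    size_t capacity  = requested + requested / 4; // +25% padding inside toku_mempool_construct
    // capacity == 2.5 * needed, the growth factor noted in util/dmt.cc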
@@ -556,7 +556,7 @@ void bn_data::split_klpairs(

     right_bd->init_zero();

-    size_t mpsize = toku_mempool_get_used_space(&m_buffer_mempool);  // overkill, but safe
+    size_t mpsize = toku_mempool_get_used_size(&m_buffer_mempool);  // overkill, but safe

     struct mempool new_left_mp;
     toku_mempool_construct(&new_left_mp, mpsize);
@@ -587,14 +587,14 @@ void bn_data::split_klpairs(
     // We overallocated ("overkill") above
     struct mempool *const left_mp = &m_buffer_mempool;
     paranoid_invariant_zero(toku_mempool_get_frag_size(left_mp));
-    toku_mempool_realloc_larger(left_mp, toku_mempool_get_used_space(left_mp));
+    toku_mempool_realloc_larger(left_mp, toku_mempool_get_used_size(left_mp));
     paranoid_invariant_zero(toku_mempool_get_frag_size(right_mp));
-    toku_mempool_realloc_larger(right_mp, toku_mempool_get_used_space(right_mp));
+    toku_mempool_realloc_larger(right_mp, toku_mempool_get_used_size(right_mp));
 }

 uint64_t bn_data::get_disk_size() {
     return m_disksize_of_keys +
-           toku_mempool_get_used_space(&m_buffer_mempool);
+           toku_mempool_get_used_size(&m_buffer_mempool);
 }

 struct verify_le_in_mempool_state {
ft/bndata.h
@@ -325,8 +325,6 @@ public:
     // Between calling prepare_to_serialize and actually serializing, the basement node may not be modified
     void prepare_to_serialize(void);

-    //TODO(yoni): go to serialize_ftnode_partition and move prepare/header/etc (and wbufwriteleafentry) into here and add just one external function: serialize_to_wbuf()
-
     // Serialize the basement node header to a wbuf
     // Requires prepare_to_serialize() to have been called first.
     void serialize_header(struct wbuf *wb) const;
util/dmt.cc (16 changed lines)
@@ -288,7 +288,7 @@ dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_array_value_internal(
 //TODO(leif) write microbenchmarks to compare growth factor. Note: growth factor here is actually 2.5 because of mempool_construct
 template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
 void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_array_for_insert(void) {
-    bool space_available = toku_mempool_get_free_space(&this->mp) >= align(this->value_length);
+    bool space_available = toku_mempool_get_free_size(&this->mp) >= align(this->value_length);

     if (!space_available) {
         const uint32_t n = this->d.a.num_values + 1;
@@ -299,7 +299,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_array_for_insert(vo
         toku_mempool_construct(&new_kvspace, new_space);
         size_t copy_bytes = this->d.a.num_values * align(this->value_length);
         invariant(copy_bytes + align(this->value_length) <= new_space);
-        paranoid_invariant(copy_bytes <= toku_mempool_get_used_space(&this->mp));
+        paranoid_invariant(copy_bytes <= toku_mempool_get_used_size(&this->mp));
         // Copy over to new mempool
         if (this->d.a.num_values > 0) {
             void* dest = toku_mempool_malloc(&new_kvspace, copy_bytes, 1);
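Read together, these two hunks are the dmt's fixed-length array growth path: check whether one more aligned value fits, and if not, construct a larger mempool and bump-allocate a copy target for the packed values. A condensed sketch; new_space is computed between the hunks and the final copy/swap lies outside the shown context:

    bool space_available = toku_mempool_get_free_size(&this->mp) >= align(this->value_length);
    if (!space_available) {
        struct mempool new_kvspace;
        toku_mempool_construct(&new_kvspace, new_space);                // new_space computed upstream (not shown)
        size_t copy_bytes = this->d.a.num_values * align(this->value_length);
        invariant(copy_bytes + align(this->value_length) <= new_space); // room for old values plus the new one
        paranoid_invariant(copy_bytes <= toku_mempool_get_used_size(&this->mp));
        void *dest = toku_mempool_malloc(&new_kvspace, copy_bytes, 1);  // destination for the packed copy
        // ...copy the old values to dest and adopt new_kvspace (outside the hunk)
    }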
@@ -435,7 +435,7 @@ template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
 void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::verify(void) const {
     uint32_t num_values = this->size();
     invariant(num_values < UINT32_MAX);
-    size_t pool_used = toku_mempool_get_used_space(&this->mp);
+    size_t pool_used = toku_mempool_get_used_size(&this->mp);
     size_t pool_size = toku_mempool_get_size(&this->mp);
     size_t pool_frag = toku_mempool_get_frag_size(&this->mp);
     invariant(pool_used <= pool_size);
@@ -607,8 +607,8 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_free(const subtree &st) {
 template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
 void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_tree(const dmtwriter_t * value) {
     const ssize_t curr_capacity = toku_mempool_get_size(&this->mp);
-    const ssize_t curr_free = toku_mempool_get_free_space(&this->mp);
-    const ssize_t curr_used = toku_mempool_get_used_space(&this->mp);
+    const ssize_t curr_free = toku_mempool_get_free_size(&this->mp);
+    const ssize_t curr_used = toku_mempool_get_used_size(&this->mp);
     ssize_t add_size = 0;
     if (value) {
         add_size = __builtin_offsetof(dmt_node, value) + value->get_size();
@@ -886,7 +886,7 @@ template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
 node_offset* dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::alloc_temp_node_offsets(uint32_t num_offsets) {
     size_t mem_needed = num_offsets * sizeof(node_offset);
     size_t mem_free;
-    mem_free = toku_mempool_get_free_space(&this->mp);
+    mem_free = toku_mempool_get_free_size(&this->mp);
     node_offset* CAST_FROM_VOIDP(tmp, toku_mempool_get_next_free_ptr(&this->mp));
     if (mem_free >= mem_needed) {
         return tmp;
@@ -1149,7 +1149,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::serialize_values(uint32_t expect
     const uint32_t fixed_len = this->value_length;
     const uint32_t fixed_aligned_len = align(this->value_length);
     paranoid_invariant(expected_unpadded_memory == this->d.a.num_values * this->value_length);
-    paranoid_invariant(toku_mempool_get_used_space(&this->mp) >=
+    paranoid_invariant(toku_mempool_get_used_size(&this->mp) >=
                        expected_unpadded_memory + pad_bytes * this->d.a.num_values);
     if (this->d.a.num_values == 0) {
         // Nothing to serialize
@@ -1234,7 +1234,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::build(dmt<dmtdata_t, dm
     }
     paranoid_invariant_null(this->sorted_node_offsets);

-    const size_t used = toku_mempool_get_used_space(&this->temp.mp);
+    const size_t used = toku_mempool_get_used_size(&this->temp.mp);
     const size_t allocated = toku_mempool_get_size(&this->temp.mp);
     // We want to use no more than (about) the actual used space + 25% overhead for mempool growth.
     // When we know the elements are fixed-length, we use the better dmt constructor.
util/mempool.cc
@@ -183,13 +183,11 @@ size_t toku_mempool_get_size(const struct mempool *mp) {
     return mp->size;
 }

-// TODO(yoni): unify the toku_mempool_get*_size and toku_mempool_get*_space functions (use either size or space but not both)
-// use _size for all
 size_t toku_mempool_get_frag_size(const struct mempool *mp) {
     return mp->frag_size;
 }

-size_t toku_mempool_get_used_space(const struct mempool *mp) {
+size_t toku_mempool_get_used_size(const struct mempool *mp) {
     return mp->free_offset - mp->frag_size;
 }
@@ -201,11 +199,11 @@ size_t toku_mempool_get_offset_limit(const struct mempool *mp) {
     return mp->free_offset;
 }

-size_t toku_mempool_get_free_space(const struct mempool *mp) {
+size_t toku_mempool_get_free_size(const struct mempool *mp) {
     return mp->size - mp->free_offset;
 }

-size_t toku_mempool_get_allocated_space(const struct mempool *mp) {
+size_t toku_mempool_get_allocated_size(const struct mempool *mp) {
     return mp->free_offset;
 }
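The one-line bodies in these hunks pin down the mempool's accounting: free_offset is the allocation high-water mark and frag_size counts reclaimable waste below it. The resulting identities, written as hypothetical sanity checks:

    // used      == free_offset - frag_size
    // free      == size - free_offset
    // allocated == free_offset
    assert(toku_mempool_get_allocated_size(mp) ==
           toku_mempool_get_used_size(mp) + toku_mempool_get_frag_size(mp));
    assert(toku_mempool_get_size(mp) ==
           toku_mempool_get_allocated_size(mp) + toku_mempool_get_free_size(mp));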
util/mempool.h
@@ -156,13 +156,13 @@ size_t toku_mempool_get_size(const struct mempool *mp);
 size_t toku_mempool_get_frag_size(const struct mempool *mp);

 /* get the amount of space that is holding useful data */
-size_t toku_mempool_get_used_space(const struct mempool *mp);
+size_t toku_mempool_get_used_size(const struct mempool *mp);

 /* get the amount of space that is available for new data */
-size_t toku_mempool_get_free_space(const struct mempool *mp);
+size_t toku_mempool_get_free_size(const struct mempool *mp);

 /* get the amount of space that has been allocated for use (wasted or not) */
-size_t toku_mempool_get_allocated_space(const struct mempool *mp);
+size_t toku_mempool_get_allocated_size(const struct mempool *mp);

 /* allocate a chunk of memory from the memory pool suitably aligned */
 void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment);
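A hypothetical caller after the rename, using only functions that appear in the hunks above (sizes chosen arbitrarily for illustration):

    struct mempool mp;
    toku_mempool_construct(&mp, 1024);              // pool of (at least) 1024 bytes
    void *p = toku_mempool_malloc(&mp, 100, 1);     // 100 bytes, byte alignment
    size_t used  = toku_mempool_get_used_size(&mp); // was toku_mempool_get_used_space
    size_t avail = toku_mempool_get_free_size(&mp); // was toku_mempool_get_free_space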