FT-276 Remove alignment from toku_mempool_malloc API
This commit is contained in:
parent 84c5d22e29
commit 46ab99301c
4 changed files with 21 additions and 25 deletions
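
At a glance, the commit drops the trailing alignment parameter from toku_mempool_malloc: every call site in this diff passed 1 (i.e. no alignment at all), so the pool becomes a plain bump allocator and callers that need alignment round their own sizes. A hedged before/after sketch of a call site (illustrative, not a line from the diff; mp and n are assumed to be a constructed mempool and a request size):

    // before this commit (alignment argument of 1, i.e. effectively unaligned):
    //   void *p = toku_mempool_malloc(&mp, n, 1);
    // after this commit (size only; align n yourself if you need aligned storage):
    void *p = toku_mempool_malloc(&mp, n);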
ft/bndata.cc (14 changed lines)
@@ -140,7 +140,7 @@ void bn_data::initialize_from_separate_keys_and_vals(uint32_t num_entries, struc
     rbuf_literal_bytes(rb, &vals_src, val_data_size);
 
     if (num_entries > 0) {
-        void *vals_dest = toku_mempool_malloc(&this->m_buffer_mempool, val_data_size, 1);
+        void *vals_dest = toku_mempool_malloc(&this->m_buffer_mempool, val_data_size);
         paranoid_invariant_notnull(vals_dest);
         memcpy(vals_dest, vals_src, val_data_size);
     }
@@ -384,7 +384,7 @@ struct dmt_compressor_state {
 static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU(), struct dmt_compressor_state * const oc) {
     LEAFENTRY old_le = oc->bd->get_le_from_klpair(klpair);
     uint32_t size = leafentry_memsize(old_le);
-    void* newdata = toku_mempool_malloc(oc->new_kvspace, size, 1);
+    void* newdata = toku_mempool_malloc(oc->new_kvspace, size);
     paranoid_invariant_notnull(newdata); // we do this on a fresh mempool, so nothing bad should happen
     memcpy(newdata, old_le, size);
     klpair->le_offset = toku_mempool_get_offset_from_pointer_and_base(oc->new_kvspace, newdata);
@@ -411,7 +411,7 @@ void bn_data::dmt_compress_kvspace(size_t added_size, void **maybe_free, bool fo
     } else {
         toku_mempool_construct(&new_kvspace, total_size_needed);
         size_t old_offset_limit = toku_mempool_get_offset_limit(&m_buffer_mempool);
-        void *new_mempool_base = toku_mempool_malloc(&new_kvspace, old_offset_limit, 1);
+        void *new_mempool_base = toku_mempool_malloc(&new_kvspace, old_offset_limit);
         memcpy(new_mempool_base, old_mempool_base, old_offset_limit);
     }
 
@@ -428,10 +428,10 @@ void bn_data::dmt_compress_kvspace(size_t added_size, void **maybe_free, bool fo
 // If MAYBE_FREE is nullptr then free the old mempool's space.
 // Otherwise, store the old mempool's space in maybe_free.
 LEAFENTRY bn_data::mempool_malloc_and_update_dmt(size_t size, void **maybe_free) {
-    void *v = toku_mempool_malloc(&m_buffer_mempool, size, 1);
+    void *v = toku_mempool_malloc(&m_buffer_mempool, size);
     if (v == nullptr) {
         dmt_compress_kvspace(size, maybe_free, false);
-        v = toku_mempool_malloc(&m_buffer_mempool, size, 1);
+        v = toku_mempool_malloc(&m_buffer_mempool, size);
         paranoid_invariant_notnull(v);
     }
     return (LEAFENTRY)v;
@@ -506,7 +506,7 @@ class split_klpairs_extra {
         LEAFENTRY old_le = m_left_bn->get_le_from_klpair(&klpair);
         size_t le_size = leafentry_memsize(old_le);
 
-        void *new_le = toku_mempool_malloc(dest_mp, le_size, 1);
+        void *new_le = toku_mempool_malloc(dest_mp, le_size);
         paranoid_invariant_notnull(new_le);
         memcpy(new_le, old_le, le_size);
         size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(dest_mp, new_le);
@@ -659,7 +659,7 @@ void bn_data::set_contents_as_clone_of_sorted_array(
     dmt_builder.create(num_les, total_key_size);
 
     for (uint32_t idx = 0; idx < num_les; idx++) {
-        void* new_le = toku_mempool_malloc(&m_buffer_mempool, le_sizes[idx], 1);
+        void* new_le = toku_mempool_malloc(&m_buffer_mempool, le_sizes[idx]);
         paranoid_invariant_notnull(new_le);
         memcpy(new_le, old_les[idx], le_sizes[idx]);
         size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(&m_buffer_mempool, new_le);
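The mempool_malloc_and_update_dmt hunk above shows the one interesting control-flow pattern in this file: try the bump allocation, and on failure compress the key/value space and retry, at which point the allocation must succeed because the fresh pool was sized to fit. A minimal sketch of that idiom; my_alloc_or_compress and my_compress_kvspace are hypothetical names standing in for the bn_data members:

    // Try/compact/retry idiom, assuming the compress step rebuilds the pool
    // large enough for the pending request (as dmt_compress_kvspace does).
    void *my_alloc_or_compress(struct mempool *mp, size_t size) {
        void *v = toku_mempool_malloc(mp, size);  // fast path: bump-allocate
        if (v == nullptr) {
            my_compress_kvspace(mp, size);        // stand-in for dmt_compress_kvspace()
            v = toku_mempool_malloc(mp, size);    // retry on the compacted pool
            paranoid_invariant_notnull(v);        // must succeed now
        }
        return v;
    }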
util/dmt.cc (14 changed lines)
@@ -130,7 +130,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fix
     toku_mempool_construct(&this->mp, aligned_memsize);
     if (aligned_memsize > 0) {
         paranoid_invariant(numvalues > 0);
-        void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize, 1);
+        void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize);
         paranoid_invariant_notnull(ptr);
         uint8_t * const CAST_FROM_VOIDP(dest, ptr);
         const uint8_t * const CAST_FROM_VOIDP(src, mem);
@@ -261,7 +261,7 @@ dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::alloc_array_value_end(voi
     paranoid_invariant(this->values_same_size);
     this->d.a.num_values++;
 
-    void *ptr = toku_mempool_malloc(&this->mp, align(this->value_length), 1);
+    void *ptr = toku_mempool_malloc(&this->mp, align(this->value_length));
     paranoid_invariant_notnull(ptr);
     paranoid_invariant(reinterpret_cast<size_t>(ptr) % ALIGNMENT == 0);
     dmtdata_t *CAST_FROM_VOIDP(n, ptr);
@@ -302,7 +302,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_array_for_insert(vo
     paranoid_invariant(copy_bytes <= toku_mempool_get_used_size(&this->mp));
     // Copy over to new mempool
     if (this->d.a.num_values > 0) {
-        void* dest = toku_mempool_malloc(&new_kvspace, copy_bytes, 1);
+        void* dest = toku_mempool_malloc(&new_kvspace, copy_bytes);
         invariant(dest!=nullptr);
         memcpy(dest, get_array_value(0), copy_bytes);
     }
@@ -344,7 +344,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::convert_from_tree_to_array(void)
     const uint32_t fixed_aligned_len = align(this->value_length);
     size_t mem_needed = num_values * fixed_aligned_len;
     toku_mempool_construct(&new_mp, mem_needed);
-    uint8_t* CAST_FROM_VOIDP(dest, toku_mempool_malloc(&new_mp, mem_needed, 1));
+    uint8_t* CAST_FROM_VOIDP(dest, toku_mempool_malloc(&new_mp, mem_needed));
     paranoid_invariant_notnull(dest);
     for (uint32_t i = 0; i < num_values; i++) {
         const dmt_node &n = get_node(tmp_array[i]);
@@ -588,7 +588,7 @@ node_offset dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_malloc_and_set_value
     size_t val_size = value.get_size();
     size_t size_to_alloc = __builtin_offsetof(dmt_node, value) + val_size;
     size_to_alloc = align(size_to_alloc);
-    void* np = toku_mempool_malloc(&this->mp, size_to_alloc, 1);
+    void* np = toku_mempool_malloc(&this->mp, size_to_alloc);
     paranoid_invariant_notnull(np);
     dmt_node *CAST_FROM_VOIDP(n, np);
     node_set_value(n, value);
@@ -645,7 +645,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_tree(const dmtwrite
         dmt_node &node = get_node(tmp_array[i]);
         const size_t bytes_to_copy = __builtin_offsetof(dmt_node, value) + node.value_length;
         const size_t bytes_to_alloc = align(bytes_to_copy);
-        void* newdata = toku_mempool_malloc(&new_kvspace, bytes_to_alloc, 1);
+        void* newdata = toku_mempool_malloc(&new_kvspace, bytes_to_alloc);
         memcpy(newdata, &node, bytes_to_copy);
         tmp_array[i] = toku_mempool_get_offset_from_pointer_and_base(&new_kvspace, newdata);
     }
@@ -1251,7 +1251,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::build(dmt<dmtdata_t, dm
     invariant_zero(toku_mempool_get_frag_size(&this->temp.mp));
     struct mempool new_mp;
     toku_mempool_construct(&new_mp, used);
-    void * newbase = toku_mempool_malloc(&new_mp, used, 1);
+    void * newbase = toku_mempool_malloc(&new_mp, used);
     invariant_notnull(newbase);
     memcpy(newbase, toku_mempool_get_base(&this->temp.mp), used);
     toku_mempool_destroy(&this->temp.mp);
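Note how the dmt call sites keep their alignment guarantees even though the pool no longer aligns: they round the request size up front via align() before calling toku_mempool_malloc, and since every allocation size is a multiple of the alignment, each successive bump offset stays aligned too, which is what the reinterpret_cast<size_t>(ptr) % ALIGNMENT == 0 invariant in the second hunk checks. A self-contained sketch of that round-up, assuming the alignment is a power of two; align_up and the ALIGNMENT value of 4 are illustrative stand-ins for the dmt helpers:

    static const size_t ALIGNMENT = 4;  // assumed stand-in for dmt's alignment constant

    static size_t align_up(size_t x) {
        // round x up to the next multiple of ALIGNMENT; valid when ALIGNMENT is a power of two
        return (x + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
    }

    // e.g. void *ptr = toku_mempool_malloc(&mp, align_up(value_length));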
util/mempool.cc (14 changed lines)
@@ -207,24 +207,20 @@ size_t toku_mempool_get_allocated_size(const struct mempool *mp) {
     return mp->free_offset;
 }
 
-void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment) {
+void *toku_mempool_malloc(struct mempool *mp, size_t size) {
     paranoid_invariant(size < (1U<<31));
     paranoid_invariant(mp->size < (1U<<31));
     paranoid_invariant(mp->free_offset < (1U<<31));
     paranoid_invariant(mp->free_offset <= mp->size);
     void *vp;
-    size_t offset = (mp->free_offset + (alignment-1)) & ~(alignment-1);
-    //printf("mempool_malloc size=%ld base=%p free_offset=%ld mp->size=%ld offset=%ld\n", size, mp->base, mp->free_offset, mp->size, offset);
-    if (offset + size > mp->size) {
-        vp = 0;
+    if (mp->free_offset + size > mp->size) {
+        vp = nullptr;
     } else {
-        vp = (char *)mp->base + offset;
-        mp->free_offset = offset + size;
+        vp = reinterpret_cast<char *>(mp->base) + mp->free_offset;
+        mp->free_offset += size;
     }
     paranoid_invariant(mp->free_offset <= mp->size);
-    paranoid_invariant(((long)vp & (alignment-1)) == 0);
-    paranoid_invariant(vp == 0 || toku_mempool_inrange(mp, vp, size));
     //printf("mempool returning %p\n", vp);
     return vp;
 }
 
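After this hunk, toku_mempool_malloc is a textbook bump allocator: the free offset advances by exactly size, with no rounding and no padding between allocations. A self-contained approximation of the new behavior for readers outside the tree (my_pool and my_pool_malloc are illustrative stand-ins for struct mempool and the real function, whose fields they mirror):

    #include <cstddef>

    struct my_pool {        // stand-in for struct mempool
        void  *base;        // start of the backing buffer
        size_t size;        // capacity in bytes
        size_t free_offset; // next free byte
    };

    void *my_pool_malloc(my_pool *mp, size_t size) {
        if (mp->free_offset + size > mp->size) {
            return nullptr;  // out of space: the caller compresses or grows the pool
        }
        void *vp = reinterpret_cast<char *>(mp->base) + mp->free_offset;
        mp->free_offset += size;  // bump; allocations are packed back to back
        return vp;
    }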
util/mempool.h (4 changed lines)
@@ -163,8 +163,8 @@ size_t toku_mempool_get_free_size(const struct mempool *mp);
 /* get the amount of space that has been allocated for use (wasted or not) */
 size_t toku_mempool_get_allocated_size(const struct mempool *mp);
 
-/* allocate a chunk of memory from the memory pool suitably aligned */
-void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment);
+/* allocate a chunk of memory from the memory pool */
+void *toku_mempool_malloc(struct mempool *mp, size_t size);
 
 /* free a previously allocated chunk of memory. the free only updates
    a count of the amount of free space in the memory pool. the memory