/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id$"
#ident "Copyright (c) 2007-2010 Tokutek Inc.  All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."

#include "includes.h"
#include "sort.h"
#include "threadpool.h"
#include <compress.h>

#if defined(HAVE_CILK)
#include <cilk/cilk.h>
#define cilk_worker_count (__cilkrts_get_nworkers())
#else
#define cilk_spawn
#define cilk_sync
#define cilk_for for
#define cilk_worker_count 1
#endif
static BRT_UPGRADE_STATUS_S brt_upgrade_status;

#define UPGRADE_STATUS_INIT(k,t,l) {               \
    brt_upgrade_status.status[k].keyname = #k;     \
    brt_upgrade_status.status[k].type    = t;      \
    brt_upgrade_status.status[k].legend  = "brt upgrade: " l; \
}

static void
status_init(void)
{
    // Note, this function initializes the keyname, type, and legend fields.
    // Value fields are initialized to zero by compiler.
    UPGRADE_STATUS_INIT(BRT_UPGRADE_FOOTPRINT,             UINT64, "footprint");
    UPGRADE_STATUS_INIT(BRT_UPGRADE_HEADER_13,             UINT64, "V13 headers");
    UPGRADE_STATUS_INIT(BRT_UPGRADE_NONLEAF_13,            UINT64, "V13 nonleaf nodes");
    UPGRADE_STATUS_INIT(BRT_UPGRADE_LEAF_13,               UINT64, "V13 leaf nodes");
    UPGRADE_STATUS_INIT(BRT_UPGRADE_OPTIMIZED_FOR_UPGRADE, UINT64, "optimized for upgrade");
    brt_upgrade_status.initialized = true;
}
#undef UPGRADE_STATUS_INIT

#define UPGRADE_STATUS_VALUE(x) brt_upgrade_status.status[x].value.num

void
toku_brt_upgrade_get_status(BRT_UPGRADE_STATUS s) {
    if (!brt_upgrade_status.initialized) {
        status_init();
    }
    UPGRADE_STATUS_VALUE(BRT_UPGRADE_FOOTPRINT) = toku_log_upgrade_get_footprint();
    *s = brt_upgrade_status;
}
// performance tracing
#define DO_TOKU_TRACE 0
#if DO_TOKU_TRACE
static inline void do_toku_trace(const char *cp, int len) {
    const int toku_trace_fd = -1;
    write(toku_trace_fd, cp, len);
}
#define toku_trace(a)  do_toku_trace(a, strlen(a))
#else
#define toku_trace(a)
#endif

static int num_cores = 0; // cache the number of cores for the parallelization
static struct toku_thread_pool *brt_pool = NULL;
int
toku_brt_serialize_init(void) {
    num_cores = toku_os_get_number_active_processors();
    int r = toku_thread_pool_create(&brt_pool, num_cores); lazy_assert_zero(r);
    return 0;
}

int
toku_brt_serialize_destroy(void) {
    toku_thread_pool_destroy(&brt_pool);
    return 0;
}

// This mutex protects pwrite from running in parallel, and also protects modifications to the block allocator.
static toku_pthread_mutex_t pwrite_mutex = TOKU_PTHREAD_MUTEX_INITIALIZER;
static int pwrite_is_locked = 0;

int
toku_pwrite_lock_init(void) {
    int r = toku_pthread_mutex_init(&pwrite_mutex, NULL); resource_assert_zero(r);
    return r;
}

int
toku_pwrite_lock_destroy(void) {
    int r = toku_pthread_mutex_destroy(&pwrite_mutex); resource_assert_zero(r);
    return r;
}

static inline void
lock_for_pwrite(void) {
    // Locks the pwrite_mutex.
    int r = toku_pthread_mutex_lock(&pwrite_mutex); resource_assert_zero(r);
    pwrite_is_locked = 1;
}

static inline void
unlock_for_pwrite(void) {
    pwrite_is_locked = 0;
    int r = toku_pthread_mutex_unlock(&pwrite_mutex); resource_assert_zero(r);
}
enum { FILE_CHANGE_INCREMENT = (16<<20) };

static inline u_int64_t
alignup64(u_int64_t a, u_int64_t b) {
    return ((a+b-1)/b)*b;
}
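// For example, alignup64(5000, 4096) rounds 5000 up to the next multiple of
// 4096: ((5000 + 4095) / 4096) * 4096 == 8192.  Already-aligned values are
// unchanged: alignup64(8192, 4096) == 8192.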
//Race condition if ydb lock is split.
//Ydb lock is held when this function is called.
//Not going to truncate and delete (redirect to devnull) at same time.
//Must be holding a read or write lock on fdlock (fd is protected)
void
toku_maybe_truncate_cachefile (CACHEFILE cf, int fd, u_int64_t size_used)
// Effect: If file size >= SIZE+32MiB, reduce file size.
// (32 instead of 16.. hysteresis).
{
    //Check file size before taking pwrite lock to reduce likelihood of taking
    //the pwrite lock needlessly.
    //Check file size after taking lock to avoid race conditions.
    int64_t file_size;
    if (toku_cachefile_is_dev_null_unlocked(cf)) goto done;
    {
        int r = toku_os_get_file_size(fd, &file_size);
        lazy_assert_zero(r);
        invariant(file_size >= 0);
    }
    // If file space is overallocated by at least 32M
    if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
        lock_for_pwrite();
        {
            int r = toku_os_get_file_size(fd, &file_size);
            lazy_assert_zero(r);
            invariant(file_size >= 0);
        }
        if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
            toku_off_t new_size = alignup64(size_used, (2*FILE_CHANGE_INCREMENT)); //Truncate to new size_used.
            invariant(new_size < file_size);
            int r = toku_cachefile_truncate(cf, new_size);
            lazy_assert_zero(r);
        }
        unlock_for_pwrite();
    }
done:
    return;
}
static u_int64_t
umin64(u_int64_t a, u_int64_t b) {
    if (a<b) return a;
    return b;
}

int
maybe_preallocate_in_file (int fd, u_int64_t size)
// Effect: If file size is less than SIZE, make it bigger by either doubling it or growing by 16MiB whichever is less.
// Return 0 on success, otherwise an error number.
{
    int64_t file_size;
    {
        int r = toku_os_get_file_size(fd, &file_size);
        if (r != 0) { // debug #2463
            int the_errno = errno;
            fprintf(stderr, "%s:%d fd=%d size=%"PRIu64" r=%d errno=%d\n", __FUNCTION__, __LINE__, fd, size, r, the_errno); fflush(stderr);
        }
        lazy_assert_zero(r);
    }
    invariant(file_size >= 0);
    if ((u_int64_t)file_size < size) {
        const int N = umin64(size, FILE_CHANGE_INCREMENT); // Double the size of the file, or add 16MiB, whichever is less.
        char *MALLOC_N(N, wbuf);
        memset(wbuf, 0, N);
        toku_off_t start_write = alignup64(file_size, 4096);
        invariant(start_write >= file_size);
        toku_os_full_pwrite(fd, wbuf, N, start_write);
        toku_free(wbuf);
    }
    return 0;
}
static void
toku_full_pwrite_extend (int fd, const void *buf, size_t count, toku_off_t offset)
// requires that the pwrite has been locked
// On failure, this does not return (an assertion fails or something).
{
    invariant(pwrite_is_locked);
    {
        int r = maybe_preallocate_in_file(fd, offset+count);
        lazy_assert_zero(r);
    }
    toku_os_full_pwrite(fd, buf, count, offset);
}
// Don't include the sub_block header
// Overhead calculated in same order fields are written to wbuf
enum {
    node_header_overhead = (8+   // magic "tokunode" or "tokuleaf" or "tokuroll"
                            4+   // layout_version
                            4+   // layout_version_original
                            4),  // build_id
};

#include "sub_block.h"
#include "sub_block_map.h"

// uncompressed header offsets
enum {
    uncompressed_magic_offset = 0,
    uncompressed_version_offset = 8,
};
static u_int32_t
serialize_node_header_size(BRTNODE node) {
    u_int32_t retval = 0;
    retval += 8; // magic
    retval += sizeof(node->layout_version);
    retval += sizeof(node->layout_version_original);
    retval += 4; // BUILD_ID
    retval += 4; // n_children
    retval += node->n_children*8; // encode start offset and length of each partition
    retval += 4; // checksum
    return retval;
}
static void
serialize_node_header(BRTNODE node, BRTNODE_DISK_DATA ndd, struct wbuf *wbuf) {
    if (node->height == 0)
        wbuf_nocrc_literal_bytes(wbuf, "tokuleaf", 8);
    else
        wbuf_nocrc_literal_bytes(wbuf, "tokunode", 8);
    invariant(node->layout_version == BRT_LAYOUT_VERSION);
    wbuf_nocrc_int(wbuf, node->layout_version);
    wbuf_nocrc_int(wbuf, node->layout_version_original);
    wbuf_nocrc_uint(wbuf, BUILD_ID);
    wbuf_nocrc_int(wbuf, node->n_children);
    for (int i=0; i<node->n_children; i++) {
        assert(BP_SIZE(ndd,i)>0);
        wbuf_nocrc_int(wbuf, BP_START(ndd, i)); // save the beginning of the partition
        wbuf_nocrc_int(wbuf, BP_SIZE (ndd, i)); // and the size
    }
    // checksum the header
    u_int32_t end_to_end_checksum = x1764_memory(wbuf->buf, wbuf_get_woffset(wbuf));
    wbuf_nocrc_int(wbuf, end_to_end_checksum);
    invariant(wbuf->ndone == wbuf->size);
}
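// For reference, the uncompressed node header written above (byte counts
// match serialize_node_header_size()):
//
//   magic(8) | layout_version(4) | layout_version_original(4) | build_id(4) |
//   n_children(4) | (start,size) pair per partition (n_children * 8) | checksum(4)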
static int
wbufwriteleafentry (OMTVALUE lev, u_int32_t UU(idx), void *v) {
    LEAFENTRY le = lev;
    struct wbuf *thisw = v;
    wbuf_nocrc_LEAFENTRY(thisw, le);
    return 0;
}
static u_int32_t
serialize_brtnode_partition_size (BRTNODE node, int i)
{
    u_int32_t result = 0;
    assert(node->bp[i].state == PT_AVAIL);
    result++; // Byte that states what the partition is
    if (node->height > 0) {
        result += 4; // size of bytes in buffer table
        result += toku_bnc_nbytesinbuf(BNC(node, i));
    }
    else {
        result += 4; // n_entries in buffer table
        result += BLB_NBYTESINBUF(node, i);
    }
    result += 4; // checksum
    return result;
}

#define BRTNODE_PARTITION_OMT_LEAVES 0xaa
#define BRTNODE_PARTITION_FIFO_MSG 0xbb
static void
serialize_nonleaf_childinfo(NONLEAF_CHILDINFO bnc, struct wbuf *wb)
{
    unsigned char ch = BRTNODE_PARTITION_FIFO_MSG;
    wbuf_nocrc_char(wb, ch);
    // serialize the FIFO, first the number of entries, then the elements
    wbuf_nocrc_int(wb, toku_bnc_n_entries(bnc));
    FIFO_ITERATE(
        bnc->buffer, key, keylen, data, datalen, type, msn, xids, is_fresh,
        {
            invariant((int)type>=0 && type<256);
            wbuf_nocrc_char(wb, (unsigned char)type);
            wbuf_nocrc_char(wb, (unsigned char)is_fresh);
            wbuf_MSN(wb, msn);
            wbuf_nocrc_xids(wb, xids);
            wbuf_nocrc_bytes(wb, key, keylen);
            wbuf_nocrc_bytes(wb, data, datalen);
        });
}
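// Each FIFO message is thus laid out on the wire as:
//   type(1) | is_fresh(1) | msn(8) | xids(variable) | keylen+key | datalen+data
// deserialize_child_buffer() below reads entries back in exactly this order.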
//
// Serialize the i'th partition of node into sb
// For leaf nodes, this would be the i'th basement node
// For internal nodes, this would be the i'th message buffer
//
static void
serialize_brtnode_partition(BRTNODE node, int i, struct sub_block *sb) {
    assert(sb->uncompressed_size == 0);
    assert(sb->uncompressed_ptr == NULL);
    sb->uncompressed_size = serialize_brtnode_partition_size(node,i);
    sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size);
    //
    // Now put the data into sb->uncompressed_ptr
    //
    struct wbuf wb;
    wbuf_init(&wb, sb->uncompressed_ptr, sb->uncompressed_size);
    if (node->height > 0) {
        // TODO: (Zardosht) possibly exit early if there are no messages
        serialize_nonleaf_childinfo(BNC(node, i), &wb);
    }
    else {
        unsigned char ch = BRTNODE_PARTITION_OMT_LEAVES;
        wbuf_nocrc_char(&wb, ch);
        wbuf_nocrc_uint(&wb, toku_omt_size(BLB_BUFFER(node, i)));
        //
        // iterate over leafentries and place them into the buffer
        //
        toku_omt_iterate(BLB_BUFFER(node, i), wbufwriteleafentry, &wb);
    }
    u_int32_t end_to_end_checksum = x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
    wbuf_nocrc_int(&wb, end_to_end_checksum);
    invariant(wb.ndone == wb.size);
    invariant(sb->uncompressed_size==wb.ndone);
}
//
// Takes the data in sb->uncompressed_ptr, and compresses it
// into a newly allocated buffer sb->compressed_ptr
//
static void
compress_brtnode_sub_block(struct sub_block *sb, enum toku_compression_method method) {
    assert(sb->compressed_ptr == NULL);
    set_compressed_size_bound(sb, method);
    // add 8 extra bytes, 4 for compressed size, 4 for decompressed size
    sb->compressed_ptr = toku_xmalloc(sb->compressed_size_bound + 8);
    //
    // This probably seems a bit complicated. Here is what is going on.
    // In TokuDB 5.0, sub_blocks were compressed and the compressed data
    // was checksummed. The checksum did NOT include the size of the compressed data
    // and the size of the uncompressed data. The fields of sub_block only reference the
    // compressed data, and it is the responsibility of the user of the sub_block
    // to write the length
    //
    // For Dr. No, we want the checksum to also include the size of the compressed data, and the
    // size of the decompressed data, because this data
    // may be read off of disk alone, so it must be verifiable alone.
    //
    // So, we pass in a buffer to compress_nocrc_sub_block that starts 8 bytes after the beginning
    // of sb->compressed_ptr, so we have space to put in the sizes, and then run the checksum.
    //
    sb->compressed_size = compress_nocrc_sub_block(
        sb,
        (char *)sb->compressed_ptr + 8,
        sb->compressed_size_bound,
        method
        );
    u_int32_t *extra = (u_int32_t *)(sb->compressed_ptr);
    // store the compressed and uncompressed size at the beginning
    extra[0] = toku_htod32(sb->compressed_size);
    extra[1] = toku_htod32(sb->uncompressed_size);
    // now checksum the entire thing
    sb->compressed_size += 8; // now add the eight bytes that we saved for the sizes
    sb->xsum = x1764_memory(sb->compressed_ptr, sb->compressed_size);
    //
    // This is the end result for Dr. No and forward. For brtnodes, sb->compressed_ptr contains
    // two integers at the beginning, the size and uncompressed size, and then the compressed
    // data. sb->xsum contains the checksum of this entire thing.
    //
    // In TokuDB 5.0, sb->compressed_ptr only contained the compressed data, sb->xsum
    // checksummed only the compressed data, and the checksumming of the sizes were not
    // done here.
    //
}
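// In other words, the compressed sub block built above has this layout
// (the 4-byte xsum itself is appended by the caller when the node is assembled):
//
//   compressed_size(4) | uncompressed_size(4) | compressed data (compressed_size bytes)
//
// sb->xsum is the x1764 checksum over all of the above, so any one sub block
// can be read from disk and verified without any other part of the node.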
//
// Returns the size needed to serialize the brtnode info
// Does not include header information that is common with rollback logs
// such as the magic, layout_version, and build_id
// Includes only node specific info such as pivot information, n_children, and so on
//
static u_int32_t
serialize_brtnode_info_size(BRTNODE node)
{
    u_int32_t retval = 0;
    retval += 8; // max_msn_applied_to_node_on_disk
    retval += 4; // nodesize
    retval += 4; // flags
    retval += 4; // height
    retval += 4; // optimized_for_upgrade
    retval += node->totalchildkeylens; // total length of pivots
    retval += (node->n_children-1)*4; // encode length of each pivot
    if (node->height > 0) {
        retval += node->n_children*8; // child blocknum's
    }
    retval += 4; // checksum
    return retval;
}
static void serialize_brtnode_info(BRTNODE node,
                                   SUB_BLOCK sb // output
                                   ) {
    assert(sb->uncompressed_size == 0);
    assert(sb->uncompressed_ptr == NULL);
    sb->uncompressed_size = serialize_brtnode_info_size(node);
    sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size);
    assert(sb->uncompressed_ptr);
    struct wbuf wb;
    wbuf_init(&wb, sb->uncompressed_ptr, sb->uncompressed_size);

    wbuf_MSN(&wb, node->max_msn_applied_to_node_on_disk);
    wbuf_nocrc_uint(&wb, node->nodesize);
    wbuf_nocrc_uint(&wb, node->flags);
    wbuf_nocrc_int (&wb, node->height);
    wbuf_nocrc_int (&wb, node->optimized_for_upgrade);
    // pivot information
    for (int i = 0; i < node->n_children-1; i++) {
        wbuf_nocrc_bytes(&wb, kv_pair_key(node->childkeys[i]), toku_brt_pivot_key_len(node->childkeys[i]));
    }
    // child blocks, only for internal nodes
    if (node->height > 0) {
        for (int i = 0; i < node->n_children; i++) {
            wbuf_nocrc_BLOCKNUM(&wb, BP_BLOCKNUM(node,i));
        }
    }

    u_int32_t end_to_end_checksum = x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
    wbuf_nocrc_int(&wb, end_to_end_checksum);
    invariant(wb.ndone == wb.size);
    invariant(sb->uncompressed_size==wb.ndone);
}
// This is the size of the uncompressed data, not including the compression headers
unsigned int
toku_serialize_brtnode_size (BRTNODE node) {
    unsigned int result = 0;
    //
    // As of now, this seems to be called if and only if the entire node is supposed
    // to be in memory, so we will assert it.
    //
    toku_assert_entire_node_in_memory(node);
    result += serialize_node_header_size(node);
    result += serialize_brtnode_info_size(node);
    for (int i = 0; i < node->n_children; i++) {
        result += serialize_brtnode_partition_size(node,i);
    }
    return result;
}
struct array_info {
    u_int32_t offset;
    OMTVALUE *array;
};

static int
array_item(OMTVALUE lev, u_int32_t idx, void *vsi) {
    struct array_info *ai = vsi;
    ai->array[idx+ai->offset] = lev;
    return 0;
}

struct sum_info {
    unsigned int dsum;
    unsigned int count;
};

static int
sum_item (OMTVALUE lev, u_int32_t UU(idx), void *vsi) {
    LEAFENTRY le = lev;
    struct sum_info *si = vsi;
    si->count++;
    si->dsum += leafentry_disksize(le); // TODO 4050 delete this redundant call and use le_sizes[]
    return 0;
}
// There must still be at least one child
// Requires that all messages in buffers above have been applied.
// Because all messages above have been applied, setting msn of all new basements
// to max msn of existing basements is correct.  (There cannot be any messages in
// buffers above that still need to be applied.)
void
rebalance_brtnode_leaf(BRTNODE node, unsigned int basementnodesize)
{
    assert(node->height == 0);
    assert(node->dirty);

    uint32_t num_orig_basements = node->n_children;
    // Count number of leaf entries in this leaf (num_le).
    u_int32_t num_le = 0;
    for (uint32_t i = 0; i < num_orig_basements; i++) {
        invariant(BLB_BUFFER(node, i));
        num_le += toku_omt_size(BLB_BUFFER(node, i));
    }

    uint32_t num_alloc = num_le ? num_le : 1;  // simplify logic below by always having at least one entry per array

    // Create an array of OMTVALUE's that store all the pointers to all the data.
    // Each element in leafpointers is a pointer to a leaf.
    OMTVALUE *XMALLOC_N(num_alloc, leafpointers);
    leafpointers[0] = NULL;

    // Capture pointers to old mempools' buffers (so they can be destroyed)
    void **XMALLOC_N(num_orig_basements, old_mempool_bases);

    u_int32_t curr_le = 0;
    for (uint32_t i = 0; i < num_orig_basements; i++) {
        OMT curr_omt = BLB_BUFFER(node, i);
        struct array_info ai;
        ai.offset = curr_le;  // index of first le in basement
        ai.array  = leafpointers;
        toku_omt_iterate(curr_omt, array_item, &ai);
        curr_le += toku_omt_size(curr_omt);
        BASEMENTNODE bn = BLB(node, i);
        old_mempool_bases[i] = toku_mempool_get_base(&bn->buffer_mempool);
    }

    // Create an array that will store indexes of new pivots.
    // Each element in new_pivots is the index of a pivot key.
    // (Allocating num_le of them is overkill, but num_le is an upper bound.)
    u_int32_t *XMALLOC_N(num_alloc, new_pivots);
    new_pivots[0] = 0;

    // Each element in le_sizes is the size of the leafentry pointed to by leafpointers.
    size_t *XMALLOC_N(num_alloc, le_sizes);
    le_sizes[0] = 0;

    // Create an array that will store the size of each basement.
    // This is the sum of the leaf sizes of all the leaves in that basement.
    // We don't know how many basements there will be, so we use num_le as the upper bound.
    size_t *XMALLOC_N(num_alloc, bn_sizes);
    bn_sizes[0] = 0;

    // TODO 4050: All these arrays should be combined into a single array of some bn_info struct (pivot, msize, num_les).
    // Each entry is the number of leafentries in this basement.  (Again, num_le is overkill upper bound.)
    uint32_t *XMALLOC_N(num_alloc, num_les_this_bn);
    num_les_this_bn[0] = 0;

    // Figure out the new pivots.
    // We need the index of each pivot, and for each basement we need
    // the number of leaves and the sum of the sizes of the leaves (memory requirement for basement).
    u_int32_t curr_pivot = 0;
    u_int32_t num_le_in_curr_bn = 0;
    u_int32_t bn_size_so_far = 0;
    for (u_int32_t i = 0; i < num_le; i++) {
        u_int32_t curr_le_size = leafentry_disksize(leafpointers[i]);
        le_sizes[i] = curr_le_size;
        if ((bn_size_so_far + curr_le_size > basementnodesize) && (num_le_in_curr_bn != 0)) {
            // cap off the current basement node to end with the element before i
            new_pivots[curr_pivot] = i-1;
            curr_pivot++;
            num_le_in_curr_bn = 0;
            bn_size_so_far = 0;
        }
        num_le_in_curr_bn++;
        num_les_this_bn[curr_pivot] = num_le_in_curr_bn;
        bn_size_so_far += curr_le_size;
        bn_sizes[curr_pivot] = bn_size_so_far;
    }

    // curr_pivot is now the total number of pivot keys in the leaf node
    int num_pivots   = curr_pivot;
    int num_children = num_pivots + 1;

    // now we need to fill in the new basement nodes and pivots
    // TODO: (Zardosht) this is an ugly thing right now
    // Need to figure out how to properly deal with seqinsert.
    // I am not happy with how this is being handled with basement nodes
    u_int32_t tmp_seqinsert = BLB_SEQINSERT(node, num_orig_basements - 1);

    // choose the max msn applied to any basement as the max msn applied to all new basements
    MSN max_msn = MIN_MSN;
    for (uint32_t i = 0; i < num_orig_basements; i++) {
        MSN curr_msn = BLB_MAX_MSN_APPLIED(node,i);
        max_msn = (curr_msn.msn > max_msn.msn) ? curr_msn : max_msn;
    }

    // Now destroy the old basements, but do not destroy leaves
    toku_destroy_brtnode_internals(node);

    // now reallocate pieces and start filling them in
    invariant(num_children > 0);
    node->totalchildkeylens = 0;

    XCALLOC_N(num_pivots, node->childkeys);        // allocate pointers to pivot structs
    node->n_children = num_children;
    XCALLOC_N(num_children, node->bp);             // allocate pointers to basements (bp)
    for (int i = 0; i < num_children; i++) {
        set_BLB(node, i, toku_create_empty_bn());  // allocate empty basements and set bp pointers
    }

    // now we start to fill in the data

    // first the pivots
    for (int i = 0; i < num_pivots; i++) {
        LEAFENTRY curr_le_pivot = leafpointers[new_pivots[i]];
        node->childkeys[i] = kv_pair_malloc(
            le_key(curr_le_pivot),
            le_keylen(curr_le_pivot),
            0,
            0
            );
        assert(node->childkeys[i]);
        node->totalchildkeylens += toku_brt_pivot_key_len(node->childkeys[i]);
    }

    uint32_t baseindex_this_bn = 0;
    // now the basement nodes
    for (int i = 0; i < num_children; i++) {
        // put back seqinsert
        BLB_SEQINSERT(node, i) = tmp_seqinsert;

        // create start (inclusive) and end (exclusive) boundaries for data of basement node
        u_int32_t curr_start = (i==0) ? 0 : new_pivots[i-1]+1;              // index of first leaf in basement
        u_int32_t curr_end   = (i==num_pivots) ? num_le : new_pivots[i]+1;  // index of first leaf in next basement
        u_int32_t num_in_bn  = curr_end - curr_start;                       // number of leaves in this basement

        // create indexes for new basement
        invariant(baseindex_this_bn == curr_start);
        uint32_t num_les_to_copy = num_les_this_bn[i];
        invariant(num_les_to_copy == num_in_bn);

        // construct mempool for this basement
        size_t size_this_bn = bn_sizes[i];
        BASEMENTNODE bn = BLB(node, i);
        struct mempool *mp = &bn->buffer_mempool;
        toku_mempool_construct(mp, size_this_bn);

        OMTVALUE *XMALLOC_N(num_in_bn, bn_array);
        for (uint32_t le_index = 0; le_index < num_les_to_copy; le_index++) {
            uint32_t le_within_node = baseindex_this_bn + le_index;
            size_t   le_size = le_sizes[le_within_node];
            void    *new_le  = toku_mempool_malloc(mp, le_size, 1);  // point to new location
            void    *old_le  = leafpointers[le_within_node];
            memcpy(new_le, old_le, le_size);  // put le data at new location
            bn_array[le_index] = new_le;      // point to new location (in new mempool)
        }

        toku_omt_destroy(&BLB_BUFFER(node, i));
        int r = toku_omt_create_steal_sorted_array(
            &BLB_BUFFER(node, i),
            &bn_array,
            num_in_bn,
            num_in_bn
            );
        invariant_zero(r);
        BLB_NBYTESINBUF(node, i) = size_this_bn;

        BP_STATE(node,i) = PT_AVAIL;
        BP_TOUCH_CLOCK(node,i);
        BLB_MAX_MSN_APPLIED(node,i) = max_msn;
        baseindex_this_bn += num_les_to_copy;  // set to index of next bn
    }
    node->max_msn_applied_to_node_on_disk = max_msn;

    // destroy buffers of old mempools
    for (uint32_t i = 0; i < num_orig_basements; i++) {
        toku_free(old_mempool_bases[i]);
    }
    toku_free(leafpointers);
    toku_free(old_mempool_bases);
    toku_free(new_pivots);
    toku_free(le_sizes);
    toku_free(bn_sizes);
    toku_free(num_les_this_bn);
}  // end of rebalance_brtnode_leaf()
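// Worked example (hypothetical numbers, not from the source): with
// basementnodesize = 128 and four leafentries of disk sizes {60, 60, 60, 10},
// the pivot loop above closes the first basement after le[1] (60+60 = 120
// fits, but adding le[2] would exceed 128), so new_pivots[0] = 1.  The result
// is one pivot key (taken from le[1]) and two basements, {le0, le1} and
// {le2, le3}.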
static void
serialize_and_compress_partition(BRTNODE node, int childnum, SUB_BLOCK sb)
{
    serialize_brtnode_partition(node, childnum, sb);
    compress_brtnode_sub_block(sb, node->h->compression_method);
}

void
toku_create_compressed_partition_from_available(
    BRTNODE node,
    int childnum,
    SUB_BLOCK sb
    )
{
    serialize_and_compress_partition(node, childnum, sb);
    //
    // now we have an sb that would be ready for being written out,
    // but we are not writing it out, we are storing it in cache for a potentially
    // long time, so we need to do some cleanup
    //
    // The buffer created above contains metadata in the first 8 bytes, and is overallocated
    // It allocates a bound on the compressed length (evaluated before compression) as opposed
    // to just the amount of the actual compressed data. So, we create a new buffer and copy
    // just the compressed data.
    //
    u_int32_t compressed_size = toku_dtoh32(*(u_int32_t *)sb->compressed_ptr);
    void *compressed_data = toku_xmalloc(compressed_size);
    memcpy(compressed_data, (char *)sb->compressed_ptr + 8, compressed_size);
    toku_free(sb->compressed_ptr);
    sb->compressed_ptr = compressed_data;
    sb->compressed_size = compressed_size;
    if (sb->uncompressed_ptr) {
        toku_free(sb->uncompressed_ptr);
        sb->uncompressed_ptr = NULL;
    }
}
static void
serialize_and_compress(BRTNODE node, int npartitions, struct sub_block sb[]) {
    for (int i = 0; i < npartitions; i++) {
        serialize_and_compress_partition(node, i, &sb[i]);
    }
}
// Writes out each child to a separate malloc'd buffer, then compresses
// all of them, and writes the uncompressed header, to bytes_to_write,
// which is malloc'd.
//
int
toku_serialize_brtnode_to_memory (BRTNODE node,
                                  BRTNODE_DISK_DATA *ndd,
                                  unsigned int basementnodesize,
                                  BOOL do_rebalancing,
                          /*out*/ size_t *n_bytes_to_write,
                          /*out*/ char  **bytes_to_write)
{
    toku_assert_entire_node_in_memory(node);

    if (do_rebalancing && node->height == 0) {
        rebalance_brtnode_leaf(node, basementnodesize);
    }
    const int npartitions = node->n_children;

    // Each partition represents a compressed sub block
    // For internal nodes, a sub block is a message buffer
    // For leaf nodes, a sub block is a basement node
    struct sub_block *XMALLOC_N(npartitions, sb);
    *ndd = toku_xrealloc(*ndd, npartitions*sizeof(**ndd));
    struct sub_block sb_node_info;
    for (int i = 0; i < npartitions; i++) {
        sub_block_init(&sb[i]);
    }
    sub_block_init(&sb_node_info);

    //
    // First, let's serialize and compress the individual sub blocks
    //
    serialize_and_compress(node, npartitions, sb);

    //
    // Now lets create a sub-block that has the common node information,
    // This does NOT include the header
    //
    serialize_brtnode_info(node, &sb_node_info);
    compress_brtnode_sub_block(&sb_node_info, node->h->compression_method);

    // now we have compressed each of our pieces into individual sub_blocks,
    // we can put the header and all the subblocks into a single buffer
    // and return it.

    // The total size of the node is:
    // size of header + disk size of the n+1 sub_block's created above
    u_int32_t total_node_size = (serialize_node_header_size(node) // uncompressed header
                                 + sb_node_info.compressed_size   // compressed nodeinfo (without its checksum)
                                 + 4);                            // nodeinfo's checksum
    // store the BP_SIZEs
    for (int i = 0; i < node->n_children; i++) {
        u_int32_t len = sb[i].compressed_size + 4; // data and checksum
        BP_SIZE (*ndd,i) = len;
        BP_START(*ndd,i) = total_node_size;
        total_node_size += sb[i].compressed_size + 4;
    }

    char *data = toku_xmalloc(total_node_size);
    char *curr_ptr = data;
    // now create the final serialized node

    // write the header
    struct wbuf wb;
    wbuf_init(&wb, curr_ptr, serialize_node_header_size(node));
    serialize_node_header(node, *ndd, &wb);
    assert(wb.ndone == wb.size);
    curr_ptr += serialize_node_header_size(node);

    // now write sb_node_info
    memcpy(curr_ptr, sb_node_info.compressed_ptr, sb_node_info.compressed_size);
    curr_ptr += sb_node_info.compressed_size;
    // write the checksum
    *(u_int32_t *)curr_ptr = toku_htod32(sb_node_info.xsum);
    curr_ptr += sizeof(sb_node_info.xsum);

    for (int i = 0; i < npartitions; i++) {
        memcpy(curr_ptr, sb[i].compressed_ptr, sb[i].compressed_size);
        curr_ptr += sb[i].compressed_size;
        // write the checksum
        *(u_int32_t *)curr_ptr = toku_htod32(sb[i].xsum);
        curr_ptr += sizeof(sb[i].xsum);
    }
    assert(curr_ptr - data == total_node_size);
    *bytes_to_write = data;
    *n_bytes_to_write = total_node_size;

    //
    // now that node has been serialized, go through sub_block's and free
    // memory
    //
    toku_free(sb_node_info.compressed_ptr);
    toku_free(sb_node_info.uncompressed_ptr);
    for (int i = 0; i < npartitions; i++) {
        toku_free(sb[i].compressed_ptr);
        toku_free(sb[i].uncompressed_ptr);
    }

    toku_free(sb);
    return 0;
}
int
toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, BRTNODE_DISK_DATA *ndd, BOOL do_rebalancing, struct brt_header *h, int UU(n_workitems), int UU(n_threads), BOOL for_checkpoint) {

    size_t n_to_write;
    char *compressed_buf = NULL;
    {
        int r = toku_serialize_brtnode_to_memory(node, ndd, h->basementnodesize, do_rebalancing,
                                                 &n_to_write, &compressed_buf);
        if (r!=0) return r;
    }

    //write_now: printf("%s:%d Writing %d bytes\n", __FILE__, __LINE__, w.ndone);
    {
        // If the node has never been written, then write the whole buffer, including the zeros
        invariant(blocknum.b>=0);
        //printf("%s:%d h=%p\n", __FILE__, __LINE__, h);
        //printf("%s:%d translated_blocknum_limit=%lu blocknum.b=%lu\n", __FILE__, __LINE__, h->translated_blocknum_limit, blocknum.b);
        //printf("%s:%d allocator=%p\n", __FILE__, __LINE__, h->block_allocator);
        //printf("%s:%d bt=%p\n", __FILE__, __LINE__, h->block_translation);
        DISKOFF offset;

        toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset,
                                      h, for_checkpoint); //dirties h
        lock_for_pwrite();
        toku_full_pwrite_extend(fd, compressed_buf, n_to_write, offset);
        unlock_for_pwrite();
    }

    //printf("%s:%d wrote %d bytes for %lld size=%lld\n", __FILE__, __LINE__, w.ndone, off, size);
    toku_free(compressed_buf);
    node->dirty = 0;  // See #1957.  Must set the node to be clean after serializing it so that it doesn't get written again on the next checkpoint or eviction.
    return 0;
}
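// Putting the pieces together, a serialized node on disk looks like:
//
//   uncompressed header (serialize_node_header)
//   compressed node info sub block + 4-byte checksum
//   compressed partition 0 sub block + 4-byte checksum
//   ...
//   compressed partition n-1 sub block + 4-byte checksum
//
// The header records each partition's (start, size) pair, so a reader can
// fetch and verify any single partition without touching the others.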
static void
deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
                         DESCRIPTOR desc, brt_compare_func cmp) {
    int r;
    int n_bytes_in_buffer = 0;
    int n_in_this_buffer = rbuf_int(rbuf);
    void **fresh_offsets, **stale_offsets;
    void **broadcast_offsets;
    int nfresh = 0, nstale = 0;
    int nbroadcast_offsets = 0;
    if (cmp) {
        XMALLOC_N(n_in_this_buffer, stale_offsets);
        XMALLOC_N(n_in_this_buffer, fresh_offsets);
        XMALLOC_N(n_in_this_buffer, broadcast_offsets);
    }
    for (int i = 0; i < n_in_this_buffer; i++) {
        bytevec key; ITEMLEN keylen;
        bytevec val; ITEMLEN vallen;
        // this is weird but it's necessary to pass icc and gcc together
        unsigned char ctype = rbuf_char(rbuf);
        enum brt_msg_type type = (enum brt_msg_type) ctype;
        bool is_fresh = rbuf_char(rbuf);
        MSN msn = rbuf_msn(rbuf);
        XIDS xids;
        xids_create_from_buffer(rbuf, &xids);
        rbuf_bytes(rbuf, &key, &keylen); /* Returns a pointer into the rbuf. */
        rbuf_bytes(rbuf, &val, &vallen);
        //printf("Found %s,%s\n", (char*)key, (char*)val);
        long *dest;
        if (cmp) {
            if (brt_msg_type_applies_once(type)) {
                if (is_fresh) {
                    dest = (long *) &fresh_offsets[nfresh];
                    nfresh++;
                } else {
                    dest = (long *) &stale_offsets[nstale];
                    nstale++;
                }
            } else if (brt_msg_type_applies_all(type) || brt_msg_type_does_nothing(type)) {
                dest = (long *) &broadcast_offsets[nbroadcast_offsets];
                nbroadcast_offsets++;
            } else {
                assert(FALSE);
            }
        } else {
            dest = NULL;
        }
        r = toku_fifo_enq(bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh, dest); /* Copies the data into the fifo */
        lazy_assert_zero(r);
        n_bytes_in_buffer += keylen + vallen + KEY_VALUE_OVERHEAD + BRT_CMD_OVERHEAD + xids_get_serialize_size(xids);
        //printf("Inserted\n");
        xids_destroy(&xids);
    }
    invariant(rbuf->ndone == rbuf->size);

    if (cmp) {
        struct toku_fifo_entry_key_msn_cmp_extra extra = { .desc = desc, .cmp = cmp, .fifo = bnc->buffer };
        r = mergesort_r(fresh_offsets, nfresh, sizeof fresh_offsets[0], &extra, toku_fifo_entry_key_msn_cmp);
        assert_zero(r);
        toku_omt_destroy(&bnc->fresh_message_tree);
        r = toku_omt_create_steal_sorted_array(&bnc->fresh_message_tree, &fresh_offsets, nfresh, n_in_this_buffer);
        assert_zero(r);
        r = mergesort_r(stale_offsets, nstale, sizeof stale_offsets[0], &extra, toku_fifo_entry_key_msn_cmp);
        assert_zero(r);
        toku_omt_destroy(&bnc->stale_message_tree);
        r = toku_omt_create_steal_sorted_array(&bnc->stale_message_tree, &stale_offsets, nstale, n_in_this_buffer);
        assert_zero(r);
        toku_omt_destroy(&bnc->broadcast_list);
        r = toku_omt_create_steal_sorted_array(&bnc->broadcast_list, &broadcast_offsets, nbroadcast_offsets, n_in_this_buffer);
        assert_zero(r);
    }
    bnc->n_bytes_in_buffer = n_bytes_in_buffer;
}
// dump a buffer to stderr
// no locking around this for now
static void
dump_bad_block(unsigned char *vp, u_int64_t size) {
    const u_int64_t linesize = 64;
    u_int64_t n = size / linesize;
    for (u_int64_t i = 0; i < n; i++) {
        fprintf(stderr, "%p: ", vp);
        for (u_int64_t j = 0; j < linesize; j++) {
            unsigned char c = vp[j];
            fprintf(stderr, "%2.2X", c);
        }
        fprintf(stderr, "\n");
        vp += linesize;
    }
    size = size % linesize;
    for (u_int64_t i = 0; i < size; i++) {
        if ((i % linesize) == 0)
            fprintf(stderr, "%p: ", vp+i);
        fprintf(stderr, "%2.2X", vp[i]);
        if (((i+1) % linesize) == 0)
            fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
BASEMENTNODE toku_create_empty_bn(void) {
    BASEMENTNODE bn = toku_create_empty_bn_no_buffer();
    int r;
    r = toku_omt_create(&bn->buffer);
    assert_zero(r);
    return bn;
}

struct mp_pair {
    void *orig_base;
    void *new_base;
    OMT omt;
};

static int fix_mp_offset(OMTVALUE v, u_int32_t i, void *extra) {
    struct mp_pair *p = extra;
    char *old_value = v;
    char *new_value = old_value - (char *)p->orig_base + (char *)p->new_base;
    toku_omt_set_at(p->omt, (OMTVALUE)new_value, i);
    return 0;
}

BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn) {
    BASEMENTNODE bn = toku_create_empty_bn_no_buffer();
    bn->max_msn_applied = orig_bn->max_msn_applied;
    bn->n_bytes_in_buffer = orig_bn->n_bytes_in_buffer;
    bn->seqinsert = orig_bn->seqinsert;
    bn->stale_ancestor_messages_applied = orig_bn->stale_ancestor_messages_applied;
    bn->stat64_delta = orig_bn->stat64_delta;
    toku_mempool_clone(&orig_bn->buffer_mempool, &bn->buffer_mempool);
    toku_omt_clone_noptr(&bn->buffer, orig_bn->buffer);
    struct mp_pair p;
    p.orig_base = toku_mempool_get_base(&orig_bn->buffer_mempool);
    p.new_base  = toku_mempool_get_base(&bn->buffer_mempool);
    p.omt = bn->buffer;
    toku_omt_iterate(
        bn->buffer,
        fix_mp_offset,
        &p
        );
    return bn;
}
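// fix_mp_offset() rebases each OMT value from the old mempool into the clone:
// a leafentry stored at orig_base + 40, for example, must end up pointing at
// new_base + 40 after the clone, since toku_mempool_clone() copies the bytes
// but the cloned OMT still holds pointers into the original buffer.  (The
// offset 40 is just an illustrative number.)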
BASEMENTNODE toku_create_empty_bn_no_buffer(void) {
    BASEMENTNODE XMALLOC(bn);
    bn->max_msn_applied.msn = 0;
    bn->buffer = NULL;
    bn->n_bytes_in_buffer = 0;
    bn->seqinsert = 0;
    bn->stale_ancestor_messages_applied = false;
    toku_mempool_zero(&bn->buffer_mempool);
    bn->stat64_delta = ZEROSTATS;
    return bn;
}

NONLEAF_CHILDINFO toku_create_empty_nl(void) {
    NONLEAF_CHILDINFO XMALLOC(cn);
    cn->n_bytes_in_buffer = 0;
    int r = toku_fifo_create(&cn->buffer); assert_zero(r);
    r = toku_omt_create(&cn->fresh_message_tree); assert_zero(r);
    r = toku_omt_create(&cn->stale_message_tree); assert_zero(r);
    r = toku_omt_create(&cn->broadcast_list); assert_zero(r);
    return cn;
}

// does NOT create OMTs, just the FIFO
NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo) {
    NONLEAF_CHILDINFO XMALLOC(cn);
    cn->n_bytes_in_buffer = orig_childinfo->n_bytes_in_buffer;
    cn->fresh_message_tree = NULL;
    cn->stale_message_tree = NULL;
    cn->broadcast_list = NULL;
    toku_fifo_clone(orig_childinfo->buffer, &cn->buffer);
    return cn;
}

void destroy_basement_node (BASEMENTNODE bn)
{
    // The buffer may have been freed already, in some cases.
    if (bn->buffer) {
        toku_omt_destroy(&bn->buffer);
    }
    toku_free(bn);
}

void destroy_nonleaf_childinfo (NONLEAF_CHILDINFO nl)
{
    toku_fifo_free(&nl->buffer);
    if (nl->fresh_message_tree) toku_omt_destroy(&nl->fresh_message_tree);
    if (nl->stale_message_tree) toku_omt_destroy(&nl->stale_message_tree);
    if (nl->broadcast_list) toku_omt_destroy(&nl->broadcast_list);
    toku_free(nl);
}
static int
read_block_from_fd_into_rbuf(
    int fd,
    BLOCKNUM blocknum,
    struct brt_header *h,
    struct rbuf *rb
    )
{
    if (h->panic) {
        toku_trace("panic set, will not read block from fd into buf");
        return h->panic;
    }
    toku_trace("deserial start nopanic");

    // get the file offset and block size for the block
    DISKOFF offset, size;
    toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size);
    u_int8_t *XMALLOC_N(size, raw_block);
    rbuf_init(rb, raw_block, size);
    {
        // read the block
        ssize_t rlen = toku_os_pread(fd, raw_block, size, offset);
        lazy_assert((DISKOFF)rlen == size);
    }
    return 0;
}
static const int read_header_heuristic_max = 32*1024;

#define MIN(a,b) (((a)>(b)) ? (b) : (a))

static void read_brtnode_header_from_fd_into_rbuf_if_small_enough (int fd, BLOCKNUM blocknum, struct brt_header *h, struct rbuf *rb)
// Effect: If the header part of the node is small enough, then read it into the rbuf.  The rbuf will be allocated to be big enough in any case.
{
    assert(!h->panic);
    DISKOFF offset, size;
    toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size);
    DISKOFF read_size = MIN(read_header_heuristic_max, size);
    u_int8_t *XMALLOC_N(size, raw_block);
    rbuf_init(rb, raw_block, read_size);
    {
        // read the block
        ssize_t rlen = toku_os_pread(fd, raw_block, read_size, offset);
        assert(rlen >= 0);
        rbuf_init(rb, raw_block, rlen);
    }
}
//
// read the compressed partition into the sub_block,
// validate the checksum of the compressed data
//
static void
read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb)
{
    sb->compressed_size = rbuf_int(rb);
    sb->uncompressed_size = rbuf_int(rb);
    bytevec *cp = (bytevec *)&sb->compressed_ptr;
    rbuf_literal_bytes(rb, cp, sb->compressed_size);
    sb->xsum = rbuf_int(rb);
    // let's check the checksum
    u_int32_t actual_xsum = x1764_memory((char *)sb->compressed_ptr-8, 8+sb->compressed_size);
    invariant(sb->xsum == actual_xsum);
}

static void
read_and_decompress_sub_block(struct rbuf *rb, struct sub_block *sb)
{
    read_compressed_sub_block(rb, sb);
    sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size);
    assert(sb->uncompressed_ptr);
    toku_decompress(
        sb->uncompressed_ptr,
        sb->uncompressed_size,
        sb->compressed_ptr,
        sb->compressed_size
        );
}

// verify the checksum
static void
verify_brtnode_sub_block (struct sub_block *sb)
{
    // first verify the checksum
    u_int32_t data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
    u_int32_t stored_xsum = toku_dtoh32(*((u_int32_t *)((char *)sb->uncompressed_ptr + data_size)));
    u_int32_t actual_xsum = x1764_memory(sb->uncompressed_ptr, data_size);
    if (stored_xsum != actual_xsum) {
        dump_bad_block(sb->uncompressed_ptr, sb->uncompressed_size);
        assert(FALSE);
    }
}
// This function deserializes the data stored by serialize_brtnode_info
static void
deserialize_brtnode_info(
    struct sub_block *sb,
    BRTNODE node
    )
{
    // sb_node_info->uncompressed_ptr stores the serialized node information
    // this function puts that information into node

    // first verify the checksum
    verify_brtnode_sub_block(sb);
    u_int32_t data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end

    // now with the data verified, we can read the information into the node
    struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0};
    rbuf_init(&rb, sb->uncompressed_ptr, data_size);

    node->max_msn_applied_to_node_on_disk = rbuf_msn(&rb);
    node->nodesize = rbuf_int(&rb);
    node->flags = rbuf_int(&rb);
    node->height = rbuf_int(&rb);
    node->optimized_for_upgrade = rbuf_int(&rb);

    // now create the basement nodes or childinfos, depending on whether this is a
    // leaf node or internal node
    // now the subtree_estimates

    // n_children is now in the header, and the allocation of node->bp is in deserialize_brtnode_from_rbuf.
    assert(node->bp != NULL);

    // now the pivots
    node->totalchildkeylens = 0;
    if (node->n_children > 1) {
        XMALLOC_N(node->n_children - 1, node->childkeys);
        assert(node->childkeys);
        for (int i=0; i < node->n_children-1; i++) {
            bytevec childkeyptr;
            unsigned int cklen;
            rbuf_bytes(&rb, &childkeyptr, &cklen);
            node->childkeys[i] = kv_pair_malloc((void *)childkeyptr, cklen, 0, 0);
            node->totalchildkeylens += toku_brt_pivot_key_len(node->childkeys[i]);
        }
    }
    else {
        node->childkeys = NULL;
        node->totalchildkeylens = 0;
    }

    // if this is an internal node, unpack the block nums, and fill in necessary fields
    // of childinfo
    if (node->height > 0) {
        for (int i = 0; i < node->n_children; i++) {
            BP_BLOCKNUM(node,i) = rbuf_blocknum(&rb);
            BP_WORKDONE(node, i) = 0;
        }
    }

    // make sure that all the data was read
    if (data_size != rb.ndone) {
        dump_bad_block(rb.buf, rb.size);
        assert(FALSE);
    }
}
static void
setup_available_brtnode_partition(BRTNODE node, int i) {
    if (node->height == 0) {
        set_BLB(node, i, toku_create_empty_bn());
        BLB_MAX_MSN_APPLIED(node,i) = node->max_msn_applied_to_node_on_disk;
    }
    else {
        set_BNC(node, i, toku_create_empty_nl());
    }
}

static void setup_brtnode_partitions(BRTNODE node, struct brtnode_fetch_extra *bfe, bool data_in_memory)
// Effect: Used when reading a brtnode into main memory, this sets up the partitions.
//   We set bfe->child_to_read as well as the BP_STATE and the data pointers (e.g., with set_BSB or set_BNULL or other set_ operations).
// Arguments:  Node: the node to set up.
//             bfe:  Describes the key range needed.
//             data_in_memory: true if we have all the data (in which case we set the BP_STATE to be either PT_AVAIL or PT_COMPRESSED depending on the bfe),
//                             false if we don't have the partitions in main memory (in which case we set the state to PT_ON_DISK).
{
    if (bfe->type == brtnode_fetch_subset && bfe->search != NULL) {
        // we do not take into account prefetching yet
        // as of now, if we need a subset, the only thing
        // we can possibly require is a single basement node
        // we find out what basement node the query cares about
        // and check if it is available
        assert(bfe->search);
        bfe->child_to_read = toku_brt_search_which_child(
            &bfe->h->cmp_descriptor,
            bfe->h->compare_fun,
            node,
            bfe->search
            );
    }

    int lc, rc;
    if (bfe->type == brtnode_fetch_subset || bfe->type == brtnode_fetch_prefetch) {
        lc = toku_bfe_leftmost_child_wanted(bfe, node);
        rc = toku_bfe_rightmost_child_wanted(bfe, node);
    } else {
        lc = -1;
        rc = -1;
    }

    //
    // setup memory needed for the node
    //
    //printf("node height %d, blocknum %"PRId64", type %d lc %d rc %d\n", node->height, node->thisnodename.b, bfe->type, lc, rc);
    for (int i = 0; i < node->n_children; i++) {
        BP_INIT_UNTOUCHED_CLOCK(node, i);
        if (data_in_memory) {
            BP_STATE(node, i) = ((toku_bfe_wants_child_available(bfe, i) || (lc <= i && i <= rc))
                                 ? PT_AVAIL : PT_COMPRESSED);
        } else {
            BP_STATE(node, i) = PT_ON_DISK;
        }
        BP_WORKDONE(node, i) = 0;
        switch (BP_STATE(node, i)) {
        case PT_AVAIL:
            setup_available_brtnode_partition(node, i);
            BP_TOUCH_CLOCK(node, i);
            continue;
        case PT_COMPRESSED:
            set_BSB(node, i, sub_block_creat());
            continue;
        case PT_ON_DISK:
            set_BNULL(node, i);
            continue;
        case PT_INVALID:
            break;
        }
        assert(FALSE);
    }
}
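
// The per-partition state choice in the loop above reduces to a small rule.
// A standalone sketch of the same rule (illustrative only: the enum type name
// below is a hypothetical spelling, and `wanted` stands for the result of
// toku_bfe_wants_child_available(bfe, i)):
//
//     static inline enum pt_state        // hypothetical name for the BP_STATE type
//     choose_bp_state(bool data_in_memory, bool wanted, int i, int lc, int rc) {
//         if (!data_in_memory) return PT_ON_DISK;               // must be paged in later
//         if (wanted || (lc <= i && i <= rc)) return PT_AVAIL;  // decompress now
//         return PT_COMPRESSED;                                 // keep the compressed bytes
//     }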

/* deserialize the partition from the sub-block's uncompressed buffer
 * and destroy the uncompressed buffer
 */
static void
deserialize_brtnode_partition(
    struct sub_block *sb,
    BRTNODE node,
    int childnum,    // which partition to deserialize
    DESCRIPTOR desc,
    brt_compare_func cmp
    )
{
    verify_brtnode_sub_block(sb);
    u_int32_t data_size = sb->uncompressed_size - 4;  // checksum is 4 bytes at end

    // now with the data verified, we can read the information into the node
    struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0};
    rbuf_init(&rb, sb->uncompressed_ptr, data_size);
    unsigned char ch = rbuf_char(&rb);

    if (node->height > 0) {
        assert(ch == BRTNODE_PARTITION_FIFO_MSG);
        deserialize_child_buffer(BNC(node, childnum), &rb, desc, cmp);
        BP_WORKDONE(node, childnum) = 0;
    }
    else {
        assert(ch == BRTNODE_PARTITION_OMT_LEAVES);
        BLB_SEQINSERT(node, childnum) = 0;
        uint32_t num_entries = rbuf_int(&rb);
        uint32_t start_of_data = rb.ndone;  // index of first byte of first leafentry
        data_size -= start_of_data;         // remaining bytes of leafentry data
        // TODO 3988 Count empty basements (data_size == 0)
        if (data_size == 0) {
            // printf("#### Deserialize empty basement, childnum = %d\n", childnum);
            invariant_zero(num_entries);
        }
        OMTVALUE *XMALLOC_N(num_entries, array);  // create array of pointers to leafentries
        BASEMENTNODE bn = BLB(node, childnum);
        toku_mempool_copy_construct(&bn->buffer_mempool, &rb.buf[rb.ndone], data_size);
        uint8_t *le_base = toku_mempool_get_base(&bn->buffer_mempool);  // point to first le in mempool
        for (u_int32_t i = 0; i < num_entries; i++) {  // now set up the pointers in the omt
            LEAFENTRY le = (LEAFENTRY)&le_base[rb.ndone - start_of_data];  // point to durable mempool, not to transient rbuf
            u_int32_t disksize = leafentry_disksize(le);
            rb.ndone += disksize;
            invariant(rb.ndone <= rb.size);
            array[i] = (OMTVALUE)le;
        }
        u_int32_t end_of_data = rb.ndone;

        BLB_NBYTESINBUF(node, childnum) += end_of_data - start_of_data;
        // destroy old omt (bn.buffer) that was created by toku_create_empty_bn(), so we can create a new one
        toku_omt_destroy(&BLB_BUFFER(node, childnum));
        int r = toku_omt_create_steal_sorted_array(&BLB_BUFFER(node, childnum), &array, num_entries, num_entries);
        invariant_zero(r);
    }
    assert(rb.ndone == rb.size);
    toku_free(sb->uncompressed_ptr);
}
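
// For reference, the uncompressed partition bytes consumed above have this
// shape (derived from the reads in deserialize_brtnode_partition; widths in
// bytes):
//
//     leaf partition:     | tag (1) | num_entries (4) | leafentries (...)        | xsum (4) |
//     nonleaf partition:  | tag (1) | serialized FIFO of messages (...)          | xsum (4) |
//
// where tag is BRTNODE_PARTITION_OMT_LEAVES or BRTNODE_PARTITION_FIFO_MSG, and
// the trailing 4-byte checksum was already verified by verify_brtnode_sub_block()
// before any of these reads.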

static void
decompress_and_deserialize_worker(struct rbuf curr_rbuf, struct sub_block curr_sb, BRTNODE node, int child, DESCRIPTOR desc, brt_compare_func cmp)
{
    read_and_decompress_sub_block(&curr_rbuf, &curr_sb);
    // at this point, curr_sb.uncompressed_ptr stores the serialized node partition
    deserialize_brtnode_partition(&curr_sb, node, child, desc, cmp);
}

static void
check_and_copy_compressed_sub_block_worker(struct rbuf curr_rbuf, struct sub_block curr_sb, BRTNODE node, int child)
{
    read_compressed_sub_block(&curr_rbuf, &curr_sb);
    SUB_BLOCK bp_sb = BSB(node, child);
    bp_sb->compressed_size = curr_sb.compressed_size;
    bp_sb->uncompressed_size = curr_sb.uncompressed_size;
    bp_sb->compressed_ptr = toku_xmalloc(bp_sb->compressed_size);
    memcpy(bp_sb->compressed_ptr, curr_sb.compressed_ptr, bp_sb->compressed_size);
}

static int deserialize_brtnode_header_from_rbuf_if_small_enough(BRTNODE *brtnode,
                                                                BRTNODE_DISK_DATA *ndd,
                                                                BLOCKNUM blocknum,
                                                                u_int32_t fullhash,
                                                                struct brtnode_fetch_extra *bfe,
                                                                struct rbuf *rb,
                                                                int fd)
// If we have enough information in the rbuf to construct a header, then do so.
// Also fetch in the basement node if needed.
// Return 0 if it worked.  If something goes wrong (including that we are looking at some old data format that doesn't have partitions) then return nonzero.
{
    int r;
    BRTNODE node = toku_xmalloc(sizeof(*node));

    // fill in values that are known and not stored in rb
    node->fullhash = fullhash;
    node->thisnodename = blocknum;
    node->dirty = 0;
    node->bp = NULL;  // fill this in so we can free without a leak.

    if (rb->size < 24) {
        r = EINVAL;
        goto cleanup;
    }

    bytevec magic;
    rbuf_literal_bytes(rb, &magic, 8);
    if (memcmp(magic, "tokuleaf", 8) != 0 &&
        memcmp(magic, "tokunode", 8) != 0) {
        r = toku_db_badformat();
        goto cleanup;
    }

    node->layout_version_read_from_disk = rbuf_int(rb);
    if (node->layout_version_read_from_disk < BRT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES) {
        // This code path doesn't have to worry about upgrade.
        r = EINVAL;
        goto cleanup;
    }

    node->layout_version = node->layout_version_read_from_disk;
    node->layout_version_original = rbuf_int(rb);
    node->build_id = rbuf_int(rb);
    node->n_children = rbuf_int(rb);
    // Guaranteed to have been able to read up to here.  If n_children
    // is too big, we may have a problem, so check that we won't overflow
    // while reading the partition locations.
    unsigned int nhsize = serialize_node_header_size(node);  // we can do this because n_children is filled in.
    unsigned int needed_size = nhsize + 12;  // we need 12 more so that we can read the compressed block size information that follows for the nodeinfo.
    if (needed_size > rb->size) {
        r = EINVAL;
        goto cleanup;
    }

    XMALLOC_N(node->n_children, node->bp);
    *ndd = toku_xmalloc(node->n_children * sizeof(**ndd));
    // read the partition locations
    for (int i = 0; i < node->n_children; i++) {
        BP_START(*ndd, i) = rbuf_int(rb);
        BP_SIZE(*ndd, i) = rbuf_int(rb);
    }

    u_int32_t checksum = x1764_memory(rb->buf, rb->ndone);
    u_int32_t stored_checksum = rbuf_int(rb);
    if (stored_checksum != checksum) {
        dump_bad_block(rb->buf, rb->size);
        invariant(stored_checksum == checksum);
    }

    // Now we want to read the pivot information.
    struct sub_block sb_node_info;
    sub_block_init(&sb_node_info);
    sb_node_info.compressed_size = rbuf_int(rb);  // we'll be able to read these because we checked the size earlier.
    sb_node_info.uncompressed_size = rbuf_int(rb);
    if (rb->size - rb->ndone < sb_node_info.compressed_size + 8) {
        r = EINVAL;  // we won't be able to read the compressed node info and its checksum.
        goto cleanup;
    }
    // We got the entire header and node info!
    toku_brt_status_update_pivot_fetch_reason(bfe);

    // Finish reading the compressed sub_block
    bytevec *cp = (bytevec *)&sb_node_info.compressed_ptr;
    rbuf_literal_bytes(rb, cp, sb_node_info.compressed_size);
    sb_node_info.xsum = rbuf_int(rb);
    // let's check the checksum
    u_int32_t actual_xsum = x1764_memory((char *)sb_node_info.compressed_ptr - 8, 8 + sb_node_info.compressed_size);
    invariant(sb_node_info.xsum == actual_xsum);

    // Now decompress the subblock
    sb_node_info.uncompressed_ptr = toku_xmalloc(sb_node_info.uncompressed_size);
    assert(sb_node_info.uncompressed_ptr);

    toku_decompress(
        sb_node_info.uncompressed_ptr,
        sb_node_info.uncompressed_size,
        sb_node_info.compressed_ptr,
        sb_node_info.compressed_size
        );

    // at this point sb_node_info.uncompressed_ptr stores the serialized node info.
    deserialize_brtnode_info(&sb_node_info, node);
    toku_free(sb_node_info.uncompressed_ptr);
    sb_node_info.uncompressed_ptr = NULL;

    // Now we have the brtnode_info.  We have a bunch more stuff in the
    // rbuf, so we might be able to store the compressed data for some
    // objects.
    // We can proceed to deserialize the individual subblocks.
    assert(bfe->type == brtnode_fetch_none || bfe->type == brtnode_fetch_subset || bfe->type == brtnode_fetch_all || bfe->type == brtnode_fetch_prefetch);

    // setup the memory of the partitions
    // for partitions being decompressed, create either FIFO or basement node
    // for partitions staying compressed, create sub_block
    setup_brtnode_partitions(node, bfe, false);

    if (bfe->type != brtnode_fetch_none) {
        PAIR_ATTR attr;
        toku_brtnode_pf_callback(node, *ndd, bfe, fd, &attr);
    }

    // handle clock
    for (int i = 0; i < node->n_children; i++) {
        if (toku_bfe_wants_child_available(bfe, i)) {
            assert(BP_STATE(node, i) == PT_AVAIL);
            BP_TOUCH_CLOCK(node, i);
        }
    }
    *brtnode = node;
    r = 0;
cleanup:
    if (r != 0) {
        if (node) {
            toku_free(*ndd);
            toku_free(node->bp);
            toku_free(node);
        }
    }
    return r;
}
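
// For reference, the node header parsed above lays out as follows (derived from
// the reads in this function; widths in bytes, n = n_children):
//
//     | magic (8) | layout_version (4) | layout_version_original (4) | build_id (4) |
//     | n_children (4) | n * { BP_START (4), BP_SIZE (4) } | header xsum (4) |
//     | nodeinfo compressed_size (4) | nodeinfo uncompressed_size (4) |
//     | compressed nodeinfo (...) | nodeinfo xsum (4) |
//
// The +12 in needed_size covers the two 4-byte nodeinfo size fields plus,
// presumably, the 4-byte header checksum that sits between the partition
// locations and the nodeinfo.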

static int
deserialize_brtnode_from_rbuf(
    BRTNODE *brtnode,
    BRTNODE_DISK_DATA *ndd,
    BLOCKNUM blocknum,
    u_int32_t fullhash,
    struct brtnode_fetch_extra *bfe,
    struct rbuf *rb
    )
// Effect: deserializes a brtnode that is in rb (with pointer of rb just past the magic) into a BRTNODE.
{
    int r = 0;
    BRTNODE node = toku_xmalloc(sizeof(*node));
    struct sub_block sb_node_info;
    // fill in values that are known and not stored in rb
    node->fullhash = fullhash;
    node->thisnodename = blocknum;
    node->dirty = 0;

    // now start reading from rbuf
    // first thing we do is read the header information
    bytevec magic;
    rbuf_literal_bytes(rb, &magic, 8);
    if (memcmp(magic, "tokuleaf", 8) != 0 &&
        memcmp(magic, "tokunode", 8) != 0) {
        r = toku_db_badformat();
        goto cleanup;
    }

    node->layout_version_read_from_disk = rbuf_int(rb);
    // TODO 4053
    invariant(node->layout_version_read_from_disk == BRT_LAYOUT_VERSION);
    node->layout_version = node->layout_version_read_from_disk;
    node->layout_version_original = rbuf_int(rb);
    node->build_id = rbuf_int(rb);
    node->n_children = rbuf_int(rb);
    XMALLOC_N(node->n_children, node->bp);
    *ndd = toku_xmalloc(node->n_children * sizeof(**ndd));
    // read the partition locations
    for (int i = 0; i < node->n_children; i++) {
        BP_START(*ndd, i) = rbuf_int(rb);
        BP_SIZE(*ndd, i) = rbuf_int(rb);
    }

    // verify checksum of header stored
    u_int32_t checksum = x1764_memory(rb->buf, rb->ndone);
    u_int32_t stored_checksum = rbuf_int(rb);
    if (stored_checksum != checksum) {
        dump_bad_block(rb->buf, rb->size);
        invariant(stored_checksum == checksum);
    }

    // now we read and decompress the pivot and child information
    sub_block_init(&sb_node_info);
    read_and_decompress_sub_block(rb, &sb_node_info);
    // at this point, sb_node_info.uncompressed_ptr stores the serialized node info
    deserialize_brtnode_info(&sb_node_info, node);
    toku_free(sb_node_info.uncompressed_ptr);

    // now that the node info has been deserialized, we can proceed to deserialize
    // the individual sub blocks
    assert(bfe->type == brtnode_fetch_none || bfe->type == brtnode_fetch_subset || bfe->type == brtnode_fetch_all || bfe->type == brtnode_fetch_prefetch);

    // setup the memory of the partitions
    // for partitions being decompressed, create either FIFO or basement node
    // for partitions staying compressed, create sub_block
    setup_brtnode_partitions(node, bfe, true);

    // Previously, this code was a for loop with spawns inside and a sync at the end.
    // But now the loop is parallelizeable since we don't have a dependency on the work done so far.
    cilk_for (int i = 0; i < node->n_children; i++) {
        u_int32_t curr_offset = BP_START(*ndd, i);
        u_int32_t curr_size   = BP_SIZE(*ndd, i);
        // the compressed, serialized partitions start at where rb is currently pointing,
        // which would be rb->buf + rb->ndone
        // we need to initialize curr_rbuf to point to this place
        struct rbuf curr_rbuf = {.buf = NULL, .size = 0, .ndone = 0};
        rbuf_init(&curr_rbuf, rb->buf + curr_offset, curr_size);

        //
        // now we are at the point where we have:
        //  - read the entire compressed node off of disk,
        //  - decompressed the pivot and offset information,
        //  - have arrived at the individual partitions.
        //
        // Based on the information in bfe, we want to decompress a subset
        // of the compressed partitions (also possibly none or possibly all).
        // The partitions that we want to decompress and make available
        // to the node, we do; the rest we simply copy in compressed
        // form into the node, and set the state of the partition to PT_COMPRESSED.
        //
        struct sub_block curr_sb;
        sub_block_init(&curr_sb);

        // curr_rbuf is passed by value to decompress_and_deserialize_worker, so there's no ugly race condition.
        // This would be more obvious if curr_rbuf were an array.

        // deserialize_brtnode_info figures out what the state
        // should be and sets up the memory so that we are ready to use it
        switch (BP_STATE(node, i)) {
        case PT_AVAIL:
            // case where we read and decompress the partition
            decompress_and_deserialize_worker(curr_rbuf, curr_sb, node, i, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
            continue;
        case PT_COMPRESSED:
            // case where we leave the partition in the compressed state
            check_and_copy_compressed_sub_block_worker(curr_rbuf, curr_sb, node, i);
            continue;
        case PT_INVALID:  // this is really bad
        case PT_ON_DISK:  // it's supposed to be in memory.
            assert(0);
            continue;
        }
        assert(0);
    }
    *brtnode = node;
    r = 0;
cleanup:
    if (r != 0) {
        if (node) toku_free(node);
    }
    return r;
}

void
toku_deserialize_bp_from_disk(BRTNODE node, BRTNODE_DISK_DATA ndd, int childnum, int fd, struct brtnode_fetch_extra *bfe) {
    assert(BP_STATE(node, childnum) == PT_ON_DISK);
    assert(node->bp[childnum].ptr.tag == BCT_NULL);

    //
    // setup the partition
    //
    setup_available_brtnode_partition(node, childnum);
    BP_STATE(node, childnum) = PT_AVAIL;

    //
    // read off disk and make available in memory
    //
    // get the file offset and block size for the block
    DISKOFF node_offset, total_node_disk_size;
    toku_translate_blocknum_to_offset_size(
        bfe->h->blocktable,
        node->thisnodename,
        &node_offset,
        &total_node_disk_size
        );

    u_int32_t curr_offset = BP_START(ndd, childnum);
    u_int32_t curr_size   = BP_SIZE(ndd, childnum);
    struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0};

    u_int8_t *XMALLOC_N(curr_size, raw_block);
    rbuf_init(&rb, raw_block, curr_size);
    {
        // read the block
        ssize_t rlen = toku_os_pread(fd, raw_block, curr_size, node_offset + curr_offset);
        lazy_assert((DISKOFF)rlen == curr_size);
    }

    struct sub_block curr_sb;
    sub_block_init(&curr_sb);
    read_and_decompress_sub_block(&rb, &curr_sb);
    // at this point, curr_sb.uncompressed_ptr stores the serialized node partition
    deserialize_brtnode_partition(&curr_sb, node, childnum, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
    toku_free(raw_block);
}
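
// Note the addressing used above: a node occupies [node_offset, node_offset +
// total_node_disk_size) in the file, and partition childnum occupies the
// sub-range [node_offset + BP_START(ndd, childnum),
//            node_offset + BP_START(ndd, childnum) + BP_SIZE(ndd, childnum)).
// A single pread of BP_SIZE bytes therefore fetches exactly one compressed
// partition without touching the rest of the node.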

// Take a brtnode partition that is in the compressed state, and make it avail
void
toku_deserialize_bp_from_compressed(BRTNODE node, int childnum,
                                    DESCRIPTOR desc, brt_compare_func cmp) {
    assert(BP_STATE(node, childnum) == PT_COMPRESSED);
    SUB_BLOCK curr_sb = BSB(node, childnum);

    assert(curr_sb->uncompressed_ptr == NULL);
    curr_sb->uncompressed_ptr = toku_xmalloc(curr_sb->uncompressed_size);

    setup_available_brtnode_partition(node, childnum);
    BP_STATE(node, childnum) = PT_AVAIL;
    // decompress the sub_block
    toku_decompress(
        curr_sb->uncompressed_ptr,
        curr_sb->uncompressed_size,
        curr_sb->compressed_ptr,
        curr_sb->compressed_size
        );
    deserialize_brtnode_partition(curr_sb, node, childnum, desc, cmp);
    toku_free(curr_sb->compressed_ptr);
    toku_free(curr_sb);
}

// Read brt node from file into struct.  Perform version upgrade if necessary.
int toku_deserialize_brtnode_from(int fd,
                                  BLOCKNUM blocknum,
                                  u_int32_t fullhash,
                                  BRTNODE *brtnode,
                                  BRTNODE_DISK_DATA *ndd,
                                  struct brtnode_fetch_extra *bfe)
// Effect: Read a node in.  If possible, read just the header.
{
    toku_trace("deserial start");

    struct rbuf rb = RBUF_INITIALIZER;
    read_brtnode_header_from_fd_into_rbuf_if_small_enough(fd, blocknum, bfe->h, &rb);

    int r = deserialize_brtnode_header_from_rbuf_if_small_enough(brtnode, ndd, blocknum, fullhash, bfe, &rb, fd);
    if (r != 0) {
        toku_free(rb.buf);
        rb = RBUF_INITIALIZER;

        // Something went wrong, go back to doing it the old way.
        r = read_block_from_fd_into_rbuf(fd, blocknum, bfe->h, &rb);
        if (r != 0) { goto cleanup; }

        r = deserialize_brtnode_from_rbuf(brtnode, ndd, blocknum, fullhash, bfe, &rb);
        if (r != 0) {
            dump_bad_block(rb.buf, rb.size);
        }
        lazy_assert_zero(r);
    }

    toku_trace("deserial done");

cleanup:
    toku_free(rb.buf);
    return r;
}

int
toku_maybe_upgrade_brt(BRT t) {  // possibly do some work to complete the version upgrade of brt
    // If someday we need to inject a message to upgrade the brt, this is where
    // it should be done.  Whenever an upgrade is done, all nodes will be marked
    // as dirty, so it makes sense here to always inject an OPTIMIZE message.
    // (Note, if someday the version number is stored in the translation instead
    // of in each node, then the upgrade would not necessarily dirty each node.)
    int r = 0;
    int version = t->h->layout_version_read_from_disk;

    int upgrade = 0;
    if (!t->h->upgrade_brt_performed) {  // upgrade may be necessary
        switch (version) {
        case BRT_LAYOUT_VERSION_13:
            r = 0;
            upgrade++;
            //Fall through on purpose.
        case BRT_LAYOUT_VERSION:
            if (r == 0 && upgrade) {
                r = toku_brt_optimize_for_upgrade(t);
                if (r == 0)
                    __sync_fetch_and_add(&UPGRADE_STATUS_VALUE(BRT_UPGRADE_OPTIMIZED_FOR_UPGRADE), 1);
            }
            if (r == 0) {
                t->h->upgrade_brt_performed = TRUE;  // no further upgrade necessary
            }
            break;
        default:
            invariant(FALSE);
        }
    }
    if (r) {
        if (t->h->panic == 0) {
            char *e = strerror(r);
            int l = 200 + strlen(e);
            char s[l];
            t->h->panic = r;
            snprintf(s, l - 1, "While upgrading brt version, error %d (%s)", r, e);
            t->h->panic_string = toku_strdup(s);
        }
    }
    return r;
}

// ################

void
toku_verify_or_set_counts(BRTNODE node) {
    node = node;
    if (node->height == 0) {
        for (int i = 0; i < node->n_children; i++) {
            lazy_assert(BLB_BUFFER(node, i));
            struct sum_info sum_info = {0, 0};
            toku_omt_iterate(BLB_BUFFER(node, i), sum_item, &sum_info);
            lazy_assert(sum_info.count == toku_omt_size(BLB_BUFFER(node, i)));
            lazy_assert(sum_info.dsum == BLB_NBYTESINBUF(node, i));
        }
    }
    else {
        // nothing to do because we no longer store n_bytes_in_buffers for
        // the whole node
    }
}

static u_int32_t
serialize_brt_header_min_size(u_int32_t version) {
    u_int32_t size = 0;

    switch (version) {
    case BRT_LAYOUT_VERSION_19:
        size += 1;  // compression method
    case BRT_LAYOUT_VERSION_18:
        size += sizeof(uint64_t);  // time_of_last_optimize_begin
        size += sizeof(uint64_t);  // time_of_last_optimize_end
        size += sizeof(uint32_t);  // count_of_optimize_in_progress
        size += sizeof(MSN);       // msn_at_start_of_last_completed_optimize
    case BRT_LAYOUT_VERSION_17:
        size += 16;
        invariant(sizeof(STAT64INFO_S) == 16);
    case BRT_LAYOUT_VERSION_16:
    case BRT_LAYOUT_VERSION_15:
        size += 4;  // basement node size
        size += 8;  // num_blocks_to_upgrade_14 (previously num_blocks_to_upgrade, now one int each for upgrade from 13 and 14)
        size += 8;  // time of last verification
    case BRT_LAYOUT_VERSION_14:
        size += 8;  // TXNID that created
    case BRT_LAYOUT_VERSION_13:
        size += (4    // build_id
                 + 4  // build_id_original
                 + 8  // time_of_creation
                 + 8  // time_of_last_modification
            );
        // fall through
    case BRT_LAYOUT_VERSION_12:
        size += (+8   // "tokudata"
                 + 4  // version
                 + 4  // original_version
                 + 4  // size
                 + 8  // byte order verification
                 + 8  // checkpoint_count
                 + 8  // checkpoint_lsn
                 + 4  // tree's nodesize
                 + 8  // translation_size_on_disk
                 + 8  // translation_address_on_disk
                 + 4  // checksum
                 + 8  // Number of blocks in old version.
                 + 8  // diskoff
                 + 4  // flags
            );
        break;
    default:
        lazy_assert(FALSE);
    }
    lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
    return size;
}
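
// Because the switch above falls through from newer to older cases, the minimum
// size for a version is the sum of its own additions plus everything below it.
// A worked example, summing the literal byte counts above: the VERSION_12
// fields total 88 bytes, VERSION_13 adds 24, and VERSION_14 adds 8, so
// serialize_brt_header_min_size(BRT_LAYOUT_VERSION_14) == 120.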

int toku_serialize_brt_header_size(struct brt_header *h) {
    u_int32_t size = serialize_brt_header_min_size(h->layout_version);
    //There is no dynamic data.
    lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
    return size;
}

int toku_serialize_brt_header_to_wbuf(struct wbuf *wbuf, struct brt_header *h, DISKOFF translation_location_on_disk, DISKOFF translation_size_on_disk) {
    unsigned int size = toku_serialize_brt_header_size(h);  // !!! seems silly to recompute the size when the caller knew it.  Do we really need the size?
    wbuf_literal_bytes(wbuf, "tokudata", 8);
    wbuf_network_int(wbuf, h->layout_version);  //MUST be in network order regardless of disk order
    wbuf_network_int(wbuf, BUILD_ID);           //MUST be in network order regardless of disk order
    wbuf_network_int(wbuf, size);               //MUST be in network order regardless of disk order
    wbuf_literal_bytes(wbuf, &toku_byte_order_host, 8);  //Must not translate byte order
    wbuf_ulonglong(wbuf, h->checkpoint_count);
    wbuf_LSN(wbuf, h->checkpoint_lsn);
    wbuf_int(wbuf, h->nodesize);

    //printf("%s:%d bta=%lu size=%lu\n", __FILE__, __LINE__, h->block_translation_address_on_disk, 4 + 16*h->translated_blocknum_limit);
    wbuf_DISKOFF(wbuf, translation_location_on_disk);
    wbuf_DISKOFF(wbuf, translation_size_on_disk);
    wbuf_BLOCKNUM(wbuf, h->root_blocknum);
    wbuf_int(wbuf, h->flags);
    wbuf_int(wbuf, h->layout_version_original);
    wbuf_int(wbuf, h->build_id_original);
    wbuf_ulonglong(wbuf, h->time_of_creation);
    wbuf_ulonglong(wbuf, h->time_of_last_modification);
    wbuf_ulonglong(wbuf, h->num_blocks_to_upgrade_13);
    wbuf_ulonglong(wbuf, h->num_blocks_to_upgrade_14);
    wbuf_TXNID(wbuf, h->root_xid_that_created);
    wbuf_int(wbuf, h->basementnodesize);
    wbuf_ulonglong(wbuf, h->time_of_last_verification);
    wbuf_ulonglong(wbuf, h->checkpoint_staging_stats.numrows);
    wbuf_ulonglong(wbuf, h->checkpoint_staging_stats.numbytes);
    wbuf_ulonglong(wbuf, h->time_of_last_optimize_begin);
    wbuf_ulonglong(wbuf, h->time_of_last_optimize_end);
    wbuf_int(wbuf, h->count_of_optimize_in_progress);
    wbuf_MSN(wbuf, h->msn_at_start_of_last_completed_optimize);
    wbuf_char(wbuf, (unsigned char)h->compression_method);
    u_int32_t checksum = x1764_finish(&wbuf->checksum);
    wbuf_int(wbuf, checksum);
    lazy_assert(wbuf->ndone == wbuf->size);
    return 0;
}
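
// Maintenance note: the field order written above must mirror the read order in
// deserialize_brtheader() below.  The raw bytes are checksummed as a whole, so
// a field added on one side but not the other surfaces as a size mismatch at
// the end of deserialization (rc.ndone != rc.size) rather than as a subtly
// misread header.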
int toku_serialize_brt_header_to(int fd, struct brt_header *h) {
    int rr = 0;
    if (h->panic) return h->panic;
    lazy_assert(h->type == BRTHEADER_CHECKPOINT_INPROGRESS);
    toku_brtheader_lock(h);

    struct wbuf w_translation;
    int64_t size_translation;
    int64_t address_translation;
    {
        //Must serialize translation first, to get address,size for header.
        toku_serialize_translation_to_wbuf_unlocked(h->blocktable, &w_translation,
                                                    &address_translation,
                                                    &size_translation);
        lazy_assert(size_translation == w_translation.size);
    }

    struct wbuf w_main;
    unsigned int size_main = toku_serialize_brt_header_size(h);
    {
        wbuf_init(&w_main, toku_xmalloc(size_main), size_main);
        {
            int r = toku_serialize_brt_header_to_wbuf(&w_main, h, address_translation, size_translation);
            lazy_assert_zero(r);
        }
        lazy_assert(w_main.ndone == size_main);
    }
    toku_brtheader_unlock(h);

    lock_for_pwrite();
    {
        //Actual Write translation table
        toku_full_pwrite_extend(fd, w_translation.buf,
                                size_translation, address_translation);
    }
    {
        //Everything but the header MUST be on disk before header starts.
        //Otherwise we will think the header is good and some blocks might not
        //yet be on disk.
        //If the header has a cachefile we need to do cachefile fsync (to
        //prevent crash if we redirected to dev null)
        //If there is no cachefile we still need to do an fsync.
        if (h->cf) {
            rr = toku_cachefile_fsync(h->cf);
        }
        else {
            rr = toku_file_fsync(fd);
        }
        if (rr == 0) {
            //Alternate writing header to two locations:
            //   Beginning (0) or BLOCK_ALLOCATOR_HEADER_RESERVE
            toku_off_t main_offset;
            main_offset = (h->checkpoint_count & 0x1) ? 0 : BLOCK_ALLOCATOR_HEADER_RESERVE;
            toku_full_pwrite_extend(fd, w_main.buf, w_main.ndone, main_offset);
        }
    }
    toku_free(w_main.buf);
    toku_free(w_translation.buf);
    unlock_for_pwrite();
    return rr;
}
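
// The alternation above provides crash safety for header writes: with two
// header slots, a torn write of the new header leaves the previous checkpoint's
// header intact in the other slot.  Worked example: when checkpoint_count is 7
// (odd) the header lands at offset 0; when it is 8 (even) it lands at
// BLOCK_ALLOCATOR_HEADER_RESERVE.  On open, toku_deserialize_brtheader_from()
// reads both slots and keeps the newer acceptable one.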

// not version-sensitive because we only serialize a descriptor using the current layout_version
u_int32_t
toku_serialize_descriptor_size(const DESCRIPTOR desc) {
    //Checksum NOT included in this.  Checksum only exists in header's version.
    u_int32_t size = 4;  // four bytes for size of descriptor
    size += desc->dbt.size;
    return size;
}

static u_int32_t
deserialize_descriptor_size(const DESCRIPTOR desc, int layout_version) {
    //Checksum NOT included in this.  Checksum only exists in header's version.
    u_int32_t size = 4;  // four bytes for size of descriptor
    if (layout_version == BRT_LAYOUT_VERSION_13)
        size += 4;  // for version 13, include four bytes of "version"
    size += desc->dbt.size;
    return size;
}
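
// Worked example: a descriptor whose dbt holds 16 bytes serializes to
// 4 (length field) + 16 (payload) = 20 bytes, and occupies 24 bytes on disk
// once toku_serialize_descriptor_contents_to_fd() appends its 4-byte checksum.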

void
toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc) {
    wbuf_bytes(wb, desc->dbt.data, desc->dbt.size);
}

//Descriptor is written to disk during toku_brt_open iff we have a new (or changed)
//descriptor.
//Descriptors are NOT written during the header checkpoint process.
int
toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF offset) {
    int r = 0;
    // make the checksum
    int64_t size = toku_serialize_descriptor_size(desc) + 4;  //4 for checksum
    struct wbuf w;
    wbuf_init(&w, toku_xmalloc(size), size);
    toku_serialize_descriptor_contents_to_wbuf(&w, desc);
    {
        //Add checksum
        u_int32_t checksum = x1764_finish(&w.checksum);
        wbuf_int(&w, checksum);
    }
    lazy_assert(w.ndone == w.size);
    {
        lock_for_pwrite();
        //Actually write the descriptor
        toku_full_pwrite_extend(fd, w.buf, size, offset);
        unlock_for_pwrite();
    }
    toku_free(w.buf);
    return r;
}

static void
deserialize_descriptor_from_rbuf(struct rbuf *rb, DESCRIPTOR desc, int layout_version) {
    if (layout_version == BRT_LAYOUT_VERSION_13) {
        // in older versions of TokuDB the Descriptor had a 4 byte version, which we must skip over
        u_int32_t dummy_version __attribute__((__unused__)) = rbuf_int(rb);
    }

    u_int32_t size;
    bytevec data;
    rbuf_bytes(rb, &data, &size);
    bytevec data_copy = data;
    if (size > 0) {
        data_copy = toku_memdup(data, size);  //Cannot keep the reference from rbuf. Must copy.
        lazy_assert(data_copy);
    }
    else {
        lazy_assert(size == 0);
        data_copy = NULL;
    }
    toku_fill_dbt(&desc->dbt, data_copy, size);
}

static void
deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_version) {
    DISKOFF offset;
    DISKOFF size;
    toku_get_descriptor_offset_size(bt, &offset, &size);
    memset(desc, 0, sizeof(*desc));
    if (size > 0) {
        lazy_assert(size >= 4);  //4 for checksum
        {
            unsigned char *XMALLOC_N(size, dbuf);
            {
                lock_for_pwrite();
                ssize_t r = toku_os_pread(fd, dbuf, size, offset);
                lazy_assert(r == size);
                unlock_for_pwrite();
            }
            {
                // check the checksum
                u_int32_t x1764 = x1764_memory(dbuf, size - 4);
                //printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, block_translation_address_on_disk, offset, block_translation_size_on_disk);
                u_int32_t stored_x1764 = toku_dtoh32(*(int *)(dbuf + size - 4));
                lazy_assert(x1764 == stored_x1764);
            }
            {
                struct rbuf rb = {.buf = dbuf, .size = size, .ndone = 0};
                //Not temporary; must have a toku_memdup'd copy.
                deserialize_descriptor_from_rbuf(&rb, desc, layout_version);
            }
            lazy_assert(deserialize_descriptor_size(desc, layout_version) + 4 == size);
            toku_free(dbuf);
        }
    }
}

// We only deserialize brt header once and then share everything with all the brts.
static int
deserialize_brtheader(int fd, struct rbuf *rb, struct brt_header **brth) {
    // We already know:
    //  we have an rbuf representing the header.
    //  The checksum has been validated

    //Steal rbuf (used to simplify merge, reduce diff size, and keep old code)
    struct rbuf rc = *rb;
    memset(rb, 0, sizeof(*rb));

    //Verification of initial elements.
    {
        //Check magic number
        bytevec magic;
        rbuf_literal_bytes(&rc, &magic, 8);
        lazy_assert(memcmp(magic, "tokudata", 8) == 0);
    }

    struct brt_header *CALLOC(h);
    if (h == 0) return errno;
    int ret = -1;
    if (0) { died1: toku_free(h); return ret; }
    h->type = BRTHEADER_CURRENT;
    h->checkpoint_header = NULL;
    h->dirty = 0;
    h->panic = 0;
    h->panic_string = 0;
    toku_list_init(&h->live_brts);
    toku_list_init(&h->zombie_brts);
    toku_list_init(&h->checkpoint_before_commit_link);

    //version MUST be in network order on disk regardless of disk order
    h->layout_version = rbuf_network_int(&rc);
    //TODO: #1924
    invariant(h->layout_version >= BRT_LAYOUT_MIN_SUPPORTED_VERSION);
    invariant(h->layout_version <= BRT_LAYOUT_VERSION);
    h->layout_version_read_from_disk = h->layout_version;

    //build_id MUST be in network order on disk regardless of disk order
    h->build_id = rbuf_network_int(&rc);

    //Size MUST be in network order regardless of disk order.
    u_int32_t size = rbuf_network_int(&rc);
    lazy_assert(size == rc.size);

    bytevec tmp_byte_order_check;
    rbuf_literal_bytes(&rc, &tmp_byte_order_check, 8);  //Must not translate byte order
    int64_t byte_order_stored = *(int64_t *)tmp_byte_order_check;
    lazy_assert(byte_order_stored == toku_byte_order_host);

    h->checkpoint_count = rbuf_ulonglong(&rc);
    h->checkpoint_lsn = rbuf_lsn(&rc);
    h->nodesize = rbuf_int(&rc);
    DISKOFF translation_address_on_disk = rbuf_diskoff(&rc);
    DISKOFF translation_size_on_disk = rbuf_diskoff(&rc);
    lazy_assert(translation_address_on_disk > 0);
    lazy_assert(translation_size_on_disk > 0);

    // initialize the tree lock
    toku_brtheader_init_treelock(h);

    // printf("%s:%d translated_blocknum_limit=%ld, block_translation_address_on_disk=%ld\n", __FILE__, __LINE__, h->translated_blocknum_limit, h->block_translation_address_on_disk);
    //Load translation table
    {
        lock_for_pwrite();
        unsigned char *XMALLOC_N(translation_size_on_disk, tbuf);
        {
            // This cast is messed up in 32-bits if the block translation table is ever more than 4GB.  But in that case, the translation table itself won't fit in main memory.
            ssize_t r = toku_os_pread(fd, tbuf, translation_size_on_disk, translation_address_on_disk);
            lazy_assert(r == translation_size_on_disk);
        }
        unlock_for_pwrite();
        // Create table and read in data.
        toku_blocktable_create_from_buffer(&h->blocktable,
                                           translation_address_on_disk,
                                           translation_size_on_disk,
                                           tbuf);
        toku_free(tbuf);
    }

    h->root_blocknum = rbuf_blocknum(&rc);
    h->flags = rbuf_int(&rc);
    h->layout_version_original = rbuf_int(&rc);
    h->build_id_original = rbuf_int(&rc);
    h->time_of_creation = rbuf_ulonglong(&rc);
    h->time_of_last_modification = rbuf_ulonglong(&rc);
    h->time_of_last_verification = 0;
    h->num_blocks_to_upgrade_13 = rbuf_ulonglong(&rc);
    h->num_blocks_to_upgrade_14 = rbuf_ulonglong(&rc);

    if (h->layout_version >= BRT_LAYOUT_VERSION_14) {
        // at this layer, this new field is the only difference between versions 13 and 14
        rbuf_TXNID(&rc, &h->root_xid_that_created);
    }
    if (h->layout_version >= BRT_LAYOUT_VERSION_15) {
        h->basementnodesize = rbuf_int(&rc);
        h->time_of_last_verification = rbuf_ulonglong(&rc);
    }
    if (h->layout_version >= BRT_LAYOUT_VERSION_18) {
        h->on_disk_stats.numrows = rbuf_ulonglong(&rc);
        h->on_disk_stats.numbytes = rbuf_ulonglong(&rc);
        h->in_memory_stats = h->on_disk_stats;
        h->time_of_last_optimize_begin = rbuf_ulonglong(&rc);
        h->time_of_last_optimize_end = rbuf_ulonglong(&rc);
        h->count_of_optimize_in_progress = rbuf_int(&rc);
        h->count_of_optimize_in_progress_read_from_disk = h->count_of_optimize_in_progress;
        h->msn_at_start_of_last_completed_optimize = rbuf_msn(&rc);
    }
    if (h->layout_version >= BRT_LAYOUT_VERSION_19) {
        unsigned char method = rbuf_char(&rc);
        h->compression_method = (enum toku_compression_method)method;
    } else {
        // we hard coded zlib until 5.2, then quicklz in 5.2
        if (h->layout_version < BRT_LAYOUT_VERSION_18) {
            h->compression_method = TOKU_ZLIB_METHOD;
        } else {
            h->compression_method = TOKU_QUICKLZ_METHOD;
        }
    }

    (void)rbuf_int(&rc);  //Read in checksum and ignore (already verified).
    if (rc.ndone != rc.size) { ret = EINVAL; goto died1; }
    toku_free(rc.buf);
    rc.buf = NULL;
    *brth = h;
    return 0;
}

static int
write_descriptor_to_disk_unlocked(struct brt_header *h, DESCRIPTOR d, int fd) {
    int r = 0;
    DISKOFF offset;
    //4 for checksum
    toku_realloc_descriptor_on_disk_unlocked(h->blocktable, toku_serialize_descriptor_size(d) + 4, &offset, h);
    r = toku_serialize_descriptor_contents_to_fd(fd, d, offset);
    return r;
}

//TODO: When version 15 exists, add a case for version 14 that looks like today's version 13 case.
static int
deserialize_brtheader_versioned(int fd, struct rbuf *rb, struct brt_header **brth, u_int32_t version) {
    int rval;
    int upgrade = 0;

    struct brt_header *h = NULL;
    rval = deserialize_brtheader(fd, rb, &h);  //deserialize from rbuf and fd into header
    if (rval == 0) {
        invariant(h);
        invariant((uint32_t)h->layout_version == version);
        deserialize_descriptor_from(fd, h->blocktable, &(h->descriptor), version);
        h->cmp_descriptor.dbt.size = h->descriptor.dbt.size;
        h->cmp_descriptor.dbt.data = toku_xmemdup(h->descriptor.dbt.data, h->descriptor.dbt.size);
        switch (version) {
        case BRT_LAYOUT_VERSION_13:
            invariant(h->layout_version == BRT_LAYOUT_VERSION_13);
            {
                //Upgrade root_xid_that_created
                //Fake creation during the last checkpoint.
                h->root_xid_that_created = h->checkpoint_lsn.lsn;
            }
            {
                //Deprecate 'TOKU_DB_VALCMP_BUILTIN'.  Just remove the flag
                h->flags &= ~TOKU_DB_VALCMP_BUILTIN_13;
            }
            h->layout_version++;
            __sync_fetch_and_add(&UPGRADE_STATUS_VALUE(BRT_UPGRADE_HEADER_13), 1);  // how many header nodes upgraded from v13
            upgrade++;
            //Fall through on purpose
        case BRT_LAYOUT_VERSION_14:
            h->basementnodesize = 128 * 1024;  // basement nodes added in v15
            //fall through on purpose
        case BRT_LAYOUT_VERSION_19:
        case BRT_LAYOUT_VERSION_18:
        case BRT_LAYOUT_VERSION_17:  // version 17 never released to customers
        case BRT_LAYOUT_VERSION_16:  // version 16 never released to customers
        case BRT_LAYOUT_VERSION_15:  // this will not properly support version 15, we'll fix that on upgrade.
            invariant(h->layout_version == BRT_LAYOUT_VERSION);
            h->upgrade_brt_performed = FALSE;
            if (upgrade) {
                toku_brtheader_lock(h);
                h->num_blocks_to_upgrade_13 = toku_block_get_blocks_in_use_unlocked(h->blocktable);  //Total number of blocks
                if (version == BRT_LAYOUT_VERSION_13) {
                    // write upgraded descriptor to disk if descriptor upgraded from version 13
                    rval = write_descriptor_to_disk_unlocked(h, &(h->descriptor), fd);
                }
                h->dirty = 1;
                toku_brtheader_unlock(h);
            }
            *brth = h;
            break;  // this is the only break
        default:
            invariant(FALSE);
        }
    }
    return rval;
}
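
// Upgrade note: the switch above is a cascade, so a v13 header takes every
// upgrade step on the way down (root_xid_that_created is faked from the
// checkpoint LSN, the deprecated VALCMP flag is cleared, basementnodesize gets
// its 128KB default, and the re-encoded descriptor is written back to disk),
// while a header already at the current version falls straight through to the
// bookkeeping at the bottom of the switch.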

// Simply reading the raw bytes of the header into an rbuf is insensitive to disk format version.
// If that ever changes, then modify this.
//TOKUDB_DICTIONARY_NO_HEADER means we can overwrite everything in the file AND the header is useless
static int
deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset_of_header, struct rbuf *rb,
                                        u_int64_t *checkpoint_count, LSN *checkpoint_lsn, u_int32_t *version_p) {
    int r = 0;
    const int64_t prefix_size = 8 +  // magic ("tokudata")
                                4 +  // version
                                4 +  // build_id
                                4;   // size
    unsigned char prefix[prefix_size];
    rb->buf = NULL;
    int64_t n = toku_os_pread(fd, prefix, prefix_size, offset_of_header);
    if (n == 0) r = TOKUDB_DICTIONARY_NO_HEADER;
    else if (n < 0) { r = errno; lazy_assert(r != 0); }
    else if (n != prefix_size) r = EINVAL;
    else {
        rb->size = prefix_size;
        rb->ndone = 0;
        rb->buf = prefix;
        {
            //Check magic number
            bytevec magic;
            rbuf_literal_bytes(rb, &magic, 8);
            if (memcmp(magic, "tokudata", 8) != 0) {
                if ((*(u_int64_t *)magic) == 0) r = TOKUDB_DICTIONARY_NO_HEADER;
                else r = EINVAL;  //Not a tokudb file! Do not use.
            }
        }
        u_int32_t version = 0;
        if (r == 0) {
            //Version MUST be in network order regardless of disk order.
            version = rbuf_network_int(rb);
            *version_p = version;
            if (version < BRT_LAYOUT_MIN_SUPPORTED_VERSION) r = TOKUDB_DICTIONARY_TOO_OLD;  //Cannot use
            if (version > BRT_LAYOUT_VERSION) r = TOKUDB_DICTIONARY_TOO_NEW;  //Cannot use
            //build_id MUST be in network order regardless of disk order.
            u_int32_t build_id __attribute__((__unused__)) = rbuf_network_int(rb);
        }
        u_int32_t size;
        if (r == 0) {
            const int64_t max_header_size = BLOCK_ALLOCATOR_HEADER_RESERVE;
            int64_t min_header_size = serialize_brt_header_min_size(version);
            //Size MUST be in network order regardless of disk order.
            size = rbuf_network_int(rb);
            //If too big, it is corrupt.  We would probably notice during checksum
            //but may have to do a multi-gigabyte malloc+read to find out.
            //If its too small reading rbuf would crash, so verify.
            if (size > max_header_size || size < min_header_size) r = TOKUDB_DICTIONARY_NO_HEADER;
        }
        if (r != 0) {
            rb->buf = NULL;  //Prevent freeing of 'prefix'
        }
        if (r == 0) {
            lazy_assert(rb->ndone == prefix_size);
            rb->size = size;
            rb->buf = toku_xmalloc(rb->size);
        }
        if (r == 0) {
            n = toku_os_pread(fd, rb->buf, rb->size, offset_of_header);
            if (n == -1) {
                r = errno;
                lazy_assert(r != 0);
            }
            else if (n != (int64_t)rb->size) r = EINVAL;  //Header might be useless (wrong size) or could be a disk read error.
        }
        //It's version 10 or later.  Magic looks OK.
        //We have an rbuf that represents the header.
        //Size is within acceptable bounds.
        if (r == 0) {
            //Verify checksum (BRT_LAYOUT_VERSION_13 or later, when checksum function changed)
            u_int32_t calculated_x1764 = x1764_memory(rb->buf, rb->size - 4);
            u_int32_t stored_x1764 = toku_dtoh32(*(int *)(rb->buf + rb->size - 4));
            if (calculated_x1764 != stored_x1764) r = TOKUDB_DICTIONARY_NO_HEADER;  //Header useless
        }
        if (r == 0) {
            //Verify byte order
            bytevec tmp_byte_order_check;
            rbuf_literal_bytes(rb, &tmp_byte_order_check, 8);  //Must not translate byte order
            int64_t byte_order_stored = *(int64_t *)tmp_byte_order_check;
            if (byte_order_stored != toku_byte_order_host) r = TOKUDB_DICTIONARY_NO_HEADER;  //Cannot use dictionary
        }
        if (r == 0) {
            //Load checkpoint count
            *checkpoint_count = rbuf_ulonglong(rb);
            *checkpoint_lsn = rbuf_lsn(rb);
            //Restart at beginning during regular deserialization
            rb->ndone = 0;
        }
    }
    if (r != 0 && rb->buf) {
        toku_free(rb->buf);
        rb->buf = NULL;
    }
    return r;
}
2013-04-16 23:57:58 -04:00
2013-04-16 23:59:35 -04:00
// Read brtheader from file into struct. Read both headers and use one.
2013-04-16 23:59:35 -04:00
// We want the latest acceptable header whose checkpoint_lsn is no later
// than max_acceptable_lsn.
2013-04-16 23:57:58 -04:00
int
2013-04-16 23:59:35 -04:00
toku_deserialize_brtheader_from ( int fd , LSN max_acceptable_lsn , struct brt_header * * brth ) {
2013-04-16 23:57:47 -04:00
struct rbuf rb_0 ;
struct rbuf rb_1 ;
u_int64_t checkpoint_count_0 ;
u_int64_t checkpoint_count_1 ;
2013-04-16 23:59:35 -04:00
LSN checkpoint_lsn_0 ;
LSN checkpoint_lsn_1 ;
2013-04-16 23:57:58 -04:00
u_int32_t version_0 , version_1 , version = 0 ;
2013-04-16 23:59:35 -04:00
BOOL h0_acceptable = FALSE ;
BOOL h1_acceptable = FALSE ;
struct rbuf * rb = NULL ;
int r0 , r1 , r ;
2013-04-16 23:57:58 -04:00
2013-04-16 23:57:47 -04:00
{
toku_off_t header_0_off = 0 ;
2013-04-16 23:59:35 -04:00
r0 = deserialize_brtheader_from_fd_into_rbuf ( fd , header_0_off , & rb_0 , & checkpoint_count_0 , & checkpoint_lsn_0 , & version_0 ) ;
2013-04-16 23:59:35 -04:00
if ( ( r0 = = 0 ) & & ( checkpoint_lsn_0 . lsn < = max_acceptable_lsn . lsn ) )
h0_acceptable = TRUE ;
2013-04-16 23:57:47 -04:00
}
{
toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE ;
2013-04-16 23:59:35 -04:00
r1 = deserialize_brtheader_from_fd_into_rbuf ( fd , header_1_off , & rb_1 , & checkpoint_count_1 , & checkpoint_lsn_1 , & version_1 ) ;
2013-04-16 23:59:35 -04:00
if ( ( r1 = = 0 ) & & ( checkpoint_lsn_1 . lsn < = max_acceptable_lsn . lsn ) )
h1_acceptable = TRUE ;
2013-04-16 23:57:47 -04:00
}
2013-04-16 23:59:35 -04:00
// if either header is too new, the dictionary is unreadable
2013-04-16 23:57:49 -04:00
if ( r0 ! = TOKUDB_DICTIONARY_TOO_NEW & & r1 ! = TOKUDB_DICTIONARY_TOO_NEW ) {
2013-04-16 23:59:35 -04:00
if ( h0_acceptable & & h1_acceptable ) {
if ( checkpoint_count_0 > checkpoint_count_1 ) {
invariant ( checkpoint_count_0 = = checkpoint_count_1 + 1 ) ;
invariant ( version_0 > = version_1 ) ;
2013-04-16 23:59:35 -04:00
rb = & rb_0 ;
version = version_0 ;
2013-04-16 23:59:35 -04:00
r = 0 ;
2013-04-16 23:59:35 -04:00
}
else {
2013-04-16 23:59:35 -04:00
invariant ( checkpoint_count_1 = = checkpoint_count_0 + 1 ) ;
invariant ( version_1 > = version_0 ) ;
2013-04-16 23:59:35 -04:00
rb = & rb_1 ;
version = version_1 ;
2013-04-16 23:59:35 -04:00
r = 0 ;
2013-04-16 23:59:35 -04:00
}
}
2013-04-16 23:59:35 -04:00
else if ( h0_acceptable ) {
rb = & rb_0 ;
version = version_0 ;
r = 0 ;
}
else if ( h1_acceptable ) {
rb = & rb_1 ;
version = version_1 ;
r = 0 ;
2013-04-16 23:59:35 -04:00
}
}

    if (rb == NULL) {
        // We were unable to read either header, or at least one is too new.
        // Certain errors are higher priority than others; the order of these
        // if/else-if tests is important.
        if (r0 == TOKUDB_DICTIONARY_TOO_NEW || r1 == TOKUDB_DICTIONARY_TOO_NEW)
            r = TOKUDB_DICTIONARY_TOO_NEW;
        else if (r0 == TOKUDB_DICTIONARY_TOO_OLD || r1 == TOKUDB_DICTIONARY_TOO_OLD) {
            r = TOKUDB_DICTIONARY_TOO_OLD;
        }
        else if (r0 == TOKUDB_DICTIONARY_NO_HEADER || r1 == TOKUDB_DICTIONARY_NO_HEADER) {
            r = TOKUDB_DICTIONARY_NO_HEADER;
        }
        else r = r0 ? r0 : r1; //Arbitrarily report the error from the first header, unless it's readable

        // it should not be possible for both headers to be later than the max_acceptable_lsn
        invariant(!((r0 == 0 && checkpoint_lsn_0.lsn > max_acceptable_lsn.lsn) &&
                    (r1 == 0 && checkpoint_lsn_1.lsn > max_acceptable_lsn.lsn)));
        invariant(r != 0);
    }

    if (r == 0) r = deserialize_brtheader_versioned(fd, rb, brth, version);
    if (rb_0.buf) toku_free(rb_0.buf);
    if (rb_1.buf) toku_free(rb_1.buf);
    return r;
}
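
// Illustrative usage (not part of this file): a caller that wants the most
// recent header regardless of checkpoint could pass the largest possible LSN
// (assuming a MAX_LSN constant is available):
//
//     struct brt_header *h;
//     int r = toku_deserialize_brtheader_from(fd, MAX_LSN, &h);
//     if (r == TOKUDB_DICTIONARY_TOO_NEW) { /* written by a newer layout version */ }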

unsigned int
toku_brt_pivot_key_len(struct kv_pair *pk) {
    return kv_pair_keylen(pk);
}

int
toku_db_badformat(void) {
    return DB_BADFORMAT;
}

static size_t
serialize_rollback_log_size(ROLLBACK_LOG_NODE log) {
    size_t size = node_header_overhead //8 "tokuroll", 4 version, 4 version_original, 4 build_id
                  + 8 //TXNID
                  + 8 //sequence
                  + 8 //thislogname
                  + 8 //older (blocknum)
                  + 8 //resident_bytecount
                  + 8 //memarena_size_needed_to_load
                  + log->rollentry_resident_bytecount;
    return size;
}
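
// For reference, the serialized layout implied by the size computation above
// (fixed-width fields followed by the variable-length rollback entries):
//
//   offset  0: "tokuroll" magic                (8 bytes)
//   offset  8: layout_version                  (4 bytes)
//   offset 12: layout_version_original         (4 bytes)
//   offset 16: build_id                        (4 bytes)
//   offset 20: txnid                           (8 bytes)
//   offset 28: sequence                        (8 bytes)
//   offset 36: thislogname (blocknum)          (8 bytes)
//   offset 44: older (blocknum)                (8 bytes)
//   offset 52: rollentry_resident_bytecount    (8 bytes)
//   offset 60: memarena size needed to load    (8 bytes)
//   offset 68: rollback entries, newest first  (rollentry_resident_bytecount bytes)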

static void
serialize_rollback_log_node_to_buf(ROLLBACK_LOG_NODE log, char *buf, size_t calculated_size, int UU(n_sub_blocks), struct sub_block UU(sub_block[])) {
    struct wbuf wb;
    wbuf_init(&wb, buf, calculated_size);
    { //Serialize rollback log to local wbuf
        wbuf_nocrc_literal_bytes(&wb, "tokuroll", 8);
        lazy_assert(log->layout_version == BRT_LAYOUT_VERSION);
        wbuf_nocrc_int(&wb, log->layout_version);
        wbuf_nocrc_int(&wb, log->layout_version_original);
        wbuf_nocrc_uint(&wb, BUILD_ID);
        wbuf_nocrc_TXNID(&wb, log->txnid);
        wbuf_nocrc_ulonglong(&wb, log->sequence);
        wbuf_nocrc_BLOCKNUM(&wb, log->thislogname);
        wbuf_nocrc_BLOCKNUM(&wb, log->older);
        wbuf_nocrc_ulonglong(&wb, log->rollentry_resident_bytecount);
        //Write down memarena size needed to restore
        wbuf_nocrc_ulonglong(&wb, memarena_total_size_in_use(log->rollentry_arena));
        {
            //Store rollback logs
            struct roll_entry *item;
            size_t done_before = wb.ndone;
            for (item = log->newest_logentry; item; item = item->prev) {
                toku_logger_rollback_wbuf_nocrc_write(&wb, item);
            }
            lazy_assert(done_before + log->rollentry_resident_bytecount == wb.ndone);
        }
    }
    lazy_assert(wb.ndone == wb.size);
    lazy_assert(calculated_size == wb.ndone);
}

static int
serialize_uncompressed_block_to_memory(char *uncompressed_buf,
                                       int n_sub_blocks,
                                       struct sub_block sub_block[/*n_sub_blocks*/],
                                       enum toku_compression_method method,
                                       /*out*/ size_t *n_bytes_to_write,
                                       /*out*/ char **bytes_to_write) {
    // allocate space for the compressed form of uncompressed_buf
    size_t compressed_len = get_sum_compressed_size_bound(n_sub_blocks, sub_block, method);
    size_t sub_block_header_len = sub_block_header_size(n_sub_blocks);
    size_t header_len = node_header_overhead + sub_block_header_len + sizeof(uint32_t); // node + sub_block + checksum
    char *XMALLOC_N(header_len + compressed_len, compressed_buf);
    if (compressed_buf == NULL)
        return errno;

    // copy the header
    memcpy(compressed_buf, uncompressed_buf, node_header_overhead);
    if (0) printf("First 4 bytes before compressing data are %02x%02x%02x%02x\n",
                  uncompressed_buf[node_header_overhead],   uncompressed_buf[node_header_overhead+1],
                  uncompressed_buf[node_header_overhead+2], uncompressed_buf[node_header_overhead+3]);

    // compress all of the sub blocks
    char *uncompressed_ptr = uncompressed_buf + node_header_overhead;
    char *compressed_ptr = compressed_buf + header_len;
    compressed_len = compress_all_sub_blocks(n_sub_blocks, sub_block, uncompressed_ptr, compressed_ptr, num_cores, brt_pool, method);

    //if (0) printf("Block %" PRId64 " Size before compressing %u, after compression %" PRIu64 "\n", blocknum.b, calculated_size - node_header_overhead, (uint64_t) compressed_len);

    // serialize the sub block header
    uint32_t *ptr = (uint32_t *)(compressed_buf + node_header_overhead);
    *ptr++ = toku_htod32(n_sub_blocks);
    for (int i = 0; i < n_sub_blocks; i++) {
        ptr[0] = toku_htod32(sub_block[i].compressed_size);
        ptr[1] = toku_htod32(sub_block[i].uncompressed_size);
        ptr[2] = toku_htod32(sub_block[i].xsum);
        ptr += 3;
    }

    // compute the header checksum and serialize it
    uint32_t header_length = (char *)ptr - (char *)compressed_buf;
    uint32_t xsum = x1764_memory(compressed_buf, header_length);
    *ptr = toku_htod32(xsum);

    *n_bytes_to_write = header_len + compressed_len;
    *bytes_to_write = compressed_buf;
    return 0;
}
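
// For reference, the on-disk block layout produced above:
//
//   [node header]              node_header_overhead bytes, copied verbatim
//   [n_sub_blocks]             4 bytes
//   [sub block header] x n     3 x 4 bytes each: compressed_size, uncompressed_size, xsum
//   [header checksum]          4 bytes, x1764 over everything above
//   [compressed sub blocks]    compressed_len bytes
//
// decompress_from_raw_block_into_rbuf() below parses exactly this layout.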

static int
toku_serialize_rollback_log_to_memory(ROLLBACK_LOG_NODE log,
                                      int UU(n_workitems), int UU(n_threads),
                                      enum toku_compression_method method,
                                      /*out*/ size_t *n_bytes_to_write,
                                      /*out*/ char **bytes_to_write) {
    // get the size of the serialized node
    size_t calculated_size = serialize_rollback_log_size(log);

    // choose sub block parameters
    int n_sub_blocks = 0, sub_block_size = 0;
    size_t data_size = calculated_size - node_header_overhead;
    choose_sub_block_size(data_size, max_sub_blocks, &sub_block_size, &n_sub_blocks);
    lazy_assert(0 < n_sub_blocks && n_sub_blocks <= max_sub_blocks);
    lazy_assert(sub_block_size > 0);

    // set the initial sub block size for all of the sub blocks
    struct sub_block sub_block[n_sub_blocks];
    for (int i = 0; i < n_sub_blocks; i++)
        sub_block_init(&sub_block[i]);
    set_all_sub_block_sizes(data_size, sub_block_size, n_sub_blocks, sub_block);

    // allocate space for the serialized node
    char *XMALLOC_N(calculated_size, buf);
    // serialize the node into buf
    serialize_rollback_log_node_to_buf(log, buf, calculated_size, n_sub_blocks, sub_block);

    //Compress and malloc buffer to write
    int result = serialize_uncompressed_block_to_memory(buf, n_sub_blocks, sub_block, method,
                                                        n_bytes_to_write, bytes_to_write);
    toku_free(buf);
    return result;
}

int
toku_serialize_rollback_log_to(int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log,
                               struct brt_header *h, int n_workitems, int n_threads,
                               BOOL for_checkpoint) {
    size_t n_to_write;
    char *compressed_buf;
    {
        int r = toku_serialize_rollback_log_to_memory(log, n_workitems, n_threads, h->compression_method, &n_to_write, &compressed_buf);
        if (r != 0) return r;
    }
    {
        lazy_assert(blocknum.b >= 0);
        DISKOFF offset;
        toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset,
                                      h, for_checkpoint); //dirties h
        lock_for_pwrite();
        toku_full_pwrite_extend(fd, compressed_buf, n_to_write, offset);
        unlock_for_pwrite();
    }
    toku_free(compressed_buf);
    log->dirty = 0; // See #1957.  Must set the node to be clean after serializing it so that it doesn't get written again on the next checkpoint or eviction.
    return 0;
}
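
// Note on the write path above: the block's disk location is (re)allocated via
// toku_blocknum_realloc_on_disk() before the write, and the pwrite itself runs
// under lock_for_pwrite(), so concurrent block writes do not interleave.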

static int
deserialize_rollback_log_from_rbuf(BLOCKNUM blocknum, u_int32_t fullhash, ROLLBACK_LOG_NODE *log_p,
                                   struct brt_header *h, struct rbuf *rb) {
    ROLLBACK_LOG_NODE MALLOC(result);
    int r;
    if (result == NULL) {
        r = errno;
        if (0) { died0: toku_free(result); }
        return r;
    }

    //printf("Deserializing %lld datasize=%d\n", off, datasize);
    bytevec magic;
    rbuf_literal_bytes(rb, &magic, 8);
    lazy_assert(!memcmp(magic, "tokuroll", 8));

    result->layout_version = rbuf_int(rb);
    lazy_assert(result->layout_version == BRT_LAYOUT_VERSION);
    result->layout_version_original = rbuf_int(rb);
    result->layout_version_read_from_disk = result->layout_version;
    result->build_id = rbuf_int(rb);
    result->dirty = FALSE;
    //TODO: Maybe add descriptor (or just descriptor version) here eventually?
    //TODO: This is hard.. everything is shared in a single dictionary.
    rbuf_TXNID(rb, &result->txnid);
    result->sequence = rbuf_ulonglong(rb);
    result->thislogname = rbuf_blocknum(rb);
    if (result->thislogname.b != blocknum.b) {
        r = toku_db_badformat();
        goto died0;
    }
    result->thishash = toku_cachetable_hash(h->cf, result->thislogname);
    if (result->thishash != fullhash) {
        r = toku_db_badformat();
        goto died0;
    }
    result->older = rbuf_blocknum(rb);
    result->older_hash = toku_cachetable_hash(h->cf, result->older);
    result->rollentry_resident_bytecount = rbuf_ulonglong(rb);

    size_t arena_initial_size = rbuf_ulonglong(rb);
    result->rollentry_arena = memarena_create_presized(arena_initial_size);
    if (0) { died1: memarena_close(&result->rollentry_arena); goto died0; }

    //Load rollback entries
    lazy_assert(rb->size > 4);
    //Start with empty list
    result->oldest_logentry = result->newest_logentry = NULL;
    while (rb->ndone < rb->size) {
        struct roll_entry *item;
        uint32_t rollback_fsize = rbuf_int(rb); //Already read 4.  Rest is 4 smaller.
        bytevec item_vec;
        rbuf_literal_bytes(rb, &item_vec, rollback_fsize - 4);
        unsigned char *item_buf = (unsigned char *)item_vec;
        r = toku_parse_rollback(item_buf, rollback_fsize - 4, &item, result->rollentry_arena);
        if (r != 0) {
            r = toku_db_badformat();
            goto died1;
        }
        //Add to head of list
        if (result->oldest_logentry) {
            result->oldest_logentry->prev = item;
            result->oldest_logentry = item;
            item->prev = NULL;
        }
        else {
            result->oldest_logentry = result->newest_logentry = item;
            item->prev = NULL;
        }
    }
    toku_free(rb->buf);
    rb->buf = NULL;
    *log_p = result;
    return 0;
}
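
// Note on the entry loop above: entries were serialized newest-to-oldest by
// following the prev pointers, so they are read back in that same order; each
// parsed item becomes the new oldest entry, which rebuilds the original
// prev-chain from newest_logentry down to oldest_logentry.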

static int
deserialize_rollback_log_from_rbuf_versioned(u_int32_t version, BLOCKNUM blocknum, u_int32_t fullhash,
                                             ROLLBACK_LOG_NODE *log,
                                             struct brt_header *h, struct rbuf *rb) {
    int r = 0;
    ROLLBACK_LOG_NODE rollback_log_node = NULL;
    invariant(version == BRT_LAYOUT_VERSION); //Rollback log nodes do not survive version changes.
    r = deserialize_rollback_log_from_rbuf(blocknum, fullhash, &rollback_log_node, h, rb);
    if (r == 0) {
        *log = rollback_log_node;
    }
    return r;
}

static int
decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
    toku_trace("decompress");
    // get the number of compressed sub blocks
    int n_sub_blocks;
    n_sub_blocks = toku_dtoh32(*(u_int32_t *)(&raw_block[node_header_overhead]));

    // verify the number of sub blocks
    invariant(0 <= n_sub_blocks && n_sub_blocks <= max_sub_blocks);

    { // verify the header checksum
        u_int32_t header_length = node_header_overhead + sub_block_header_size(n_sub_blocks);
        invariant(header_length <= raw_block_size);
        u_int32_t xsum = x1764_memory(raw_block, header_length);
        u_int32_t stored_xsum = toku_dtoh32(*(u_int32_t *)(raw_block + header_length));
        invariant(xsum == stored_xsum);
    }
    int r;

    // deserialize the sub block header
    struct sub_block sub_block[n_sub_blocks];
    u_int32_t *sub_block_header = (u_int32_t *)&raw_block[node_header_overhead + 4];
    for (int i = 0; i < n_sub_blocks; i++) {
        sub_block_init(&sub_block[i]);
        sub_block[i].compressed_size   = toku_dtoh32(sub_block_header[0]);
        sub_block[i].uncompressed_size = toku_dtoh32(sub_block_header[1]);
        sub_block[i].xsum              = toku_dtoh32(sub_block_header[2]);
        sub_block_header += 3;
    }

    // verify sub block sizes
    for (int i = 0; i < n_sub_blocks; i++) {
        u_int32_t compressed_size = sub_block[i].compressed_size;
        if (compressed_size <= 0 || compressed_size > (1 << 30)) { r = toku_db_badformat(); return r; }
        u_int32_t uncompressed_size = sub_block[i].uncompressed_size;
        if (0) printf("Block %" PRId64 " compressed size = %u, uncompressed size = %u\n", blocknum.b, compressed_size, uncompressed_size);
        if (uncompressed_size <= 0 || uncompressed_size > (1 << 30)) { r = toku_db_badformat(); return r; }
    }

    // sum up the uncompressed size of the sub blocks
    size_t uncompressed_size = get_sum_uncompressed_size(n_sub_blocks, sub_block);

    // allocate the uncompressed buffer
    size_t size = node_header_overhead + uncompressed_size;
    unsigned char *buf = toku_xmalloc(size);
    lazy_assert(buf);
    rbuf_init(rb, buf, size);

    // copy the uncompressed node header to the uncompressed buffer
    memcpy(rb->buf, raw_block, node_header_overhead);

    // point at the start of the compressed data (past the node header, the sub block header, and the header checksum)
    unsigned char *compressed_data = raw_block + node_header_overhead + sub_block_header_size(n_sub_blocks) + sizeof(u_int32_t);

    // point at the start of the uncompressed data
    unsigned char *uncompressed_data = rb->buf + node_header_overhead;

    // decompress all the compressed sub blocks into the uncompressed buffer
    r = decompress_all_sub_blocks(n_sub_blocks, sub_block, compressed_data, uncompressed_data, num_cores, brt_pool);
    if (r != 0) {
        fprintf(stderr, "%s:%d block %" PRId64 " failed %d at %p size %lu\n", __FUNCTION__, __LINE__, blocknum.b, r, raw_block, raw_block_size);
        dump_bad_block(raw_block, raw_block_size);
    }
    lazy_assert_zero(r);

    toku_trace("decompress done");
    rb->ndone = 0;
    return 0;
}
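
// Note: the function above validates a raw block before trusting it: the header
// checksum (x1764 over the node header and sub block headers) must match, and
// every sub block's compressed and uncompressed sizes must lie in (0, 2^30]
// before any decompression is attempted.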

static int
decompress_from_raw_block_into_rbuf_versioned(u_int32_t version, u_int8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
    // This function exists solely to accommodate future changes in compression.
    int r;
    switch (version) {
    case BRT_LAYOUT_VERSION_13:
    case BRT_LAYOUT_VERSION_14:
    case BRT_LAYOUT_VERSION:
        r = decompress_from_raw_block_into_rbuf(raw_block, raw_block_size, rb, blocknum);
        break;
    default:
        lazy_assert(FALSE);
    }
    return r;
}

static int
read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum,
                                            struct brt_header *h,
                                            struct rbuf *rb,
                                            /* out */ int *layout_version_p) {
    int r;
    if (0) printf("Deserializing Block %" PRId64 "\n", blocknum.b);
    if (h->panic) return h->panic;
    toku_trace("deserial start nopanic");

    // get the file offset and block size for the block
    DISKOFF offset, size;
    toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size);
    u_int8_t *XMALLOC_N(size, raw_block);
    {
        // read the (partially compressed) block
        ssize_t rlen = toku_os_pread(fd, raw_block, size, offset);
        lazy_assert((DISKOFF)rlen == size);
    }
    // get the layout_version
    int layout_version;
    {
        u_int8_t *magic = raw_block + uncompressed_magic_offset;
        if (memcmp(magic, "tokuleaf", 8) != 0 &&
            memcmp(magic, "tokunode", 8) != 0 &&
            memcmp(magic, "tokuroll", 8) != 0) {
            r = toku_db_badformat();
            goto cleanup;
        }
        u_int8_t *version = raw_block + uncompressed_version_offset;
        layout_version = toku_dtoh32(*(uint32_t *)version);
        if (layout_version < BRT_LAYOUT_MIN_SUPPORTED_VERSION || layout_version > BRT_LAYOUT_VERSION) {
            r = toku_db_badformat();
            goto cleanup;
        }
    }
    r = decompress_from_raw_block_into_rbuf_versioned(layout_version, raw_block, size, rb, blocknum);
    if (r != 0) goto cleanup;
    *layout_version_p = layout_version;
cleanup:
    if (r != 0) {
        if (rb->buf) toku_free(rb->buf);
        rb->buf = NULL;
    }
    if (raw_block) toku_free(raw_block);
    return r;
}
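
// Note: the read path above mirrors the write path: translate the blocknum to
// an (offset, size) pair via the block table, pread the raw block, validate the
// magic and layout version, then decompress through the version dispatcher.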

// Read rollback log node from file into struct.  Perform version upgrade if necessary.
int
toku_deserialize_rollback_log_from(int fd, BLOCKNUM blocknum, u_int32_t fullhash,
                                   ROLLBACK_LOG_NODE *logp, struct brt_header *h) {
    toku_trace("deserial start");
    int r;
    struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0};
    int layout_version = 0;

    r = read_and_decompress_block_from_fd_into_rbuf(fd, blocknum, h, &rb, &layout_version);
    if (r != 0) goto cleanup;
    {
        u_int8_t *magic = rb.buf + uncompressed_magic_offset;
        if (memcmp(magic, "tokuroll", 8) != 0) {
            r = toku_db_badformat();
            goto cleanup;
        }
    }
    r = deserialize_rollback_log_from_rbuf_versioned(layout_version, blocknum, fullhash, logp, h, &rb);
    toku_trace("deserial done");
cleanup:
    if (rb.buf) toku_free(rb.buf);
    return r;
}

#undef UPGRADE_STATUS_VALUE