Mirror of https://github.com/MariaDB/server.git, synced 2025-01-22 06:44:16 +01:00

commit 6d460bedc0 (parent 0aa48b2bf9)

[t:4326] Improve checkpoint status: footprint indicates caller, make status threadsafe, add indicator of other threads waiting for checkpoint_safe lock. Refs #4326.

git-svn-id: file:///svn/toku/tokudb@38451 c7de825b-a66e-492c-adef-691d508d4ae1

22 changed files with 73 additions and 42 deletions
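The commit message describes the new footprint scheme: toku_checkpoint() now computes footprint_offset = caller_id * 1000 and each progress point stores footprint_offset + step via SET_CHECKPOINT_FOOTPRINT (see the checkpoint.c hunk further below). A minimal sketch of decoding such a value follows; decode_footprint() is a hypothetical helper written only to illustrate the arithmetic visible in this diff, not part of TokuDB.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Hypothetical helper: split a footprint value into caller id and progress
 * step, assuming the encoding introduced by this commit
 * (footprint = caller_id * 1000 + step; 0 means no checkpoint in progress). */
static void decode_footprint(uint64_t footprint) {
    if (footprint == 0) {
        printf("no checkpoint in progress\n");
        return;
    }
    uint64_t caller_id = footprint / 1000;   /* e.g. 2 maps to TXN_COMMIT_CHECKPOINT */
    uint64_t step      = footprint % 1000;   /* progress point inside toku_checkpoint() */
    printf("caller_id=%" PRIu64 " step=%" PRIu64 "\n", caller_id, step);
}

int main(void) {
    decode_footprint(2040);   /* checkpoint requested at transaction commit, at step 40 */
    return 0;
}
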
@@ -86,6 +86,7 @@ typedef struct __toku_engine_status {
   u_int64_t checkpoint_last_lsn;     /* LSN of last complete checkpoint */
   u_int64_t checkpoint_count;        /* number of checkpoints taken */
   u_int64_t checkpoint_count_fail;   /* number of checkpoints failed */
+  u_int64_t checkpoint_waiters_now;  /* number of threads currently waiting for checkpoint_safe lock */
   u_int64_t cleaner_period;          /* delay between executions of cleaner */
   u_int64_t cleaner_iterations;      /* number of nodes to flush per cleaner execution */
   u_int64_t txn_begin;               /* number of transactions ever begun */

@@ -479,6 +479,7 @@ int main (int argc __attribute__((__unused__)), char *const argv[] __attribute__
     printf(" u_int64_t checkpoint_last_lsn;    /* LSN of last complete checkpoint */ \n");
     printf(" u_int64_t checkpoint_count;       /* number of checkpoints taken */ \n");
     printf(" u_int64_t checkpoint_count_fail;  /* number of checkpoints failed */ \n");
+    printf(" u_int64_t checkpoint_waiters_now; /* number of threads currently waiting for checkpoint_safe lock */ \n");
     printf(" u_int64_t cleaner_period;         /* delay between executions of cleaner */ \n");
     printf(" u_int64_t cleaner_iterations;     /* number of nodes to flush per cleaner execution */ \n");
     printf(" u_int64_t txn_begin;              /* number of transactions ever begun */ \n");

@@ -356,7 +356,7 @@ checkpoint_thread (void *cachetable_v)
     // This thread notices those changes by waiting on a condition variable.
 {
     CACHETABLE ct = cachetable_v;
-    int r = toku_checkpoint(ct, ct->logger, NULL, NULL, NULL, NULL);
+    int r = toku_checkpoint(ct, ct->logger, NULL, NULL, NULL, NULL, SCHEDULED_CHECKPOINT);
     if (r) {
         fprintf(stderr, "%s:%d Got error %d while doing checkpoint\n", __FILE__, __LINE__, r);
         abort(); // Don't quite know what to do with these errors.

@@ -196,57 +196,66 @@ toku_checkpoint_destroy(void) {
     return r;
 }
 
+#define SET_CHECKPOINT_FOOTPRINT(x) status.footprint = footprint_offset + x;
+
+
 // Take a checkpoint of all currently open dictionaries
 int
 toku_checkpoint(CACHETABLE ct, TOKULOGGER logger,
                 void (*callback_f)(void*), void * extra,
-                void (*callback2_f)(void*), void * extra2) {
+                void (*callback2_f)(void*), void * extra2,
+                checkpoint_caller_t caller_id) {
     int r;
+    int footprint_offset = (int) caller_id * 1000;
 
-    status.footprint = 10;
     assert(initialized);
-    // for #4341, we changed the order these locks are taken.
-    // to keep the status footprints the same, we moved those
-    // as well. That is why 30 comes before 20.
+    (void) __sync_fetch_and_add(&status.waiters_now, 1);
     checkpoint_safe_checkpoint_lock();
-    status.footprint = 30;
+    (void) __sync_fetch_and_sub(&status.waiters_now, 1);
+    SET_CHECKPOINT_FOOTPRINT(10)
     multi_operation_checkpoint_lock();
-    status.footprint = 20;
+    SET_CHECKPOINT_FOOTPRINT(20)
     ydb_lock();
 
-    status.footprint = 40;
+    SET_CHECKPOINT_FOOTPRINT(30)
     status.time_last_checkpoint_begin = time(NULL);
     r = toku_cachetable_begin_checkpoint(ct, logger);
 
     multi_operation_checkpoint_unlock();
     ydb_unlock();
 
-    status.footprint = 50;
+    SET_CHECKPOINT_FOOTPRINT(40);
     if (r==0) {
         if (callback_f)
             callback_f(extra);      // callback is called with checkpoint_safe_lock still held
         r = toku_cachetable_end_checkpoint(ct, logger, ydb_lock, ydb_unlock, callback2_f, extra2);
     }
+    SET_CHECKPOINT_FOOTPRINT(50);
     if (r==0 && logger) {
         last_completed_checkpoint_lsn = logger->last_completed_checkpoint_lsn;
         r = toku_logger_maybe_trim_log(logger, last_completed_checkpoint_lsn);
         status.last_lsn = last_completed_checkpoint_lsn.lsn;
     }
 
-    status.footprint = 60;
+    SET_CHECKPOINT_FOOTPRINT(60);
     status.time_last_checkpoint_end = time(NULL);
     status.time_last_checkpoint_begin_complete = status.time_last_checkpoint_begin;
-    checkpoint_safe_checkpoint_unlock();
-    status.footprint = 0;
 
     if (r == 0)
         status.checkpoint_count++;
     else
         status.checkpoint_count_fail++;
 
+    status.footprint = 0;
+    checkpoint_safe_checkpoint_unlock();
     return r;
 }
 
+#undef SET_CHECKPOINT_FOOTPRINT
+
+// Can we get rid of this (placating drd), now that all status is updated when holding the checkpoint_safe lock?
+
 #include <valgrind/drd.h>
 void __attribute__((__constructor__)) toku_checkpoint_drd_ignore(void);
 void

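Two of the changes in the hunk above are worth calling out: the status counters are now updated before checkpoint_safe_checkpoint_unlock(), so concurrent readers cannot observe a half-updated status, and the new waiters_now indicator is maintained with GCC atomic builtins around the blocking lock acquisition. The following is a standalone sketch of that waiter-count idiom, with a pthread mutex standing in for the checkpoint_safe lock; the names are illustrative, not TokuDB's.

#include <pthread.h>
#include <stdint.h>

/* Sketch of the waiter-count idiom used in toku_checkpoint(): the counter is
 * bumped with a GCC atomic builtin before blocking on the lock and dropped
 * once the lock is held, so a status reader sees how many threads are queued
 * behind the checkpoint currently holding the lock. */
static pthread_mutex_t safe_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint64_t waiters_now = 0;

static void take_safe_lock(void) {
    (void) __sync_fetch_and_add(&waiters_now, 1);  /* announce that we are waiting */
    pthread_mutex_lock(&safe_lock);                /* may block behind a running checkpoint */
    (void) __sync_fetch_and_sub(&waiters_now, 1);  /* no longer waiting; lock is held */
}

static void release_safe_lock(void) {
    pthread_mutex_unlock(&safe_lock);
}
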
@@ -20,8 +20,8 @@ u_int32_t toku_get_checkpoint_period_unlocked(CACHETABLE ct);
 
 /******
  *
- * NOTE: multi_operation_lock is highest level lock
- *       checkpoint_safe_lock is next level lock
+ * NOTE: checkpoint_safe_lock is highest level lock
+ *       multi_operation_lock is next level lock
  *       ydb_big_lock is next level lock
  *
  * Locks must always be taken in this sequence (highest level first).

@@ -61,12 +61,22 @@ int toku_checkpoint_init(void (*ydb_lock_callback)(void), void (*ydb_unlock_call
 
 int toku_checkpoint_destroy(void);
 
+typedef enum {SCHEDULED_CHECKPOINT = 0,  // "normal" checkpoint taken on checkpoint thread
+              CLIENT_CHECKPOINT = 1,     // induced by client, such as FLUSH LOGS or SAVEPOINT
+              TXN_COMMIT_CHECKPOINT = 2,
+              STARTUP_CHECKPOINT = 3,
+              UPGRADE_CHECKPOINT = 4,
+              RECOVERY_CHECKPOINT = 5,
+              SHUTDOWN_CHECKPOINT = 6} checkpoint_caller_t;
+
 // Take a checkpoint of all currently open dictionaries
 // Callbacks are called during checkpoint procedure while checkpoint_safe lock is still held.
 // Callbacks are primarily intended for use in testing.
+// caller_id identifies why the checkpoint is being taken.
 int toku_checkpoint(CACHETABLE ct, TOKULOGGER logger,
                     void (*callback_f)(void*), void * extra,
-                    void (*callback2_f)(void*), void * extra2);
+                    void (*callback2_f)(void*), void * extra2,
+                    checkpoint_caller_t caller_id);

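With the new signature, every caller passes a checkpoint_caller_t as the last argument, as the call-site changes throughout this commit show. A minimal usage sketch, assuming the declarations above are available from checkpoint.h (include path assumed) and that ct and logger are an already-open cachetable and logger:

#include "checkpoint.h"   /* declares toku_checkpoint() and checkpoint_caller_t; path assumed */

/* Sketch of a caller-annotated checkpoint, following the pattern the test
 * changes in this commit use: no callbacks, CLIENT_CHECKPOINT as the reason. */
static int checkpoint_for_client(CACHETABLE ct, TOKULOGGER logger) {
    return toku_checkpoint(ct, logger,
                           NULL, NULL,           /* callback_f,  extra  */
                           NULL, NULL,           /* callback2_f, extra2 */
                           CLIENT_CHECKPOINT);
}
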
@@ -77,13 +87,14 @@ int toku_checkpoint(CACHETABLE ct, TOKULOGGER logger,
  *    (If checkpoint is in progress, it may overwrite status info while it is being read.)
  *****/
 typedef struct {
-    u_int64_t footprint;
+    uint64_t footprint;
     time_t time_last_checkpoint_begin_complete;
     time_t time_last_checkpoint_begin;
     time_t time_last_checkpoint_end;
     uint64_t last_lsn;
     uint64_t checkpoint_count;
     uint64_t checkpoint_count_fail;
+    uint64_t waiters_now;          // how many threads are currently waiting for the checkpoint_safe lock
 } CHECKPOINT_STATUS_S, *CHECKPOINT_STATUS;
 
 void toku_checkpoint_get_status(CHECKPOINT_STATUS stat);

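The struct above is exposed through toku_checkpoint_get_status(), and the surrounding header text warns that a reader may race with an in-progress checkpoint. A short sketch of dumping the snapshot, including the new waiters_now counter (checkpoint.h include path assumed):

#include <stdio.h>
#include <inttypes.h>
#include "checkpoint.h"   /* CHECKPOINT_STATUS_S, toku_checkpoint_get_status(); path assumed */

/* Copy the current checkpoint status and print a few fields.  The copy may be
 * slightly stale if a checkpoint is running, which is acceptable for
 * monitoring output. */
static void print_checkpoint_status(void) {
    CHECKPOINT_STATUS_S s;
    toku_checkpoint_get_status(&s);
    printf("footprint              %" PRIu64 "\n", s.footprint);
    printf("checkpoint_count       %" PRIu64 "\n", s.checkpoint_count);
    printf("checkpoint_count_fail  %" PRIu64 "\n", s.checkpoint_count_fail);
    printf("waiters_now            %" PRIu64 "\n", s.waiters_now);
}
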
@@ -147,7 +147,7 @@ upgrade_log(const char *env_dir, const char *log_dir, LSN last_lsn) { // the rea
         assert(r==0);
     }
     { //Checkpoint
-        r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL); //fsyncs log dir
+        r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, UPGRADE_CHECKPOINT); //fsyncs log dir
         assert(r == 0);
     }
     { //Close cachetable and logger

@@ -1347,7 +1347,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di
     // checkpoint
     tnow = time(NULL);
     fprintf(stderr, "%.24s Tokudb recovery making a checkpoint\n", ctime(&tnow));
-    r = toku_checkpoint(renv->ct, renv->logger, NULL, NULL, NULL, NULL);
+    r = toku_checkpoint(renv->ct, renv->logger, NULL, NULL, NULL, NULL, RECOVERY_CHECKPOINT);
     assert(r == 0);
     tnow = time(NULL);
     fprintf(stderr, "%.24s Tokudb recovery done\n", ctime(&tnow));

@@ -99,7 +99,7 @@ do_update (void *UU(ignore))
 static void*
 do_checkpoint (void *UU(v))
 {
-    int r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL);
+    int r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(r == 0);
     return 0;
 }

@@ -154,14 +154,14 @@ static void checkpoint_pending(void) {
     //printf("E43\n");
     n_flush = n_write_me = n_keep_me = n_fetch = 0; expect_value = 43;
 
-    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL);
+    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(r == 0);
     assert(n_flush == N && n_write_me == N && n_keep_me == N);
 
     // a subsequent checkpoint should cause no flushes, or writes since all of the items are clean
     n_flush = n_write_me = n_keep_me = n_fetch = 0;
 
-    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL);
+    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(r == 0);
     assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
 

@@ -83,7 +83,7 @@ static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) {
     // all items should be kept in the cachetable
     n_flush = n_write_me = n_keep_me = n_fetch = 0;
 
-    r = toku_checkpoint(ct, NULL, checkpoint_callback, &callback_was_called, checkpoint_callback2, &callback2_was_called);
+    r = toku_checkpoint(ct, NULL, checkpoint_callback, &callback_was_called, checkpoint_callback2, &callback2_was_called, CLIENT_CHECKPOINT);
     assert(r == 0);
     assert(callback_was_called != 0);
     assert(callback2_was_called != 0);

@@ -115,7 +115,7 @@ static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) {
     n_flush = n_write_me = n_keep_me = n_fetch = 0;
 
 
-    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL);
+    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(r == 0);
     assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
 

@@ -109,7 +109,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir
     // all items should be kept in the cachetable
     n_flush = n_write_me = n_keep_me = n_fetch = 0;
 
-    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL);
+    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(r == 0);
     assert(n_flush == n && n_write_me == n && n_keep_me == n);
 

@@ -138,7 +138,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir
     // a subsequent checkpoint should cause no flushes, or writes since all of the items are clean
     n_flush = n_write_me = n_keep_me = n_fetch = 0;
 
-    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL);
+    r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(r == 0);
     assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
 

@@ -44,7 +44,7 @@ static void test_it (int N) {
     r = toku_txn_commit_txn(txn, FALSE, do_yield, NULL, NULL, NULL, false); CKERR(r);
     toku_txn_close_txn(txn);
 
-    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL); CKERR(r);
+    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
     r = toku_close_brt(brt, NULL); CKERR(r);
 
     unsigned int rands[N];

@@ -66,7 +66,7 @@ static void test_it (int N) {
         toku_txn_close_txn(txn);
 
 
-        r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL); CKERR(r);
+        r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
         r = toku_close_brt(brt, NULL); CKERR(r);
 
         if (verbose) printf("i=%d\n", i);

@@ -93,7 +93,7 @@ static void test_it (int N) {
         toku_txn_close_txn(txn);
 
 
-        r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL); CKERR(r);
+        r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
         r = toku_close_brt(brt, NULL); CKERR(r);
 
         if (verbose) printf("d=%d\n", i);

@@ -109,12 +109,12 @@ static void test_it (int N) {
         assert(is_empty);
     }
 
-    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL); CKERR(r);
+    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
     r = toku_close_brt(brt, NULL); CKERR(r);
 
-    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL); CKERR(r);
+    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
     r = toku_logger_close_rollback(logger, FALSE); CKERR(r);
-    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL); CKERR(r);
+    r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
     r = toku_cachetable_close(&ct); CKERR(r);
     r = toku_logger_close(&logger); assert(r==0);
 

@@ -75,7 +75,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
     error = toku_close_brt(brt, NULL);
     assert(error == 0);
 
-    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL);
+    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(error == 0);
 
     error = toku_logger_close_rollback(logger, FALSE);

@@ -176,7 +176,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
     error = toku_close_brt(brt, NULL);
     assert(error == 0);
 
-    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL);
+    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(error == 0);
 
     error = toku_logger_close_rollback(logger, FALSE);

@@ -79,7 +79,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
     error = toku_close_brt(brt, NULL);
     assert(error == 0);
 
-    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL);
+    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(error == 0);
     error = toku_logger_close_rollback(logger, FALSE);
     assert(error == 0);

@@ -75,7 +75,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
     error = toku_close_brt(brt, NULL);
     assert(error == 0);
 
-    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL);
+    error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
     assert(error == 0);
     error = toku_logger_close_rollback(logger, FALSE);
     assert(error == 0);

@@ -51,7 +51,7 @@ static void *startb (void *n) {
     assert(n==NULL);
     int count=0;
     while (!done) {
-        int r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL); assert(r==0);
+        int r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); assert(r==0);
         count++;
     }
     printf("count=%d\n", count);

src/ydb.c (13 changes)

@@ -1005,7 +1005,7 @@ toku_env_open(DB_ENV * env, const char *home, u_int32_t flags, int mode) {
             assert(r==0);
         }
         toku_ydb_unlock();
-        r = toku_checkpoint(env->i->cachetable, env->i->logger, NULL, NULL, NULL, NULL);
+        r = toku_checkpoint(env->i->cachetable, env->i->logger, NULL, NULL, NULL, NULL, STARTUP_CHECKPOINT);
         assert(r==0);
         toku_ydb_lock();
         env_fs_poller(env); // get the file system state at startup

@@ -1071,7 +1071,7 @@ toku_env_close(DB_ENV * env, u_int32_t flags) {
         toku_ydb_unlock(); // ydb lock must not be held when shutting down minicron
         toku_cachetable_minicron_shutdown(env->i->cachetable);
         if (env->i->logger) {
-            r = toku_checkpoint(env->i->cachetable, env->i->logger, NULL, NULL, NULL, NULL);
+            r = toku_checkpoint(env->i->cachetable, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT);
             if (r) {
                 err_msg = "Cannot close environment (error during checkpoint)\n";
                 toku_ydb_do_error(env, r, "%s", err_msg);

@@ -1093,7 +1093,7 @@ toku_env_close(DB_ENV * env, u_int32_t flags) {
                 goto panic_and_quit_early;
             }
             //Do a second checkpoint now that the rollback cachefile is closed.
-            r = toku_checkpoint(env->i->cachetable, env->i->logger, NULL, NULL, NULL, NULL);
+            r = toku_checkpoint(env->i->cachetable, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT);
             if (r) {
                 err_msg = "Cannot close environment (error during checkpoint)\n";
                 toku_ydb_do_error(env, r, "%s", err_msg);

@@ -1437,7 +1437,8 @@ static int
 toku_env_txn_checkpoint(DB_ENV * env, u_int32_t kbyte __attribute__((__unused__)), u_int32_t min __attribute__((__unused__)), u_int32_t flags __attribute__((__unused__))) {
     int r = toku_checkpoint(env->i->cachetable, env->i->logger,
                             checkpoint_callback_f, checkpoint_callback_extra,
-                            checkpoint_callback2_f, checkpoint_callback2_extra);
+                            checkpoint_callback2_f, checkpoint_callback2_extra,
+                            CLIENT_CHECKPOINT);
     if (r) {
         // Panicking the whole environment may be overkill, but I'm not sure what else to do.
         env_panic(env, r, "checkpoint error\n");

@@ -1931,6 +1932,7 @@ env_get_engine_status(DB_ENV * env, ENGINE_STATUS * engstat, char * env_panic_st
             engstat->checkpoint_last_lsn = cpstat.last_lsn;
             engstat->checkpoint_count = cpstat.checkpoint_count;
             engstat->checkpoint_count_fail = cpstat.checkpoint_count_fail;
+            engstat->checkpoint_waiters_now = cpstat.waiters_now;
         }
         engstat->cleaner_period = toku_get_cleaner_period_unlocked(env->i->cachetable);
         engstat->cleaner_iterations = toku_get_cleaner_iterations_unlocked(env->i->cachetable);

@@ -2273,6 +2275,7 @@ env_get_engine_status_text(DB_ENV * env, char * buff, int bufsiz) {
         n += snprintf(buff + n, bufsiz - n, "checkpoint_last_lsn %"PRIu64"\n", engstat.checkpoint_last_lsn);
         n += snprintf(buff + n, bufsiz - n, "checkpoint_count %"PRIu64"\n", engstat.checkpoint_count);
         n += snprintf(buff + n, bufsiz - n, "checkpoint_count_fail %"PRIu64"\n", engstat.checkpoint_count_fail);
+        n += snprintf(buff + n, bufsiz - n, "checkpoint_waiters_now %"PRIu64"\n", engstat.checkpoint_waiters_now);
         n += snprintf(buff + n, bufsiz - n, "cleaner_period %"PRIu64"\n", engstat.cleaner_period);
         n += snprintf(buff + n, bufsiz - n, "cleaner_iterations %"PRIu64"\n", engstat.cleaner_iterations);
         n += snprintf(buff + n, bufsiz - n, "txn_begin %"PRIu64"\n", engstat.txn_begin);

@@ -2906,7 +2909,7 @@ locked_txn_commit_with_progress(DB_TXN *txn, u_int32_t flags,
     toku_ydb_unlock();
     assert(r==0);
     if (toku_txn_requires_checkpoint(ttxn)) {
-        toku_checkpoint(txn->mgrp->i->cachetable, txn->mgrp->i->logger, NULL, NULL, NULL, NULL);
+        toku_checkpoint(txn->mgrp->i->cachetable, txn->mgrp->i->logger, NULL, NULL, NULL, NULL, TXN_COMMIT_CHECKPOINT);
     }
     toku_multi_operation_client_lock(); //Cannot checkpoint during a commit.
     toku_ydb_lock();