MDEV-16045: Allocate log_sys statically

There is only one redo log subsystem in InnoDB. Allocate the object
statically, to avoid unnecessary dereferencing of the pointer.

log_t::create(): Renamed from log_sys_init().

log_t::close(): Renamed from log_shutdown().

log_t::checkpoint_buf_ptr: Remove. Allocate log_t::checkpoint_buf
statically.
This commit is contained in:
Marko Mäkelä 2018-04-27 10:06:14 +03:00
parent 715e4f4320
commit d73a898d64
19 changed files with 458 additions and 463 deletions

View file

@ -2445,7 +2445,7 @@ lsn_t
xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
{
lsn_t scanned_lsn = start_lsn;
const byte* log_block = log_sys->buf;
const byte* log_block = log_sys.buf;
bool more_data = false;
for (ulint scanned_checkpoint = 0;
@ -2494,7 +2494,7 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
recv_sys_justify_left_parsing_buf();
log_sys->log.scanned_lsn = scanned_lsn;
log_sys.log.scanned_lsn = scanned_lsn;
end_lsn = copy == COPY_LAST
? ut_uint64_align_up(scanned_lsn, OS_FILE_LOG_BLOCK_SIZE)
@ -2502,10 +2502,10 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
if (ulint write_size = ulint(end_lsn - start_lsn)) {
if (srv_encrypt_log) {
log_crypt(log_sys->buf, start_lsn, write_size);
log_crypt(log_sys.buf, start_lsn, write_size);
}
if (ds_write(dst_log_file, log_sys->buf, write_size)) {
if (ds_write(dst_log_file, log_sys.buf, write_size)) {
msg("mariabackup: Error: "
"write to logfile failed\n");
return(0);
@ -2544,7 +2544,7 @@ xtrabackup_copy_logfile(copy_logfile copy)
lsn_t lsn= start_lsn;
for(int retries= 0; retries < 100; retries++) {
if (log_group_read_log_seg(log_sys->buf, &log_sys->log,
if (log_group_read_log_seg(log_sys.buf, &log_sys.log,
&lsn, end_lsn)){
break;
}
@ -2565,7 +2565,7 @@ xtrabackup_copy_logfile(copy_logfile copy)
}
} while (start_lsn == end_lsn);
ut_ad(start_lsn == log_sys->log.scanned_lsn);
ut_ad(start_lsn == log_sys.log.scanned_lsn);
msg_ts(">> log scanned up to (" LSN_PF ")\n", start_lsn);
@ -3656,9 +3656,9 @@ xtrabackup_backup_low()
log_mutex_enter();
if (recv_find_max_checkpoint(&max_cp_field) == DB_SUCCESS
&& log_sys->log.format != 0) {
&& log_sys.log.format != 0) {
metadata_to_lsn = mach_read_from_8(
log_sys->checkpoint_buf + LOG_CHECKPOINT_LSN);
log_sys.checkpoint_buf + LOG_CHECKPOINT_LSN);
msg("mariabackup: The latest check point"
" (for incremental): '" LSN_PF "'\n",
metadata_to_lsn);
@ -3818,7 +3818,7 @@ fail:
os_aio_init(srv_n_read_io_threads, srv_n_write_io_threads,
SRV_MAX_N_PENDING_SYNC_IOS);
log_sys_init();
log_sys.create();
log_init(srv_n_log_files);
fil_space_t* space = fil_space_create(
"innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0,
@ -3894,7 +3894,7 @@ log_fail:
goto fail;
}
if (log_sys->log.format == 0) {
if (log_sys.log.format == 0) {
old_format:
msg("mariabackup: Error: cannot process redo log"
" before MariaDB 10.2.2\n");
@ -3902,14 +3902,14 @@ old_format:
goto log_fail;
}
ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT)
ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT)
& ~LOG_HEADER_FORMAT_ENCRYPTED));
const byte* buf = log_sys->checkpoint_buf;
const byte* buf = log_sys.checkpoint_buf;
reread_log_header:
checkpoint_lsn_start = log_sys->log.lsn;
checkpoint_no_start = log_sys->next_checkpoint_no;
checkpoint_lsn_start = log_sys.log.lsn;
checkpoint_no_start = log_sys.next_checkpoint_no;
err = recv_find_max_checkpoint(&max_cp_field);
@ -3917,14 +3917,14 @@ reread_log_header:
goto log_fail;
}
if (log_sys->log.format == 0) {
if (log_sys.log.format == 0) {
goto old_format;
}
ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT)
ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT)
& ~LOG_HEADER_FORMAT_ENCRYPTED));
log_group_header_read(&log_sys->log, max_cp_field);
log_group_header_read(&log_sys.log, max_cp_field);
if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) {
goto reread_log_header;
@ -3950,7 +3950,7 @@ reread_log_header:
/* label it */
byte MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) log_hdr[OS_FILE_LOG_BLOCK_SIZE];
memset(log_hdr, 0, sizeof log_hdr);
mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys->log.format);
mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys.log.format);
mach_write_to_8(LOG_HEADER_START_LSN + log_hdr, checkpoint_lsn_start);
strcpy(reinterpret_cast<char*>(LOG_HEADER_CREATOR + log_hdr),
"Backup " MYSQL_SERVER_VERSION);
@ -4936,7 +4936,7 @@ xtrabackup_prepare_func(char** argv)
ut_d(sync_check_enable());
ut_crc32_init();
recv_sys_init();
log_sys_init();
log_sys.create();
recv_recovery_on = true;
#ifdef WITH_INNODB_DISALLOW_WRITES
@ -4970,7 +4970,7 @@ xtrabackup_prepare_func(char** argv)
os_event_destroy(srv_allow_writes_event);
#endif
innodb_free_param();
log_shutdown();
log_sys.close();
sync_check_close();
if (!ok) goto error_cleanup;
}

View file

@ -1,7 +1,7 @@
--source include/have_innodb.inc
#
# MDEV-11705: InnoDB: Failing assertion: (&log_sys->mutex)->is_owned() if server started with innodb-scrub-log
# MDEV-11705: InnoDB: Failing assertion: (&log_sys.mutex)->is_owned() if server started with innodb-scrub-log
#
create table t1(a int not null primary key auto_increment,

View file

@ -725,7 +725,7 @@ BtrBulk::pageCommit(
void
BtrBulk::logFreeCheck()
{
if (log_sys->check_flush_or_checkpoint) {
if (log_sys.check_flush_or_checkpoint) {
release();
log_free_check();

View file

@ -1191,11 +1191,11 @@ buf_madvise_do_dump()
buf_pool_t* buf_pool;
buf_chunk_t* chunk;
/* mirrors allocation in log_sys_init() */
if (log_sys->buf) {
ret+= madvise(log_sys->first_in_use
? log_sys->buf
: log_sys->buf - srv_log_buffer_size,
/* mirrors allocation in log_t::create() */
if (log_sys.buf) {
ret+= madvise(log_sys.first_in_use
? log_sys.buf
: log_sys.buf - srv_log_buffer_size,
srv_log_buffer_size * 2,
MADV_DODUMP);
}

View file

@ -2439,7 +2439,7 @@ page_cleaner_flush_pages_recommendation(
cur_lsn = log_get_lsn_nowait();
/* log_get_lsn_nowait tries to get log_sys->mutex with
/* log_get_lsn_nowait tries to get log_sys.mutex with
mutex_enter_nowait, if this does not succeed function
returns 0, do not use that value to update stats. */
if (cur_lsn == 0) {

View file

@ -3122,7 +3122,7 @@ func_exit:
log_mutex_enter();
}
/* log_sys->mutex is above fil_system.mutex in the latching order */
/* log_sys.mutex is above fil_system.mutex in the latching order */
ut_ad(log_mutex_own());
mutex_enter(&fil_system.mutex);
ut_ad(space->name == old_space_name);
@ -5120,12 +5120,12 @@ fil_names_dirty(
{
ut_ad(log_mutex_own());
ut_ad(recv_recovery_is_on());
ut_ad(log_sys->lsn != 0);
ut_ad(log_sys.lsn != 0);
ut_ad(space->max_lsn == 0);
ut_d(fil_space_validate_for_mtr_commit(space));
UT_LIST_ADD_LAST(fil_system.named_spaces, space);
space->max_lsn = log_sys->lsn;
space->max_lsn = log_sys.lsn;
}
/** Write MLOG_FILE_NAME records when a non-predefined persistent
@ -5140,7 +5140,7 @@ fil_names_dirty_and_write(
{
ut_ad(log_mutex_own());
ut_d(fil_space_validate_for_mtr_commit(space));
ut_ad(space->max_lsn == log_sys->lsn);
ut_ad(space->max_lsn == log_sys.lsn);
UT_LIST_ADD_LAST(fil_system.named_spaces, space);
fil_names_write(space, mtr);
@ -5177,8 +5177,8 @@ fil_names_clear(
ut_ad(log_mutex_own());
if (log_sys->append_on_checkpoint) {
mtr_write_log(log_sys->append_on_checkpoint);
if (log_sys.append_on_checkpoint) {
mtr_write_log(log_sys.append_on_checkpoint);
do_write = true;
}

View file

@ -18562,16 +18562,16 @@ checkpoint_now_set(
check function */
{
if (*(my_bool*) save) {
while (log_sys->last_checkpoint_lsn
while (log_sys.last_checkpoint_lsn
+ SIZE_OF_MLOG_CHECKPOINT
+ (log_sys->append_on_checkpoint != NULL
? log_sys->append_on_checkpoint->size() : 0)
< log_sys->lsn) {
+ (log_sys.append_on_checkpoint != NULL
? log_sys.append_on_checkpoint->size() : 0)
< log_sys.lsn) {
log_make_checkpoint_at(LSN_MAX, TRUE);
fil_flush_file_spaces(FIL_TYPE_LOG);
}
dberr_t err = fil_write_flushed_lsn(log_sys->lsn);
dberr_t err = fil_write_flushed_lsn(log_sys.lsn);
if (err != DB_SUCCESS) {
ib::warn() << "Checkpoint set failed " << err;

View file

@ -82,7 +82,7 @@ struct fil_space_t {
/*!< LSN of the most recent
fil_names_write_if_was_clean().
Reset to 0 by fil_names_clear().
Protected by log_sys->mutex.
Protected by log_sys.mutex.
If and only if this is nonzero, the
tablespace will be in named_spaces. */
bool stop_ios;/*!< true if we want to rename the
@ -286,7 +286,7 @@ struct fil_space_t {
struct fil_node_t {
/** tablespace containing this file */
fil_space_t* space;
/** file name; protected by fil_system.mutex and log_sys->mutex. */
/** file name; protected by fil_system.mutex and log_sys.mutex. */
char* name;
/** file handle (valid if is_open) */
pfs_os_file_t handle;
@ -628,7 +628,7 @@ public:
for which a MLOG_FILE_NAME
record has been written since
the latest redo log checkpoint.
Protected only by log_sys->mutex. */
Protected only by log_sys.mutex. */
UT_LIST_BASE_NODE_T(fil_space_t) rotation_list;
/*!< list of all file spaces needing
key rotation.*/
@ -1326,8 +1326,8 @@ fil_names_write_if_was_clean(
}
const bool was_clean = space->max_lsn == 0;
ut_ad(space->max_lsn <= log_sys->lsn);
space->max_lsn = log_sys->lsn;
ut_ad(space->max_lsn <= log_sys.lsn);
space->max_lsn = log_sys.lsn;
if (was_clean) {
fil_names_dirty_and_write(space, mtr);

View file

@ -56,7 +56,7 @@ step which modifies the database, is started */
typedef ulint (*log_checksum_func_t)(const byte* log_block);
/** Pointer to the log checksum calculation function. Protected with
log_sys->mutex. */
log_sys.mutex. */
extern log_checksum_func_t log_checksum_algorithm_ptr;
/** Append a string to the log.
@ -136,7 +136,7 @@ log_get_flush_lsn(void);
/*=============*/
/****************************************************************
Gets the log group capacity. It is OK to read the value without
holding log_sys->mutex because it is constant.
holding log_sys.mutex because it is constant.
@return log group capacity */
UNIV_INLINE
lsn_t
@ -150,9 +150,6 @@ UNIV_INLINE
lsn_t
log_get_max_modified_age_async(void);
/*================================*/
/** Initializes the redo logging subsystem. */
void
log_sys_init();
/** Initialize the redo log.
@param[in] n_files number of files */
@ -233,7 +230,7 @@ shutdown. This function also writes all log in log files to the log archive. */
void
logs_empty_and_mark_files_at_shutdown(void);
/*=======================================*/
/** Read a log group header page to log_sys->checkpoint_buf.
/** Read a log group header page to log_sys.checkpoint_buf.
@param[in] group log group
@param[in]	header	0 or LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 */
void
@ -406,9 +403,6 @@ Closes all log groups. */
void
log_group_close_all(void);
/*=====================*/
/** Shut down the redo log subsystem. */
void
log_shutdown();
/** Whether to generate and require checksums on the redo log pages */
extern my_bool innodb_log_checksums;
@ -443,7 +437,7 @@ extern my_bool innodb_log_checksums;
from this offset in this log block,
if value not 0 */
#define LOG_BLOCK_CHECKPOINT_NO 8 /* 4 lower bytes of the value of
log_sys->next_checkpoint_no when the
log_sys.next_checkpoint_no when the
log block was last written to: if the
block has not yet been written full,
this value is only updated before a
@ -544,9 +538,9 @@ typedef ib_mutex_t FlushOrderMutex;
/** Log group consists of a number of log files, each of the same size; a log
group is implemented as a space in the sense of the module fil0fil.
Currently, this is only protected by log_sys->mutex. However, in the case
Currently, this is only protected by log_sys.mutex. However, in the case
of log_write_up_to(), we will access some members only with the protection
of log_sys->write_mutex, which should affect nothing for now. */
of log_sys.write_mutex, which should affect nothing for now. */
struct log_group_t{
/** number of files in the group */
ulint n_files;
@ -588,25 +582,22 @@ struct log_group_t{
/** Redo log buffer */
struct log_t{
char pad1[CACHE_LINE_SIZE];
/*!< Padding to prevent other memory
update hotspots from residing on the
same memory cache line */
MY_ALIGNED(CACHE_LINE_SIZE)
lsn_t lsn; /*!< log sequence number */
ulong buf_free; /*!< first free offset within the log
buffer in use */
char pad2[CACHE_LINE_SIZE];/*!< Padding */
MY_ALIGNED(CACHE_LINE_SIZE)
LogSysMutex mutex; /*!< mutex protecting the log */
char pad3[CACHE_LINE_SIZE]; /*!< Padding */
MY_ALIGNED(CACHE_LINE_SIZE)
LogSysMutex write_mutex; /*!< mutex protecting writing to log
file and accessing to log_group_t */
char pad4[CACHE_LINE_SIZE];/*!< Padding */
MY_ALIGNED(CACHE_LINE_SIZE)
FlushOrderMutex log_flush_order_mutex;/*!< mutex to serialize access to
the flush list when we are putting
dirty blocks in the list. The idea
behind this mutex is to be able
to release log_sys->mutex during
to release log_sys.mutex during
mtr_commit and still ensure that
insertions in the flush_list happen
in the LSN order. */
@ -636,7 +627,7 @@ struct log_t{
peeked at by log_free_check(), which
does not reserve the log mutex */
/** the redo log */
log_group_t log;
log_group_t log;
/** The fields involved in the log buffer flush @{ */
@ -707,7 +698,7 @@ struct log_t{
/*!< extra redo log records to write
during a checkpoint, or NULL if none.
The pointer is protected by
log_sys->mutex, and the data must
log_sys.mutex, and the data must
remain constant as long as this
pointer is not NULL. */
ulint n_pending_checkpoint_writes;
@ -717,62 +708,79 @@ struct log_t{
checkpoint write is running; a thread
should wait for this without owning
the log mutex */
byte* checkpoint_buf_ptr;/* unaligned checkpoint header */
byte* checkpoint_buf; /*!< checkpoint header is read to this
buffer */
/** buffer for checkpoint header */
MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE)
byte checkpoint_buf[OS_FILE_LOG_BLOCK_SIZE];
/* @} */
/** @return whether the redo log is encrypted */
bool is_encrypted() const
{
return(log.is_encrypted());
}
private:
bool m_initialised;
public:
/**
Constructor.
Some members may require late initialisation, thus we just mark object as
uninitialised. Real initialisation happens in create().
*/
log_t(): m_initialised(false) {}
/** @return whether the redo log is encrypted */
bool is_encrypted() const { return(log.is_encrypted()); }
bool is_initialised() { return m_initialised; }
/** Initialise the redo log subsystem. */
void create();
/** Shut down the redo log subsystem. */
void close();
};
/** Redo log system */
extern log_t* log_sys;
extern log_t log_sys;
/** Test if flush order mutex is owned. */
#define log_flush_order_mutex_own() \
mutex_own(&log_sys->log_flush_order_mutex)
mutex_own(&log_sys.log_flush_order_mutex)
/** Acquire the flush order mutex. */
#define log_flush_order_mutex_enter() do { \
mutex_enter(&log_sys->log_flush_order_mutex); \
mutex_enter(&log_sys.log_flush_order_mutex); \
} while (0)
/** Release the flush order mutex. */
# define log_flush_order_mutex_exit() do { \
mutex_exit(&log_sys->log_flush_order_mutex); \
mutex_exit(&log_sys.log_flush_order_mutex); \
} while (0)
/** Test if log sys mutex is owned. */
#define log_mutex_own() mutex_own(&log_sys->mutex)
#define log_mutex_own() mutex_own(&log_sys.mutex)
/** Test if log sys write mutex is owned. */
#define log_write_mutex_own() mutex_own(&log_sys->write_mutex)
#define log_write_mutex_own() mutex_own(&log_sys.write_mutex)
/** Acquire the log sys mutex. */
#define log_mutex_enter() mutex_enter(&log_sys->mutex)
#define log_mutex_enter() mutex_enter(&log_sys.mutex)
/** Acquire the log sys write mutex. */
#define log_write_mutex_enter() mutex_enter(&log_sys->write_mutex)
#define log_write_mutex_enter() mutex_enter(&log_sys.write_mutex)
/** Acquire all the log sys mutexes. */
#define log_mutex_enter_all() do { \
mutex_enter(&log_sys->write_mutex); \
mutex_enter(&log_sys->mutex); \
mutex_enter(&log_sys.write_mutex); \
mutex_enter(&log_sys.mutex); \
} while (0)
/** Release the log sys mutex. */
#define log_mutex_exit() mutex_exit(&log_sys->mutex)
#define log_mutex_exit() mutex_exit(&log_sys.mutex)
/** Release the log sys write mutex.*/
#define log_write_mutex_exit() mutex_exit(&log_sys->write_mutex)
#define log_write_mutex_exit() mutex_exit(&log_sys.write_mutex)
/** Release all the log sys mutexes. */
#define log_mutex_exit_all() do { \
mutex_exit(&log_sys->mutex); \
mutex_exit(&log_sys->write_mutex); \
mutex_exit(&log_sys.mutex); \
mutex_exit(&log_sys.write_mutex); \
} while (0)
/** Calculate the offset of an lsn within a log group.

View file

@ -330,15 +330,15 @@ log_reserve_and_write_fast(
len - SIZE_OF_MLOG_CHECKPOINT]
? 0
: 1
+ mach_get_compressed_size(log_sys->lsn >> 32)
+ mach_get_compressed_size(log_sys->lsn & 0xFFFFFFFFUL);
+ mach_get_compressed_size(log_sys.lsn >> 32)
+ mach_get_compressed_size(log_sys.lsn & 0xFFFFFFFFUL);
#endif /* UNIV_LOG_LSN_DEBUG */
const ulint data_len = len
#ifdef UNIV_LOG_LSN_DEBUG
+ lsn_len
#endif /* UNIV_LOG_LSN_DEBUG */
+ log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE;
+ log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE;
if (data_len >= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
@ -348,44 +348,44 @@ log_reserve_and_write_fast(
return(0);
}
*start_lsn = log_sys->lsn;
*start_lsn = log_sys.lsn;
#ifdef UNIV_LOG_LSN_DEBUG
if (lsn_len) {
/* Write the LSN pseudo-record. */
byte* b = &log_sys->buf[log_sys->buf_free];
byte* b = &log_sys.buf[log_sys.buf_free];
*b++ = MLOG_LSN | (MLOG_SINGLE_REC_FLAG & *(const byte*) str);
/* Write the LSN in two parts,
as a pseudo page number and space id. */
b += mach_write_compressed(b, log_sys->lsn >> 32);
b += mach_write_compressed(b, log_sys->lsn & 0xFFFFFFFFUL);
ut_a(b - lsn_len == &log_sys->buf[log_sys->buf_free]);
b += mach_write_compressed(b, log_sys.lsn >> 32);
b += mach_write_compressed(b, log_sys.lsn & 0xFFFFFFFFUL);
ut_a(b - lsn_len == &log_sys.buf[log_sys.buf_free]);
::memcpy(b, str, len);
len += lsn_len;
} else
#endif /* UNIV_LOG_LSN_DEBUG */
memcpy(log_sys->buf + log_sys->buf_free, str, len);
memcpy(log_sys.buf + log_sys.buf_free, str, len);
log_block_set_data_len(
reinterpret_cast<byte*>(ut_align_down(
log_sys->buf + log_sys->buf_free,
log_sys.buf + log_sys.buf_free,
OS_FILE_LOG_BLOCK_SIZE)),
data_len);
log_sys->buf_free += ulong(len);
log_sys.buf_free += ulong(len);
ut_ad(log_sys->buf_free <= srv_log_buffer_size);
ut_ad(log_sys.buf_free <= srv_log_buffer_size);
log_sys->lsn += len;
log_sys.lsn += len;
MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
log_sys->lsn - log_sys->last_checkpoint_lsn);
log_sys.lsn - log_sys.last_checkpoint_lsn);
return(log_sys->lsn);
return(log_sys.lsn);
}
/************************************************************//**
@ -400,7 +400,7 @@ log_get_lsn(void)
log_mutex_enter();
lsn = log_sys->lsn;
lsn = log_sys.lsn;
log_mutex_exit();
@ -418,7 +418,7 @@ log_get_flush_lsn(void)
log_mutex_enter();
lsn = log_sys->flushed_to_disk_lsn;
lsn = log_sys.flushed_to_disk_lsn;
log_mutex_exit();
@ -435,11 +435,11 @@ log_get_lsn_nowait(void)
{
lsn_t lsn=0;
if (!mutex_enter_nowait(&(log_sys->mutex))) {
if (!mutex_enter_nowait(&(log_sys.mutex))) {
lsn = log_sys->lsn;
lsn = log_sys.lsn;
mutex_exit(&(log_sys->mutex));
mutex_exit(&(log_sys.mutex));
}
return(lsn);
@ -447,14 +447,14 @@ log_get_lsn_nowait(void)
/****************************************************************
Gets the log group capacity. It is OK to read the value without
holding log_sys->mutex because it is constant.
holding log_sys.mutex because it is constant.
@return log group capacity */
UNIV_INLINE
lsn_t
log_get_capacity(void)
/*==================*/
{
return(log_sys->log_group_capacity);
return(log_sys.log_group_capacity);
}
/****************************************************************
@ -466,7 +466,7 @@ lsn_t
log_get_max_modified_age_async(void)
/*================================*/
{
return(log_sys->max_modified_age_async);
return(log_sys.max_modified_age_async);
}
/***********************************************************************//**
@ -498,7 +498,7 @@ log_free_check(void)
sync_allowed_latches(latches,
latches + UT_ARR_SIZE(latches))));
if (log_sys->check_flush_or_checkpoint) {
if (log_sys.check_flush_or_checkpoint) {
log_check_margins();
}

View file

@ -331,7 +331,7 @@ extern bool recv_no_ibuf_operations;
extern bool recv_needed_recovery;
#ifdef UNIV_DEBUG
/** TRUE if writing to the redo log (mtr_commit) is forbidden.
Protected by log_sys->mutex. */
Protected by log_sys.mutex. */
extern bool recv_no_log_write;
#endif /* UNIV_DEBUG */

View file

@ -80,7 +80,7 @@ struct srv_stats_t
lsn_ctr_1_t os_log_written;
/** Number of writes being done to the log files.
Protected by log_sys->write_mutex. */
Protected by log_sys.write_mutex. */
ulint_ctr_1_t os_log_pending_writes;
/** We increase this counter, when we don't have enough

View file

@ -219,7 +219,7 @@ bool
log_crypt_init()
{
ut_ad(log_mutex_own());
ut_ad(log_sys->is_encrypted());
ut_ad(log_sys.is_encrypted());
info.key_version = encryption_key_get_latest_version(
LOG_DEFAULT_ENCRYPTION_KEY);

File diff suppressed because it is too large Load diff

View file

@ -79,7 +79,7 @@ volatile bool recv_recovery_on;
bool recv_needed_recovery;
#ifdef UNIV_DEBUG
/** TRUE if writing to the redo log (mtr_commit) is forbidden.
Protected by log_sys->mutex. */
Protected by log_sys.mutex. */
bool recv_no_log_write = false;
#endif /* UNIV_DEBUG */
@ -669,7 +669,7 @@ loop:
(source_offset % group->file_size));
}
log_sys->n_log_ios++;
log_sys.n_log_ios++;
MONITOR_INC(MONITOR_LOG_IO);
@ -760,13 +760,13 @@ recv_synchronize_groups()
lsn_t start_lsn = ut_uint64_align_down(recovered_lsn,
OS_FILE_LOG_BLOCK_SIZE);
log_group_read_log_seg(log_sys->buf, &log_sys->log,
log_group_read_log_seg(log_sys.buf, &log_sys.log,
&start_lsn, start_lsn + OS_FILE_LOG_BLOCK_SIZE);
/* Update the fields in the group struct to correspond to
recovered_lsn */
log_group_set_fields(&log_sys->log, recovered_lsn);
log_group_set_fields(&log_sys.log, recovered_lsn);
/* Copy the checkpoint info to the log; remember that we have
incremented checkpoint_no by one, and the info will not be written
@ -799,10 +799,10 @@ static MY_ATTRIBUTE((warn_unused_result))
dberr_t
recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field)
{
log_group_t* group = &log_sys->log;
log_group_t* group = &log_sys.log;
ib_uint64_t max_no = 0;
ib_uint64_t checkpoint_no;
byte* buf = log_sys->checkpoint_buf;
byte* buf = log_sys.checkpoint_buf;
ut_ad(group->format == 0);
@ -882,12 +882,12 @@ dberr_t
recv_log_format_0_recover(lsn_t lsn)
{
log_mutex_enter();
log_group_t* group = &log_sys->log;
log_group_t* group = &log_sys.log;
const lsn_t source_offset
= log_group_calc_lsn_offset(lsn, group);
log_mutex_exit();
const ulint page_no = ulint(source_offset >> srv_page_size_shift);
byte* buf = log_sys->buf;
byte* buf = log_sys.buf;
static const char* NO_UPGRADE_RECOVERY_MSG =
"Upgrade after a crash is not supported."
@ -919,11 +919,11 @@ recv_log_format_0_recover(lsn_t lsn)
recv_sys->parse_start_lsn = recv_sys->recovered_lsn
= recv_sys->scanned_lsn
= recv_sys->mlog_checkpoint_lsn = lsn;
log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn
= log_sys->lsn = log_sys->write_lsn
= log_sys->current_flush_lsn = log_sys->flushed_to_disk_lsn
log_sys.last_checkpoint_lsn = log_sys.next_checkpoint_lsn
= log_sys.lsn = log_sys.write_lsn
= log_sys.current_flush_lsn = log_sys.flushed_to_disk_lsn
= lsn;
log_sys->next_checkpoint_no = 0;
log_sys.next_checkpoint_no = 0;
return(DB_SUCCESS);
}
@ -939,12 +939,12 @@ recv_find_max_checkpoint(ulint* max_field)
ulint field;
byte* buf;
group = &log_sys->log;
group = &log_sys.log;
max_no = 0;
*max_field = 0;
buf = log_sys->checkpoint_buf;
buf = log_sys.checkpoint_buf;
group->state = LOG_GROUP_CORRUPTED;
@ -1019,7 +1019,7 @@ recv_find_max_checkpoint(ulint* max_field)
buf + LOG_CHECKPOINT_LSN);
group->lsn_offset = mach_read_from_8(
buf + LOG_CHECKPOINT_OFFSET);
log_sys->next_checkpoint_no = checkpoint_no;
log_sys.next_checkpoint_no = checkpoint_no;
}
}
@ -1751,7 +1751,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block)
while (recv) {
end_lsn = recv->end_lsn;
ut_ad(end_lsn <= log_sys->log.scanned_lsn);
ut_ad(end_lsn <= log_sys.log.scanned_lsn);
if (recv->len > RECV_DATA_BLOCK_SIZE) {
/* We have to copy the record body to a separate
@ -2927,11 +2927,11 @@ recv_group_scan_log_recs(
OS_FILE_LOG_BLOCK_SIZE);
end_lsn = start_lsn;
log_group_read_log_seg(
log_sys->buf, group, &end_lsn,
log_sys.buf, group, &end_lsn,
start_lsn + RECV_SCAN_SIZE);
} while (end_lsn != start_lsn
&& !recv_scan_log_recs(
available_mem, &store_to_hash, log_sys->buf,
available_mem, &store_to_hash, log_sys.buf,
checkpoint_lsn,
start_lsn, end_lsn,
contiguous_lsn, &group->scanned_lsn));
@ -3157,14 +3157,14 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
if (err != DB_SUCCESS) {
srv_start_lsn = recv_sys->recovered_lsn = log_sys->lsn;
srv_start_lsn = recv_sys->recovered_lsn = log_sys.lsn;
log_mutex_exit();
return(err);
}
log_group_header_read(&log_sys->log, max_cp_field);
log_group_header_read(&log_sys.log, max_cp_field);
buf = log_sys->checkpoint_buf;
buf = log_sys.checkpoint_buf;
checkpoint_lsn = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
checkpoint_no = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
@ -3177,7 +3177,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
ut_ad(RECV_SCAN_SIZE <= srv_log_buffer_size);
group = &log_sys->log;
group = &log_sys.log;
const lsn_t end_lsn = mach_read_from_8(
buf + LOG_CHECKPOINT_END_LSN);
@ -3283,7 +3283,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
}
}
log_sys->lsn = recv_sys->recovered_lsn;
log_sys.lsn = recv_sys->recovered_lsn;
if (recv_needed_recovery) {
bool missing_tablespace = false;
@ -3378,8 +3378,8 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
/* Synchronize the uncorrupted log groups to the most up-to-date log
group; we also copy checkpoint info to groups */
log_sys->next_checkpoint_lsn = checkpoint_lsn;
log_sys->next_checkpoint_no = checkpoint_no + 1;
log_sys.next_checkpoint_lsn = checkpoint_lsn;
log_sys.next_checkpoint_no = checkpoint_no + 1;
recv_synchronize_groups();
@ -3389,24 +3389,24 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
srv_start_lsn = recv_sys->recovered_lsn;
}
log_sys->buf_free = ulong(log_sys->lsn % OS_FILE_LOG_BLOCK_SIZE);
log_sys->buf_next_to_write = log_sys->buf_free;
log_sys->write_lsn = log_sys->lsn;
log_sys.buf_free = ulong(log_sys.lsn % OS_FILE_LOG_BLOCK_SIZE);
log_sys.buf_next_to_write = log_sys.buf_free;
log_sys.write_lsn = log_sys.lsn;
log_sys->last_checkpoint_lsn = checkpoint_lsn;
log_sys.last_checkpoint_lsn = checkpoint_lsn;
if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL) {
/* Write a MLOG_CHECKPOINT marker as the first thing,
before generating any other redo log. This ensures
that subsequent crash recovery will be possible even
if the server were killed soon after this. */
fil_names_clear(log_sys->last_checkpoint_lsn, true);
fil_names_clear(log_sys.last_checkpoint_lsn, true);
}
MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
log_sys->lsn - log_sys->last_checkpoint_lsn);
log_sys.lsn - log_sys.last_checkpoint_lsn);
log_sys->next_checkpoint_no = ++checkpoint_no;
log_sys.next_checkpoint_no = ++checkpoint_no;
mutex_enter(&recv_sys->mutex);
@ -3512,26 +3512,26 @@ recv_reset_logs(
{
ut_ad(log_mutex_own());
log_sys->lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE);
log_sys.lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE);
log_sys->log.lsn = log_sys->lsn;
log_sys->log.lsn_offset = LOG_FILE_HDR_SIZE;
log_sys.log.lsn = log_sys.lsn;
log_sys.log.lsn_offset = LOG_FILE_HDR_SIZE;
log_sys->buf_next_to_write = 0;
log_sys->write_lsn = log_sys->lsn;
log_sys.buf_next_to_write = 0;
log_sys.write_lsn = log_sys.lsn;
log_sys->next_checkpoint_no = 0;
log_sys->last_checkpoint_lsn = 0;
log_sys.next_checkpoint_no = 0;
log_sys.last_checkpoint_lsn = 0;
memset(log_sys->buf, 0, srv_log_buffer_size);
log_block_init(log_sys->buf, log_sys->lsn);
log_block_set_first_rec_group(log_sys->buf, LOG_BLOCK_HDR_SIZE);
memset(log_sys.buf, 0, srv_log_buffer_size);
log_block_init(log_sys.buf, log_sys.lsn);
log_block_set_first_rec_group(log_sys.buf, LOG_BLOCK_HDR_SIZE);
log_sys->buf_free = LOG_BLOCK_HDR_SIZE;
log_sys->lsn += LOG_BLOCK_HDR_SIZE;
log_sys.buf_free = LOG_BLOCK_HDR_SIZE;
log_sys.lsn += LOG_BLOCK_HDR_SIZE;
MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
(log_sys->lsn - log_sys->last_checkpoint_lsn));
(log_sys.lsn - log_sys.last_checkpoint_lsn));
log_mutex_exit();

View file

@ -481,7 +481,7 @@ mtr_write_log(
ut_ad(!recv_no_log_write);
DBUG_PRINT("ib_log",
(ULINTPF " extra bytes written at " LSN_PF,
len, log_sys->lsn));
len, log_sys.lsn));
log_reserve_and_open(len);
log->for_each_block(write_log);
@ -624,7 +624,7 @@ mtr_t::commit_checkpoint(
if (write_mlog_checkpoint) {
DBUG_PRINT("ib_log",
("MLOG_CHECKPOINT(" LSN_PF ") written at " LSN_PF,
checkpoint_lsn, log_sys->lsn));
checkpoint_lsn, log_sys.lsn));
}
}
@ -774,7 +774,7 @@ mtr_t::Command::prepare_write()
case MTR_LOG_NONE:
ut_ad(m_impl->m_log.size() == 0);
log_mutex_enter();
m_end_lsn = m_start_lsn = log_sys->lsn;
m_end_lsn = m_start_lsn = log_sys.lsn;
return(0);
case MTR_LOG_ALL:
break;

View file

@ -144,7 +144,7 @@ public:
ut_ad(dict_index_is_spatial(m_index));
DBUG_EXECUTE_IF("row_merge_instrument_log_check_flush",
log_sys->check_flush_or_checkpoint = true;
log_sys.check_flush_or_checkpoint = true;
);
for (idx_tuple_vec::iterator it = m_dtuple_vec->begin();
@ -153,7 +153,7 @@ public:
dtuple = *it;
ut_ad(dtuple);
if (log_sys->check_flush_or_checkpoint) {
if (log_sys.check_flush_or_checkpoint) {
if (!(*mtr_committed)) {
btr_pcur_move_to_prev_on_page(pcur);
btr_pcur_store_position(pcur, scan_mtr);

View file

@ -2000,11 +2000,11 @@ srv_mon_process_existing_counter(
break;
case MONITOR_OVLD_LSN_FLUSHDISK:
value = (mon_type_t) log_sys->flushed_to_disk_lsn;
value = (mon_type_t) log_sys.flushed_to_disk_lsn;
break;
case MONITOR_OVLD_LSN_CURRENT:
value = (mon_type_t) log_sys->lsn;
value = (mon_type_t) log_sys.lsn;
break;
case MONITOR_OVLD_BUF_OLDEST_LSN:
@ -2012,15 +2012,15 @@ srv_mon_process_existing_counter(
break;
case MONITOR_OVLD_LSN_CHECKPOINT:
value = (mon_type_t) log_sys->last_checkpoint_lsn;
value = (mon_type_t) log_sys.last_checkpoint_lsn;
break;
case MONITOR_OVLD_MAX_AGE_ASYNC:
value = log_sys->max_modified_age_async;
value = log_sys.max_modified_age_async;
break;
case MONITOR_OVLD_MAX_AGE_SYNC:
value = log_sys->max_modified_age_sync;
value = log_sys.max_modified_age_sync;
break;
#ifdef BTR_CUR_HASH_ADAPT

View file

@ -496,7 +496,7 @@ create_log_files(
/* Create a log checkpoint. */
log_mutex_enter();
if (log_sys->is_encrypted() && !log_crypt_init()) {
if (log_sys.is_encrypted() && !log_crypt_init()) {
return(DB_ERROR);
}
ut_d(recv_no_log_write = false);
@ -1370,14 +1370,14 @@ srv_prepare_to_delete_redo_log_files(
log_mutex_enter();
fil_names_clear(log_sys->lsn, false);
fil_names_clear(log_sys.lsn, false);
flushed_lsn = log_sys->lsn;
flushed_lsn = log_sys.lsn;
{
ib::info info;
if (srv_log_file_size == 0
|| (log_sys->log.format
|| (log_sys.log.format
& ~LOG_HEADER_FORMAT_ENCRYPTED)
!= LOG_HEADER_FORMAT_CURRENT) {
info << "Upgrading redo log: ";
@ -1385,7 +1385,7 @@ srv_prepare_to_delete_redo_log_files(
|| srv_log_file_size
!= srv_log_file_size_requested) {
if (srv_encrypt_log
== (my_bool)log_sys->is_encrypted()) {
== (my_bool)log_sys.is_encrypted()) {
info << (srv_encrypt_log
? "Resizing encrypted"
: "Resizing");
@ -1689,7 +1689,7 @@ dberr_t srv_start(bool create_new_db)
}
#endif /* UNIV_DEBUG */
log_sys_init();
log_sys.create();
recv_sys_init();
lock_sys.create(srv_lock_table_size);
@ -2204,7 +2204,7 @@ files_checked:
/* Leave the redo log alone. */
} else if (srv_log_file_size_requested == srv_log_file_size
&& srv_n_log_files_found == srv_n_log_files
&& log_sys->log.format
&& log_sys.log.format
== (srv_encrypt_log
? LOG_HEADER_FORMAT_CURRENT
| LOG_HEADER_FORMAT_ENCRYPTED
@ -2674,11 +2674,11 @@ void innodb_shutdown()
ut_ad(buf_dblwr || !srv_was_started || srv_read_only_mode
|| srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
ut_ad(lock_sys.is_initialised() || !srv_was_started);
ut_ad(log_sys.is_initialised() || !srv_was_started);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(btr_search_sys || !srv_was_started);
#endif /* BTR_CUR_HASH_ADAPT */
ut_ad(ibuf || !srv_was_started);
ut_ad(log_sys || !srv_was_started);
if (dict_stats_event) {
dict_stats_thread_deinit();
@ -2705,9 +2705,7 @@ void innodb_shutdown()
if (ibuf) {
ibuf_close();
}
if (log_sys) {
log_shutdown();
}
log_sys.close();
purge_sys.close();
trx_sys.close();
if (buf_dblwr) {