mirror of
https://github.com/MariaDB/server.git
synced 2025-01-16 12:02:42 +01:00
8c71c6aa8b
InnoDB I/O and buffer pool interfaces and the redo log format have been changed between MariaDB 10.1 and 10.2, and the backup code has to be adjusted accordingly. The code has been simplified, and many memory leaks have been fixed. Instead of the file name xtrabackup_logfile, the file name ib_logfile0 is being used for the copy of the redo log. Unnecessary InnoDB startup and shutdown and some unnecessary threads have been removed. Some help was provided by Vladislav Vaintroub. Parameters have been cleaned up and aligned with those of MariaDB 10.2. The --dbug option has been added, so that in debug builds, --dbug=d,ib_log can be specified to enable diagnostic messages for processing redo log entries. By default, innodb_doublewrite=OFF, so that --prepare works faster. If more crash-safety for --prepare is needed, double buffering can be enabled. The parameter innodb_log_checksums=OFF can be used to ignore redo log checksums in --backup. Some messages have been cleaned up. Unless --export is specified, Mariabackup will not deal with undo log. The InnoDB mini-transaction redo log is not only about user-level transactions; it is actually about mini-transactions. To avoid confusion, call it the redo log, not transaction log. We disable any undo log processing in --prepare. Because MariaDB 10.2 supports indexed virtual columns, the undo log processing would need to be able to evaluate virtual column expressions. To reduce the amount of code dependencies, we will not process any undo log in prepare. This means that the --export option must be disabled for now. This also means that the following options are redundant and have been removed: xtrabackup --apply-log-only innobackupex --redo-only In addition to disabling any undo log processing, we will disable any further changes to data pages during --prepare, including the change buffer merge. This means that restoring incremental backups should reliably work even when change buffering is being used on the server. 
Because of this, preparing a backup will not generate any further redo log, and the redo log file can be safely deleted. (If the --export option is enabled in the future, it must generate redo log when processing undo logs and buffered changes.) In --prepare, we cannot easily know if a partial backup was used, especially when restoring a series of incremental backups. So, we simply warn about any missing files, and ignore the redo log for them. FIXME: Enable the --export option. FIXME: Improve the handling of the MLOG_INDEX_LOAD record, and write a test that initiates a backup while an ALGORITHM=INPLACE operation is creating indexes or rebuilding a table. An error should be detected when preparing the backup. FIXME: In --incremental --prepare, xtrabackup_apply_delta() should ensure that if FSP_SIZE is modified, the file size will be adjusted accordingly.
207 lines
6.2 KiB
C++
/******************************************************
|
|
XtraBackup: hot backup tool for InnoDB
|
|
(c) 2009-2012 Percona Inc.
|
|
Originally Created 3/3/2009 Yasufumi Kinoshita
|
|
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
|
|
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
|
|
|
|
This program is free software; you can redistribute it and/or modify
|
|
it under the terms of the GNU General Public License as published by
|
|
the Free Software Foundation; version 2 of the License.
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
GNU General Public License for more details.
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
along with this program; if not, write to the Free Software
|
|
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
|
|
|
*******************************************************/
|
|
|
|
/* Data file read filter implementation */
|
|
|
|
#include "read_filt.h"
|
|
#include "common.h"
|
|
#include "fil_cur.h"
|
|
#include "xtrabackup.h"
|
|
|
|
/****************************************************************//**
|
|
Perform read filter context initialization that is common to all read
|
|
filters. */
|
|
static
|
|
void
|
|
common_init(
|
|
/*========*/
|
|
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
|
|
const xb_fil_cur_t* cursor) /*!<in: file cursor */
|
|
{
|
|
ctxt->offset = 0;
|
|
ctxt->data_file_size = cursor->statinfo.st_size;
|
|
ctxt->buffer_capacity = cursor->buf_size;
|
|
ctxt->page_size = cursor->page_size;
|
|
}
|
|
|
|
/****************************************************************//**
|
|
Initialize the pass-through read filter. */
|
|
static
|
|
void
|
|
rf_pass_through_init(
|
|
/*=================*/
|
|
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
|
|
const xb_fil_cur_t* cursor, /*!<in: file cursor */
|
|
ulint space_id __attribute__((unused)))
|
|
/*!<in: space id we are reading */
|
|
{
|
|
common_init(ctxt, cursor);
|
|
}
|
|
|
|
/****************************************************************//**
|
|
Get the next batch of pages for the pass-through read filter. */
|
|
static
|
|
void
|
|
rf_pass_through_get_next_batch(
|
|
/*===========================*/
|
|
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
|
context */
|
|
ib_int64_t* read_batch_start, /*!<out: starting read
|
|
offset in bytes for the
|
|
next batch of pages */
|
|
ib_int64_t* read_batch_len) /*!<out: length in
|
|
bytes of the next batch
|
|
of pages */
|
|
{
|
|
*read_batch_start = ctxt->offset;
|
|
*read_batch_len = ctxt->data_file_size - ctxt->offset;
|
|
|
|
if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
|
|
*read_batch_len = ctxt->buffer_capacity;
|
|
}
|
|
|
|
ctxt->offset += *read_batch_len;
|
|
}
|
|
|
|
/****************************************************************//**
|
|
Deinitialize the pass-through read filter. */
|
|
static
|
|
void
|
|
rf_pass_through_deinit(
|
|
/*===================*/
|
|
xb_read_filt_ctxt_t* ctxt __attribute__((unused)))
|
|
/*!<in: read filter context */
|
|
{
|
|
}
|
|
|
|
/****************************************************************//**
|
|
Initialize the changed page bitmap-based read filter. Assumes that
|
|
the bitmap is already set up in changed_page_bitmap. */
|
|
static
|
|
void
|
|
rf_bitmap_init(
|
|
/*===========*/
|
|
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
|
context */
|
|
const xb_fil_cur_t* cursor, /*!<in: read cursor */
|
|
ulint space_id) /*!<in: space id */
|
|
{
|
|
common_init(ctxt, cursor);
|
|
ctxt->bitmap_range = xb_page_bitmap_range_init(changed_page_bitmap,
|
|
space_id);
|
|
ctxt->filter_batch_end = 0;
|
|
}
|
|
|
|
/****************************************************************//**
Get the next batch of pages for the bitmap read filter. Only runs of
pages whose bits are set in the changed-page bitmap are returned; the
gaps between them are skipped by advancing ctxt->offset. A returned
length of zero means no more changed pages remain. */
static
void
rf_bitmap_get_next_batch(
/*=====================*/
	xb_read_filt_ctxt_t*	ctxt,	/*!<in/out: read filter
					context */
	ib_int64_t*		read_batch_start,	/*!<out: starting read
							offset in bytes for the
							next batch of pages */
	ib_int64_t*		read_batch_len)	/*!<out: length in
						bytes of the next batch
						of pages */
{
	ulint	start_page_id;
	const ulint page_size = ctxt->page_size.physical();

	start_page_id = (ulint)(ctxt->offset / page_size);

	/* The offset must always be page-aligned on entry. */
	xb_a (ctxt->offset % page_size == 0);

	if (start_page_id == ctxt->filter_batch_end) {

		/* Used up all the previous bitmap range, get some more */

		ulint next_page_id;

		/* Find the next changed page using the bitmap */
		next_page_id = xb_page_bitmap_range_get_next_bit
			(ctxt->bitmap_range, TRUE);

		if (next_page_id == ULINT_UNDEFINED) {
			/* No set bits left: signal end of data. */
			*read_batch_len = 0;
			return;
		}

		/* Jump over the unchanged gap to the start of the next
		changed-page block. */
		ctxt->offset = next_page_id * page_size;

		/* Find the end of the current changed page block by searching
		for the next cleared bitmap bit */
		ctxt->filter_batch_end
			= xb_page_bitmap_range_get_next_bit(ctxt->bitmap_range,
							    FALSE);
		/* The block must be non-empty; this holds even when no
		cleared bit was found (filter_batch_end == ULINT_UNDEFINED). */
		xb_a(next_page_id < ctxt->filter_batch_end);
	}

	*read_batch_start = ctxt->offset;
	if (ctxt->filter_batch_end == ULINT_UNDEFINED) {
		/* No more cleared bits in the bitmap, need to copy all the
		remaining pages. */
		*read_batch_len = ctxt->data_file_size - ctxt->offset;
	} else {
		*read_batch_len = ctxt->filter_batch_end * page_size
			- ctxt->offset;
	}

	/* If the page block is larger than the buffer capacity, limit it to
	buffer capacity. The subsequent invocations will continue returning
	the current block in buffer-sized pieces until ctxt->filter_batch_end
	is reached, triggering the next bitmap query. */
	if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
		*read_batch_len = ctxt->buffer_capacity;
	}

	ctxt->offset += *read_batch_len;
	/* Postconditions: everything stays page-aligned. */
	xb_a (ctxt->offset % page_size == 0);
	xb_a (*read_batch_start % page_size == 0);
	xb_a (*read_batch_len % page_size == 0);
}
|
|
|
|
/****************************************************************//**
|
|
Deinitialize the changed page bitmap-based read filter. */
|
|
static
|
|
void
|
|
rf_bitmap_deinit(
|
|
/*=============*/
|
|
xb_read_filt_ctxt_t* ctxt) /*!<in/out: read filter context */
|
|
{
|
|
xb_page_bitmap_range_deinit(ctxt->bitmap_range);
|
|
}
|
|
|
|
/* The pass-through read filter */
|
|
xb_read_filt_t rf_pass_through = {
|
|
&rf_pass_through_init,
|
|
&rf_pass_through_get_next_batch,
|
|
&rf_pass_through_deinit
|
|
};
|
|
|
|
/* The changed page bitmap-based read filter */
|
|
xb_read_filt_t rf_bitmap = {
|
|
&rf_bitmap_init,
|
|
&rf_bitmap_get_next_batch,
|
|
&rf_bitmap_deinit
|
|
};
|