Mirror of https://github.com/MariaDB/server.git (synced 2025-01-31 19:11:46 +01:00)
Commit f29bfa7eee
Changes in the InnoDB codebase required to compile and integrate the MEB codebase with MySQL 5.5.

@ storage/innobase/btr/btr0btr.c
Excluded buffer pool usage from the MEB build. The buf_pool_from_bpage calls are in buf0buf.ic, and the buffer pool functions from that file are disabled in MEB.

@ storage/innobase/buf/buf0buf.c
Disabled more buffer pool functions that are unused in MEB.

@ storage/innobase/dict/dict0dict.c
Disabled dict_ind_free, which is unused in MEB.

@ storage/innobase/dict/dict0mem.c
The include #include "ha_prototypes.h" was causing conflicts with definitions in my_global.h:

Linking C executable mysqlbackup
libinnodb.a(dict0mem.c.o): In function `dict_mem_foreign_table_name_lookup_set':
dict0mem.c:(.text+0x91c): undefined reference to `innobase_get_lower_case_table_names'
libinnodb.a(dict0mem.c.o): In function `dict_mem_referenced_table_name_lookup_set':
dict0mem.c:(.text+0x9fc): undefined reference to `innobase_get_lower_case_table_names'
libinnodb.a(dict0mem.c.o): In function `dict_mem_foreign_table_name_lookup_set':
dict0mem.c:(.text+0x96e): undefined reference to `innobase_casedn_str'
libinnodb.a(dict0mem.c.o): In function `dict_mem_referenced_table_name_lookup_set':
dict0mem.c:(.text+0xa4e): undefined reference to `innobase_casedn_str'
collect2: ld returned 1 exit status
make[2]: *** [mysqlbackup] Error 1

innobase_get_lower_case_table_names and innobase_casedn_str are functions defined in ha_innodb.cc, which is not part of the build. dict_mem_foreign_table_name_lookup_set is not present in the current MEB codebase, meaning we do not use it in MEB.

@ storage/innobase/fil/fil0fil.c
The srv_fast_shutdown variable is declared in srv0srv.c, which is not compiled in the mysqlbackup codebase, so referencing it throws an undeclared-identifier error.

From the manual (innodb_fast_shutdown): the InnoDB shutdown mode. The default value is 1 as of MySQL 3.23.50, which causes a “fast” shutdown (the normal type of shutdown). If the value is 0, InnoDB does a full purge and an insert buffer merge before a shutdown. These operations can take minutes, or even hours in extreme cases. If the value is 1, InnoDB skips these operations at shutdown. This ideally does not matter for mysqlbackup.

@ storage/innobase/ha/ha0ha.c
In file included from /home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/ha/ha0ha.c:34:0:
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/include/btr0sea.h:286:17: error: expected ‘=’, ‘,’, ‘;’, ‘asm’ or ‘__attribute__’ before ‘*’ token
make[2]: *** [CMakeFiles/innodb.dir/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/ha/ha0ha.c.o] Error 1
make[1]: *** [CMakeFiles/innodb.dir/all] Error 2
make: *** [all] Error 2

#include "sync0rw.h" is excluded from hotbackup compilation in dict0dict.h. This causes "extern rw_lock_t* btr_search_latch_temp;" to fail because the definition of rw_lock_t is not found.

@ storage/innobase/include/buf0buf.h
Excluded buffer pool functions that are unused in the MEB codebase.

@ storage/innobase/include/buf0buf.ic
Replicated the exclusion of #include "buf0flu.h", #include "buf0lru.h" and #include "buf0rea.h" by looking at the current codebase in <meb-trunk>/src/innodb.

@ storage/innobase/include/dict0dict.h
dict_table_x_lock_indexes, dict_table_x_unlock_indexes, dict_table_is_corrupted, dict_index_is_corrupted and buf_block_buf_fix_inc_func are unused in MEB and were leading to compilation errors, hence they are excluded.

@ storage/innobase/include/dict0dict.ic
dict_table_x_lock_indexes, dict_table_x_unlock_indexes, dict_table_is_corrupted, dict_index_is_corrupted and buf_block_buf_fix_inc_func are unused in MEB and were leading to compilation errors, hence they are excluded.

@ storage/innobase/include/log0log.h
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/include/log0log.h: At top level:
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/include/log0log.h:767:2: error: expected specifier-qualifier-list before ‘mutex_t’

The mutex_t definitions were excluded, as seen from the surrounding code, hence the definition of log_flush_order_mutex is excluded as well.

@ storage/innobase/include/os0file.h
Bug in the InnoDB code: create_mode should have been create.

@ storage/innobase/include/srv0srv.h
In file included from /home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/buf/buf0buf.c:50:0:
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/include/srv0srv.h: At top level:
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/include/srv0srv.h:120:16: error: expected ‘=’, ‘,’, ‘;’, ‘asm’ or ‘__attribute__’ before ‘srv_use_native_aio’

srv_use_native_aio: MEB does not use the native AIO of the OS anyway and does not compile InnoDB with this option, hence it is disabled.

@ storage/innobase/include/trx0sys.h
[ 56%] Building C object CMakeFiles/innodb.dir/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/trx/trx0sys.c.o
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/trx/trx0sys.c: In function ‘trx_sys_read_file_format_id’:
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/trx/trx0sys.c:1499:20: error: ‘TRX_SYS_FILE_FORMAT_TAG_MAGIC_N’ undeclared (first use in this function)
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/trx/trx0sys.c:1499:20: note: each undeclared identifier is reported only once for each function it appears in
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/trx/trx0sys.c: At top level:
/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/include/buf0buf.h:607:1: warning: ‘buf_block_buf_fix_inc_func’ declared ‘static’ but never defined
make[2]: *** [CMakeFiles/innodb.dir/home/narayanan/mysql-server/mysql-5.5-meb-rel3.8-innodb-integration-1/storage/innobase/trx/trx0sys.c.o] Error 1

Unused calls excluded to enable compilation.

@ storage/innobase/mem/mem0dbg.c
Excluded #include "ha_prototypes.h", which leads to definitions in ha_innodb.cc.

@ storage/innobase/os/os0file.c
InnoDB is not compiled with AIO support from MEB anyway, hence this is excluded from the compilation.

@ storage/innobase/page/page0zip.c
page0zip.c:(.text+0x4e9e): undefined reference to `buf_pool_from_block'
collect2: ld returned 1 exit status

buf_pool_from_block is defined in buf0buf.ic; most of that file is excluded from the MEB compilation.

@ storage/innobase/ut/ut0dbg.c
Excluded #include "ha_prototypes.h" since it leads to definitions in ha_innodb.cc. innobase_basename(file) is defined in ha_innodb.cc, hence that is excluded as well.

@ storage/innobase/ut/ut0ut.c
cal_tm is unused in MEB and was leading to warnings, hence it is disabled for MEB.
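Most of the per-file changes above amount to fencing server-only code off from the hot-backup build. As an illustration only (not part of the patch), the pattern relies on the UNIV_HOTBACKUP preprocessor guard that the InnoDB sources already use, as in the ha0ha.c listing below; the helper name in this sketch is hypothetical:

/* Illustrative sketch, not from the patch: the MEB/mysqlbackup build
defines UNIV_HOTBACKUP and does not link ha_innodb.cc or srv0srv.c,
so code that depends on them is compiled only for the server build.
The function name below is hypothetical. */
#ifndef UNIV_HOTBACKUP
static
void
example_server_only_helper(void)
/*============================*/
{
	/* body that calls into ha_innodb.cc or the buffer pool;
	never compiled into mysqlbackup */
}
#endif /* !UNIV_HOTBACKUP */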
ha/ha0ha.c (427 lines, 11 KiB, C)
/*****************************************************************************

Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/

/********************************************************************//**
@file ha/ha0ha.c
The hash table with external chains

Created 8/22/1994 Heikki Tuuri
*************************************************************************/

#include "ha0ha.h"
#ifdef UNIV_NONINL
#include "ha0ha.ic"
#endif

#ifndef UNIV_HOTBACKUP
#ifdef UNIV_DEBUG
# include "buf0buf.h"
#endif /* UNIV_DEBUG */
#include "btr0sea.h"
#include "page0page.h"

/*************************************************************//**
Creates a hash table with at least n array cells. The actual number
of cells is chosen to be a prime number slightly bigger than n.
@return own: created table */
UNIV_INTERN
hash_table_t*
ha_create_func(
/*===========*/
	ulint	n,		/*!< in: number of array cells */
#ifdef UNIV_SYNC_DEBUG
	ulint	mutex_level,	/*!< in: level of the mutexes in the latching
				order: this is used in the debug version */
#endif /* UNIV_SYNC_DEBUG */
	ulint	n_mutexes)	/*!< in: number of mutexes to protect the
				hash table: must be a power of 2, or 0 */
{
	hash_table_t*	table;
	ulint		i;

	ut_ad(ut_is_2pow(n_mutexes));
	table = hash_create(n);

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	table->adaptive = TRUE;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	/* Creating MEM_HEAP_BTR_SEARCH type heaps can potentially fail,
	but in practice it never should in this case, hence the asserts. */

	if (n_mutexes == 0) {
		table->heap = mem_heap_create_in_btr_search(
			ut_min(4096, MEM_MAX_ALLOC_IN_BUF));
		ut_a(table->heap);

		return(table);
	}

	hash_create_mutexes(table, n_mutexes, mutex_level);

	table->heaps = mem_alloc(n_mutexes * sizeof(void*));

	for (i = 0; i < n_mutexes; i++) {
		table->heaps[i] = mem_heap_create_in_btr_search(4096);
		ut_a(table->heaps[i]);
	}

	return(table);
}

/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted. If btr_search_enabled is set to FALSE, we will only allow
updating existing nodes, but no new node is allowed to be added.
@return TRUE if succeed, FALSE if no more memory could be allocated */
UNIV_INTERN
ibool
ha_insert_for_fold_func(
/*====================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: folded value of data; if a node with
				the same fold value already exists, it is
				updated to point to the same data, and no new
				node is created! */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	block,	/*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	const rec_t*	data)	/*!< in: data, must not be NULL */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	ha_node_t*	prev_node;
	ulint		hash;

	ut_ad(data);
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ASSERT_HASH_MUTEX_OWN(table, fold);
	ut_ad(btr_search_enabled);

	hash = hash_calc_hash(fold, table);

	cell = hash_get_nth_cell(table, hash);

	prev_node = cell->node;

	while (prev_node != NULL) {
		if (prev_node->fold == fold) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
			if (table->adaptive) {
				buf_block_t* prev_block = prev_node->block;
				ut_a(prev_block->frame
				     == page_align(prev_node->data));
				ut_a(prev_block->n_pointers > 0);
				prev_block->n_pointers--;
				block->n_pointers++;
			}

			prev_node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
			prev_node->data = data;

			return(TRUE);
		}

		prev_node = prev_node->next;
	}

	/* We have to allocate a new chain node */

	node = mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t));

	if (node == NULL) {
		/* It was a btr search type memory heap and at the moment
		no more memory could be allocated: return */

		ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);

		return(FALSE);
	}

	ha_node_set_data(node, block, data);

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (table->adaptive) {
		block->n_pointers++;
	}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	node->fold = fold;

	node->next = NULL;

	prev_node = cell->node;

	if (prev_node == NULL) {

		cell->node = node;

		return(TRUE);
	}

	while (prev_node->next != NULL) {

		prev_node = prev_node->next;
	}

	prev_node->next = node;

	return(TRUE);
}

/***********************************************************//**
Deletes a hash node. */
UNIV_INTERN
void
ha_delete_hash_node(
/*================*/
	hash_table_t*	table,		/*!< in: hash table */
	ha_node_t*	del_node)	/*!< in: node to be deleted */
{
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(btr_search_enabled);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (table->adaptive) {
		ut_a(del_node->block->frame == page_align(del_node->data));
		ut_a(del_node->block->n_pointers > 0);
		del_node->block->n_pointers--;
	}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
}

/*********************************************************//**
Looks for an element when we know the pointer to the data, and updates
the pointer to data, if found. */
UNIV_INTERN
void
ha_search_and_update_if_found_func(
/*===============================*/
	hash_table_t*	table,	/*!< in/out: hash table */
	ulint		fold,	/*!< in: folded value of the searched data */
	const rec_t*	data,	/*!< in: pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	new_block,/*!< in: block containing new_data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	const rec_t*	new_data)/*!< in: new pointer to the data */
{
	ha_node_t*	node;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	ASSERT_HASH_MUTEX_OWN(table, fold);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(new_block->frame == page_align(new_data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!btr_search_enabled) {
		return;
	}

	node = ha_search_with_data(table, fold, data);

	if (node) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
		if (table->adaptive) {
			ut_a(node->block->n_pointers > 0);
			node->block->n_pointers--;
			new_block->n_pointers++;
		}

		node->block = new_block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
		node->data = new_data;
	}
}

/*****************************************************************//**
Removes from the chain determined by fold all nodes whose data pointer
points to the page given. */
UNIV_INTERN
void
ha_remove_all_nodes_to_page(
/*========================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: fold value */
	const page_t*	page)	/*!< in: buffer page */
{
	ha_node_t*	node;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	ASSERT_HASH_MUTEX_OWN(table, fold);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(btr_search_enabled);

	node = ha_chain_get_first(table, fold);

	while (node) {
		if (page_align(ha_node_get_data(node)) == page) {

			/* Remove the hash node */

			ha_delete_hash_node(table, node);

			/* Start again from the first node in the chain
			because the deletion may compact the heap of
			nodes and move other nodes! */

			node = ha_chain_get_first(table, fold);
		} else {
			node = ha_chain_get_next(node);
		}
	}
#ifdef UNIV_DEBUG
	/* Check that all nodes really got deleted */

	node = ha_chain_get_first(table, fold);

	while (node) {
		ut_a(page_align(ha_node_get_data(node)) != page);

		node = ha_chain_get_next(node);
	}
#endif
}

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
UNIV_INTERN
ibool
ha_validate(
/*========*/
	hash_table_t*	table,		/*!< in: hash table */
	ulint		start_index,	/*!< in: start index */
	ulint		end_index)	/*!< in: end index */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	ibool		ok	= TRUE;
	ulint		i;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	ut_a(start_index <= end_index);
	ut_a(start_index < hash_get_n_cells(table));
	ut_a(end_index < hash_get_n_cells(table));

	for (i = start_index; i <= end_index; i++) {

		cell = hash_get_nth_cell(table, i);

		node = cell->node;

		while (node) {
			if (hash_calc_hash(node->fold, table) != i) {
				ut_print_timestamp(stderr);
				fprintf(stderr,
					"InnoDB: Error: hash table node"
					" fold value %lu does not\n"
					"InnoDB: match the cell number %lu.\n",
					(ulong) node->fold, (ulong) i);

				ok = FALSE;
			}

			node = node->next;
		}
	}

	return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */

/*************************************************************//**
Prints info of a hash table. */
UNIV_INTERN
void
ha_print_info(
/*==========*/
	FILE*		file,	/*!< in: file where to print */
	hash_table_t*	table)	/*!< in: hash table */
{
#ifdef UNIV_DEBUG
/* Some of the code here is disabled for performance reasons in production
builds, see http://bugs.mysql.com/36941 */
#define PRINT_USED_CELLS
#endif /* UNIV_DEBUG */

#ifdef PRINT_USED_CELLS
	hash_cell_t*	cell;
	ulint		cells	= 0;
	ulint		i;
#endif /* PRINT_USED_CELLS */
	ulint		n_bufs;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef PRINT_USED_CELLS
	for (i = 0; i < hash_get_n_cells(table); i++) {

		cell = hash_get_nth_cell(table, i);

		if (cell->node) {

			cells++;
		}
	}
#endif /* PRINT_USED_CELLS */

	fprintf(file, "Hash table size %lu",
		(ulong) hash_get_n_cells(table));

#ifdef PRINT_USED_CELLS
	fprintf(file, ", used cells %lu", (ulong) cells);
#endif /* PRINT_USED_CELLS */

	if (table->heaps == NULL && table->heap != NULL) {

		/* This calculation is intended for the adaptive hash
		index: how many buffer frames we have reserved? */

		n_bufs = UT_LIST_GET_LEN(table->heap->base) - 1;

		if (table->heap->free_block) {
			n_bufs++;
		}

		fprintf(file, ", node heap has %lu buffer(s)\n",
			(ulong) n_bufs);
	}
}
#endif /* !UNIV_HOTBACKUP */