mariadb/storage/innobase/fts/fts0opt.cc
Thirunarayanan Balathandayuthapani dfa7fe347f MDEV-28730 Remove internal parser usage from InnoDB fulltext
- Introduce a class FTSQueryRunner to handle queries for fulltext
internal tables. Basically it creates a query, prepares the
fulltext internal table for read or write process. Build a tuple
based on the given table and assign the FTS_CONFIG, FTS_COMMON_TABLES
and FTS_AUX_TABLE fields based on the given value. This class
also handles INSERT, DELETE, REPLACE, UPDATE and
execute the function for each record (record_executor).

FTSQueryRunner::create_query_thread(): Create a query thread to execute
the statement on internal FULLTEXT tables

FTSQueryRunner::build_tuple(): Build a tuple for the operation

FTSQueryRunner::build_clust_ref(): Build a clustered index reference
for clustered index lookup for the secondary index record

FTSQueryRunner::assign_config_fields(): Assign the tuple for the
FTS CONFIG internal table

FTSQueryRunner::assign_common_table_fields(): Assign the tuple for
FTS_DELETED, FTS_DELETED_CACHE, FTS_BEGIN_DELETED,
FTS_BEGIN_DELETED_CACHE common tables

FTSQueryRunner::assign_aux_table_fields(): Assign the tuple for
FTS_PREFIX_INDEX tables.

FTSQueryRunner::handle_error(): Handling error for DB_LOCK_WAIT,
retry the operation

FTSQueryRunner::open_table(): Open the table based on the fulltext
auxiliary table name and FTS common table name

FTSQueryRunner::prepare_for_write(): Lock the table for write
process by taking Intention Exclusive lock

FTSQueryRunner::prepare_for_read(): Lock the table for read
process by taking Intention Shared lock

FTSQueryRunner::write_record(): Insert the tuple into the given table

FTSQueryRunner::lock_or_sees_rec(): Lock the record in case of a
DELETE or SELECT_UPDATE operation. Fetch the correct version of the
record in case of a READ operation. It also does a clustered index
lookup when the search is on a secondary index

fts_cmp_rec_dtuple_prefix(): Compare the record with given tuple field
for tuple field length

FTSQueryRunner::record_executor(): Read the record of the given index and
do call the callback function for each record

FTSQueryRunner::build_update_config(): Build the update vector for
FULLTEXT CONFIG table

FTSQueryRunner::update_record(): Update the record with update vector
exist in FTSQueryRunner

Removed the fts_parse_sql(), fts_eval_sql(), fts_get_select_columns_str()
and fts_get_docs_clear().

Moved fts_get_table_id() & fts_get_table_name() from fts0sql.cc to
fts0fts.cc and deleted the file fts0sql.cc

Removed ins_graph, sel_graph from fts_index_cache_t

Changed the callback function default read function parameter for
each clustered index record to

bool fts_sql_callback(dict_index_t*, const rec_t *, const rec_offs*,
                      void *);

Following parameters are changed to default read function parameter:

fts_read_stopword()
fts_fetch_store_doc_id()
fts_query_expansion_fetch_doc()
fts_read_count()
fts_get_rows_count()
fts_init_doc_id()
fts_init_recover_doc()
read_fts_config()
fts_optimize_read_node()
fts_optimize_index_fetch_node()
fts_index_fetch_nodes()
fts_fetch_index_words()
fts_index_fetch_words()
fts_fetch_doc_ids()
fts_table_fetch_doc_ids()
fts_read_ulint()
fts_copy_doc_ids()
fts_optimize_create_deleted_doc_id_snapshot()
fts_query_index_fetch_nodes()
fts_query_fetch_document()
fts_query_index_fetch_nodes()

row_upd_clust_rec_low(): Updates a clustered
index record of a row when the ordering fields do not change.
Function doesn't have dependency on row_prebuilt_t. This can be
used by fulltext internal table update operation

Row_sel_get_clust_rec_for_mysql::operator(): Removed the
parameter row_prebuilt_t and caller does pass the prebuilt
related variables

Removed the parser usage and execute the query directly on
fulltext internal tables in the following function:

fts_read_stopword()
fts_fetch_store_doc_id()
fts_query_expansion_fetch_doc()
fts_read_count()
fts_get_rows_count()
fts_init_doc_id()
fts_init_recover_doc()
read_fts_config()
fts_optimize_read_node()
fts_optimize_index_fetch_node()
fts_index_fetch_nodes()
fts_fetch_index_words()
fts_index_fetch_words()
fts_fetch_doc_ids()
fts_table_fetch_doc_ids()
fts_read_ulint()
fts_copy_doc_ids()
fts_optimize_create_deleted_doc_id_snapshot()
fts_query_index_fetch_nodes()
fts_query_fetch_document()
fts_query_index_fetch_nodes()
i_s_fts_deleted_generic_fill()
i_s_fts_index_table_fill_selected()
2025-05-26 16:35:18 +05:30

2925 lines
81 KiB
C++

/*****************************************************************************
Copyright (c) 2007, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/******************************************************************//**
@file fts/fts0opt.cc
Full Text Search optimize thread
Created 2007/03/27 Sunny Bains
Completed 2011/7/10 Sunny and Jimmy Yang
***********************************************************************/
#include "fts0fts.h"
#include "row0sel.h"
#include "que0types.h"
#include "fts0priv.h"
#include "fts0types.h"
#include "ut0wqueue.h"
#include "srv0start.h"
#include "ut0list.h"
#include "zlib.h"
#include "fts0opt.h"
#include "fts0vlc.h"
#include "wsrep.h"
#ifdef WITH_WSREP
extern Atomic_relaxed<bool> wsrep_sst_disable_writes;
#else
constexpr bool wsrep_sst_disable_writes= false;
#endif
/** The FTS optimize thread's work queue. */
ib_wqueue_t* fts_optimize_wq;
static void fts_optimize_callback(void *);
static void timer_callback(void*);
static tpool::timer* timer;
static tpool::task_group task_group(1);
static tpool::task task(fts_optimize_callback,0, &task_group);
/** FTS optimize thread, for MDL acquisition */
static THD *fts_opt_thd;
/** The FTS vector to store fts_slot_t */
static ib_vector_t* fts_slots;
/** Default optimize interval in secs. */
static const ulint FTS_OPTIMIZE_INTERVAL_IN_SECS = 300;
/** Set when the server is shutting down, so that the optimize thread exits */
static bool fts_opt_start_shutdown = false;
/** Condition variable for shutting down the optimize thread.
Protected by fts_optimize_wq->mutex. */
static pthread_cond_t fts_opt_shutdown_cond;
/** Initial size of nodes in fts_word_t. */
static const ulint FTS_WORD_NODES_INIT_SIZE = 64;
/** Last time we did check whether system need a sync */
static time_t last_check_sync_time;
/** FTS optimize thread message types, posted to fts_optimize_wq. */
enum fts_msg_type_t {
	FTS_MSG_STOP,		/*!< Stop optimizing and exit thread */

	FTS_MSG_ADD_TABLE,	/*!< Add table to the optimize thread's
				work queue */

	FTS_MSG_DEL_TABLE,	/*!< Remove a table from the optimize
				thread's work queue */

	FTS_MSG_SYNC_TABLE	/*!< Sync fts cache of a table */
};
/** Compressed list of words that have been read from the FTS INDEX
that needs to be optimized. */
struct fts_zip_t {
	lint		status;		/*!< Status of (un)/zip operation */

	ulint		n_words;	/*!< Number of words compressed */

	ulint		block_sz;	/*!< Size of a block in bytes */

	ib_vector_t*	blocks;		/*!< Vector of compressed blocks */

	ib_alloc_t*	heap_alloc;	/*!< Heap to use for allocations */

	ulint		pos;		/*!< Offset into blocks */

	ulint		last_big_block;	/*!< Offset of last block in the
					blocks array that is of size
					block_sz. Blocks beyond this offset
					are of size FTS_MAX_WORD_LEN */

	z_streamp	zp;		/*!< ZLib state */

					/*!< The value of the last word read
					from the FTS INDEX table. This is
					used to discard duplicates */

	fts_string_t	word;		/*!< UTF-8 string */

	ulint		max_words;	/*!< maximum number of words to read
					in one pass */
};
/** Prepared statements used during optimize.
NOTE(review): after MDEV-28730 removed the internal SQL parser usage,
these que_t graphs may no longer be populated -- confirm whether this
struct is still needed. */
struct fts_optimize_graph_t {
	/*!< Delete a word from FTS INDEX */
	que_t*		delete_nodes_graph;

	/*!< Insert a word into FTS INDEX */
	que_t*		write_nodes_graph;

	/*!< COMMIT a transaction */
	que_t*		commit_graph;

	/*!< Read the nodes from FTS_INDEX */
	que_t*		read_nodes_graph;
};
/** Used by fts_optimize() to store state. */
struct fts_optimize_t {
	trx_t*		trx;		/*!< The transaction used for all SQL */

	ib_alloc_t*	self_heap;	/*!< Heap to use for allocations */

	char*		name_prefix;	/*!< FTS table name prefix */

	fts_table_t	fts_index_table;/*!< Common table definition */

					/*!< Common table definition */
	fts_table_t	fts_common_table;

	dict_table_t*	table;		/*!< Table that has to be queried */

	dict_index_t*	index;		/*!< The FTS index to be optimized */

	fts_doc_ids_t*	to_delete;	/*!< doc ids to delete, we check against
					this vector and purge the matching
					entries during the optimizing
					process. The vector entries are
					sorted on doc id */

	ulint		del_pos;	/*!< Offset within to_delete vector,
					this is used to keep track of where
					we are up to in the vector */

	ibool		done;		/*!< TRUE when optimize finishes */

	ib_vector_t*	words;		/*!< Word + Nodes read from FTS_INDEX,
					it contains instances of fts_word_t */

	fts_zip_t*	zip;		/*!< Words read from the FTS_INDEX */

	fts_optimize_graph_t		/*!< Prepared statements used during */
			graph;		/*optimize */

	ulint		n_completed;	/*!< Number of FTS indexes that have
					been optimized */

	ibool		del_list_regenerated;
					/*!< BEING_DELETED list regenerated */
};
/** Used by the optimize, to keep state during compacting nodes:
the read cursor into the source node's ilist. */
struct fts_encode_t {
	doc_id_t	src_last_doc_id;/*!< Last doc id read from src node */

	byte*		src_ilist_ptr;	/*!< Current ptr within src ilist */
};
/** We use this information to determine when to start the optimize
cycle for a table. One slot per table registered with the optimizer. */
struct fts_slot_t {
	/** table, or NULL if the slot is unused */
	dict_table_t*	table;

	/** whether this slot is being processed */
	bool		running;

	ulint		added;		/*!< Number of doc ids added since the
					last time this table was optimized */

	ulint		deleted;	/*!< Number of doc ids deleted since the
					last time this table was optimized */

	/** time(NULL) of completing fts_optimize_table_bk() */
	time_t		last_run;

	/** time(NULL) of latest successful fts_optimize_table() */
	time_t		completed;
};
/** A table remove message for the FTS optimize thread. The sender
waits on the condition variable until the message is consumed. */
struct fts_msg_del_t
{
	/** the table to remove */
	dict_table_t	*table;

	/** condition variable to signal message consumption */
	pthread_cond_t	*cond;
};
/** The FTS optimize message work queue message type. */
struct fts_msg_t {
	fts_msg_type_t	type;		/*!< Message type */

	void*		ptr;		/*!< The message contents */

	mem_heap_t*	heap;		/*!< The heap used to allocate this
					message, the message consumer will
					free the heap. */
};
/** The number of words to read and optimize in a single pass. */
ulong fts_num_word_optimize;
/** Whether to enable additional FTS diagnostic printout. */
char fts_enable_diag_print;
/** ZLib compressed block size.*/
static ulint FTS_ZIP_BLOCK_SIZE = 1024;
/** The amount of time optimizing in a single pass, in seconds. */
static ulint fts_optimize_time_limit;
/** It's defined in fts0fts.cc */
extern const char* fts_common_tables[];
/**********************************************************************//**
Reset a fts_zip_t instance so that it can be reused for a new
(de)compression run: clear counters, the duplicate-detection word
buffer, the block vector and the ZLib stream state. */
static
void
fts_zip_initialize(
/*===============*/
	fts_zip_t*	zip)	/*!< out: zip instance to initialize */
{
	zip->status = Z_OK;

	zip->n_words = 0;
	zip->pos = 0;
	zip->last_big_block = 0;

	/* Forget the last word read, used to discard duplicates. */
	zip->word.f_len = 0;
	zip->word.f_str[0] = 0;

	ib_vector_reset(zip->blocks);

	/* Wipe the ZLib stream state. */
	memset(zip->zp, 0, sizeof *zip->zp);
}
/**********************************************************************//**
Allocate and initialize a fts_zip_t instance on the given heap.
@return a new instance of fts_zip_t */
static
fts_zip_t*
fts_zip_create(
/*===========*/
	mem_heap_t*	heap,		/*!< in: heap */
	ulint		block_sz,	/*!< in: size of a zip block.*/
	ulint		max_words)	/*!< in: max words to read */
{
	fts_zip_t*	zip = static_cast<fts_zip_t*>(
		mem_heap_zalloc(heap, sizeof *zip));

	/* Buffer for the last word read, kept NUL-terminated. */
	zip->word.f_str = static_cast<byte*>(
		mem_heap_zalloc(heap, FTS_MAX_WORD_LEN + 1));

	zip->block_sz = block_sz;
	zip->max_words = max_words;

	zip->heap_alloc = ib_heap_allocator_create(heap);

	zip->blocks = ib_vector_create(zip->heap_alloc, sizeof(void*), 128);

	zip->zp = static_cast<z_stream*>(
		mem_heap_zalloc(heap, sizeof *zip->zp));

	return(zip);
}
/**********************************************************************//**
Prepare a fts_zip_t for a fresh ZLib stream: wipe the stream state
and forget the last word read. */
static
void
fts_zip_init(
/*=========*/
	fts_zip_t*	zip)	/*!< in: zip instance to init */
{
	memset(zip->zp, 0, sizeof *zip->zp);

	zip->word.f_len = 0;
	zip->word.f_str[0] = '\0';
}
/**********************************************************************//**
Create a fts_optimizer_word_t instance, copying the given UTF-8 string
into a private heap and creating an empty node vector.
@return new instance */
static
fts_word_t*
fts_word_init(
/*==========*/
	fts_word_t*	word,	/*!< in: word to initialize */
	byte*		utf8,	/*!< in: UTF-8 string */
	ulint		len)	/*!< in: length of string in bytes */
{
	mem_heap_t*	heap = mem_heap_create(sizeof(fts_node_t));

	memset(word, 0, sizeof *word);

	/* Copy the string, including a NUL terminator. */
	word->text.f_len = len;
	word->text.f_str = static_cast<byte*>(mem_heap_alloc(heap, len + 1));
	memcpy(word->text.f_str, utf8, len);
	word->text.f_str[len] = 0;

	word->heap_alloc = ib_heap_allocator_create(heap);

	word->nodes = ib_vector_create(
		word->heap_alloc, sizeof(fts_node_t),
		FTS_WORD_NODES_INIT_SIZE);

	return(word);
}
/** Read one FTS index node from an auxiliary table record and append
it to the word's node vector.
@param index   auxiliary table clustered index
@param word    word that owns the node being read
@param rec     auxiliary table record
@param offsets offsets to table record
@param heap    memory heap to store external (BLOB) ilist data
@return the node appended to word->nodes */
static fts_node_t *fts_optimize_read_node(dict_index_t *index, fts_word_t *word,
                                          const rec_t *rec,
                                          const rec_offs *offsets,
                                          mem_heap_t *heap)
{
  ut_ad(dict_index_is_clust(index));
  /* Start from field 1: field 0 (the word itself) has already been
  read by the caller */
  fts_node_t *node= static_cast<fts_node_t*>(ib_vector_push(word->nodes, NULL));
  ulint len;
  const byte *data= nullptr;
  /* Fields 2 and 3 are skipped below: presumably the DB_TRX_ID and
  DB_ROLL_PTR system columns of the clustered index -- confirm against
  the auxiliary table definition. */
  for (uint32_t i= 1; i < index->n_fields; i++)
  {
    switch(i) {
    case 1:
      /* FIRST_DOC_ID: 8-byte doc id */
      data= rec_get_nth_field(rec, offsets, i, &len);
      ut_ad(len == 8);
      node->first_doc_id= fts_read_doc_id(data);
      break;
    case 4:
      /* LAST_DOC_ID: 8-byte doc id */
      data= rec_get_nth_field(rec, offsets, i, &len);
      ut_ad(len == 8);
      node->last_doc_id= fts_read_doc_id(data);
      break;
    case 5:
      /* DOC_COUNT: 4-byte integer */
      data= rec_get_nth_field(rec, offsets, i, &len);
      ut_ad(len == 4);
      node->doc_count= mach_read_from_4(data);
      break;
    case 6:
      /* ILIST: may be stored externally; copy it into a buffer owned
      by the node. NOTE(review): the ut_malloc_nokey() result is not
      checked for NULL before memcpy(). */
      if (rec_offs_nth_extern(offsets, 6))
        data= btr_rec_copy_externally_stored_field(rec, offsets, 0, 6, &len,
                                                   heap);
      else data = rec_get_nth_field(rec, offsets, 6, &len);
      node->ilist_size_alloc= node->ilist_size= len;
      node->ilist = static_cast<byte*>(ut_malloc_nokey(len));
      memcpy(node->ilist, data, len);
    }
  }
  return node;
}
/** Callback invoked per auxiliary-table record during OPTIMIZE:
collect the node of the current record, grouping consecutive records
that share the same word into one fts_word_t.
@param index    clustered index of the auxiliary table
@param rec      current record
@param offsets  offsets within rec
@param user_arg fts_fetch_t* whose read_arg is the ib_vector_t of
                fts_word_t collected so far
@return true to continue the scan, false once the accumulated memory
reaches fts_result_cache_limit */
bool fts_optimize_index_fetch_node(dict_index_t *index, const rec_t *rec,
                                   const rec_offs *offsets, void *user_arg)
{
  ut_ad(dict_index_is_clust(index));
  fts_fetch_t *fetch = static_cast<fts_fetch_t*>(user_arg);
  ib_vector_t *words = static_cast<ib_vector_t*>(fetch->read_arg);
  bool is_word_init = false;
  fts_word_t *word;
  ulint len= 0;
  /* Word: field 0 of the record */
  const byte *data= rec_get_nth_field(rec, offsets, 0, &len);
  if (ib_vector_size(words) == 0)
  {
    /* Very first record: start the first word. */
    word = static_cast<fts_word_t*>(ib_vector_push(words, NULL));
    fts_word_init(word, (byte*) data, len);
    is_word_init = true;
  }
  /* If the record's word differs from the last collected word, start
  a new fts_word_t; otherwise the node is appended to the same word. */
  word = static_cast<fts_word_t*>(ib_vector_last(words));
  if (len != word->text.f_len ||
      memcmp(word->text.f_str, data, len))
  {
    word = static_cast<fts_word_t*>(ib_vector_push(words, NULL));
    fts_word_init(word, (byte*) data, len);
    is_word_init = true;
  }
  fts_node_t *node= fts_optimize_read_node(index, word, rec, offsets,
                                           fetch->heap);
  /* Account for the memory consumed, so the scan can stop before
  exceeding the result cache limit. */
  fetch->total_memory += node->ilist_size;
  if (is_word_init)
    fetch->total_memory += sizeof(fts_word_t)
      + sizeof(ib_alloc_t) + sizeof(ib_vector_t) + len
      + sizeof(fts_node_t) * FTS_WORD_NODES_INIT_SIZE;
  else if (ib_vector_size(words) > FTS_WORD_NODES_INIT_SIZE)
    fetch->total_memory += sizeof(fts_node_t);
  return fetch->total_memory < fts_result_cache_limit;
}
/** Read FTS index nodes for one word from an opened auxiliary table.
@param aux_table opened FTS auxiliary index table
@param word      word to look up; a trailing '%' switches the lookup
                 from prefix match to pattern match
                 (assumes word->f_len > 0 -- TODO confirm callers never
                 pass an empty string)
@param fetch     fetch descriptor; fetch->callback is invoked per record
@param sqlRunner query runner bound to the caller's transaction
@return DB_SUCCESS or error code; DB_RECORD_NOT_FOUND and
DB_END_OF_INDEX are mapped to DB_SUCCESS */
dberr_t fts_index_fetch_nodes_low(
  dict_table_t *aux_table, const fts_string_t *word, fts_fetch_t *fetch,
  FTSQueryRunner *sqlRunner)
{
  dberr_t err= sqlRunner->prepare_for_read(aux_table);
  if (err == DB_SUCCESS)
  {
    ut_ad(UT_LIST_GET_LEN(aux_table->indexes) == 1);
    dict_index_t *index= dict_table_get_first_index(aux_table);
    sqlRunner->build_tuple(index, 1, 1);
    ulint len= word->f_len;
    fts_match_key match_op= MATCH_PREFIX;
    /* A trailing '%' means LIKE-style pattern matching; strip it from
    the search key. */
    if (word->f_str[word->f_len - 1] == '%')
    {
      len-= 1;
      match_op= MATCH_PATTERN;
    }
    sqlRunner->assign_aux_table_fields(word->f_str, len);
    /* SELECT WORD, FIRST_DOC_ID, LAST_DOC_ID, DOC_COUNT, ILIST
    FROM FTS_AUX_TABLE WHERE WORD >= word */
    err= sqlRunner->record_executor(index, READ, match_op, PAGE_CUR_GE,
                                    fetch->callback, fetch);
    /* Running off the end of the index is a normal termination. */
    if (err == DB_RECORD_NOT_FOUND || err == DB_END_OF_INDEX)
      err= DB_SUCCESS;
  }
  return err;
}
/** Read FTS index nodes for a word: select the auxiliary index table
by the word's charset/prefix, scan it, and commit or roll back the
transaction. Retries indefinitely on DB_LOCK_WAIT_TIMEOUT.
@param trx       transaction used for the read
@param fts_table FTS auxiliary table descriptor (suffix is set here)
@param word      word to look up
@param fetch     fetch descriptor; fetch->heap is borrowed from the
                 query runner for the duration of the call
@return DB_SUCCESS or error code */
dberr_t fts_index_fetch_nodes(trx_t *trx, fts_table_t *fts_table,
                              const fts_string_t *word, fts_fetch_t *fetch)
{
  ulint selected= fts_select_index(fts_table->charset,
                                   word->f_str, word->f_len);
  fts_table->suffix = fts_get_suffix(selected);
  dberr_t err= DB_SUCCESS;
  FTSQueryRunner sqlRunner(trx);
  fetch->heap= sqlRunner.heap();
  dict_table_t *aux_table= sqlRunner.open_table(fts_table, &err);
  if (aux_table)
  {
retry:
    err= fts_index_fetch_nodes_low(aux_table, word, fetch, &sqlRunner);
    if (UNIV_LIKELY(err == DB_SUCCESS))
      fts_sql_commit(trx);
    else
    {
      fts_sql_rollback(trx);
      if (err == DB_LOCK_WAIT_TIMEOUT)
      {
        /* NOTE(review): unbounded retry loop; bails out only on a
        different error. */
        ib::warn() << "lock wait timeout reading FTS index. Retrying!";
        trx->error_state = DB_SUCCESS;
        goto retry;
      }
      else
        ib::error() << "(" << err << ") while reading FTS index.";
    }
    aux_table->release();
  }
  /* The heap belongs to sqlRunner, which is destroyed on return. */
  fetch->heap= nullptr;
  return err;
}
/**********************************************************************//**
Read (inflate) one word from the compressed word list in zip->blocks.
Each word was stored by fts_fetch_index_words() as a 16-bit
native-endian length prefix followed by the word bytes. Blocks are
freed as soon as they have been consumed; on Z_STREAM_END or error all
remaining blocks are freed.
@return pointer to word->f_str, or NULL at end of stream / on error */
static
byte*
fts_zip_read_word(
/*==============*/
	fts_zip_t*	zip,	/*!< in: Zip state + data */
	fts_string_t*	word)	/*!< out: uncompressed word */
{
	short		len = 0;
	void*		null = NULL;
	byte*		ptr = word->f_str;
	int		flush = Z_NO_FLUSH;

	/* Either there was an error or we are at the Z_STREAM_END. */
	if (zip->status != Z_OK) {
		return(NULL);
	}

	/* First inflate the 16-bit length prefix into 'len'; once it is
	known the output is redirected into the word buffer below. */
	zip->zp->next_out = reinterpret_cast<byte*>(&len);
	zip->zp->avail_out = sizeof(len);

	while (zip->status == Z_OK && zip->zp->avail_out > 0) {

		/* Finished decompressing block. */
		if (zip->zp->avail_in == 0) {

			/* Free the block that's been decompressed. */
			if (zip->pos > 0) {
				ulint	prev = zip->pos - 1;

				ut_a(zip->pos < ib_vector_size(zip->blocks));

				ut_free(ib_vector_getp(zip->blocks, prev));
				ib_vector_set(zip->blocks, prev, &null);
			}

			/* Any more blocks to decompress. */
			if (zip->pos < ib_vector_size(zip->blocks)) {

				zip->zp->next_in = static_cast<byte*>(
					ib_vector_getp(
						zip->blocks, zip->pos));

				/* Blocks past last_big_block are the
				smaller trailing blocks written by
				fts_zip_deflate_end(). */
				if (zip->pos > zip->last_big_block) {
					zip->zp->avail_in =
						FTS_MAX_WORD_LEN;
				} else {
					zip->zp->avail_in =
						static_cast<uInt>(zip->block_sz);
				}

				++zip->pos;
			} else {
				flush = Z_FINISH;
			}
		}

		switch (zip->status = inflate(zip->zp, flush)) {
		case Z_OK:
			/* Length prefix complete: now inflate the word
			bytes themselves into the caller's buffer. */
			if (zip->zp->avail_out == 0 && len > 0) {

				ut_a(len <= FTS_MAX_WORD_LEN);
				ptr[len] = 0;

				zip->zp->next_out = ptr;
				zip->zp->avail_out = uInt(len);

				word->f_len = ulint(len);
				len = 0;
			}
			break;

		case Z_BUF_ERROR:	/* No progress possible. */
		case Z_STREAM_END:
			inflateEnd(zip->zp);
			break;

		case Z_STREAM_ERROR:
		default:
			ut_error;
		}
	}

	/* All blocks must be freed at end of inflate. */
	if (zip->status != Z_OK) {
		for (ulint i = 0; i < ib_vector_size(zip->blocks); ++i) {
			if (ib_vector_getp(zip->blocks, i)) {
				ut_free(ib_vector_getp(zip->blocks, i));
				ib_vector_set(zip->blocks, i, &null);
			}
		}
	}

	if (ptr != NULL) {
		ut_ad(word->f_len == strlen((char*) ptr));
	}

	return(zip->status == Z_OK || zip->status == Z_STREAM_END ? ptr : NULL);
}
/** Callback function to fetch and compress the word in an FTS AUXILIARY
table. Each unique word is deflated into zip->blocks as a 16-bit
native-endian length prefix followed by the word bytes; consecutive
duplicate words are skipped.
@param index auxiliary index (clustered)
@param rec auxiliary index record
@param offsets offsets to the record
@param user_arg fts_zip_t* accumulating the compressed word list
@retval true if number of words is lesser than maximum words to be compressed
or else false */
static bool fts_fetch_index_words(dict_index_t *index, const rec_t *rec,
                                  const rec_offs *offsets, void *user_arg)
{
  ut_ad(dict_index_is_clust(index));
  fts_zip_t *zip = static_cast<fts_zip_t*>(user_arg);
  ulint rec_len;
  /* The word is field 0 of the record. */
  const byte *data= rec_get_nth_field(rec, offsets, 0, &rec_len);
  ut_a(rec_len <= FTS_MAX_WORD_LEN);
  uint16_t len= (uint16_t) rec_len;
  /* Skip the duplicate words. */
  if (zip->word.f_len == len && !memcmp(zip->word.f_str, data, len))
    return true;
  /* Remember this word for duplicate detection on the next call. */
  memcpy(zip->word.f_str, data, len);
  zip->word.f_len = len;
  ut_a(zip->zp->avail_in == 0);
  ut_a(zip->zp->next_in == NULL);
  /* The string is prefixed by len. */
  /* FIXME: This is not byte order agnostic (InnoDB data files
  with FULLTEXT INDEX are not portable between little-endian and
  big-endian systems!) */
  /* next_in points at the stack-local 'len'; it is fully consumed by
  the deflate loop below before this function returns. */
  zip->zp->next_in = reinterpret_cast<byte*>(&len);
  zip->zp->avail_in = sizeof(len);
  /* Compress the word, create output blocks as necessary. */
  while (zip->zp->avail_in > 0)
  {
    /* No space left in output buffer, create a new one. */
    if (zip->zp->avail_out == 0)
    {
      byte *block= static_cast<byte*>(ut_malloc_nokey(zip->block_sz));
      ib_vector_push(zip->blocks, &block);
      zip->zp->next_out = block;
      zip->zp->avail_out = static_cast<uInt>(zip->block_sz);
    }
    switch (zip->status = deflate(zip->zp, Z_NO_FLUSH)) {
    case Z_OK:
      /* Length prefix consumed: feed the word bytes next, and
      re-use len as a "done" flag. */
      if (zip->zp->avail_in == 0)
      {
        zip->zp->next_in = const_cast<byte*>(data);
        zip->zp->avail_in = uInt(len);
        ut_a(len <= FTS_MAX_WORD_LEN);
        len = 0;
      }
      continue;
    case Z_STREAM_END:
    case Z_BUF_ERROR:
    case Z_STREAM_ERROR:
    default: ut_error;
    }
  }
  /* All data should have been compressed. */
  ut_a(zip->zp->avail_in == 0);
  zip->zp->next_in = NULL;
  ++zip->n_words;
  return zip->n_words < zip->max_words;
}
/**********************************************************************//**
Finish the ZLib deflate stream: flush the remaining compressed data
into newly allocated small trailing blocks, then close and reset the
stream state. */
static
void
fts_zip_deflate_end(
/*================*/
	fts_zip_t*	zip)	/*!< in: instance that should be closed*/
{
	ut_a(zip->zp->avail_in == 0);
	ut_a(zip->zp->next_in == NULL);

	zip->status = deflate(zip->zp, Z_FINISH);

	ut_a(ib_vector_size(zip->blocks) > 0);
	zip->last_big_block = ib_vector_size(zip->blocks) - 1;

	/* The trailing flush output is small, so allocate blocks of
	FTS_MAX_WORD_LEN + 1 bytes instead of full block_sz blocks
	until Z_FINISH completes. */
	while (zip->status == Z_OK) {
		ut_a(zip->zp->avail_out == 0);

		byte*	block = static_cast<byte*>(
			ut_malloc_nokey(FTS_MAX_WORD_LEN + 1));

		ib_vector_push(zip->blocks, &block);

		zip->zp->next_out = block;
		zip->zp->avail_out = FTS_MAX_WORD_LEN;

		zip->status = deflate(zip->zp, Z_FINISH);
	}

	ut_a(zip->status == Z_STREAM_END);

	zip->status = deflateEnd(zip->zp);
	ut_a(zip->status == Z_OK);

	/* Reset the ZLib data structure. */
	memset(zip->zp, 0, sizeof(*zip->zp));
}
/** Read the words from the FTS INDEX.
Starting with the auxiliary index selected by the given word, scan each
remaining auxiliary table and deflate the words read into optim->zip
until n_words words have been collected.
@param optim optimize scratch pad; optim->zip is (re)used for output
@param word query word; the scan starts at word > :word
@param n_words max words to be read
@return DB_SUCCESS if all OK, DB_TABLE_NOT_FOUND if no more indexes
to search else error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t fts_index_fetch_words(fts_optimize_t *optim, const fts_string_t *word,
                              ulint n_words)
{
  mem_heap_t *heap= static_cast<mem_heap_t*>(optim->self_heap->arg);
  /* BUG FIX: must start out false -- the deflate stream is initialized
  lazily below, and starting with true meant deflateInit() was never
  called, so deflate() ran on an uninitialized stream. */
  bool inited= false;
  optim->trx->op_info = "fetching FTS index words";
  if (optim->zip == NULL)
    optim->zip = fts_zip_create(heap, FTS_ZIP_BLOCK_SIZE, n_words);
  else fts_zip_initialize(optim->zip);
  fts_zip_t *zip= optim->zip;
  dberr_t err= DB_SUCCESS;
  FTSQueryRunner sqlRunner(optim->trx);
  for (ulint selected=
       fts_select_index(optim->fts_index_table.charset, word->f_str,
                        word->f_len);
       selected < FTS_NUM_AUX_INDEX; selected++)
  {
    optim->fts_index_table.suffix= fts_get_suffix(selected);
    dict_table_t *aux_table= sqlRunner.open_table(&optim->fts_index_table,
                                                  &err);
    /* BUG FIX: do not fall through to release() on a NULL table
    (previously a guaranteed null dereference); stop with the error
    reported by open_table(). */
    if (!aux_table)
      break;
retry:
    err= sqlRunner.prepare_for_read(aux_table);
    if (err == DB_SUCCESS)
    {
      ut_ad(UT_LIST_GET_LEN(aux_table->indexes) == 1);
      dict_index_t *clust_index= dict_table_get_first_index(aux_table);
      sqlRunner.build_tuple(clust_index, 1, 1);
      sqlRunner.assign_aux_table_fields(word->f_str, word->f_len);
      int ret;
      if (!inited && ((ret= deflateInit(zip->zp, 9)) != Z_OK))
      {
        ib::error() << "ZLib deflateInit() failed: " << ret;
        err= DB_ERROR;
        /* BUG FIX: release the table before leaving the loop; the old
        break skipped the release entirely. */
        aux_table->release();
        break;
      }
      else
      {
        inited= true;
        /* Executes the following query
        SELECT word FROM FTS_AUX_TABLE WHERE word > :word */
        /* BUG FIX: pass zip itself, not &zip -- the callback
        fts_fetch_index_words() casts user_arg directly to fts_zip_t*,
        so &zip handed it a fts_zip_t**. */
        err= sqlRunner.record_executor(clust_index, READ, MATCH_ALL,
                                       PAGE_CUR_G, &fts_fetch_index_words,
                                       zip);
        /* Running off the end of the index is a normal termination,
        consistent with fts_index_fetch_nodes_low(). */
        if (err == DB_RECORD_NOT_FOUND || err == DB_END_OF_INDEX)
          err= DB_SUCCESS;
      }
    }
    if (UNIV_LIKELY(err == DB_SUCCESS));
    /* BUG FIX: test err; the old code tested a never-assigned local
    variable, so the lock-wait retry path was unreachable. */
    else if (err == DB_LOCK_WAIT_TIMEOUT)
    {
      ib::warn() << "Lock wait timeout reading document. Retrying!";
      /* We need to reset the ZLib state. */
      if (inited) deflateEnd(zip->zp);
      inited= false;
      fts_zip_init(zip);
      optim->trx->error_state = DB_SUCCESS;
      /* Retry the same auxiliary table again */
      goto retry;
    }
    else ib::error() << "(" << err << ") while reading document.";
    /* BUG FIX: release inside the loop only for a successfully opened
    table (was executed unconditionally). */
    aux_table->release();
    /* Check if max word to fetch is exceeded */
    if (zip->n_words >= n_words) break;
  }
  if (err == DB_SUCCESS && zip->status == Z_OK && zip->n_words > 0)
  {
    /* All data should have been read. */
    ut_a(zip->zp->avail_in == 0);
    fts_zip_deflate_end(zip);
  }
  /* BUG FIX: only end a stream that was actually initialized. */
  else if (inited) deflateEnd(zip->zp);
  return err;
}
/** Callback function to fetch one doc id from a common auxiliary
table record and append it to the caller's collection.
@param index    clustered index of the auxiliary table
@param rec      current record; the doc id is its first field
@param offsets  offsets within rec
@param user_arg fts_doc_ids_t* collecting the doc ids
@return always returns TRUE */
static bool fts_fetch_doc_ids(dict_index_t *index, const rec_t *rec,
                              const rec_offs *offsets, void *user_arg)
{
  ut_ad(dict_index_is_clust(index));
  fts_doc_ids_t *ids= static_cast<fts_doc_ids_t*>(user_arg);
  ulint field_len;
  /* The doc id is stored as an 8-byte integer in field 0. */
  const byte *field= rec_get_nth_field(rec, offsets, 0, &field_len);
  doc_id_t *slot=
    static_cast<doc_id_t*>(ib_vector_push(ids->doc_ids, NULL));
  *slot= mach_read_from_8(field);
  return true;
}
/** Read the rows from a FTS common auxiliary table and collect the
doc ids stored in its first column. If no transaction is supplied, a
private one is created, committed on success (rolled back on error)
and freed before returning.
@param trx transaction, or NULL to use a private one
@param fts_table Fulltext table information
@param doc_ids For collecting doc_ids; sorted on success
@return DB_SUCCESS or error code */
dberr_t fts_table_fetch_doc_ids(trx_t *trx, fts_table_t *fts_table,
                                fts_doc_ids_t *doc_ids)
{
  dberr_t err= DB_SUCCESS;
  bool alloc_trx= false;
  if (!trx)
  {
    trx= trx_create();
    alloc_trx= true;
  }
  FTSQueryRunner sqlRunner(trx);
  dict_table_t *fts_aux= sqlRunner.open_table(fts_table, &err);
  /* On open failure, err was set by open_table(). */
  if (!fts_aux) goto func_exit;
  err= sqlRunner.prepare_for_read(fts_aux);
  if (err == DB_SUCCESS)
  {
    ut_ad(UT_LIST_GET_LEN(fts_aux->indexes) == 1);
    dict_index_t *clust_index= dict_table_get_first_index(fts_aux);
    /* Full scan: SELECT doc_id FROM <common table> */
    err= sqlRunner.record_executor(clust_index, READ, MATCH_ALL,
                                   PAGE_CUR_GE, &fts_fetch_doc_ids, doc_ids);
  }
  /* Reaching the end of the index is the normal way the scan ends. */
  if (err == DB_END_OF_INDEX) err= DB_SUCCESS;
  if (alloc_trx)
  {
    if (err == DB_SUCCESS) fts_sql_commit(trx);
    else fts_sql_rollback(trx);
  }
  if (err == DB_SUCCESS) fts_doc_ids_sort(doc_ids->doc_ids);
func_exit:
  if (alloc_trx) { trx->free(); }
  if (fts_aux) fts_aux->release();
  return err;
}
/**********************************************************************//**
Do a binary search for a doc id in the array
@return +ve index if found -ve index where it should be inserted
if not found */
int
fts_bsearch(
/*========*/
	doc_id_t*	array,	/*!< in: array to sort */
	int		lower,	/*!< in: the array lower bound */
	int		upper,	/*!< in: the array upper bound */
	doc_id_t	doc_id)	/*!< in: the doc id to search for */
{
	const int	orig_size = upper;

	if (upper == 0) {
		/* Nothing to search */
		return(-1);
	}

	while (lower < upper) {
		const int	mid = (lower + upper) >> 1;

		if (doc_id > array[mid]) {
			lower = mid + 1;
		} else if (doc_id < array[mid]) {
			upper = mid - 1;
		} else {
			/* Found. */
			return(mid);
		}
	}

	/* The loop may stop one short; check the remaining candidate. */
	if (lower == upper && lower < orig_size) {
		if (doc_id == array[lower]) {
			return(lower);
		}
		if (lower == 0) {
			return(-1);
		}
	}

	/* Not found. */
	return(lower == 0 ? -1 : -lower);
}
/**********************************************************************//**
Search in the to delete array whether any of the doc ids within
the [first, last] range are to be deleted
@return +ve index if found -ve index where it should be inserted
if not found */
static
int
fts_optimize_lookup(
/*================*/
	ib_vector_t*	doc_ids,	/*!< in: array to search */
	ulint		lower,		/*!< in: lower limit of array */
	doc_id_t	first_doc_id,	/*!< in: doc id to lookup */
	doc_id_t	last_doc_id)	/*!< in: doc id to lookup */
{
	int		pos;
	int		upper = static_cast<int>(ib_vector_size(doc_ids));
	doc_id_t*	array = (doc_id_t*) doc_ids->data;

	pos = fts_bsearch(array, static_cast<int>(lower), upper, first_doc_id);

	ut_a(abs(pos) <= upper + 1);

	if (pos < 0) {
		/* fts_bsearch() returns the negated insertion point
		when first_doc_id itself is not in the array. */
		int	i = abs(pos);

		/* If i is 1, it could be first_doc_id is less than
		either the first or second array item, do a
		double check */
		if (i == 1 && array[0] <= last_doc_id
		    && first_doc_id < array[0]) {
			pos = 0;
		} else if (i < upper && array[i] <= last_doc_id) {

			/* Check if the "next" doc id is within the
			first & last doc id of the node. */
			pos = i;
		}
	}

	return(pos);
}
/**********************************************************************//**
Encode one document's word position list into the destination node:
append the delta-encoded doc id followed by the (already encoded)
position list copied from the source ilist. Grows node->ilist on
demand.
@return DB_SUCCESS or error code*/
static MY_ATTRIBUTE((nonnull))
dberr_t
fts_optimize_encode_node(
/*=====================*/
	fts_node_t*	node,		/*!< in: node to fill*/
	doc_id_t	doc_id,		/*!< in: doc id to encode */
	fts_encode_t*	enc)		/*!< in: encoding state.*/
{
	byte*		dst;
	ulint		enc_len;
	ulint		pos_enc_len;
	doc_id_t	doc_id_delta;
	dberr_t		error = DB_SUCCESS;
	const byte*	src = enc->src_ilist_ptr;

	if (node->first_doc_id == 0) {
		ut_a(node->last_doc_id == 0);

		node->first_doc_id = doc_id;
	}

	/* Calculate the space required to store the ilist. */
	ut_ad(doc_id > node->last_doc_id);
	doc_id_delta = doc_id - node->last_doc_id;
	enc_len = fts_get_encoded_len(static_cast<ulint>(doc_id_delta));

	/* Calculate the size of the encoded pos array: walk the
	variable-length-coded positions until the 0x00 terminator. */
	while (*src) {
		fts_decode_vlc(&src);
	}

	/* Skip the 0x00 byte at the end of the word positions list. */
	++src;

	/* Number of encoded pos bytes to copy. */
	pos_enc_len = ulint(src - enc->src_ilist_ptr);

	/* Total number of bytes required for copy. */
	enc_len += pos_enc_len;

	/* Check we have enough space in the destination buffer for
	copying the document word list. */
	if (!node->ilist) {
		ulint	new_size;

		ut_a(node->ilist_size == 0);

		new_size = enc_len > FTS_ILIST_MAX_SIZE
			? enc_len : FTS_ILIST_MAX_SIZE;

		/* NOTE(review): ut_malloc_nokey() results are not
		checked for NULL in this function. */
		node->ilist = static_cast<byte*>(ut_malloc_nokey(new_size));
		node->ilist_size_alloc = new_size;
	} else if ((node->ilist_size + enc_len) > node->ilist_size_alloc) {
		ulint	new_size = node->ilist_size + enc_len;
		byte*	ilist = static_cast<byte*>(ut_malloc_nokey(new_size));

		memcpy(ilist, node->ilist, node->ilist_size);

		ut_free(node->ilist);

		node->ilist = ilist;
		node->ilist_size_alloc = new_size;
	}

	src = enc->src_ilist_ptr;
	dst = node->ilist + node->ilist_size;

	/* Encode the doc id. Cast to ulint, the delta should be small and
	therefore no loss of precision. */
	dst = fts_encode_int(doc_id_delta, dst);

	/* Copy the encoded pos array. */
	memcpy(dst, src, pos_enc_len);

	node->last_doc_id = doc_id;

	/* Data copied upto here. */
	node->ilist_size += enc_len;
	enc->src_ilist_ptr += pos_enc_len;

	ut_a(node->ilist_size <= node->ilist_size_alloc);

	return(error);
}
/**********************************************************************//**
Optimize the data contained in a node: copy the delta-encoded doc ids and
their position lists from src_node into dst_node, skipping any doc ids
that appear in the (sorted) delete vector.
@return DB_SUCCESS or error code*/
static MY_ATTRIBUTE((nonnull))
dberr_t
fts_optimize_node(
/*==============*/
	ib_vector_t*	del_vec,	/*!< in: vector of doc ids to delete*/
	int*		del_pos,	/*!< in: offset into above vector */
	fts_node_t*	dst_node,	/*!< in: node to fill*/
	fts_node_t*	src_node,	/*!< in: source node for data*/
	fts_encode_t*	enc)		/*!< in: encoding state */
{
	ulint		copied;
	dberr_t		error = DB_SUCCESS;
	doc_id_t	doc_id = enc->src_last_doc_id;

	if (!enc->src_ilist_ptr) {
		/* First call for this source node: start decoding from
		the beginning of its ilist. */
		enc->src_ilist_ptr = src_node->ilist;
	}

	copied = ulint(enc->src_ilist_ptr - src_node->ilist);

	/* While there is data in the source node and space to copy
	into in the destination node. */
	while (copied < src_node->ilist_size
	       && dst_node->ilist_size < FTS_ILIST_MAX_SIZE) {

		doc_id_t	delta;
		doc_id_t	del_doc_id = FTS_NULL_DOC_ID;

		/* Doc ids are stored as variable-length-coded deltas
		from the previously decoded doc id. */
		delta = fts_decode_vlc(
			(const byte**)&enc->src_ilist_ptr);

test_again:
		/* Check whether the doc id is in the delete list, if
		so then we skip the entries but we need to track the
		delta for decoding the entries following this document's
		entries. */
		if (*del_pos >= 0 && *del_pos < (int) ib_vector_size(del_vec)) {
			doc_id_t*	update;

			update = (doc_id_t*) ib_vector_get(
				del_vec, ulint(*del_pos));

			del_doc_id = *update;
		}

		if (enc->src_ilist_ptr == src_node->ilist && doc_id == 0) {
			/* The very first delta in a node encodes the
			absolute first doc id of that node. */
			ut_a(delta == src_node->first_doc_id);
		}

		doc_id += delta;

		if (del_doc_id > 0 && doc_id == del_doc_id) {
			/* This doc id is deleted: consume (but do not
			copy) its word position list. */
			++*del_pos;

			/* Skip the entries for this document. */
			while (*enc->src_ilist_ptr) {
				fts_decode_vlc((const byte**)&enc->src_ilist_ptr);
			}

			/* Skip the end of word position marker. */
			++enc->src_ilist_ptr;
		} else {

			/* DOC ID already becomes larger than
			del_doc_id, check the next del_doc_id */
			if (del_doc_id > 0 && doc_id > del_doc_id) {
				del_doc_id = 0;
				++*del_pos;
				delta = 0;
				goto test_again;
			}

			/* Decode and copy the word positions into
			the dest node. */
			fts_optimize_encode_node(dst_node, doc_id, enc);

			++dst_node->doc_count;

			ut_a(dst_node->last_doc_id == doc_id);
		}

		/* Bytes copied so for from source. */
		copied = ulint(enc->src_ilist_ptr - src_node->ilist);
	}

	if (copied >= src_node->ilist_size) {
		/* The whole source node was consumed: the last doc id
		decoded must match the node's recorded last doc id. */
		ut_a(doc_id == src_node->last_doc_id);
	}

	/* Remember the last decoded doc id for the next call. */
	enc->src_last_doc_id = doc_id;

	return(error);
}
/**********************************************************************//**
Determine the starting pos within the deleted doc id vector for a word.
@return delete position, or -1 when there is nothing to delete */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
int
fts_optimize_deleted_pos(
/*=====================*/
	fts_optimize_t*	optim,	/*!< in: optimize state data */
	fts_word_t*	word)	/*!< in: the word data to check */
{
	ib_vector_t*	del_vec = optim->to_delete->doc_ids;

	if (ib_vector_size(del_vec) == 0) {
		/* Note that there is nothing to delete. */
		return(-1);
	}

	/* Use the first and last doc ids covered by this word's node
	list to narrow the range of deleted doc ids that must be
	searched, and to allow removal during the coalescing phase. */
	ulint		n_nodes = ib_vector_size(word->nodes);
	fts_node_t*	head = (fts_node_t*) ib_vector_get(word->nodes, 0);
	fts_node_t*	tail = (fts_node_t*) ib_vector_get(
		word->nodes, n_nodes - 1);

	ut_a(head->first_doc_id <= tail->last_doc_id);

	return(fts_optimize_lookup(
		del_vec, optim->del_pos,
		head->first_doc_id, tail->last_doc_id));
}
#define FTS_DEBUG_PRINT
/**********************************************************************//**
Compact the nodes for a word, we also remove any doc ids during the
compaction pass.
@return DB_SUCCESS or error code.*/
static
ib_vector_t*
fts_optimize_word(
/*==============*/
	fts_optimize_t*	optim,	/*!< in: optimize state data */
	fts_word_t*	word)	/*!< in: the word to optimize */
{
	fts_encode_t	enc;
	ib_vector_t*	nodes;
	ulint		i = 0;
	int		del_pos;
	fts_node_t*	dst_node = NULL;
	ib_vector_t*	del_vec = optim->to_delete->doc_ids;
	ulint		size = ib_vector_size(word->nodes);

	/* Position in the delete vector where doc ids relevant to this
	word start; -1 when nothing is to be deleted. */
	del_pos = fts_optimize_deleted_pos(optim, word);

	nodes = ib_vector_create(word->heap_alloc, sizeof(*dst_node), 128);

	/* Shared encode/decode state carried across source nodes. */
	enc.src_last_doc_id = 0;
	enc.src_ilist_ptr = NULL;

	while (i < size) {
		ulint		copied;
		fts_node_t*	src_node;

		src_node = (fts_node_t*) ib_vector_get(word->nodes, i);

		/* Start a new destination node when there is none, or
		when doc ids would go out of ascending order. */
		if (dst_node == NULL
		    || dst_node->last_doc_id > src_node->first_doc_id) {

			dst_node = static_cast<fts_node_t*>(
				ib_vector_push(nodes, NULL));
			memset(dst_node, 0, sizeof(*dst_node));
		}

		/* Copy from the src to the dst node. */
		fts_optimize_node(del_vec, &del_pos, dst_node, src_node, &enc);

		ut_a(enc.src_ilist_ptr != NULL);

		/* Determine the numer of bytes copied to dst_node. */
		copied = ulint(enc.src_ilist_ptr - src_node->ilist);

		/* Can't copy more than whats in the vlc array. */
		ut_a(copied <= src_node->ilist_size);

		/* We are done with this node release the resources. */
		if (copied == src_node->ilist_size) {

			enc.src_last_doc_id = 0;
			enc.src_ilist_ptr = NULL;

			ut_free(src_node->ilist);

			src_node->ilist = NULL;
			src_node->ilist_size = src_node->ilist_size_alloc = 0;

			src_node = NULL;

			++i; /* Get next source node to OPTIMIZE. */
		}

		/* Close the current destination node once it is full,
		or when all source nodes have been consumed. */
		if (dst_node->ilist_size >= FTS_ILIST_MAX_SIZE || i >= size) {

			dst_node = NULL;
		}
	}

	/* All dst nodes created should have been added to the vector. */
	ut_a(dst_node == NULL);

	/* Return the OPTIMIZED nodes. */
	return(nodes);
}
/** Update the FTS index table. This is a delete followed by an insert.
@param trx       transaction to update the aux tables
@param fts_table table of FTS index
@param word      word data to write
@param nodes     optimized nodes to write into the aux table
@return DB_SUCCESS or error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t fts_optimize_write_word(trx_t *trx, fts_table_t *fts_table,
                                fts_string_t *word, ib_vector_t *nodes)
{
  FTSQueryRunner sqlRunner(trx);
  /* The word determines which auxiliary index table it lives in. */
  ulint selected = fts_select_index(fts_table->charset, word->f_str,
                                    word->f_len);
  fts_table->suffix = fts_get_suffix(selected);
  dberr_t err= DB_SUCCESS;
  dict_table_t *aux_table= sqlRunner.open_table(fts_table, &err);
  if (!aux_table) goto free_node;
  err= sqlRunner.prepare_for_write(aux_table);
  if (err == DB_SUCCESS)
  {
    ut_ad(UT_LIST_GET_LEN(aux_table->indexes) == 1);
    dict_index_t *clust_index= dict_table_get_first_index(aux_table);
    sqlRunner.build_tuple(clust_index, 1, 1);
    sqlRunner.assign_aux_table_fields(word->f_str, word->f_len, nullptr);
    /* DELETE FROM FTS_AUX WHERE WORD= :word */
    err= sqlRunner.record_executor(clust_index, REMOVE, MATCH_UNIQUE);
  }

  /* The word may not exist in the index yet; that is not an error. */
  if (err == DB_RECORD_NOT_FOUND || err == DB_END_OF_INDEX)
    err= DB_SUCCESS;

  if (UNIV_UNLIKELY(err != DB_SUCCESS))
    ib::error() << "(" << err << ") during optimize,"
                   " when deleting a word from the FTS index.";

free_node:
  /* Even if the operation needs to be rolled back and redone,
  we iterate over the nodes in order to free the ilist. */
  for (ulint i = 0; i < ib_vector_size(nodes); ++i)
  {
    fts_node_t* node = (fts_node_t*) ib_vector_get(nodes, i);

    /* Skip empty node. */
    if (node->ilist == NULL)
    {
      ut_ad(node->ilist_size == 0);
      continue;
    }

    /* Only insert when the delete above succeeded AND the aux table
    could actually be opened; previously a nullptr aux_table could
    reach fts_write_node() if open_table() failed without setting
    err (defensive guard). */
    if (err == DB_SUCCESS && aux_table)
    {
      /* INSERT INTO FTS_AUX TABLE (word, first_doc_id, last_doc_id,
      doc_count, ilist) */
      err = fts_write_node(aux_table, word, node, &sqlRunner);
      if (UNIV_UNLIKELY(err != DB_SUCCESS))
        ib::error() << "(" << err << ") during optimize, while adding a"
                    << " word to the FTS index.";
    }

    ut_free(node->ilist);
    node->ilist = NULL;
    node->ilist_size = node->ilist_size_alloc = 0;
  }

  if (aux_table) aux_table->release();
  return err;
}
/**********************************************************************//**
Free an fts_word_t instance.*/
void
fts_word_free(
/*==========*/
	fts_word_t*	word)	/*!< in: instance to free.*/
{
	/* The word and everything hanging off it were allocated from
	this heap, so freeing the heap frees the word itself. */
	mem_heap_t*	word_heap = static_cast<mem_heap_t*>(
		word->heap_alloc->arg);

#ifdef UNIV_DEBUG
	/* Poison the instance to catch use-after-free in debug builds. */
	memset(word, 0, sizeof(*word));
#endif /* UNIV_DEBUG */

	mem_heap_free(word_heap);
}
/**********************************************************************//**
Optimize the word ilist and rewrite data to the FTS index.
@return status one of RESTART, EXIT, ERROR */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_compact(
/*=================*/
	fts_optimize_t*	optim,		/*!< in: optimize state data */
	dict_index_t*	index,		/*!< in: current FTS being optimized */
	time_t		start_time)	/*!< in: optimize start time */
{
	ulint	i;
	dberr_t	error = DB_SUCCESS;
	ulint	size = ib_vector_size(optim->words);

	for (i = 0; i < size && error == DB_SUCCESS && !optim->done; ++i) {

		fts_word_t*	word;
		ib_vector_t*	nodes;
		trx_t*		trx = optim->trx;

		word = (fts_word_t*) ib_vector_get(optim->words, i);

		/* nodes is allocated from the word heap and will be destroyed
		when the word is freed. We however have to be careful about
		the ilist, that needs to be freed explicitly. */
		nodes = fts_optimize_word(optim, word);

		/* Update the data on disk. */
		error = fts_optimize_write_word(
			trx, &optim->fts_index_table, &word->text, nodes);

		if (error == DB_SUCCESS) {
			/* Write the last word optimized to the config table,
			we use this value for restarting optimize. */
			error = fts_config_set_index_value(
				optim->trx, index,
				FTS_LAST_OPTIMIZED_WORD, &word->text);
		}

		/* Free the word that was optimized. */
		fts_word_free(word);

		/* Stop this pass once the configured time budget is
		exhausted; the lint() check guards against the clock
		moving backwards. */
		ulint interval = ulint(time(NULL) - start_time);

		if (fts_optimize_time_limit > 0
		    && (lint(interval) < 0
			|| interval > fts_optimize_time_limit)) {
			optim->done = TRUE;
		}
	}

	return(error);
}
/**********************************************************************//**
Create an instance of fts_optimize_t. Also create a new
background transaction.*/
static
fts_optimize_t*
fts_optimize_create(
/*================*/
	dict_table_t*	table)	/*!< in: table with FTS indexes */
{
	fts_optimize_t*	optim;
	mem_heap_t*	heap = mem_heap_create(128);

	/* optim and its sub-structures live on this heap; it is
	released in fts_optimize_free(). */
	optim = (fts_optimize_t*) mem_heap_zalloc(heap, sizeof(*optim));

	optim->self_heap = ib_heap_allocator_create(heap);

	optim->to_delete = fts_doc_ids_create();

	optim->words = ib_vector_create(
		optim->self_heap, sizeof(fts_word_t), 256);

	optim->table = table;

	optim->trx = trx_create();
	trx_start_internal(optim->trx);

	optim->fts_common_table.table_id = table->id;
	optim->fts_common_table.type = FTS_COMMON_TABLE;
	optim->fts_common_table.table = table;

	optim->fts_index_table.table_id = table->id;
	optim->fts_index_table.type = FTS_INDEX_TABLE;
	optim->fts_index_table.table = table;

	/* The common prefix for all this parent table's aux tables. */
	char	table_id[FTS_AUX_MIN_TABLE_ID_LENGTH];
	/* +1 to include the NUL terminator in the copied length. */
	const size_t table_id_len = 1
		+ size_t(fts_get_table_id(&optim->fts_common_table,
					  table_id));
	/* Protect table->name while it is being copied. */
	dict_sys.freeze(SRW_LOCK_CALL);
	/* Include the separator as well. */
	const size_t dbname_len = table->name.dblen() + 1;
	ut_ad(dbname_len > 1);
	/* Prefix layout: "<db>/FTS_<table_id>". */
	const size_t prefix_name_len = dbname_len + 4 + table_id_len;
	char* prefix_name = static_cast<char*>(
		ut_malloc_nokey(prefix_name_len));
	memcpy(prefix_name, table->name.m_name, dbname_len);
	dict_sys.unfreeze();
	memcpy(prefix_name + dbname_len, "FTS_", 4);
	memcpy(prefix_name + dbname_len + 4, table_id, table_id_len);
	optim->name_prefix = prefix_name;

	return(optim);
}
#ifdef FTS_OPTIMIZE_DEBUG
/**********************************************************************//**
Get optimize start time of an FTS index.
@return DB_SUCCESS if all OK else error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_get_index_start_time(
/*==============================*/
	trx_t*		trx,		/*!< in: transaction */
	dict_index_t*	index,		/*!< in: FTS index */
	time_t*		start_time)	/*!< out: time in secs */
{
	/* NOTE(review): the cast assumes sizeof(time_t) == sizeof(ulint);
	this is debug-only code, but verify on platforms with a 32-bit
	time_t. */
	return(fts_config_get_index_ulint(
		trx, index, FTS_OPTIMIZE_START_TIME,
		(ulint*) start_time));
}
/**********************************************************************//**
Set the optimize start time of an FTS index.
@return DB_SUCCESS if all OK else error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_set_index_start_time(
/*==============================*/
	trx_t*		trx,		/*!< in: transaction */
	dict_index_t*	index,		/*!< in: FTS index */
	time_t		start_time)	/*!< in: start time */
{
	/* Stored in the FTS config table as an unsigned integer. */
	return(fts_config_set_index_ulint(
		trx, index, FTS_OPTIMIZE_START_TIME,
		(ulint) start_time));
}
/**********************************************************************//**
Get optimize end time of an FTS index.
@return DB_SUCCESS if all OK else error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_get_index_end_time(
/*============================*/
	trx_t*		trx,		/*!< in: transaction */
	dict_index_t*	index,		/*!< in: FTS index */
	time_t*		end_time)	/*!< out: time in secs */
{
	/* NOTE(review): as with the start-time getter, the cast assumes
	sizeof(time_t) == sizeof(ulint). */
	return(fts_config_get_index_ulint(
		trx, index, FTS_OPTIMIZE_END_TIME, (ulint*) end_time));
}
/**********************************************************************//**
Set the optimize end time of an FTS index.
@return DB_SUCCESS if all OK else error code */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_set_index_end_time(
/*============================*/
	trx_t*		trx,		/*!< in: transaction */
	dict_index_t*	index,		/*!< in: FTS index */
	time_t		end_time)	/*!< in: end time */
{
	/* Stored in the FTS config table as an unsigned integer. */
	return(fts_config_set_index_ulint(
		trx, index, FTS_OPTIMIZE_END_TIME, (ulint) end_time));
}
#endif
/**********************************************************************//**
Free the optimize prepared statements.*/
static
void
fts_optimize_graph_free(
/*====================*/
	fts_optimize_graph_t*	graph)	/*!< in/out: The graph instances
					to free */
{
	/* Release each prepared query graph and clear the pointer, so
	that a second call becomes a no-op. */
	que_t**	graphs[] = {
		&graph->commit_graph,
		&graph->write_nodes_graph,
		&graph->delete_nodes_graph,
		&graph->read_nodes_graph
	};

	for (que_t** g : graphs) {
		if (*g) {
			que_graph_free(*g);
			*g = NULL;
		}
	}
}
/**********************************************************************//**
Free all optimize resources. */
static
void
fts_optimize_free(
/*==============*/
	fts_optimize_t*	optim)	/*!< in: table with on FTS index */
{
	mem_heap_t*	heap = static_cast<mem_heap_t*>(optim->self_heap->arg);

	/* Commit and release the background transaction first; the
	remaining members do not depend on it. */
	trx_commit_for_mysql(optim->trx);
	optim->trx->free();
	optim->trx = NULL;

	fts_doc_ids_free(optim->to_delete);
	fts_optimize_graph_free(&optim->graph);

	/* name_prefix was allocated with ut_malloc_nokey() in
	fts_optimize_create(). */
	ut_free(optim->name_prefix);

	/* This will free the heap from which optim itself was allocated. */
	mem_heap_free(heap);
}
/**********************************************************************//**
Get the max time optimize should run in millisecs.
@return max optimize time limit in millisecs. */
static
ulint
fts_optimize_get_time_limit(
/*========================*/
	trx_t*		trx,		/*!< in: transaction */
	fts_table_t*	fts_table)	/*!< in: aux table */
{
	ulint	secs = 0;

	/* The limit is stored in the FTS config table in seconds. */
	fts_config_get_ulint(
		trx, fts_table,
		FTS_OPTIMIZE_LIMIT_IN_SECS, &secs);

	/* FIXME: This is returning milliseconds, while the variable
	is being stored and interpreted as seconds! */
	return(secs * 1000);
}
/**********************************************************************//**
Run OPTIMIZE on the given table. Note: this can take a very long time
(hours). */
static
void
fts_optimize_words(
/*===============*/
	fts_optimize_t*	optim,	/*!< in: optimize instance */
	dict_index_t*	index,	/*!< in: current FTS being optimized */
	fts_string_t*	word)	/*!< in: the starting word to optimize */
{
	fts_fetch_t	fetch;
	CHARSET_INFO*	charset = optim->fts_index_table.charset;

	ut_a(!optim->done);

	/* Get the time limit from the config table. */
	fts_optimize_time_limit = fts_optimize_get_time_limit(
		optim->trx, &optim->fts_common_table);

	const time_t start_time = time(NULL);

	/* Cache of opened auxiliary index tables, keyed by selected
	aux index number, so that each table is opened at most once. */
	std::map<ulint, dict_table_t*> aux_tables;
	trx_t *trx = optim->trx;
	dict_table_t *aux_table= nullptr;
	FTSQueryRunner sqlRunner(trx);

	/* Setup the callback to use for fetching the word ilist etc. */
	fetch.read_arg = optim->words;
	fetch.callback = &fts_optimize_index_fetch_node;
	fetch.heap= sqlRunner.heap();

	while (!optim->done) {
		dberr_t	error = DB_SUCCESS;
		ulint	selected;

		ut_a(ib_vector_size(optim->words) == 0);

		/* Determine which aux index table holds this word. */
		selected = fts_select_index(charset, word->f_str, word->f_len);

		auto it = aux_tables.find(selected);
		if (it == aux_tables.end()) {
			optim->fts_index_table.suffix =
				fts_get_suffix(selected);
			aux_table =
				sqlRunner.open_table(&optim->fts_index_table,
						     &error);
			if (aux_table) {
				aux_tables[selected] = aux_table;
			}
		}
		else aux_table = it->second;

		if (error == DB_SUCCESS) {
			/* Read the index records to optimize. */
			fetch.total_memory = 0;
			error = fts_index_fetch_nodes_low(aux_table, word,
							  &fetch, &sqlRunner);
			ut_ad(fetch.total_memory < fts_result_cache_limit);
		}

		if (error == DB_SUCCESS) {
			/* There must be some nodes to read. */
			ut_a(ib_vector_size(optim->words) > 0);

			/* Optimize the nodes that were read and write
			back to DB. */
			error = fts_optimize_compact(optim, index, start_time);

			if (error == DB_SUCCESS) {
				fts_sql_commit(optim->trx);
			} else {
				fts_sql_rollback(optim->trx);
			}
		}

		ib_vector_reset(optim->words);

		if (error == DB_SUCCESS) {
			if (!optim->done) {
				/* Advance to the next word; stop when the
				zip buffer is exhausted. */
				if (!fts_zip_read_word(optim->zip, word)) {
					optim->done = TRUE;
				}
			}
		} else if (error == DB_LOCK_WAIT_TIMEOUT) {
			/* Transient error: clear the trx error state and
			retry the same word. */
			ib::warn() << "Lock wait timeout during optimize."
				" Retrying!";

			trx->error_state = DB_SUCCESS;
		} else if (error == DB_DEADLOCK) {
			ib::warn() << "Deadlock during optimize. Retrying!";

			trx->error_state = DB_SUCCESS;
		} else {
			optim->done = TRUE;	/* Exit the loop. */
		}
	}

	/* Release all aux tables */
	for (auto it : aux_tables) {
		it.second->release();
	}
}
/**********************************************************************//**
Optimize is complete. Set the completion time, and reset the optimize
start string for this FTS index to "".
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_index_completed(
/*=========================*/
	fts_optimize_t*	optim,	/*!< in: optimize instance */
	dict_index_t*	index)	/*!< in: table with one FTS index */
{
	fts_string_t	word;
	dberr_t		error;
	byte		buf[sizeof(ulint)];
#ifdef FTS_OPTIMIZE_DEBUG
	time_t		end_time = time(NULL);

	/* NOTE(review): this result is overwritten by the assignment
	below, so an end-time update failure is effectively ignored. */
	error = fts_optimize_set_index_end_time(optim->trx, index, end_time);
#endif

	/* If we've reached the end of the index then set the start
	word to the empty string. */

	word.f_len = 0;
	word.f_str = buf;
	*word.f_str = '\0';

	error = fts_config_set_index_value(
		optim->trx, index, FTS_LAST_OPTIMIZED_WORD, &word);

	if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
		ib::error() << "(" << error << ") while updating"
			" last optimized word!";
	}

	return(error);
}
/**********************************************************************//**
Read the list of words from the FTS auxiliary index that will be
optimized in this pass.
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_index_read_words(
/*==========================*/
	fts_optimize_t*	optim,	/*!< in: optimize instance */
	dict_index_t*	index,	/*!< in: table with one FTS index */
	fts_string_t*	word)	/*!< in: buffer to use */
{
	dberr_t	error = DB_SUCCESS;

	if (optim->del_list_regenerated) {
		/* A fresh delete-list snapshot was taken: restart the
		scan from the first word. */
		word->f_len = 0;
	} else {

		/* Get the last word that was optimized from
		the config table. */
		error = fts_config_get_index_value(
			optim->trx, index, FTS_LAST_OPTIMIZED_WORD, word);
	}

	/* If record not found then we start from the top. */
	if (error == DB_RECORD_NOT_FOUND) {
		word->f_len = 0;
		error = DB_SUCCESS;
	}

	while (error == DB_SUCCESS) {

		error = fts_index_fetch_words(
			optim, word, fts_num_word_optimize);

		if (error == DB_SUCCESS) {
			/* Reset the last optimized word to '' if no
			more words could be read from the FTS index. */
			if (optim->zip->n_words == 0) {
				word->f_len = 0;
				*word->f_str = 0;
			}

			break;
		}
	}

	return(error);
}
/**********************************************************************//**
Run OPTIMIZE on the given FTS index. Note: this can take a very long
time (hours).
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_index(
/*===============*/
	fts_optimize_t*	optim,	/*!< in: optimize instance */
	dict_index_t*	index)	/*!< in: table with one FTS index */
{
	fts_string_t	word;
	dberr_t		error;
	byte		str[FTS_MAX_WORD_LEN + 1];

	/* Set the current index that we have to optimize. */
	optim->fts_index_table.index_id = index->id;
	optim->fts_index_table.charset = fts_index_get_charset(index);

	optim->done = FALSE; /* Optimize until !done */

	/* We need to read the last word optimized so that we start from
	the next word. */
	word.f_str = str;

	/* We set the length of word to the size of str since we
	need to pass the max len info to the fts_get_config_value() function. */
	word.f_len = sizeof(str) - 1;

	memset(word.f_str, 0x0, word.f_len);

	/* Read the words that will be optimized in this pass. */
	error = fts_optimize_index_read_words(optim, index, &word);

	if (error == DB_SUCCESS) {
		int	zip_error;

		ut_a(optim->zip->pos == 0);
		ut_a(optim->zip->zp->total_in == 0);
		ut_a(optim->zip->zp->total_out == 0);

		/* The words to optimize are kept zlib-compressed in
		memory; prepare the inflate stream for reading them. */
		zip_error = inflateInit(optim->zip->zp);
		ut_a(zip_error == Z_OK);

		word.f_len = 0;
		word.f_str = str;

		/* Read the first word to optimize from the Zip buffer. */
		if (!fts_zip_read_word(optim->zip, &word)) {

			optim->done = TRUE;
		} else {
			fts_optimize_words(optim, index, &word);
		}

		/* If we couldn't read any records then optimize is
		complete. Increment the number of indexes that have
		been optimized and set FTS index optimize state to
		completed. */
		if (error == DB_SUCCESS && optim->zip->n_words == 0) {
			error = fts_optimize_index_completed(optim, index);

			if (error == DB_SUCCESS) {
				++optim->n_completed;
			}
		}
	}

	return(error);
}
/** Delete the document ids in the delete, and delete cache tables.
@param optim Optimize instance for fulltext index
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t fts_optimize_purge_deleted_doc_ids(fts_optimize_t *optim)
{
  dberr_t err= DB_SUCCESS;
  doc_id_t write_doc_id;
  FTSQueryRunner sqlRunner(optim->trx);
  /* fts_common_tables[3] == "DELETED" */
  optim->fts_common_table.suffix= fts_common_tables[3];
  dict_table_t *fts_deleted_cache= nullptr;
  dict_table_t *fts_deleted=
    sqlRunner.open_table(&optim->fts_common_table, &err);
  dict_index_t *deleted_cache_idx= nullptr;

  /* BUG FIX: check the handle for nullptr BEFORE dereferencing it;
  the previous code asserted on fts_deleted->indexes first. Also, the
  redundant pre-loop read of element 0 of the doc id vector was
  removed (it accessed the vector even when it could be empty). */
  if (fts_deleted == nullptr) goto func_exit;
  ut_ad(UT_LIST_GET_LEN(fts_deleted->indexes) == 1);

  err= sqlRunner.prepare_for_write(fts_deleted);
  if (err) goto func_exit;

  /* fts_common_tables[4] == "DELETED_CACHE" */
  optim->fts_common_table.suffix= fts_common_tables[4];
  fts_deleted_cache= sqlRunner.open_table(&optim->fts_common_table, &err);
  if (!fts_deleted_cache) goto func_exit;
  err= sqlRunner.prepare_for_write(fts_deleted_cache);
  if (err) goto func_exit;
  ut_ad(UT_LIST_GET_LEN(fts_deleted_cache->indexes) == 1);
  deleted_cache_idx= dict_table_get_first_index(fts_deleted_cache);
  sqlRunner.build_tuple(deleted_cache_idx, 1, 1);

  for (ulint i = 0; i < ib_vector_size(optim->to_delete->doc_ids); ++i)
  {
    doc_id_t *update= static_cast<doc_id_t*>(
      ib_vector_get(optim->to_delete->doc_ids, i));

    /* Convert to "storage" byte order. */
    fts_write_doc_id((byte*) &write_doc_id, *update);
    sqlRunner.assign_common_table_fields(&write_doc_id);

    /* DELETE FROM FTS_DELETED_CACHE WHERE DOC_ID= :doc_id */
    err= sqlRunner.record_executor(deleted_cache_idx, REMOVE);
    if (err == DB_RECORD_NOT_FOUND || err == DB_END_OF_INDEX)
      err= DB_SUCCESS;

    if (err == DB_SUCCESS)
    {
      /* DELETE FROM FTS_DELETED WHERE DOC_ID= :doc_id */
      err= sqlRunner.record_executor(dict_table_get_first_index(fts_deleted),
                                     REMOVE);
      if (err == DB_RECORD_NOT_FOUND || err == DB_END_OF_INDEX)
        err= DB_SUCCESS;
    }
    if (err) break;
  }

func_exit:
  if (fts_deleted_cache) fts_deleted_cache->release();
  if (fts_deleted) fts_deleted->release();
  return err;
}
/** Delete the document ids in the pending delete, and delete tables.
This destroys the BEING_DELETED / BEING_DELETED_CACHE snapshot taken
at the start of the optimize run.
@param optim optimize instance of fulltext index
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t fts_optimize_purge_deleted_doc_id_snapshot(fts_optimize_t *optim)
{
  FTSQueryRunner sqlRunner(optim->trx);
  dberr_t err= DB_SUCCESS;
  dict_table_t *fts_being_deleted_cache= nullptr;
  /* fts_common_tables[0] == "BEING_DELETED" */
  optim->fts_common_table.suffix = fts_common_tables[0];
  dict_table_t *fts_being_deleted=
    sqlRunner.open_table(&optim->fts_common_table, &err);
  if (!fts_being_deleted) goto func_exit;
  err= sqlRunner.prepare_for_write(fts_being_deleted);
  if (err) goto func_exit;
  ut_ad(UT_LIST_GET_LEN(fts_being_deleted->indexes) == 1);

  /* DELETE FROM FTS_BEING_DELETED */
  err= sqlRunner.record_executor(
    dict_table_get_first_index(fts_being_deleted), REMOVE, MATCH_ALL);

  if (err == DB_END_OF_INDEX || err == DB_SUCCESS)
  {
    /* BUG FIX: the cache snapshot lives in BEING_DELETED_CACHE
    (fts_common_tables[1]); the previous code opened
    fts_common_tables[4] (DELETED_CACHE) here, which would have
    wiped out pending deletes recorded after the snapshot was
    created (fts_optimize_create_deleted_doc_id_snapshot() writes
    the cache snapshot into fts_common_tables[1]). */
    optim->fts_common_table.suffix= fts_common_tables[1];
    fts_being_deleted_cache=
      sqlRunner.open_table(&optim->fts_common_table, &err);
    if (!fts_being_deleted_cache) goto func_exit;
    err= sqlRunner.prepare_for_write(fts_being_deleted_cache);
    if (err) goto func_exit;
    ut_ad(UT_LIST_GET_LEN(fts_being_deleted_cache->indexes) == 1);

    /* DELETE FROM FTS_BEING_DELETED_CACHE */
    err= sqlRunner.record_executor(
      dict_table_get_first_index(fts_being_deleted_cache), REMOVE,
      MATCH_ALL);
    if (err == DB_END_OF_INDEX || err == DB_SUCCESS)
      err= DB_SUCCESS;
  }
func_exit:
  if (fts_being_deleted_cache) fts_being_deleted_cache->release();
  if (fts_being_deleted) fts_being_deleted->release();
  return err;
}
/**********************************************************************//**
Get the number of doc ids in the BEING_DELETED snapshot table.
(The old header comment here was a copy-paste from the snapshot
creation routine.)
@return number of rows in the BEING_DELETED auxiliary table */
static
ulint
fts_optimize_being_deleted_count(
/*=============================*/
	fts_optimize_t*	optim)	/*!< in: optimize instance */
{
	fts_table_t	being_deleted;

	FTS_INIT_FTS_TABLE(&being_deleted, "BEING_DELETED", FTS_COMMON_TABLE,
			   optim->table);

	return(fts_get_rows_count(&being_deleted));
}
/* Read doc_id from the given record.
@param index    clustered index of the DELETED or DELETED_CACHE table
@param rec      record holding one doc id
@param offsets  record offsets
@param user_arg std::vector<doc_id_t>* collecting the document ids
@return true always, so that the scan continues */
static
bool fts_read_ulint(dict_index_t *index, const rec_t *rec,
                    const rec_offs* offsets, void *user_arg)
{
  ut_ad(dict_index_is_clust(index));
  std::vector<doc_id_t> &doc_ids=
    *static_cast<std::vector<doc_id_t>*>(user_arg);
  ulint len;
  /* The doc id is the first column of the record, stored in 8-byte
  big-endian "storage" order. */
  const byte *field= rec_get_nth_field(rec, offsets, 0, &len);
  doc_ids.push_back(mach_read_from_8(field));
  return true;
}
/** Copy the record from FROM to TO common table
@param from      DELETED or DELETED_CACHE table
@param to        BEING_DELETED or BEING_DELETED_CACHE table
@param sqlRunner executor for FTS internal query
@return error code or DB_SUCCESS */
static
dberr_t fts_copy_doc_ids(dict_table_t *from, dict_table_t *to,
                         FTSQueryRunner *sqlRunner)
{
  std::vector<doc_id_t> doc_ids;

  /* First collect every doc id stored in the source table. */
  dberr_t error= sqlRunner->prepare_for_read(from);
  if (error != DB_SUCCESS)
    return error;

  ut_ad(UT_LIST_GET_LEN(from->indexes) == 1);
  error= sqlRunner->record_executor(dict_table_get_first_index(from),
                                    READ, MATCH_ALL, PAGE_CUR_GE,
                                    &fts_read_ulint, &doc_ids);
  if (error != DB_SUCCESS)
    return error;

  /* Then insert the collected ids into the destination table. */
  error= sqlRunner->prepare_for_write(to);
  if (error != DB_SUCCESS)
    return error;

  sqlRunner->build_tuple(dict_table_get_first_index(to));
  for (doc_id_t doc_id : doc_ids)
  {
    sqlRunner->assign_common_table_fields(&doc_id);
    error= sqlRunner->write_record(to);
    if (error)
      break;
  }
  return error;
}
/** Copy the deleted doc ids that will be purged during this
optimize run to the being deleted FTS auxiliary tables. The transaction
is committed upon successfull copy and rolled back on DB_DUPLICATE_KEY error.
@param optim optimize table instance
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_create_deleted_doc_id_snapshot(fts_optimize_t *optim)
{
  /* BUG FIX: the previous code collected open_table() failures in a
  separate variable 'err' but tested 'error', so a failed open was
  silently ignored: a nullptr table could reach fts_copy_doc_ids()
  and the transaction was committed as if the copy had succeeded.
  A single error variable is now threaded through every step.
  NOTE(review): this assumes open_table() sets *err on failure —
  confirm against FTSQueryRunner::open_table(). */
  dberr_t error= DB_SUCCESS;
  FTSQueryRunner sqlRunner(optim->trx);
  dict_table_t *deleted= nullptr;
  dict_table_t *deleted_cache= nullptr;
  dict_table_t *being_deleted= nullptr;
  dict_table_t *being_deleted_cache= nullptr;

  /* Read all rows from DELETED table */
  optim->fts_common_table.suffix = fts_common_tables[3];
  deleted= sqlRunner.open_table(&optim->fts_common_table, &error);
  if (!deleted) goto err_exit;

  /* Write into BEING_DELETED table */
  optim->fts_common_table.suffix = fts_common_tables[0];
  being_deleted= sqlRunner.open_table(&optim->fts_common_table, &error);
  if (!being_deleted) goto err_exit;

  error= fts_copy_doc_ids(deleted, being_deleted, &sqlRunner);
  if (error) goto err_exit;

  /* Read all rows from DELETED_CACHE table */
  optim->fts_common_table.suffix = fts_common_tables[4];
  deleted_cache= sqlRunner.open_table(&optim->fts_common_table, &error);
  if (!deleted_cache) goto err_exit;

  /* Write these rows into BEING_DELETED_CACHE table */
  optim->fts_common_table.suffix = fts_common_tables[1];
  being_deleted_cache= sqlRunner.open_table(&optim->fts_common_table, &error);
  if (!being_deleted_cache) goto err_exit;

  error= fts_copy_doc_ids(deleted_cache, being_deleted_cache, &sqlRunner);

err_exit:
  if (error != DB_SUCCESS)
    fts_sql_rollback(optim->trx);
  else fts_sql_commit(optim->trx);
  optim->del_list_regenerated = TRUE;
  if (deleted) deleted->release();
  if (being_deleted) being_deleted->release();
  if (deleted_cache) deleted_cache->release();
  if (being_deleted_cache) being_deleted_cache->release();
  return error;
}
/*********************************************************************//**
Read in the document ids that are to be purged during optimize. The
transaction is committed upon successfully read.
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_read_deleted_doc_id_snapshot(
/*======================================*/
	fts_optimize_t*	optim)	/*!< in: optimize instance */
{
	/* Read the doc_ids to delete. */
	optim->fts_common_table.suffix = "BEING_DELETED";

	dberr_t	error = fts_table_fetch_doc_ids(
		optim->trx, &optim->fts_common_table, optim->to_delete);

	if (error == DB_SUCCESS) {
		/* Read additional doc_ids to delete. */
		optim->fts_common_table.suffix = "BEING_DELETED_CACHE";

		error = fts_table_fetch_doc_ids(
			optim->trx, &optim->fts_common_table,
			optim->to_delete);
	}

	if (error != DB_SUCCESS) {
		/* A partially read snapshot cannot be trusted:
		discard it. */
		fts_doc_ids_free(optim->to_delete);
		optim->to_delete = NULL;
	}

	return(error);
}
/*********************************************************************//**
Optimze all the FTS indexes, skipping those that have already been
optimized, since the FTS auxiliary indexes are not guaranteed to be
of the same cardinality.
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_indexes(
/*=================*/
	fts_optimize_t*	optim)	/*!< in: optimize instance */
{
	ulint		i;
	dberr_t		error = DB_SUCCESS;
	fts_t*		fts = optim->table->fts;

	/* Optimize the FTS indexes. */
	for (i = 0; i < ib_vector_size(fts->indexes); ++i) {
		dict_index_t*	index;

		/* BUG FIX: fetch the index before the debug block; the
		FTS_OPTIMIZE_DEBUG code below used to read 'index'
		before it was assigned (the assignment was at the
		bottom of the loop body). */
		index = static_cast<dict_index_t*>(
			ib_vector_getp(fts->indexes, i));

#ifdef FTS_OPTIMIZE_DEBUG
		time_t	end_time;
		time_t	start_time;

		/* Get the start and end optimize times for this index. */
		error = fts_optimize_get_index_start_time(
			optim->trx, index, &start_time);

		if (error != DB_SUCCESS) {
			break;
		}

		error = fts_optimize_get_index_end_time(
			optim->trx, index, &end_time);

		if (error != DB_SUCCESS) {
			break;
		}

		/* Start time will be 0 only for the first time or after
		completing the optimization of all FTS indexes. */
		if (start_time == 0) {
			start_time = time(NULL);

			error = fts_optimize_set_index_start_time(
				optim->trx, index, start_time);
		}

		/* Check if this index needs to be optimized or not. */
		if (difftime(end_time, start_time) < 0) {
			error = fts_optimize_index(optim, index);

			if (error != DB_SUCCESS) {
				break;
			}
		} else {
			++optim->n_completed;
		}
		/* NOTE(review): in debug builds fts_optimize_index() is
		invoked again below even when the block above already
		optimized this index — confirm whether that is intended. */
#endif

		error = fts_optimize_index(optim, index);
	}

	if (error == DB_SUCCESS) {
		fts_sql_commit(optim->trx);
	} else {
		fts_sql_rollback(optim->trx);
	}

	return(error);
}
/*********************************************************************//**
Cleanup the snapshot tables and the master deleted table.
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_purge_snapshot(
/*========================*/
	fts_optimize_t*	optim)	/*!< in: optimize instance */
{
	/* Delete the doc ids from the master deleted tables, that were
	in the snapshot that was taken at the start of optimize. */
	dberr_t	error = fts_optimize_purge_deleted_doc_ids(optim);

	if (error == DB_SUCCESS) {
		/* Destroy the deleted doc id snapshot. */
		error = fts_optimize_purge_deleted_doc_id_snapshot(optim);
	}

	/* Commit on success, otherwise undo the partial purge. */
	if (error != DB_SUCCESS) {
		fts_sql_rollback(optim->trx);
	} else {
		fts_sql_commit(optim->trx);
	}

	return(error);
}
/*********************************************************************//**
Reset the start time to 0 so that a new optimize can be started.
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_optimize_reset_start_time(
/*==========================*/
	fts_optimize_t*	optim)	/*!< in: optimize instance */
{
	dberr_t		error = DB_SUCCESS;
#ifdef FTS_OPTIMIZE_DEBUG
	fts_t*		fts = optim->table->fts;

	/* Optimization should have been completed for all indexes. */
	ut_a(optim->n_completed == ib_vector_size(fts->indexes));

	for (uint i = 0; i < ib_vector_size(fts->indexes); ++i) {
		/* Fetch the index before using it: previously an
		uninitialized pointer was passed to
		fts_optimize_set_index_start_time() because the index
		was assigned only after that call. */
		dict_index_t*	index = static_cast<dict_index_t*>(
			ib_vector_getp(fts->indexes, i));

		time_t	start_time = 0;

		/* Reset the start time to 0 for this index. */
		error = fts_optimize_set_index_start_time(
			optim->trx, index, start_time);
	}
#endif

	if (error == DB_SUCCESS) {
		fts_sql_commit(optim->trx);
	} else {
		fts_sql_rollback(optim->trx);
	}

	return(error);
}
/*********************************************************************//**
Run OPTIMIZE on the given table by a background thread.
@return DB_SUCCESS if all OK */
static MY_ATTRIBUTE((nonnull))
dberr_t
fts_optimize_table_bk(
/*==================*/
	fts_slot_t*	slot)	/*!< in: table to optimize */
{
	const time_t	now = time(NULL);
	const ulint	elapsed = ulint(now - slot->last_run);

	/* Skip tables that were optimized within the last interval.
	lint(elapsed) < 0 guards against the clock moving backwards. */
	if (slot->last_run > 0
	    && lint(elapsed) >= 0
	    && elapsed < FTS_OPTIMIZE_INTERVAL_IN_SECS) {
		return(DB_SUCCESS);
	}

	dict_table_t*	table = slot->table;

	/* Only optimize accessible tables whose cache has accumulated
	enough deleted documents to be worth the work. */
	if (!table->is_accessible()
	    || !table->fts || !table->fts->cache
	    || table->fts->cache->deleted < FTS_OPTIMIZE_THRESHOLD) {
		/* Note time this run completed. */
		slot->last_run = now;
		return(DB_SUCCESS);
	}

	dberr_t	err = fts_optimize_table(table);

	slot->last_run = time(NULL);

	if (err == DB_SUCCESS) {
		slot->running = false;
		slot->completed = slot->last_run;
	}

	return(err);
}
/*********************************************************************//**
Run OPTIMIZE on the given table.
@return DB_SUCCESS if all OK */
dberr_t
fts_optimize_table(
/*===============*/
	dict_table_t*	table)	/*!< in: table to optimize */
{
	/* Optimize rewrites the FTS auxiliary tables; refuse on a
	read-only server. */
	if (srv_read_only_mode) {
		return DB_READ_ONLY;
	}

	dberr_t		error = DB_SUCCESS;
	fts_optimize_t*	optim = NULL;
	fts_t*		fts = table->fts;

	if (UNIV_UNLIKELY(fts_enable_diag_print)) {
		ib::info() << "FTS start optimize " << table->name;
	}

	optim = fts_optimize_create(table);

	// FIXME: Call this only at the start of optimize, currently we
	// rely on DB_DUPLICATE_KEY to handle corrupting the snapshot.

	/* Check whether there are still records in BEING_DELETED table */
	if (fts_optimize_being_deleted_count(optim) == 0) {
		/* Take a snapshot of the deleted document ids, they are copied
		to the BEING_ tables. */
		error = fts_optimize_create_deleted_doc_id_snapshot(optim);
	}

	/* A duplicate error is OK, since we don't erase the
	doc ids from the being deleted state until all FTS
	indexes have been optimized. */
	if (error == DB_DUPLICATE_KEY) {
		error = DB_SUCCESS;
	}

	if (error == DB_SUCCESS) {
		/* These document ids will be filtered out during the
		index optimization phase. They are in the snapshot that we
		took above, at the start of the optimize. */
		error = fts_optimize_read_deleted_doc_id_snapshot(optim);

		if (error == DB_SUCCESS) {
			/* Commit the read of being deleted
			doc ids transaction. */
			fts_sql_commit(optim->trx);

			/* We would do optimization only if there
			are deleted records to be cleaned up */
			if (ib_vector_size(optim->to_delete->doc_ids) > 0) {
				error = fts_optimize_indexes(optim);
			}

		} else {
			/* NOTE(review): to_delete is presumably freed
			by the failed reader — confirm against
			fts_optimize_read_deleted_doc_id_snapshot(). */
			ut_a(optim->to_delete == NULL);
		}

		/* Only after all indexes have been optimized can we
		delete the (snapshot) doc ids in the pending delete,
		and master deleted tables. */
		if (error == DB_SUCCESS
		    && optim->n_completed == ib_vector_size(fts->indexes)) {

			if (UNIV_UNLIKELY(fts_enable_diag_print)) {
				ib::info() << "FTS_OPTIMIZE: Completed"
					" Optimize, cleanup DELETED table";
			}

			if (ib_vector_size(optim->to_delete->doc_ids) > 0) {

				/* Purge the doc ids that were in the
				snapshot from the snapshot tables and
				the master deleted table. */
				error = fts_optimize_purge_snapshot(optim);
			}

			if (error == DB_SUCCESS) {
				/* Reset the start time of all the FTS indexes
				so that optimize can be restarted. */
				error = fts_optimize_reset_start_time(optim);
			}
		}
	}

	fts_optimize_free(optim);

	if (UNIV_UNLIKELY(fts_enable_diag_print)) {
		ib::info() << "FTS end optimize " << table->name;
	}

	return(error);
}
/********************************************************************//**
Add the table to add to the OPTIMIZER's list.
@return new message instance */
static
fts_msg_t*
fts_optimize_create_msg(
/*====================*/
	fts_msg_type_t	type,	/*!< in: type of message */
	void*		ptr)	/*!< in: message payload */
{
	/* The heap is sized to also hold the work-queue list node
	and a small payload; it is released by the consumer via
	mem_heap_free(msg->heap). */
	mem_heap_t*	heap = mem_heap_create(
		sizeof(fts_msg_t) + sizeof(ib_list_node_t) + 16);

	fts_msg_t*	msg = static_cast<fts_msg_t*>(
		mem_heap_alloc(heap, sizeof *msg));

	msg->type = type;
	msg->ptr = ptr;
	msg->heap = heap;

	return(msg);
}
/** Add message to wqueue, signal thread pool.
The message heap is handed to the queue so that the consumer can free
message and queue node together. Caller holds fts_optimize_wq->mutex
where the protocol requires it (see callers). */
static void add_msg(fts_msg_t *msg)
{
  ib_wqueue_add(fts_optimize_wq, msg, msg->heap, true);
  /* Wake up fts_optimize_callback() to drain the queue. */
  srv_thread_pool->submit_task(&task);
}
/**
Called by "idle" timer. Submits optimize task, which
will only recalculate is_sync_needed, in case the queue is empty.
*/
static void timer_callback(void*)
{
  /* No message is queued here; the task itself decides what to do. */
  srv_thread_pool->submit_task(&task);
}
/** Add the table to add to the OPTIMIZER's list.
@param[in]	table	table to add */
void fts_optimize_add_table(dict_table_t* table)
{
	/* Nothing to do if the optimize subsystem was never started. */
	if (!fts_optimize_wq) {
		return;
	}

	/* Make sure table with FTS index cannot be evicted */
	dict_sys.prevent_eviction(table);

	fts_msg_t* const	msg
		= fts_optimize_create_msg(FTS_MSG_ADD_TABLE, table);

	/* Queueing the message and flagging in_queue must be atomic
	with respect to fts_optimize_remove_table(). */
	mysql_mutex_lock(&fts_optimize_wq->mutex);
	add_msg(msg);
	table->fts->in_queue = true;
	mysql_mutex_unlock(&fts_optimize_wq->mutex);
}
/**********************************************************************//**
Remove the table from the OPTIMIZER's list. We do wait for
acknowledgement from the consumer of the message. */
void
fts_optimize_remove_table(
/*======================*/
	dict_table_t*	table)	/*!< in: table to remove */
{
	/* The optimize subsystem was never started. */
	if (!fts_optimize_wq)
		return;

	if (fts_opt_start_shutdown)
	{
		/* The optimizer thread is exiting and will no longer
		consume FTS_MSG_DEL_TABLE; wait for the work queue to be
		torn down by fts_optimize_shutdown() instead. */
		ib::info() << "Try to remove table " << table->name
			   << " after FTS optimize thread exiting.";
		while (fts_optimize_wq)
			std::this_thread::sleep_for(std::chrono::milliseconds(10));
		return;
	}

	mysql_mutex_lock(&fts_optimize_wq->mutex);

	if (table->fts->in_queue)
	{
		fts_msg_t *msg= fts_optimize_create_msg(FTS_MSG_DEL_TABLE, nullptr);
		pthread_cond_t cond;
		pthread_cond_init(&cond, nullptr);
		/* The payload lives in the message heap so it is freed
		together with the message by the consumer. */
		msg->ptr= new(mem_heap_alloc(msg->heap, sizeof(fts_msg_del_t)))
			fts_msg_del_t{table, &cond};
		add_msg(msg);
		/* Block until fts_optimize_del_table() signals cond
		(under fts_optimize_wq->mutex, which my_cond_wait
		releases while waiting). */
		my_cond_wait(&cond, &fts_optimize_wq->mutex.m_mutex);
		pthread_cond_destroy(&cond);
		ut_ad(!table->fts->in_queue);
	}

	mysql_mutex_unlock(&fts_optimize_wq->mutex);
}
/** Send sync fts cache for the table.
@param[in]	table	table to sync */
void
fts_optimize_request_sync_table(
	dict_table_t*	table)
{
	/* if the optimize system not yet initialized, return */
	if (!fts_optimize_wq) {
		return;
	}

	mysql_mutex_lock(&fts_optimize_wq->mutex);

	/* FTS optimizer thread is already exited */
	if (fts_opt_start_shutdown) {
		ib::info() << "Try to sync table " << table->name
			   << " after FTS optimize thread exiting.";
	} else if (table->fts->sync_message) {
		/* If the table already has SYNC message in
		fts_optimize_wq queue then ignore it (deliberately
		empty branch: at most one pending sync per table). */
	} else {
		/* Flag must be set under the queue mutex together with
		the enqueue, so duplicates cannot slip in. */
		add_msg(fts_optimize_create_msg(FTS_MSG_SYNC_TABLE, table));
		table->fts->sync_message = true;
		DBUG_EXECUTE_IF("fts_optimize_wq_count_check",
				DBUG_ASSERT(fts_optimize_wq->length <= 1000););
	}

	mysql_mutex_unlock(&fts_optimize_wq->mutex);
}
/** Add a table to fts_slots if it doesn't already exist.
@return true if the table was added, false if already present */
static bool fts_optimize_new_table(dict_table_t* table)
{
	ut_ad(table);

	fts_slot_t*	vacant = NULL;

	/* Reject duplicates while remembering a vacant slot, if any. */
	for (ulint i = 0; i < ib_vector_size(fts_slots); ++i) {
		fts_slot_t*	slot = static_cast<fts_slot_t*>(
			ib_vector_get(fts_slots, i));

		if (slot->table == table) {
			/* Already exists in our optimize queue. */
			return false;
		}

		if (!slot->table) {
			vacant = slot;
		}
	}

	/* Reuse a vacant slot if one was found; otherwise grow the
	vector by one slot. */
	if (!vacant) {
		vacant = static_cast<fts_slot_t*>(
			ib_vector_push(fts_slots, NULL));
	}

	memset(vacant, 0x0, sizeof *vacant);
	vacant->table = table;

	return true;
}
/** Remove a table from fts_slots if it exists.
@param remove table to be removed from fts_slots
@return true if the table was found and its slot freed */
static bool fts_optimize_del_table(fts_msg_del_t *remove)
{
	const dict_table_t*	table = remove->table;

	ut_ad(table);

	for (ulint i = 0; i < ib_vector_size(fts_slots); ++i) {
		fts_slot_t*	slot = static_cast<fts_slot_t*>(
			ib_vector_get(fts_slots, i));

		if (slot->table != table) {
			continue;
		}

		if (UNIV_UNLIKELY(fts_enable_diag_print)) {
			ib::info() << "FTS Optimize Removing table "
				   << table->name;
		}

		/* Clear in_queue and wake the waiter in
		fts_optimize_remove_table() under the queue mutex. */
		mysql_mutex_lock(&fts_optimize_wq->mutex);
		table->fts->in_queue = false;
		pthread_cond_signal(remove->cond);
		mysql_mutex_unlock(&fts_optimize_wq->mutex);

		slot->table = NULL;
		return true;
	}

	/* Not found: still signal the waiter so it cannot hang. */
	mysql_mutex_lock(&fts_optimize_wq->mutex);
	pthread_cond_signal(remove->cond);
	mysql_mutex_unlock(&fts_optimize_wq->mutex);

	return false;
}
/**********************************************************************//**
Calculate how many tables in fts_slots need to be optimized.
@return no. of tables to optimize */
static ulint fts_optimize_how_many()
{
	const time_t	now = time(NULL);
	ulint		count = 0;

	for (ulint i = 0; i < ib_vector_size(fts_slots); ++i) {
		const fts_slot_t*	slot = static_cast<const fts_slot_t*>(
			ib_vector_get_const(fts_slots, i));

		if (!slot->table) {
			continue;
		}

		/* A running slot is measured from its start time,
		an idle one from its last completion. */
		const time_t	ref = slot->running
			? slot->last_run : slot->completed;
		const ulint	elapsed = ulint(now - ref);

		/* lint(elapsed) < 0 covers the clock moving backwards;
		count the slot in that case too. */
		if (lint(elapsed) < 0
		    || elapsed >= FTS_OPTIMIZE_INTERVAL_IN_SECS) {
			++count;
		}
	}

	return(count);
}
/**********************************************************************//**
Check if the total memory used by all FTS table exceeds the maximum limit.
@return true if a sync is needed, false otherwise */
static bool fts_is_sync_needed()
{
ulint total_memory = 0;
const time_t now = time(NULL);
double time_diff = difftime(now, last_check_sync_time);
if (fts_need_sync || (time_diff >= 0 && time_diff < 5)) {
return(false);
}
last_check_sync_time = now;
for (ulint i = 0; i < ib_vector_size(fts_slots); ++i) {
const fts_slot_t* slot = static_cast<const fts_slot_t*>(
ib_vector_get_const(fts_slots, i));
if (!slot->table) {
continue;
}
if (slot->table->fts && slot->table->fts->cache) {
total_memory += slot->table->fts->cache->total_size;
}
if (total_memory > fts_max_total_cache_size) {
return(true);
}
}
return(false);
}
/** Sync fts cache of a table
@param[in,out]	table		table to be synced
@param[in]	process_message	processing messages from fts_optimize_wq */
static void fts_optimize_sync_table(dict_table_t *table,
                                    bool process_message= false)
{
  MDL_ticket* mdl_ticket= nullptr;
  /* Acquire a shared MDL so the table cannot be dropped while we
  sync; returns nullptr when the table is no longer usable. */
  dict_table_t *sync_table= dict_acquire_mdl_shared<true>(table, fts_opt_thd,
                                                          &mdl_ticket);

  if (!sync_table)
    return;

  if (sync_table->fts && sync_table->fts->cache && sync_table->is_accessible())
  {
    fts_sync_table(sync_table, false);
    if (process_message)
    {
      /* Clear the pending-sync flag that
      fts_optimize_request_sync_table() set, under the queue mutex. */
      mysql_mutex_lock(&fts_optimize_wq->mutex);
      sync_table->fts->sync_message = false;
      mysql_mutex_unlock(&fts_optimize_wq->mutex);
    }
  }

  DBUG_EXECUTE_IF("ib_optimize_wq_hang",
                  std::this_thread::sleep_for(std::chrono::seconds(6)););

  /* A ticket is only issued when the MDL was actually acquired. */
  if (mdl_ticket)
    dict_table_close(sync_table, fts_opt_thd, mdl_ticket);
}
/**********************************************************************//**
Optimize all FTS tables. Runs as a thread-pool task: it drains
fts_optimize_wq messages and, when the queue is empty, round-robins
over fts_slots optimizing one table per invocation cycle.
@return Dummy return */
static void fts_optimize_callback(void *)
{
	ut_ad(!srv_read_only_mode);

	/* State persists across task submissions: position in
	fts_slots, shutdown flag, and pending-work counters. */
	static ulint	current;
	static bool	done;
	static ulint	n_optimize;

	if (!fts_optimize_wq || done) {
		/* Possibly timer initiated callback, can come after FTS_MSG_STOP.*/
		return;
	}

	static ulint	n_tables = ib_vector_size(fts_slots);

	while (!done && srv_shutdown_state <= SRV_SHUTDOWN_INITIATED) {
		/* If there is no message in the queue and we have tables
		to optimize then optimize the tables. */
		if (!done
		    && ib_wqueue_is_empty(fts_optimize_wq)
		    && n_tables > 0
		    && n_optimize > 0) {

			/* The queue is empty but we have tables
			to optimize. */
			if (UNIV_UNLIKELY(wsrep_sst_disable_writes)) {
retry_later:
				/* Postpone: re-arm the idle timer and
				leave; the timer resubmits this task. */
				if (fts_is_sync_needed()) {
					fts_need_sync = true;
				}
				if (n_tables) {
					timer->set_time(5000, 0);
				}
				return;
			}

			fts_slot_t* slot = static_cast<fts_slot_t*>(
				ib_vector_get(fts_slots, current));

			/* Handle the case of empty slots. */
			if (slot->table) {
				slot->running = true;
				fts_optimize_table_bk(slot);
			}

			/* Wrap around the counter. */
			if (++current >= ib_vector_size(fts_slots)) {
				n_optimize = fts_optimize_how_many();
				current = 0;
			}

		} else if (n_optimize == 0
			   || !ib_wqueue_is_empty(fts_optimize_wq)) {
			fts_msg_t* msg = static_cast<fts_msg_t*>
				(ib_wqueue_nowait(fts_optimize_wq));
			/* Timeout ? */
			if (!msg) {
				goto retry_later;
			}

			switch (msg->type) {
			case FTS_MSG_STOP:
				done = true;
				break;

			case FTS_MSG_ADD_TABLE:
				ut_a(!done);
				if (fts_optimize_new_table(
					    static_cast<dict_table_t*>(
						    msg->ptr))) {
					++n_tables;
				}
				break;

			case FTS_MSG_DEL_TABLE:
				if (fts_optimize_del_table(
					    static_cast<fts_msg_del_t*>(
						    msg->ptr))) {
					--n_tables;
				}
				break;

			case FTS_MSG_SYNC_TABLE:
				if (UNIV_UNLIKELY(wsrep_sst_disable_writes)) {
					/* Writes disabled (SST): requeue
					the message and retry later. */
					add_msg(msg);
					goto retry_later;
				}

				DBUG_EXECUTE_IF(
					"fts_instrument_msg_sync_sleep",
					std::this_thread::sleep_for(
						std::chrono::milliseconds(
							300)););

				fts_optimize_sync_table(
					static_cast<dict_table_t*>(msg->ptr),
					true);
				break;

			default:
				ut_error;
			}

			/* Frees the message and its queue node. */
			mem_heap_free(msg->heap);
			n_optimize = done ? 0 : fts_optimize_how_many();
		}
	}

	/* Server is being shutdown, sync the data from FTS cache to disk
	if needed */
	if (n_tables > 0) {
		for (ulint i = 0; i < ib_vector_size(fts_slots); i++) {
			fts_slot_t* slot = static_cast<fts_slot_t*>(
				ib_vector_get(fts_slots, i));

			if (slot->table) {
				fts_optimize_sync_table(slot->table);
			}
		}
	}

	ib_vector_free(fts_slots);
	mysql_mutex_lock(&fts_optimize_wq->mutex);
	/* fts_slots = NULL is the signal fts_optimize_shutdown()
	waits for on fts_opt_shutdown_cond. */
	fts_slots = NULL;
	pthread_cond_broadcast(&fts_opt_shutdown_cond);
	mysql_mutex_unlock(&fts_optimize_wq->mutex);

	ib::info() << "FTS optimize thread exiting.";
}
/**********************************************************************//**
Startup the optimize thread and create the work queue. */
void
fts_optimize_init(void)
/*===================*/
{
	mem_heap_t*	heap;
	ib_alloc_t*	heap_alloc;

	ut_ad(!srv_read_only_mode);

	/* For now we only support one optimize thread. */
	ut_a(!fts_optimize_wq);

	/* Create FTS optimize work queue */
	fts_optimize_wq = ib_wqueue_create();
	timer = srv_thread_pool->create_timer(timer_callback);

	/* Create FTS vector to store fts_slot_t */
	heap = mem_heap_create(sizeof(dict_table_t*) * 64);
	heap_alloc = ib_heap_allocator_create(heap);
	fts_slots = ib_vector_create(heap_alloc, sizeof(fts_slot_t), 4);

	fts_opt_thd = innobase_create_background_thd("InnoDB FTS optimizer");
	/* Add fts tables to fts_slots which could be skipped
	during dict_load_table_one() because fts_optimize_thread
	wasn't even started. */
	dict_sys.freeze(SRW_LOCK_CALL);
	for (dict_table_t* table = UT_LIST_GET_FIRST(dict_sys.table_LRU);
	     table != NULL;
	     table = UT_LIST_GET_NEXT(table_LRU, table)) {
		if (!table->fts || !dict_table_has_fts_index(table)) {
			continue;
		}

		/* fts_optimize_thread is not started yet. So there is no
		need to acquire fts_optimize_wq->mutex for adding the fts
		table to the fts slots. */
		ut_ad(!table->can_be_evicted);
		fts_optimize_new_table(table);
		table->fts->in_queue = true;
	}
	dict_sys.unfreeze();

	pthread_cond_init(&fts_opt_shutdown_cond, nullptr);
	last_check_sync_time = time(NULL);
}
/** Shutdown fts optimize thread. */
void
fts_optimize_shutdown()
{
	ut_ad(!srv_read_only_mode);

	/* If there is an ongoing activity on dictionary, such as
	srv_master_evict_from_table_cache(), wait for it */
	dict_sys.freeze(SRW_LOCK_CALL);
	mysql_mutex_lock(&fts_optimize_wq->mutex);
	/* Tells FTS optimizer system that we are exiting from
	optimizer thread, message send their after will not be
	processed */
	fts_opt_start_shutdown = true;
	dict_sys.unfreeze();

	/* We tell the OPTIMIZE thread to switch to state done, we
	can't delete the work queue here because the add thread needs
	deregister the FTS tables. */
	timer->disarm();
	task_group.cancel_pending(&task);

	add_msg(fts_optimize_create_msg(FTS_MSG_STOP, nullptr));

	/* fts_optimize_callback() sets fts_slots = NULL and broadcasts
	fts_opt_shutdown_cond once it has drained everything. */
	while (fts_slots) {
		my_cond_wait(&fts_opt_shutdown_cond,
			     &fts_optimize_wq->mutex.m_mutex);
	}

	destroy_background_thd(fts_opt_thd);
	fts_opt_thd = NULL;
	pthread_cond_destroy(&fts_opt_shutdown_cond);
	mysql_mutex_unlock(&fts_optimize_wq->mutex);

	/* Safe to free only after the callback has exited. */
	ib_wqueue_free(fts_optimize_wq);
	fts_optimize_wq = NULL;

	delete timer;
	timer = NULL;
}
/** Sync the table during commit phase
@param[in]	table	table to be synced */
void fts_sync_during_ddl(dict_table_t* table)
{
  /* Nothing to do if the optimize subsystem was never started. */
  if (!fts_optimize_wq)
    return;

  /* Read the pending-sync flag under the queue mutex. */
  mysql_mutex_lock(&fts_optimize_wq->mutex);
  const bool pending= table->fts->sync_message;
  mysql_mutex_unlock(&fts_optimize_wq->mutex);

  if (!pending)
    return;

  fts_sync_table(table, false);

  /* The sync is done on this thread; clear the flag so the queued
  FTS_MSG_SYNC_TABLE message becomes a no-op for this table. */
  mysql_mutex_lock(&fts_optimize_wq->mutex);
  table->fts->sync_message = false;
  mysql_mutex_unlock(&fts_optimize_wq->mutex);
}