Merge 10.4 into 10.5

Marko Mäkelä 2022-03-07 09:26:50 +02:00
commit 2dce3bad9c
21 changed files with 175 additions and 154 deletions

View file

@@ -131,6 +131,7 @@ connection default;
DROP DATABASE db1;
# Test 2: Primary index (implicit), should block writes.
CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
INSERT INTO t1 VALUES(1, 2);
SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
# Sending:
ALTER TABLE t1 ADD UNIQUE INDEX(a), LOCK=SHARED;
@@ -139,15 +140,16 @@ SET DEBUG_SYNC= "now WAIT_FOR manage";
USE test;
SELECT * FROM t1;
a b
1 2
# Sending:
UPDATE t1 SET a=NULL;
UPDATE t1 SET a=3;
connection con2;
# Waiting for SELECT to be blocked by the metadata lock on t1
SET DEBUG_SYNC= "now SIGNAL query";
connection default;
# Reaping: ALTER TABLE t1 ADD UNIQUE INDEX(a)
connection con1;
# Reaping: UPDATE t1 SET a=NULL
# Reaping: UPDATE t1 SET a=3
# Test 3: Primary index (explicit), should block writes.
connection default;
ALTER TABLE t1 DROP INDEX a;
@@ -158,15 +160,16 @@ connection con1;
SET DEBUG_SYNC= "now WAIT_FOR manage";
SELECT * FROM t1;
a b
3 2
# Sending:
UPDATE t1 SET a=NULL;
UPDATE t1 SET a=4;
connection con2;
# Waiting for SELECT to be blocked by the metadata lock on t1
SET DEBUG_SYNC= "now SIGNAL query";
connection default;
# Reaping: ALTER TABLE t1 ADD PRIMARY KEY (a)
connection con1;
# Reaping: UPDATE t1 SET a=NULL
# Reaping: UPDATE t1 SET a=4
# Test 4: Secondary unique index, should not block reads.
connection default;
SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
@@ -176,6 +179,7 @@ connection con1;
SET DEBUG_SYNC= "now WAIT_FOR manage";
SELECT * FROM t1;
a b
4 2
SET DEBUG_SYNC= "now SIGNAL query";
connection default;
# Reaping: ALTER TABLE t1 ADD UNIQUE (b)

View file

@@ -176,6 +176,7 @@ DROP DATABASE db1;
--echo # Test 2: Primary index (implicit), should block writes.
CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
INSERT INTO t1 VALUES(1, 2);
SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
--echo # Sending:
--send ALTER TABLE t1 ADD UNIQUE INDEX(a), LOCK=SHARED
@@ -185,13 +186,13 @@ SET DEBUG_SYNC= "now WAIT_FOR manage";
USE test;
SELECT * FROM t1;
--echo # Sending:
--send UPDATE t1 SET a=NULL
--send UPDATE t1 SET a=3
connection con2;
--echo # Waiting for SELECT to be blocked by the metadata lock on t1
let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
WHERE state= 'Waiting for table metadata lock'
AND info='UPDATE t1 SET a=NULL';
AND info='UPDATE t1 SET a=3';
--source include/wait_condition.inc
SET DEBUG_SYNC= "now SIGNAL query";
@@ -200,7 +201,7 @@ connection default;
--reap
connection con1;
--echo # Reaping: UPDATE t1 SET a=NULL
--echo # Reaping: UPDATE t1 SET a=3
--reap
--echo # Test 3: Primary index (explicit), should block writes.
@@ -215,13 +216,13 @@ connection con1;
SET DEBUG_SYNC= "now WAIT_FOR manage";
SELECT * FROM t1;
--echo # Sending:
--send UPDATE t1 SET a=NULL
--send UPDATE t1 SET a=4
connection con2;
--echo # Waiting for SELECT to be blocked by the metadata lock on t1
let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
WHERE state= 'Waiting for table metadata lock'
AND info='UPDATE t1 SET a=NULL';
AND info='UPDATE t1 SET a=4';
--source include/wait_condition.inc
SET DEBUG_SYNC= "now SIGNAL query";
@@ -230,7 +231,7 @@ connection default;
--reap
connection con1;
--echo # Reaping: UPDATE t1 SET a=NULL
--echo # Reaping: UPDATE t1 SET a=4
--reap
--echo # Test 4: Secondary unique index, should not block reads.

View file

@@ -1,3 +1,4 @@
source have_federatedx.inc;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;

View file

@@ -11,6 +11,7 @@
##############################################################################
GAL-501 : MDEV-24645 galera_3nodes.GAL-501 MTR failed: failed to open gcomm backend connection: 110
galera_2_cluster : MDEV-22195 temporarily disabled due to issues to be fixed with MDEV-22195
galera_gtid_2_cluster : MDEV-23775 Galera test failure on galera_3nodes.galera_gtid_2_cluster
galera_ist_gcache_rollover : MDEV-23578 WSREP: exception caused by message: {v=0,t=1,ut=255,o=4,s=0,sr=0,as=1,f=6,src=50524cfe,srcvid=view_id(REG,50524cfe,4),insvid=view_id(UNKNOWN,00000000,0),ru=00000000,r=[-1,-1],fs=75,nl=(}
galera_load_data_ist : MDEV-24639 galera_3nodes.galera_load_data_ist MTR failed with SIGABRT: query 'reap' failed: 2013: Lost connection to MySQL server during query

View file

@@ -462,12 +462,27 @@ INSERT INTO t1 SET a=0, i=REPEAT('1', 10000);
ROLLBACK;
set DEBUG_SYNC='now SIGNAL go';
connection default;
disconnect con1;
SELECT * FROM t1;
a b c d e f g h i
1 2 3 4 5 6 7 8 test
DROP TABLE t1;
SET DEBUG_SYNC=RESET;
#
# MDEV-27962 Instant DDL downgrades the MDL when table is empty
#
CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL)ENGINE=InnoDB;
SET DEBUG_SYNC="alter_table_inplace_after_lock_downgrade SIGNAL try_insert WAIT_FOR alter_progress";
ALTER TABLE t1 ADD INDEX(f1), ADD INDEX(f2);
connection con1;
SET SESSION lock_wait_timeout=1;
SET DEBUG_SYNC="now WAIT_FOR try_insert";
INSERT INTO t1 VALUES(1, 2);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SET DEBUG_SYNC="now SIGNAL alter_progress";
disconnect con1;
connection default;
DROP TABLE t1;
SET DEBUG_SYNC=reset;
# End of 10.4 tests
#
# MDEV-22867 Assertion instant.n_core_fields == n_core_fields

View file

@@ -25,3 +25,10 @@ f1
disconnect con1;
connection default;
drop table t1;
#
# MDEV-27993 Assertion failed in btr_page_reorganize_low()
#
CREATE TABLE t1(a INT PRIMARY KEY, b INT UNIQUE) ENGINE=InnoDB;
SET DEBUG_DBUG = '+d,do_page_reorganize';
INSERT INTO t1 VALUES(0,0);
DROP TABLE t1;

View file

@@ -533,11 +533,28 @@ set DEBUG_SYNC='now SIGNAL go';
connection default;
reap;
disconnect con1;
SELECT * FROM t1;
DROP TABLE t1;
SET DEBUG_SYNC=RESET;
--echo #
--echo # MDEV-27962 Instant DDL downgrades the MDL when table is empty
--echo #
CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL)ENGINE=InnoDB;
SET DEBUG_SYNC="alter_table_inplace_after_lock_downgrade SIGNAL try_insert WAIT_FOR alter_progress";
send ALTER TABLE t1 ADD INDEX(f1), ADD INDEX(f2);
connection con1;
SET SESSION lock_wait_timeout=1;
SET DEBUG_SYNC="now WAIT_FOR try_insert";
--error ER_LOCK_WAIT_TIMEOUT
INSERT INTO t1 VALUES(1, 2);
SET DEBUG_SYNC="now SIGNAL alter_progress";
disconnect con1;
connection default;
reap;
DROP TABLE t1;
SET DEBUG_SYNC=reset;
--echo # End of 10.4 tests
--echo #

View file

@@ -53,4 +53,12 @@ connection default;
drop table t1;
--echo #
--echo # MDEV-27993 Assertion failed in btr_page_reorganize_low()
--echo #
CREATE TABLE t1(a INT PRIMARY KEY, b INT UNIQUE) ENGINE=InnoDB;
SET DEBUG_DBUG = '+d,do_page_reorganize';
INSERT INTO t1 VALUES(0,0);
DROP TABLE t1;
--source include/wait_until_count_sessions.inc

View file

@@ -289,3 +289,32 @@ ENGINE=InnoDB;
ALTER TABLE t1 ADD c SERIAL;
DROP TABLE t1;
# End of 10.3 tests
#
# MDEV-27582 Fulltext DDL decrements the FTS_DOC_ID value
#
CREATE TABLE t1 (
f1 INT NOT NULL PRIMARY KEY,
f2 VARCHAR(64), FULLTEXT ft(f2)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
connect con1,localhost,root,,,;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
DELETE FROM t1 WHERE f1 = 2;
ALTER TABLE t1 DROP INDEX ft;
ALTER TABLE t1 ADD FULLTEXT INDEX ft (f2);
INSERT INTO t1 VALUES (3, 'innodb fts search');
SET GLOBAL innodb_optimize_fulltext_only=ON;
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
SET GLOBAL innodb_ft_aux_table = 'test/t1';
SELECT max(DOC_ID) FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE;
max(DOC_ID)
3
SELECT * FROM t1 WHERE MATCH(f2) AGAINST("+innodb +search" IN BOOLEAN MODE);
f1 f2
3 innodb fts search
DROP TABLE t1;
disconnect con1;
SET GLOBAL innodb_optimize_fulltext_only=OFF;
SET GLOBAL innodb_ft_aux_table = default;

View file

@@ -1 +1,2 @@
--enable-plugin-innodb-sys-tables
--innodb_ft_index_table

View file

@@ -357,3 +357,28 @@ ALTER TABLE t1 ADD c SERIAL;
DROP TABLE t1;
--echo # End of 10.3 tests
--echo #
--echo # MDEV-27582 Fulltext DDL decrements the FTS_DOC_ID value
--echo #
CREATE TABLE t1 (
f1 INT NOT NULL PRIMARY KEY,
f2 VARCHAR(64), FULLTEXT ft(f2)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
connect(con1,localhost,root,,,);
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
DELETE FROM t1 WHERE f1 = 2;
ALTER TABLE t1 DROP INDEX ft;
ALTER TABLE t1 ADD FULLTEXT INDEX ft (f2);
INSERT INTO t1 VALUES (3, 'innodb fts search');
SET GLOBAL innodb_optimize_fulltext_only=ON;
OPTIMIZE TABLE t1;
SET GLOBAL innodb_ft_aux_table = 'test/t1';
SELECT max(DOC_ID) FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE;
SELECT * FROM t1 WHERE MATCH(f2) AGAINST("+innodb +search" IN BOOLEAN MODE);
DROP TABLE t1;
disconnect con1;
SET GLOBAL innodb_optimize_fulltext_only=OFF;
SET GLOBAL innodb_ft_aux_table = default;

View file

@@ -512,10 +512,11 @@ then
args="$args --user=$user"
fi
if test -n "$group"
then
args="$args --group=$group"
fi
#To be enabled if/when we enable --group as an option to mysqld
#if test -n "$group"
#then
# args="$args --group=$group"
#fi
if test -f "$ldata/mysql/user.frm"
then

View file

@@ -703,6 +703,7 @@ then
if test "$user" != "root" -o $SET_USER = 1
then
USER_OPTION="--user=$user"
# To be used if/when we enable --system-group as an option to mysqld
GROUP_OPTION="--group=$group"
fi
if test -n "$open_files"

View file

@@ -3484,6 +3484,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
thd->alloc(f_args.arg_count*sizeof(Item_result))))
{
err_exit:
free_udf(u_d);
DBUG_RETURN(TRUE);
}
@@ -3523,7 +3524,8 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
func->used_tables_and_const_cache_join(item);
f_args.arg_type[i]=item->result_type();
}
if (!(buffers=new (thd->mem_root) String[arg_count]) ||
buffers=new (thd->mem_root) String[arg_count];
if (!buffers ||
!multi_alloc_root(thd->mem_root,
&f_args.args, arg_count * sizeof(char *),
&f_args.lengths, arg_count * sizeof(long),
@@ -3532,10 +3534,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
&f_args.attributes, arg_count * sizeof(char *),
&f_args.attribute_lengths, arg_count * sizeof(long),
NullS))
{
free_udf(u_d);
DBUG_RETURN(TRUE);
}
goto err_exit;
}
if (func->fix_length_and_dec())
DBUG_RETURN(TRUE);
@@ -3603,8 +3602,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
{
my_error(ER_CANT_INITIALIZE_UDF, MYF(0),
u_d->name.str, init_msg_buff);
free_udf(u_d);
DBUG_RETURN(TRUE);
goto err_exit;
}
func->max_length=MY_MIN(initid.max_length,MAX_BLOB_WIDTH);
func->maybe_null=initid.maybe_null;
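
The item_func.cc hunks above collapse the duplicated free_udf()/DBUG_RETURN(TRUE) failure paths in udf_handler::fix_fields() into a single err_exit label, so the release is written exactly once. A minimal standalone sketch of that one-cleanup-label pattern, with names invented here rather than taken from the server source:

    #include <cstdio>

    struct Resource { const char *name; };

    static Resource *acquire(const char *name) { return new Resource{name}; }
    static void release(Resource *r) { delete r; }   // stand-in for free_udf()

    // Returns true on failure, mirroring the convention in the diff.
    static bool init_with_single_cleanup(bool fail_early, bool fail_late)
    {
      Resource *r= acquire("udf");
      if (fail_early)
        goto err_exit;                    // every failure path funnels here
      if (fail_late)
        goto err_exit;
      release(r);                         // the real code keeps the UDF on success;
      return false;                       // released here only to keep the sketch leak-free
    err_exit:
      release(r);                         // cleanup code written exactly once
      return true;
    }

    int main()
    {
      std::printf("%d %d %d\n",
                  init_with_single_cleanup(false, false),
                  init_with_single_cleanup(true,  false),
                  init_with_single_cleanup(false, true));
      return 0;
    }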

View file

@@ -8012,16 +8012,15 @@ static bool mysql_inplace_alter_table(THD *thd,
lock for prepare phase under LOCK TABLES in the same way as when
exclusive lock is required for duration of the whole statement.
*/
if (!ha_alter_info->mdl_exclusive_after_prepare &&
(inplace_supported == HA_ALTER_INPLACE_EXCLUSIVE_LOCK ||
((inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK ||
if (inplace_supported == HA_ALTER_INPLACE_EXCLUSIVE_LOCK ||
((inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK ||
inplace_supported == HA_ALTER_INPLACE_COPY_LOCK ||
inplace_supported == HA_ALTER_INPLACE_NOCOPY_NO_LOCK ||
inplace_supported == HA_ALTER_INPLACE_NOCOPY_LOCK ||
inplace_supported == HA_ALTER_INPLACE_INSTANT) &&
(thd->locked_tables_mode == LTM_LOCK_TABLES ||
thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)) ||
alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE))
alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE)
{
if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
goto cleanup;
@@ -8116,7 +8115,8 @@ static bool mysql_inplace_alter_table(THD *thd,
necessary only for prepare phase (unless we are not under LOCK TABLES) and
user has not explicitly requested exclusive lock.
*/
if ((inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK ||
if (!ha_alter_info->mdl_exclusive_after_prepare &&
(inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK ||
inplace_supported == HA_ALTER_INPLACE_COPY_LOCK ||
inplace_supported == HA_ALTER_INPLACE_NOCOPY_LOCK ||
inplace_supported == HA_ALTER_INPLACE_NOCOPY_NO_LOCK) &&
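
In the two sql_table.cc hunks above, the !ha_alter_info->mdl_exclusive_after_prepare test moves from the pre-prepare lock-upgrade condition to the post-prepare downgrade condition: a handler that asks to keep the MDL exclusive after the prepare phase (as instant ALTER on an empty table does per MDEV-27962) still gets the exclusive lock, but it is no longer downgraded afterwards, so concurrent DML stays blocked, which is what the new instant_alter_debug test expects. A hypothetical, much-simplified model of that decision, not the server's actual code:

    #include <cassert>

    // Should the MDL be downgraded to allow concurrent access after the
    // prepare phase of an in-place ALTER TABLE? (invented helper)
    enum class InplaceLock { EXCLUSIVE, SHARED, NONE };

    static bool may_downgrade_after_prepare(InplaceLock supported,
                                            bool exclusive_after_prepare,
                                            bool user_requested_exclusive)
    {
      if (exclusive_after_prepare)        // engine asked to keep MDL_EXCLUSIVE (MDEV-27962)
        return false;
      if (user_requested_exclusive)       // ALTER TABLE ... LOCK=EXCLUSIVE
        return false;
      return supported != InplaceLock::EXCLUSIVE;  // shared/no-lock algorithms may downgrade
    }

    int main()
    {
      assert(!may_downgrade_after_prepare(InplaceLock::NONE, true,  false));
      assert( may_downgrade_after_prepare(InplaceLock::NONE, false, false));
      assert(!may_downgrade_after_prepare(InplaceLock::EXCLUSIVE, false, false));
      return 0;
    }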

View file

@@ -3,7 +3,7 @@
Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2015, 2021, MariaDB Corporation.
Copyright (c) 2015, 2022, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -3520,6 +3520,7 @@ fail_err:
<< ib::hex(thr ? thr->graph->trx->id : 0)
<< ' ' << rec_printer(entry).str());
DBUG_EXECUTE_IF("do_page_reorganize",
if (n_recs)
btr_page_reorganize(page_cursor, index, mtr););
/* Now, try the insert */
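
The added if (n_recs) guard makes the do_page_reorganize debug injection call btr_page_reorganize() only once the page already holds records; reorganizing a still-empty page is what tripped the MDEV-27993 assertion exercised by the new test above. A rough standalone model of a keyword-gated injection point, using a simplified stand-in for the real DBUG_EXECUTE_IF machinery (the macro and names below are invented for illustration):

    #include <cstdio>
    #include <cstring>

    // Simplified stand-in for DBUG_EXECUTE_IF(keyword, code): the injected code
    // runs only when the keyword was enabled, e.g. via
    //   SET DEBUG_DBUG = '+d,do_page_reorganize';
    static const char *g_enabled_keyword= "do_page_reorganize";

    #define EXECUTE_IF(keyword, code) \
      do { if (g_enabled_keyword && !std::strcmp(g_enabled_keyword, keyword)) { code; } } while (0)

    static void page_reorganize() { std::puts("reorganize"); }

    static void insert_record(unsigned n_recs)
    {
      // Guarding the injection with n_recs mirrors the fix: reorganizing an
      // empty page would trip an assertion, so only do it once records exist.
      EXECUTE_IF("do_page_reorganize", if (n_recs) page_reorganize(););
      std::puts("insert");
    }

    int main()
    {
      insert_record(0);  // injection skipped: page still empty
      insert_record(5);  // injection fires before the insert
      return 0;
    }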

View file

@@ -231,18 +231,6 @@
/*==============*/
fts_trx_table_t*ftt, /*!< in: FTS trx table */
doc_id_t doc_id); /*!< in: doc id */
/******************************************************************//**
Update the last document id. This function could create a new
transaction to update the last document id.
@return DB_SUCCESS if OK */
static
dberr_t
fts_update_sync_doc_id(
/*===================*/
const dict_table_t* table, /*!< in: table */
doc_id_t doc_id, /*!< in: last document id */
trx_t* trx) /*!< in: update trx, or NULL */
MY_ATTRIBUTE((nonnull(1)));
/** Tokenize a document.
@param[in,out] doc document to tokenize
@@ -2527,27 +2515,6 @@
}
#endif
/*********************************************************************//**
Update the next and last Doc ID in the CONFIG table to be the input
"doc_id" value (+ 1). We would do so after each FTS index build or
table truncate */
void
fts_update_next_doc_id(
/*===================*/
trx_t* trx, /*!< in/out: transaction */
const dict_table_t* table, /*!< in: table */
doc_id_t doc_id) /*!< in: DOC ID to set */
{
table->fts->cache->synced_doc_id = doc_id;
table->fts->cache->next_doc_id = doc_id + 1;
table->fts->cache->first_doc_id = table->fts->cache->next_doc_id;
fts_update_sync_doc_id(
table, table->fts->cache->synced_doc_id, trx);
}
/*********************************************************************//**
Get the next available document id.
@return DB_SUCCESS if OK */
@@ -2706,17 +2673,17 @@ func_exit:
return(error);
}
/*********************************************************************//**
Update the last document id. This function could create a new
/** Update the last document id. This function could create a new
transaction to update the last document id.
@return DB_SUCCESS if OK */
static
@param table table to be updated
@param doc_id last document id
@param trx update trx or null
@retval DB_SUCCESS if OK */
dberr_t
fts_update_sync_doc_id(
/*===================*/
const dict_table_t* table, /*!< in: table */
doc_id_t doc_id, /*!< in: last document id */
trx_t* trx) /*!< in: update trx, or NULL */
const dict_table_t* table,
doc_id_t doc_id,
trx_t* trx)
{
byte id[FTS_MAX_ID_LEN];
pars_info_t* info;

View file

@@ -2323,63 +2323,6 @@ overflow:
return(~(ulonglong) 0);
}
/********************************************************************//**
Reset the autoinc value in the table.
@return DB_SUCCESS if all went well else error code */
UNIV_INTERN
dberr_t
ha_innobase::innobase_reset_autoinc(
/*================================*/
ulonglong autoinc) /*!< in: value to store */
{
dberr_t error;
error = innobase_lock_autoinc();
if (error == DB_SUCCESS) {
dict_table_autoinc_initialize(m_prebuilt->table, autoinc);
m_prebuilt->table->autoinc_mutex.unlock();
}
return(error);
}
/*******************************************************************//**
Reset the auto-increment counter to the given value, i.e. the next row
inserted will get the given value. This is called e.g. after TRUNCATE
is emulated by doing a 'DELETE FROM t'. HA_ERR_WRONG_COMMAND is
returned by storage engines that don't support this operation.
@return 0 or error code */
UNIV_INTERN
int
ha_innobase::reset_auto_increment(
/*==============================*/
ulonglong value) /*!< in: new value for table autoinc */
{
DBUG_ENTER("ha_innobase::reset_auto_increment");
dberr_t error;
update_thd(ha_thd());
error = row_lock_table_autoinc_for_mysql(m_prebuilt);
if (error != DB_SUCCESS) {
DBUG_RETURN(convert_error_code_to_mysql(
error, m_prebuilt->table->flags, m_user_thd));
}
/* The next value can never be 0. */
if (value == 0) {
value = 1;
}
innobase_reset_autoinc(value);
DBUG_RETURN(0);
}
/*********************************************************************//**
Initializes some fields in an InnoDB transaction object. */
static
@@ -8614,16 +8557,6 @@
error, m_prebuilt->table->flags, m_user_thd));
}
/** Delete all rows from the table.
@return error number or 0 */
int
ha_innobase::delete_all_rows()
{
DBUG_ENTER("ha_innobase::delete_all_rows");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/**********************************************************************//**
Removes a new lock set on a row, if it was not read optimistically. This can
be called after a row has been read in the processing of an UPDATE or a DELETE
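
The deleted ha_innodb.cc hunks drop ha_innobase::innobase_reset_autoinc(), reset_auto_increment() and the delete_all_rows() stub; the removed reset path simply took the table's autoinc mutex and reinstalled the counter, coercing 0 to 1 because the next value can never be 0. A loose standalone paraphrase of that removed logic, with invented types rather than InnoDB's real structures:

    #include <cassert>
    #include <cstdint>
    #include <mutex>

    // Invented miniature of the per-table autoinc state (not dict_table_t).
    struct TableCounters
    {
      std::mutex autoinc_mutex;
      uint64_t   autoinc= 1;
    };

    static void reset_auto_increment(TableCounters &t, uint64_t value)
    {
      if (value == 0)        // the next value can never be 0
        value= 1;
      std::lock_guard<std::mutex> guard(t.autoinc_mutex);
      t.autoinc= value;      // next inserted row will get this value
    }

    int main()
    {
      TableCounters t;
      reset_auto_increment(t, 0);
      assert(t.autoinc == 1);
      reset_auto_increment(t, 42);
      assert(t.autoinc == 42);
      return 0;
    }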

View file

@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2021, MariaDB Corporation.
Copyright (c) 2013, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -105,8 +105,6 @@ public:
double read_time(uint index, uint ranges, ha_rows rows) override;
int delete_all_rows() override;
int write_row(const uchar * buf) override;
int update_row(const uchar * old_data, const uchar * new_data) override;
@@ -242,7 +240,6 @@ public:
ulonglong nb_desired_values,
ulonglong* first_value,
ulonglong* nb_reserved_values) override;
int reset_auto_increment(ulonglong value) override;
bool get_error_message(int error, String *buf) override;
@@ -450,7 +447,6 @@ protected:
dberr_t innobase_lock_autoinc();
ulonglong innobase_peek_autoinc();
dberr_t innobase_set_max_autoinc(ulonglong auto_inc);
dberr_t innobase_reset_autoinc(ulonglong auto_inc);
/** Resets a query execution 'template'.
@see build_template() */

View file

@@ -405,17 +405,6 @@
/*================*/
const dict_table_t* table, /*!< in: table */
doc_id_t* doc_id);/*!< out: new document id */
/*********************************************************************//**
Update the next and last Doc ID in the CONFIG table to be the input
"doc_id" value (+ 1). We would do so after each FTS index build or
table truncate */
void
fts_update_next_doc_id(
/*===================*/
trx_t* trx, /*!< in/out: transaction */
const dict_table_t* table, /*!< in: table */
doc_id_t doc_id) /*!< in: DOC ID to set */
MY_ATTRIBUTE((nonnull(2)));
/******************************************************************//**
Create a new fts_doc_ids_t.
@@ -967,6 +956,18 @@ bool fts_check_aux_table(const char *name,
table_id_t *table_id,
index_id_t *index_id);
/** Update the last document id. This function could create a new
transaction to update the last document id.
@param table table to be updated
@param doc_id last document id
@param trx update trx or null
@retval DB_SUCCESS if OK */
dberr_t
fts_update_sync_doc_id(const dict_table_t *table,
doc_id_t doc_id,
trx_t *trx)
MY_ATTRIBUTE((nonnull(1)));
/** Sync the table during commit phase
@param[in] table table to be synced */
void fts_sync_during_ddl(dict_table_t* table);

View file

@@ -2817,7 +2817,21 @@ wait_again:
err = fts_sync_table(const_cast<dict_table_t*>(new_table));
if (err == DB_SUCCESS) {
fts_update_next_doc_id(NULL, new_table, max_doc_id);
new_table->fts->cache->synced_doc_id = max_doc_id;
/* Update the max value as next FTS_DOC_ID */
if (max_doc_id >= new_table->fts->cache->next_doc_id) {
new_table->fts->cache->next_doc_id =
max_doc_id + 1;
}
new_table->fts->cache->first_doc_id =
new_table->fts->cache->next_doc_id;
err= fts_update_sync_doc_id(
new_table,
new_table->fts->cache->synced_doc_id,
NULL);
}
}
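
With fts_update_next_doc_id() removed, the row0merge.cc hunk above updates the FTS cache fields inline and only ever moves next_doc_id forward before persisting synced_doc_id via fts_update_sync_doc_id(); that monotonic update is what keeps a fulltext index rebuild from handing back an already-used FTS_DOC_ID (MDEV-27582, covered by the new innodb_fts test). A minimal standalone model of the same rule, with an invented struct standing in for fts_cache_t:

    #include <cassert>
    #include <cstdint>

    // Simplified model of the doc-id fields touched in the hunk above.
    struct FtsCache
    {
      uint64_t synced_doc_id= 0;
      uint64_t next_doc_id= 1;
      uint64_t first_doc_id= 1;
    };

    // The next FTS_DOC_ID may only grow, so a DDL that rebuilds the index
    // can never hand out a doc id that is already in use.
    static void advance_doc_id(FtsCache &cache, uint64_t max_doc_id)
    {
      cache.synced_doc_id= max_doc_id;
      if (max_doc_id >= cache.next_doc_id)    // never decrement next_doc_id
        cache.next_doc_id= max_doc_id + 1;
      cache.first_doc_id= cache.next_doc_id;
      // the real code then persists synced_doc_id with fts_update_sync_doc_id()
    }

    int main()
    {
      FtsCache cache;
      cache.next_doc_id= 10;                  // ids 1..9 already handed out
      advance_doc_id(cache, 3);               // stale maximum from a rebuilt index
      assert(cache.next_doc_id == 10);        // unchanged: no decrement
      advance_doc_id(cache, 12);
      assert(cache.next_doc_id == 13);
      return 0;
    }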