mirror of
https://github.com/MariaDB/server.git
synced 2026-04-23 16:55:31 +02:00
Fix Bug #54330: Broken fast index creation. Add an additional array to
record each merge run's start offset, so that the correct offsets are paired up across multiple merge runs. rb://377 approved by Marko
This commit is contained in:
parent
8e7ce2e9d8
commit
3c0be2c7b3
2 changed files with 61 additions and 31 deletions
|
|
@ -1578,22 +1578,28 @@ row_merge(
|
|||
const dict_index_t* index, /*!< in: index being created */
|
||||
merge_file_t* file, /*!< in/out: file containing
|
||||
index entries */
|
||||
ulint* half, /*!< in/out: half the file */
|
||||
row_merge_block_t* block, /*!< in/out: 3 buffers */
|
||||
int* tmpfd, /*!< in/out: temporary file handle */
|
||||
TABLE* table) /*!< in/out: MySQL table, for
|
||||
TABLE* table, /*!< in/out: MySQL table, for
|
||||
reporting erroneous key value
|
||||
if applicable */
|
||||
ulint* num_run,/*!< in/out: Number of runs remain
|
||||
to be merged */
|
||||
ulint* run_offset) /*!< in/out: Array contains the
|
||||
first offset number for each merge
|
||||
run */
|
||||
{
|
||||
ulint foffs0; /*!< first input offset */
|
||||
ulint foffs1; /*!< second input offset */
|
||||
ulint error; /*!< error code */
|
||||
merge_file_t of; /*!< output file */
|
||||
const ulint ihalf = *half;
|
||||
const ulint ihalf = run_offset[*num_run / 2];
|
||||
/*!< half the input file */
|
||||
ulint ohalf; /*!< half the output file */
|
||||
ulint n_run = 0;
|
||||
/*!< num of runs generated from this merge */
|
||||
|
||||
UNIV_MEM_ASSERT_W(block[0], 3 * sizeof block[0]);
|
||||
|
||||
ut_ad(ihalf < file->offset);
|
||||
|
||||
of.fd = *tmpfd;
|
||||
|
|
@ -1601,17 +1607,20 @@ row_merge(
|
|||
of.n_rec = 0;
|
||||
|
||||
/* Merge blocks to the output file. */
|
||||
ohalf = 0;
|
||||
foffs0 = 0;
|
||||
foffs1 = ihalf;
|
||||
|
||||
UNIV_MEM_INVALID(run_offset, *num_run * sizeof *run_offset);
|
||||
|
||||
for (; foffs0 < ihalf && foffs1 < file->offset; foffs0++, foffs1++) {
|
||||
ulint ahalf; /*!< arithmetic half the input file */
|
||||
|
||||
if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
|
||||
return(DB_INTERRUPTED);
|
||||
}
|
||||
|
||||
/* Remember the offset number for this run */
|
||||
run_offset[n_run++] = of.offset;
|
||||
|
||||
error = row_merge_blocks(index, file, block,
|
||||
&foffs0, &foffs1, &of, table);
|
||||
|
||||
|
|
@ -1619,21 +1628,6 @@ row_merge(
|
|||
return(error);
|
||||
}
|
||||
|
||||
/* Record the offset of the output file when
|
||||
approximately half the output has been generated. In
|
||||
this way, the next invocation of row_merge() will
|
||||
spend most of the time in this loop. The initial
|
||||
estimate is ohalf==0. */
|
||||
ahalf = file->offset / 2;
|
||||
ut_ad(ohalf <= of.offset);
|
||||
|
||||
/* Improve the estimate until reaching half the input
|
||||
file size, or we can not get any closer to it. All
|
||||
comparands should be non-negative when !(ohalf < ahalf)
|
||||
because ohalf <= of.offset. */
|
||||
if (ohalf < ahalf || of.offset - ahalf < ohalf - ahalf) {
|
||||
ohalf = of.offset;
|
||||
}
|
||||
}
|
||||
|
||||
/* Copy the last blocks, if there are any. */
|
||||
|
|
@ -1643,6 +1637,9 @@ row_merge(
|
|||
return(DB_INTERRUPTED);
|
||||
}
|
||||
|
||||
/* Remember the offset number for this run */
|
||||
run_offset[n_run++] = of.offset;
|
||||
|
||||
if (!row_merge_blocks_copy(index, file, block, &foffs0, &of)) {
|
||||
return(DB_CORRUPTION);
|
||||
}
|
||||
|
|
@ -1655,6 +1652,9 @@ row_merge(
|
|||
return(DB_INTERRUPTED);
|
||||
}
|
||||
|
||||
/* Remember the offset number for this run */
|
||||
run_offset[n_run++] = of.offset;
|
||||
|
||||
if (!row_merge_blocks_copy(index, file, block, &foffs1, &of)) {
|
||||
return(DB_CORRUPTION);
|
||||
}
|
||||
|
|
@ -1666,10 +1666,23 @@ row_merge(
|
|||
return(DB_CORRUPTION);
|
||||
}
|
||||
|
||||
ut_ad(n_run < *num_run);
|
||||
|
||||
*num_run = n_run;
|
||||
|
||||
/* Each run can contain one or more offsets. As merge goes on,
|
||||
the number of runs (to merge) will reduce until we have one
|
||||
single run. So the number of runs will always be smaller than
|
||||
the number of offsets in file */
|
||||
ut_ad((*num_run) <= file->offset);
|
||||
|
||||
/* The number of offsets in output file is always equal or
|
||||
smaller than input file */
|
||||
ut_ad(of.offset <= file->offset);
|
||||
|
||||
/* Swap file descriptors for the next pass. */
|
||||
*tmpfd = file->fd;
|
||||
*file = of;
|
||||
*half = ohalf;
|
||||
|
||||
UNIV_MEM_INVALID(block[0], 3 * sizeof block[0]);
|
||||
|
||||
|
|
@ -1694,27 +1707,39 @@ row_merge_sort(
|
|||
if applicable */
|
||||
{
|
||||
ulint half = file->offset / 2;
|
||||
ulint num_runs;
|
||||
ulint* run_offset;
|
||||
ulint error = DB_SUCCESS;
|
||||
|
||||
/* Record the number of merge runs we need to perform */
|
||||
num_runs = file->offset;
|
||||
|
||||
/* "run_offset" records each run's first offset number */
|
||||
run_offset = (ulint*) mem_alloc(file->offset * sizeof(ulint));
|
||||
|
||||
/* This tells row_merge() where to start for the first round
|
||||
of merge. */
|
||||
run_offset[half] = half;
|
||||
|
||||
/* The file should always contain at least one byte (the end
|
||||
of file marker). Thus, it must be at least one block. */
|
||||
ut_ad(file->offset > 0);
|
||||
|
||||
/* Merge the runs until we have one big run */
|
||||
do {
|
||||
ulint error;
|
||||
error = row_merge(trx, index, file, block, tmpfd,
|
||||
table, &num_runs, run_offset);
|
||||
|
||||
error = row_merge(trx, index, file, &half,
|
||||
block, tmpfd, table);
|
||||
UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
|
||||
|
||||
if (error != DB_SUCCESS) {
|
||||
return(error);
|
||||
break;
|
||||
}
|
||||
} while (num_runs > 1);
|
||||
|
||||
/* half > 0 should hold except when the file consists
|
||||
of one block. No need to merge further then. */
|
||||
ut_ad(half > 0 || file->offset == 1);
|
||||
} while (half < file->offset && half > 0);
|
||||
mem_free(run_offset);
|
||||
|
||||
return(DB_SUCCESS);
|
||||
return(error);
|
||||
}
|
||||
|
||||
/*************************************************************//**
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue