Mirror of https://github.com/MariaDB/server.git, synced 2025-01-15 19:42:28 +01:00
Commit a977054ee0: Merge branch '10.3' into 10.4
61 changed files with 1743 additions and 293 deletions
|
@ -3596,7 +3596,10 @@ print_table_data(MYSQL_RES *result)
|
|||
{
|
||||
print_field_types(result);
|
||||
if (!mysql_num_rows(result))
|
||||
{
|
||||
my_afree((uchar*) num_flag);
|
||||
return;
|
||||
}
|
||||
mysql_field_seek(result,0);
|
||||
}
|
||||
separator.copy("+",1,charset_info);
|
||||
|
|
|
@ -569,14 +569,14 @@ static int file_exists(char * filename)
|
|||
@retval int error = 1, success = 0
|
||||
*/
|
||||
|
||||
static int search_dir(const char * base_path, const char *tool_name,
|
||||
static int search_dir(const char *base_path, const char *tool_name,
|
||||
const char *subdir, char *tool_path)
|
||||
{
|
||||
char new_path[FN_REFLEN];
|
||||
char source_path[FN_REFLEN];
|
||||
|
||||
strcpy(source_path, base_path);
|
||||
strcat(source_path, subdir);
|
||||
safe_strcpy(source_path, sizeof(source_path), base_path);
|
||||
safe_strcat(source_path, sizeof(source_path), subdir);
|
||||
fn_format(new_path, tool_name, source_path, "", MY_UNPACK_FILENAME);
|
||||
if (file_exists(new_path))
|
||||
{
|
||||
|
@ -632,7 +632,7 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
FILE *file_ptr;
|
||||
char path[FN_REFLEN];
|
||||
char line[1024];
|
||||
char *reason= 0;
|
||||
const char *reason= 0;
|
||||
char *res;
|
||||
int i= -1;
|
||||
|
||||
|
@ -643,14 +643,14 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
}
|
||||
if (!file_exists(opt_plugin_ini))
|
||||
{
|
||||
reason= (char *)"File does not exist.";
|
||||
reason= "File does not exist.";
|
||||
goto error;
|
||||
}
|
||||
|
||||
file_ptr= fopen(opt_plugin_ini, "r");
|
||||
if (file_ptr == NULL)
|
||||
{
|
||||
reason= (char *)"Cannot open file.";
|
||||
reason= "Cannot open file.";
|
||||
goto error;
|
||||
}
|
||||
|
||||
|
@ -660,17 +660,20 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
/* Read plugin components */
|
||||
while (i < 16)
|
||||
{
|
||||
size_t line_len;
|
||||
|
||||
res= fgets(line, sizeof(line), file_ptr);
|
||||
line_len= strlen(line);
|
||||
|
||||
/* strip \n */
|
||||
if (line[strlen(line)-1] == '\n')
|
||||
{
|
||||
line[strlen(line)-1]= '\0';
|
||||
}
|
||||
if (line[line_len - 1] == '\n')
|
||||
line[line_len - 1]= '\0';
|
||||
|
||||
if (res == NULL)
|
||||
{
|
||||
if (i < 1)
|
||||
{
|
||||
reason= (char *)"Bad format in plugin configuration file.";
|
||||
reason= "Bad format in plugin configuration file.";
|
||||
fclose(file_ptr);
|
||||
goto error;
|
||||
}
|
||||
|
@ -683,14 +686,19 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
if (i == -1) /* if first pass, read this line as so_name */
|
||||
{
|
||||
/* Add proper file extension for soname */
|
||||
strcat(line, FN_SOEXT);
|
||||
if (safe_strcpy(line + line_len - 1, sizeof(line), FN_SOEXT))
|
||||
{
|
||||
reason= "Plugin name too long.";
|
||||
fclose(file_ptr);
|
||||
goto error;
|
||||
}
|
||||
/* save so_name */
|
||||
plugin_data.so_name= my_strdup(line, MYF(MY_WME|MY_ZEROFILL));
|
||||
i++;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (strlen(line) > 0)
|
||||
if (line_len > 0)
|
||||
{
|
||||
plugin_data.components[i]= my_strdup(line, MYF(MY_WME));
|
||||
i++;
|
||||
|
@ -779,14 +787,13 @@ static int check_options(int argc, char **argv, char *operation)
|
|||
/* read the plugin config file and check for match against argument */
|
||||
else
|
||||
{
|
||||
if (strlen(argv[i]) + 4 + 1 > FN_REFLEN)
|
||||
if (safe_strcpy(plugin_name, sizeof(plugin_name), argv[i]) ||
|
||||
safe_strcpy(config_file, sizeof(config_file), argv[i]) ||
|
||||
safe_strcat(config_file, sizeof(config_file), ".ini"))
|
||||
{
|
||||
fprintf(stderr, "ERROR: argument is too long.\n");
|
||||
return 1;
|
||||
}
|
||||
strcpy(plugin_name, argv[i]);
|
||||
strcpy(config_file, argv[i]);
|
||||
strcat(config_file, ".ini");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -855,35 +862,30 @@ static int check_options(int argc, char **argv, char *operation)
|
|||
static int process_options(int argc, char *argv[], char *operation)
|
||||
{
|
||||
int error= 0;
|
||||
int i= 0;
|
||||
|
||||
/* Parse and execute command-line options */
|
||||
if ((error= handle_options(&argc, &argv, my_long_options, get_one_option)))
|
||||
goto exit;
|
||||
return error;
|
||||
|
||||
/* If the print defaults option used, exit. */
|
||||
if (opt_print_defaults)
|
||||
{
|
||||
error= -1;
|
||||
goto exit;
|
||||
}
|
||||
return -1;
|
||||
|
||||
/* Add a trailing directory separator if not present */
|
||||
if (opt_basedir)
|
||||
{
|
||||
i= (int)strlength(opt_basedir);
|
||||
if (opt_basedir[i-1] != FN_LIBCHAR || opt_basedir[i-1] != FN_LIBCHAR2)
|
||||
size_t basedir_len= strlength(opt_basedir);
|
||||
if (opt_basedir[basedir_len - 1] != FN_LIBCHAR ||
|
||||
opt_basedir[basedir_len - 1] != FN_LIBCHAR2)
|
||||
{
|
||||
char buff[FN_REFLEN];
|
||||
memset(buff, 0, sizeof(buff));
|
||||
if (basedir_len + 2 > FN_REFLEN)
|
||||
return -1;
|
||||
|
||||
strncpy(buff, opt_basedir, sizeof(buff) - 1);
|
||||
#ifdef __WIN__
|
||||
strncat(buff, "/", sizeof(buff) - strlen(buff) - 1);
|
||||
#else
|
||||
strncat(buff, FN_DIRSEP, sizeof(buff) - strlen(buff) - 1);
|
||||
#endif
|
||||
buff[sizeof(buff) - 1]= 0;
|
||||
memcpy(buff, opt_basedir, basedir_len);
|
||||
buff[basedir_len]= '/';
|
||||
buff[basedir_len + 1]= '\0';
|
||||
|
||||
my_free(opt_basedir);
|
||||
opt_basedir= my_strdup(buff, MYF(MY_FAE));
|
||||
}
|
||||
|
@ -895,10 +897,7 @@ static int process_options(int argc, char *argv[], char *operation)
|
|||
generated when the defaults were read from the file, exit.
|
||||
*/
|
||||
if (!opt_no_defaults && ((error= get_default_values())))
|
||||
{
|
||||
error= -1;
|
||||
goto exit;
|
||||
}
|
||||
return -1;
|
||||
|
||||
/*
|
||||
Check to ensure required options are present and validate the operation.
|
||||
|
@ -906,11 +905,9 @@ static int process_options(int argc, char *argv[], char *operation)
|
|||
read a configuration file named <plugin_name>.ini from the --plugin-dir
|
||||
or --plugin-ini location if the --plugin-ini option presented.
|
||||
*/
|
||||
strcpy(operation, "");
|
||||
if ((error = check_options(argc, argv, operation)))
|
||||
{
|
||||
goto exit;
|
||||
}
|
||||
operation[0]= '\0';
|
||||
if ((error= check_options(argc, argv, operation)))
|
||||
return error;
|
||||
|
||||
if (opt_verbose)
|
||||
{
|
||||
|
@ -922,8 +919,7 @@ static int process_options(int argc, char *argv[], char *operation)
|
|||
printf("# lc_messages_dir = %s\n", opt_lc_messages_dir);
|
||||
}
|
||||
|
||||
exit:
|
||||
return error;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -2468,7 +2468,7 @@ static uint dump_events_for_db(char *db)
|
|||
if (mysql_query_with_error_report(mysql, &event_list_res, "show events"))
|
||||
DBUG_RETURN(0);
|
||||
|
||||
strcpy(delimiter, ";");
|
||||
safe_strcpy(delimiter, sizeof(delimiter), ";");
|
||||
if (mysql_num_rows(event_list_res) > 0)
|
||||
{
|
||||
if (opt_xml)
|
||||
|
|
|
@ -6143,7 +6143,9 @@ int do_done(struct st_command *command)
|
|||
if (*cur_block->delim)
|
||||
{
|
||||
/* Restore "old" delimiter after false if block */
|
||||
strcpy (delimiter, cur_block->delim);
|
||||
if (safe_strcpy(delimiter, sizeof(delimiter), cur_block->delim))
|
||||
die("Delimiter too long, truncated");
|
||||
|
||||
delimiter_length= strlen(delimiter);
|
||||
}
|
||||
/* Pop block from stack, goto next line */
|
||||
|
@ -6398,10 +6400,12 @@ void do_block(enum block_cmd cmd, struct st_command* command)
|
|||
if (cur_block->ok)
|
||||
{
|
||||
cur_block->delim[0]= '\0';
|
||||
} else
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Remember "old" delimiter if entering a false if block */
|
||||
strcpy (cur_block->delim, delimiter);
|
||||
if (safe_strcpy(cur_block->delim, sizeof(cur_block->delim), delimiter))
|
||||
die("Delimiter too long, truncated");
|
||||
}
|
||||
|
||||
DBUG_PRINT("info", ("OK: %d", cur_block->ok));
|
||||
|
@ -11713,9 +11717,8 @@ static int setenv(const char *name, const char *value, int overwrite)
|
|||
char *envvar= (char *)malloc(buflen);
|
||||
if(!envvar)
|
||||
return ENOMEM;
|
||||
strcpy(envvar, name);
|
||||
strcat(envvar, "=");
|
||||
strcat(envvar, value);
|
||||
|
||||
snprintf(envvar, buflen, "%s=%s", name, value);
|
||||
putenv(envvar);
|
||||
return 0;
|
||||
}
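The rewritten setenv() shim above replaces the strcpy()/strcat() chain with a single bounded snprintf(). A minimal standalone sketch of the same pattern follows; the function name is illustrative and the buflen computation is an assumption, since this hunk does not show how buflen was derived:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch only: build "NAME=value" in one bounded, always
   NUL-terminated snprintf() call instead of a strcpy()/strcat() chain. */
static int setenv_sketch(const char *name, const char *value)
{
  size_t buflen= strlen(name) + strlen(value) + 2;  /* assumed: '=' plus '\0' */
  char *envvar= (char *) malloc(buflen);
  if (!envvar)
    return ENOMEM;
  snprintf(envvar, buflen, "%s=%s", name, value);
  putenv(envvar);  /* the buffer intentionally remains allocated for the environment */
  return 0;
}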
|
||||
|
|
|
@ -181,7 +181,7 @@ IF(WIN32)
|
|||
MARK_AS_ADVANCED(SIGNCODE)
|
||||
IF(SIGNCODE)
|
||||
SET(SIGNTOOL_PARAMETERS
|
||||
/a /t http://timestamp.globalsign.com/?signature=sha2
|
||||
/a /fd SHA256 /t http://timestamp.globalsign.com/?signature=sha2
|
||||
CACHE STRING "parameters for signtool (list)")
|
||||
IF(NOT SIGNTOOL_EXECUTABLE)
|
||||
FILE(GLOB path_list
|
||||
|
|
|
@ -508,7 +508,7 @@ static int DbugParse(CODE_STATE *cs, const char *control)
|
|||
stack->delay= stack->next->delay;
|
||||
stack->maxdepth= stack->next->maxdepth;
|
||||
stack->sub_level= stack->next->sub_level;
|
||||
strcpy(stack->name, stack->next->name);
|
||||
safe_strcpy(stack->name, sizeof(stack->name), stack->next->name);
|
||||
stack->out_file= stack->next->out_file;
|
||||
stack->out_file->used++;
|
||||
if (stack->next == &init_settings)
|
||||
|
|
|
@ -835,7 +835,7 @@ parse_page(
|
|||
{
|
||||
unsigned long long id;
|
||||
uint16_t undo_page_type;
|
||||
char str[20]={'\0'};
|
||||
const char *str;
|
||||
ulint n_recs;
|
||||
uint32_t page_no, left_page_no, right_page_no;
|
||||
ulint data_bytes;
|
||||
|
@ -843,11 +843,7 @@ parse_page(
|
|||
ulint size_range_id;
|
||||
|
||||
/* Check whether page is doublewrite buffer. */
|
||||
if(skip_page) {
|
||||
strcpy(str, "Double_write_buffer");
|
||||
} else {
|
||||
strcpy(str, "-");
|
||||
}
|
||||
str = skip_page ? "Double_write_buffer" : "-";
|
||||
|
||||
switch (mach_read_from_2(page + FIL_PAGE_TYPE)) {
|
||||
|
||||
|
|
|
@ -57,6 +57,9 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
|||
#include "backup_copy.h"
|
||||
#include "backup_mysql.h"
|
||||
#include <btr0btr.h>
|
||||
#ifdef _WIN32
|
||||
#include <direct.h> /* rmdir */
|
||||
#endif
|
||||
|
||||
#define ROCKSDB_BACKUP_DIR "#rocksdb"
|
||||
|
||||
|
@ -1619,7 +1622,49 @@ bool backup_finish()
|
|||
return(true);
|
||||
}
|
||||
|
||||
bool
|
||||
|
||||
/*
|
||||
Drop all empty database directories in the base backup
|
||||
that do not exist in the incremental backup.
|
||||
|
||||
This effectively re-plays all DROP DATABASE statements happened
|
||||
in between base backup and incremental backup creation time.
|
||||
|
||||
Note, only checking if base_dir/db/ is empty is not enough,
|
||||
because inc_dir/db/db.opt might have been dropped for some reason,
|
||||
which may also result in an empty base_dir/db/.
|
||||
|
||||
Only the fact that at the same time:
|
||||
- base_dir/db/ exists
|
||||
- inc_dir/db/ does not exist
|
||||
means that DROP DATABASE happened.
|
||||
*/
|
||||
static void
|
||||
ibx_incremental_drop_databases(const char *base_dir,
|
||||
const char *inc_dir)
|
||||
{
|
||||
datadir_node_t node;
|
||||
datadir_node_init(&node);
|
||||
datadir_iter_t *it = datadir_iter_new(base_dir);
|
||||
|
||||
while (datadir_iter_next(it, &node)) {
|
||||
if (node.is_empty_dir) {
|
||||
char path[FN_REFLEN];
|
||||
snprintf(path, sizeof(path), "%s/%s",
|
||||
inc_dir, node.filepath_rel);
|
||||
if (!directory_exists(path, false)) {
|
||||
msg("Removing %s", node.filepath);
|
||||
rmdir(node.filepath);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
datadir_iter_free(it);
|
||||
datadir_node_free(&node);
|
||||
}
|
||||
|
||||
|
||||
static bool
|
||||
ibx_copy_incremental_over_full()
|
||||
{
|
||||
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
|
||||
|
@ -1702,6 +1747,8 @@ ibx_copy_incremental_over_full()
|
|||
}
|
||||
copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true);
|
||||
}
|
||||
ibx_incremental_drop_databases(xtrabackup_target_dir,
|
||||
xtrabackup_incremental_dir);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1676,8 +1676,11 @@ container_list_add_object(container_list *list, const char *name,
|
|||
list->object_count += object_count_step;
|
||||
}
|
||||
assert(list->idx <= list->object_count);
|
||||
strcpy(list->objects[list->idx].name, name);
|
||||
strcpy(list->objects[list->idx].hash, hash);
|
||||
safe_strcpy(list->objects[list->idx].name,
|
||||
sizeof(list->objects[list->idx].name), name);
|
||||
safe_strcpy(list->objects[list->idx].hash,
|
||||
sizeof(list->objects[list->idx].hash), hash);
|
||||
|
||||
list->objects[list->idx].bytes = bytes;
|
||||
++list->idx;
|
||||
}
|
||||
|
|
|
@ -4460,11 +4460,13 @@ static bool xtrabackup_backup_low()
|
|||
|
||||
dst_log_file = NULL;
|
||||
|
||||
if(!xtrabackup_incremental) {
|
||||
strcpy(metadata_type, "full-backuped");
|
||||
if (!xtrabackup_incremental) {
|
||||
safe_strcpy(metadata_type, sizeof(metadata_type),
|
||||
"full-backuped");
|
||||
metadata_from_lsn = 0;
|
||||
} else {
|
||||
strcpy(metadata_type, "incremental");
|
||||
safe_strcpy(metadata_type, sizeof(metadata_type),
|
||||
"incremental");
|
||||
metadata_from_lsn = incremental_lsn;
|
||||
}
|
||||
metadata_last_lsn = log_copy_scanned_lsn;
|
||||
|
@ -6211,7 +6213,8 @@ static bool xtrabackup_prepare_func(char** argv)
|
|||
if (ok) {
|
||||
char filename[FN_REFLEN];
|
||||
|
||||
strcpy(metadata_type, "log-applied");
|
||||
safe_strcpy(metadata_type, sizeof(metadata_type),
|
||||
"log-applied");
|
||||
|
||||
if(xtrabackup_incremental
|
||||
&& metadata_to_lsn < incremental_to_lsn)
|
||||
|
|
|
@ -225,6 +225,44 @@ static inline void lex_string_set3(LEX_CSTRING *lex_str, const char *c_str,
|
|||
lex_str->length= len;
|
||||
}
|
||||
|
||||
/*
|
||||
Copies src into dst and ensures dst is a NULL terminated C string.
|
||||
|
||||
Returns 1 if the src string was truncated due to too small size of dst.
|
||||
Returns 0 if src completely fit within dst. Pads the remaining dst with '\0'
|
||||
|
||||
Note: dst_size must be > 0
|
||||
*/
|
||||
static inline int safe_strcpy(char *dst, size_t dst_size, const char *src)
|
||||
{
|
||||
memset(dst, '\0', dst_size);
|
||||
strncpy(dst, src, dst_size - 1);
|
||||
/*
|
||||
If the first condition is true, we are guaranteed to have src length
|
||||
>= (dst_size - 1), hence safe to access src[dst_size - 1].
|
||||
*/
|
||||
if (dst[dst_size - 2] != '\0' && src[dst_size - 1] != '\0')
|
||||
return 1; /* Truncation of src. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Appends src to dst and ensures dst is a NULL terminated C string.
|
||||
|
||||
Returns 1 if the src string was truncated due to too small size of dst.
|
||||
Returns 0 if src completely fit within the remaining dst space. Pads the
|
||||
remaining dst with '\0'.
|
||||
|
||||
Note: dst_size must be > 0
|
||||
*/
|
||||
static inline int safe_strcat(char *dst, size_t dst_size, const char *src)
|
||||
{
|
||||
size_t init_len= strlen(dst);
|
||||
if (unlikely(init_len >= dst_size - 1))
|
||||
return 1;
|
||||
return safe_strcpy(dst + init_len, dst_size - init_len, src);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
static inline char *safe_str(char *str)
|
||||
{ return str ? str : const_cast<char*>(""); }
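For reference, a minimal caller-side sketch of how the two helpers added above are intended to be used (the check_options() and do_block() hunks elsewhere in this commit follow the same shape). The function and buffer names are illustrative, and the sketch assumes the safe_strcpy()/safe_strcat() definitions from this hunk are in scope:

#include <stddef.h>
#include <stdio.h>

/* Illustrative sketch only: both helpers NUL-terminate dst and return 1 on
   truncation, so the bounds check and the copy collapse into one condition
   instead of a separate strlen() guard followed by strcpy()/strcat(). */
static int build_config_name(char *buf, size_t buf_size, const char *plugin)
{
  if (safe_strcpy(buf, buf_size, plugin) ||   /* copy the plugin name */
      safe_strcat(buf, buf_size, ".ini"))     /* append the extension */
  {
    fprintf(stderr, "ERROR: argument is too long.\n");
    return 1;                                 /* report truncation to the caller */
  }
  return 0;
}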
|
||||
|
|
|
@ -5604,6 +5604,178 @@ r
|
|||
3
|
||||
drop table t1,t2,t3,x;
|
||||
#
|
||||
# MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
|
||||
# within a CTE with name 'x' used in a subquery from
|
||||
# select list of another CTE
|
||||
#
|
||||
CREATE TABLE x (a int) ENGINE=MyISAM;
|
||||
INSERT INTO x VALUES (3),(7),(1);
|
||||
CREATE TABLE t1 (b int) ENGINE=MYISAM;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
3
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
3
|
||||
WITH x AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT x.c from x;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
|
||||
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c from cte;
|
||||
c
|
||||
2
|
||||
DROP TABLE x;
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH x AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT x.c from x;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
|
||||
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c from cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -3871,6 +3871,129 @@ select * from cte;
|
|||
|
||||
drop table t1,t2,t3,x;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
|
||||
--echo # within a CTE with name 'x' used in a subquery from
|
||||
--echo # select list of another CTE
|
||||
--echo #
|
||||
|
||||
CREATE TABLE x (a int) ENGINE=MyISAM;
|
||||
INSERT INTO x VALUES (3),(7),(1);
|
||||
CREATE TABLE t1 (b int) ENGINE=MYISAM;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
|
||||
let $q1=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q1;
|
||||
|
||||
let $q2=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q2;
|
||||
|
||||
let $q3=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q3;
|
||||
|
||||
|
||||
let $q4=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q4;
|
||||
|
||||
let $q5=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q5;
|
||||
|
||||
let $q6=
|
||||
WITH x AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT x.c from x;
|
||||
eval $q6;
|
||||
|
||||
let $q7=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
|
||||
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c from cte;
|
||||
eval $q7;
|
||||
|
||||
|
||||
DROP TABLE x;
|
||||
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q1;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q2;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q3;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q4;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q5;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q6;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q7;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
--echo #
|
||||
|
|
|
@ -18190,6 +18190,69 @@ DROP TABLE transaction_items;
|
|||
DROP TABLE transactions;
|
||||
DROP TABLE charges;
|
||||
DROP TABLE ledgers;
|
||||
#
|
||||
# MDEV-30081: Splitting from a constant mergeable derived table
|
||||
# used in inner part of an outer join.
|
||||
#
|
||||
CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
|
||||
INSERT INTO t1 VALUES (3),(4),(7);
|
||||
CREATE TABLE t2 (
|
||||
id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t2 VALUES (4,4,6),(7,7,7);
|
||||
CREATE TABLE t3 (
|
||||
wid int, wtid int, otid int, oid int,
|
||||
PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
|
||||
CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
|
||||
INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
CREATE TABLE t5 (
|
||||
id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
|
||||
) ENGINE=MyISAM ;
|
||||
INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
ANALYZE TABLE t1,t2,t3,t4,t5;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
test.t2 analyze status Engine-independent statistics collected
|
||||
test.t2 analyze status OK
|
||||
test.t3 analyze status Engine-independent statistics collected
|
||||
test.t3 analyze status OK
|
||||
test.t4 analyze status Engine-independent statistics collected
|
||||
test.t4 analyze status OK
|
||||
test.t5 analyze status Engine-independent statistics collected
|
||||
test.t5 analyze status OK
|
||||
CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
|
||||
SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
|
||||
FROM
|
||||
t1, t2, t3
|
||||
LEFT JOIN
|
||||
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
|
||||
ON t3.oid = dt.id AND t3.otid = 14
|
||||
LEFT JOIN v1
|
||||
ON (v1.id1 = dt.a)
|
||||
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
|
||||
wid wtid otid oid t1_id t2_id id a id1
|
||||
7 17 7 7 7 7 NULL NULL NULL
|
||||
EXPLAIN SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
|
||||
FROM
|
||||
t1, t2, t3
|
||||
LEFT JOIN
|
||||
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
|
||||
ON t3.oid = dt.id AND t3.otid = 14
|
||||
LEFT JOIN v1
|
||||
ON (v1.id1 = dt.a)
|
||||
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY t3 const PRIMARY,oid PRIMARY 4 const 1
|
||||
1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index
|
||||
1 PRIMARY t2 const PRIMARY PRIMARY 4 const 1 Using index
|
||||
1 PRIMARY t4 const PRIMARY,a NULL NULL NULL 1 Impossible ON condition
|
||||
1 PRIMARY <derived3> ref key0 key0 5 const 0 Using where
|
||||
3 LATERAL DERIVED t5 ref id1 id1 5 const 0 Using index
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1,t2,t3,t4,t5;
|
||||
# End of 10.3 tests
|
||||
#
|
||||
# MDEV-18679: materialized view with SELECT S containing materialized
|
||||
|
|
|
@ -3870,6 +3870,55 @@ DROP TABLE transactions;
|
|||
DROP TABLE charges;
|
||||
DROP TABLE ledgers;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-30081: Splitting from a constant mergeable derived table
|
||||
--echo # used in inner part of an outer join.
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
|
||||
INSERT INTO t1 VALUES (3),(4),(7);
|
||||
|
||||
CREATE TABLE t2 (
|
||||
id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t2 VALUES (4,4,6),(7,7,7);
|
||||
|
||||
CREATE TABLE t3 (
|
||||
wid int, wtid int, otid int, oid int,
|
||||
PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
|
||||
|
||||
CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
|
||||
INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
|
||||
CREATE TABLE t5 (
|
||||
id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
|
||||
) ENGINE=MyISAM ;
|
||||
INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
|
||||
ANALYZE TABLE t1,t2,t3,t4,t5;
|
||||
|
||||
CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
|
||||
|
||||
let $q=
|
||||
SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
|
||||
FROM
|
||||
t1, t2, t3
|
||||
LEFT JOIN
|
||||
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
|
||||
ON t3.oid = dt.id AND t3.otid = 14
|
||||
LEFT JOIN v1
|
||||
ON (v1.id1 = dt.a)
|
||||
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
|
||||
|
||||
eval $q;
|
||||
eval EXPLAIN $q;
|
||||
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1,t2,t3,t4,t5;
|
||||
|
||||
--echo # End of 10.3 tests
|
||||
|
||||
--echo #
|
||||
|
|
|
@ -4272,11 +4272,13 @@ GROUP BY
|
|||
LEFT((SYSDATE()), 'foo')
|
||||
WITH ROLLUP;
|
||||
SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b)
|
||||
NULL 1
|
||||
NULL 1
|
||||
0 1
|
||||
0 2
|
||||
Warnings:
|
||||
Warning 1292 Truncated incorrect INTEGER value: 'foo'
|
||||
Warning 1292 Truncated incorrect INTEGER value: 'foo'
|
||||
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
|
||||
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
|
||||
drop table t1;
|
||||
#
|
||||
#
|
||||
|
@ -4335,6 +4337,46 @@ pk a bit_or
|
|||
DROP TABLE t2;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
|
||||
#
|
||||
CREATE TABLE t1 (i1 int, a int);
|
||||
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
|
||||
CREATE TABLE t2 (i2 int);
|
||||
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
|
||||
SELECT
|
||||
a,
|
||||
RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
FROM
|
||||
t1, t2 WHERE t2.i2 = t1.i1
|
||||
GROUP BY
|
||||
a;
|
||||
a RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
DROP TABLE t1, t2;
|
||||
#
|
||||
# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
|
||||
#
|
||||
CREATE TABLE t1 (UID BIGINT);
|
||||
CREATE TABLE t2 (UID BIGINT);
|
||||
CREATE TABLE t3 (UID BIGINT);
|
||||
insert into t1 VALUES (1),(2);
|
||||
insert into t2 VALUES (1),(2);
|
||||
insert into t3 VALUES (1),(2);
|
||||
SELECT
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
FROM t1 TT1,
|
||||
t2 TT2,
|
||||
t3 TT3
|
||||
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
|
||||
GROUP BY TT1.UID
|
||||
;
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
1
|
||||
1
|
||||
DROP TABLE t1, t2, t3;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -2817,6 +2817,46 @@ DROP TABLE t2;
|
|||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (i1 int, a int);
|
||||
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
|
||||
|
||||
CREATE TABLE t2 (i2 int);
|
||||
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
|
||||
|
||||
SELECT
|
||||
a,
|
||||
RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
FROM
|
||||
t1, t2 WHERE t2.i2 = t1.i1
|
||||
GROUP BY
|
||||
a;
|
||||
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
|
||||
--echo #
|
||||
CREATE TABLE t1 (UID BIGINT);
|
||||
CREATE TABLE t2 (UID BIGINT);
|
||||
CREATE TABLE t3 (UID BIGINT);
|
||||
|
||||
insert into t1 VALUES (1),(2);
|
||||
insert into t2 VALUES (1),(2);
|
||||
insert into t3 VALUES (1),(2);
|
||||
SELECT
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
FROM t1 TT1,
|
||||
t2 TT2,
|
||||
t3 TT3
|
||||
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
|
||||
GROUP BY TT1.UID
|
||||
;
|
||||
|
||||
DROP TABLE t1, t2, t3;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
|
|
|
@ -24,3 +24,66 @@ pk count(a) over (order by pk rows between 2 preceding and 2 following)
|
|||
28 5
|
||||
27 5
|
||||
drop table t0,t1;
|
||||
#
|
||||
# MDEV-30052: Crash with a query containing nested WINDOW clauses
|
||||
#
|
||||
CREATE TABLE t1 (c INT);
|
||||
insert into t1 values (1),(2);
|
||||
UPDATE t1 SET c=1
|
||||
WHERE c=2
|
||||
ORDER BY
|
||||
(1 IN ((
|
||||
SELECT *
|
||||
FROM (SELECT * FROM t1) AS v1
|
||||
GROUP BY c
|
||||
WINDOW v2 AS (ORDER BY
|
||||
(SELECT *
|
||||
FROM t1
|
||||
GROUP BY c
|
||||
WINDOW v3 AS (PARTITION BY c)
|
||||
)
|
||||
)
|
||||
))
|
||||
);
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-29359: Server crashed with heap-use-after-free in
|
||||
# Field::is_null(long long) const (Just testcase)
|
||||
#
|
||||
CREATE TABLE t1 (id int);
|
||||
INSERT INTO t1 VALUES (-1),(0),(84);
|
||||
SELECT
|
||||
id IN (SELECT id
|
||||
FROM t1
|
||||
WINDOW w AS (ORDER BY (SELECT 1
|
||||
FROM t1
|
||||
WHERE
|
||||
EXISTS ( SELECT id
|
||||
FROM t1
|
||||
GROUP BY id
|
||||
WINDOW w2 AS (ORDER BY id)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
FROM t1;
|
||||
id IN (SELECT id
|
||||
FROM t1
|
||||
WINDOW w AS (ORDER BY (SELECT 1
|
||||
FROM t1
|
||||
WHERE
|
||||
EXISTS ( SELECT id
|
||||
FROM t1
|
||||
GROUP BY id
|
||||
WINDOW w2 AS (ORDER BY id)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
1
|
||||
1
|
||||
1
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
|
|
|
@ -33,3 +33,58 @@ limit 4;
|
|||
--disable_view_protocol
|
||||
|
||||
drop table t0,t1;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-30052: Crash with a query containing nested WINDOW clauses
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (c INT);
|
||||
insert into t1 values (1),(2);
|
||||
UPDATE t1 SET c=1
|
||||
WHERE c=2
|
||||
ORDER BY
|
||||
(1 IN ((
|
||||
SELECT *
|
||||
FROM (SELECT * FROM t1) AS v1
|
||||
GROUP BY c
|
||||
WINDOW v2 AS (ORDER BY
|
||||
(SELECT *
|
||||
FROM t1
|
||||
GROUP BY c
|
||||
WINDOW v3 AS (PARTITION BY c)
|
||||
)
|
||||
)
|
||||
))
|
||||
);
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29359: Server crashed with heap-use-after-free in
|
||||
--echo # Field::is_null(long long) const (Just testcase)
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (id int);
|
||||
INSERT INTO t1 VALUES (-1),(0),(84);
|
||||
|
||||
SELECT
|
||||
id IN (SELECT id
|
||||
FROM t1
|
||||
WINDOW w AS (ORDER BY (SELECT 1
|
||||
FROM t1
|
||||
WHERE
|
||||
EXISTS ( SELECT id
|
||||
FROM t1
|
||||
GROUP BY id
|
||||
WINDOW w2 AS (ORDER BY id)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
FROM t1;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
--echo #
|
||||
|
|
|
@ -0,0 +1,52 @@
|
|||
connect pause_purge,localhost,root;
|
||||
START TRANSACTION WITH CONSISTENT SNAPSHOT;
|
||||
connection default;
|
||||
CREATE TABLE t (pk int PRIMARY KEY, sk INT UNIQUE) ENGINE=InnoDB;
|
||||
INSERT INTO t VALUES (10, 100);
|
||||
connect con1,localhost,root;
|
||||
BEGIN;
|
||||
SELECT * FROM t WHERE sk = 100 FOR UPDATE;
|
||||
pk sk
|
||||
10 100
|
||||
connect con2,localhost,root;
|
||||
SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL insert_wait_started";
|
||||
INSERT INTO t VALUES (5, 100) # trx 1;
|
||||
connect con3,localhost,root;
|
||||
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SET DEBUG_SYNC="now WAIT_FOR insert_wait_started";
|
||||
SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL delete_started_waiting";
|
||||
BEGIN;
|
||||
UPDATE t SET sk = 200 WHERE sk = 100; # trx 2;
|
||||
connection con1;
|
||||
SET DEBUG_SYNC="now WAIT_FOR delete_started_waiting";
|
||||
DELETE FROM t WHERE sk=100;
|
||||
COMMIT;
|
||||
disconnect con1;
|
||||
connection con2;
|
||||
disconnect con2;
|
||||
connection con3;
|
||||
must be logged in ROW format as the only event of trx 2 (con3)
|
||||
INSERT INTO t VALUES (11, 101);
|
||||
COMMIT;
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; DELETE FROM t WHERE sk=100
|
||||
master-bin.000001 # Xid # # COMMIT /* XID */
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; INSERT INTO t VALUES (5, 100) # trx 1
|
||||
master-bin.000001 # Xid # # COMMIT /* XID */
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Annotate_rows # # INSERT INTO t VALUES (11, 101)
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Xid # # COMMIT /* XID */
|
||||
disconnect con3;
|
||||
connection default;
|
||||
SELECT * FROM t;
|
||||
pk sk
|
||||
5 100
|
||||
11 101
|
||||
disconnect pause_purge;
|
||||
SET DEBUG_SYNC="RESET";
|
||||
DROP TABLE t;
|
|
@ -0,0 +1,89 @@
|
|||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/have_debug_sync.inc
|
||||
--source include/have_binlog_format_mixed.inc
|
||||
--source include/count_sessions.inc
|
||||
|
||||
# MDEV-30010 merely adds a Read-Committed version of the MDEV-30225 test
|
||||
# solely to prove the RC isolation yields ROW binlog format as it is
|
||||
# supposed to:
|
||||
# https://mariadb.com/kb/en/unsafe-statements-for-statement-based-replication/#isolation-levels.
|
||||
# The original MDEV-30225 test is adapted to the RC to create
|
||||
# a similar sophisticated scenario which does not lead to any deadlock though.
|
||||
|
||||
--connect (pause_purge,localhost,root)
|
||||
START TRANSACTION WITH CONSISTENT SNAPSHOT;
|
||||
|
||||
--connection default
|
||||
CREATE TABLE t (pk int PRIMARY KEY, sk INT UNIQUE) ENGINE=InnoDB;
|
||||
INSERT INTO t VALUES (10, 100);
|
||||
|
||||
--connect (con1,localhost,root)
|
||||
BEGIN; # trx 0
|
||||
SELECT * FROM t WHERE sk = 100 FOR UPDATE;
|
||||
|
||||
--connect (con2,localhost,root)
|
||||
SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL insert_wait_started";
|
||||
# trx 1 is blocked trying to read the record in the secondary index during the duplicate
|
||||
# check. It's the first in waiting queue, that's why it will be woken up firstly
|
||||
# when trx 0 commits.
|
||||
--send INSERT INTO t VALUES (5, 100) # trx 1
|
||||
|
||||
--connect (con3,localhost,root)
|
||||
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SET DEBUG_SYNC="now WAIT_FOR insert_wait_started";
|
||||
SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL delete_started_waiting";
|
||||
# trx 2 can delete (5, 100) on master, but not on slave, as on slave trx 1
|
||||
# can insert (5, 100) after trx 2 positioned its cursor. Trx 2 lock is placed
|
||||
# in waiting queue after trx 1 lock, but its persistent cursor position was
|
||||
# stored on (100, 10) record in secondary index before suspending. After trx 1
|
||||
# is committed, trx 2 will restore persistent cursor position on (100, 10). As
|
||||
# (100, 5) secondary index record was inserted before (100, 10) in logical
|
||||
# order, and (100, 10) record is delete-marked, trx 2 just continues scanning.
|
||||
#
|
||||
# Note. There can be several records with the same key in unique secondary
|
||||
# index, but only one of them must be non-delete-marked. That's why when we do
|
||||
# point query, cursor position is set in the first record in logical order, and
|
||||
# then records are iterated until either non-delete-marked record is found or
|
||||
# all records with the same unique fields are iterated.
|
||||
|
||||
# to prepare showing interesting binlog events
|
||||
--let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
BEGIN;
|
||||
--send UPDATE t SET sk = 200 WHERE sk = 100; # trx 2
|
||||
|
||||
--connection con1
|
||||
SET DEBUG_SYNC="now WAIT_FOR delete_started_waiting";
|
||||
DELETE FROM t WHERE sk=100; # trx 0
|
||||
COMMIT;
|
||||
--disconnect con1
|
||||
|
||||
--connection con2
|
||||
--reap
|
||||
--disconnect con2
|
||||
|
||||
--connection con3
|
||||
--error 0
|
||||
--reap
|
||||
if (`SELECT ROW_COUNT() > 0`)
|
||||
{
|
||||
--echo unexpected effective UPDATE
|
||||
--die
|
||||
}
|
||||
--echo must be logged in ROW format as the only event of trx 2 (con3)
|
||||
INSERT INTO t VALUES (11, 101);
|
||||
COMMIT;
|
||||
--source include/show_binlog_events.inc
|
||||
--disconnect con3
|
||||
|
||||
--connection default
|
||||
# If the bug is not fixed, we will see the row inserted by trx 1 here. This can
|
||||
# cause duplicate key error on slave, when some other trx tries in insert row
|
||||
# with the same secondary key, as was inserted by trx 1, and not deleted by trx
|
||||
# 2.
|
||||
SELECT * FROM t;
|
||||
|
||||
--disconnect pause_purge
|
||||
SET DEBUG_SYNC="RESET";
|
||||
DROP TABLE t;
|
||||
--source include/wait_until_count_sessions.inc
|
|
@ -4278,11 +4278,13 @@ GROUP BY
|
|||
LEFT((SYSDATE()), 'foo')
|
||||
WITH ROLLUP;
|
||||
SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b)
|
||||
NULL 1
|
||||
NULL 1
|
||||
0 1
|
||||
0 2
|
||||
Warnings:
|
||||
Warning 1292 Truncated incorrect INTEGER value: 'foo'
|
||||
Warning 1292 Truncated incorrect INTEGER value: 'foo'
|
||||
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
|
||||
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
|
||||
drop table t1;
|
||||
#
|
||||
#
|
||||
|
@ -4341,6 +4343,46 @@ pk a bit_or
|
|||
DROP TABLE t2;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
|
||||
#
|
||||
CREATE TABLE t1 (i1 int, a int);
|
||||
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
|
||||
CREATE TABLE t2 (i2 int);
|
||||
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
|
||||
SELECT
|
||||
a,
|
||||
RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
FROM
|
||||
t1, t2 WHERE t2.i2 = t1.i1
|
||||
GROUP BY
|
||||
a;
|
||||
a RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
DROP TABLE t1, t2;
|
||||
#
|
||||
# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
|
||||
#
|
||||
CREATE TABLE t1 (UID BIGINT);
|
||||
CREATE TABLE t2 (UID BIGINT);
|
||||
CREATE TABLE t3 (UID BIGINT);
|
||||
insert into t1 VALUES (1),(2);
|
||||
insert into t2 VALUES (1),(2);
|
||||
insert into t3 VALUES (1),(2);
|
||||
SELECT
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
FROM t1 TT1,
|
||||
t2 TT2,
|
||||
t3 TT3
|
||||
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
|
||||
GROUP BY TT1.UID
|
||||
;
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
1
|
||||
1
|
||||
DROP TABLE t1, t2, t3;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
|
|
mysql-test/suite/mariabackup/incremental_drop_db.result (new file, 30 lines)
|
@ -0,0 +1,30 @@
|
|||
call mtr.add_suppression("InnoDB: New log files created");
|
||||
#
|
||||
# Start of 10.3 tests
|
||||
#
|
||||
#
|
||||
# MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
|
||||
#
|
||||
CREATE DATABASE db1;
|
||||
CREATE DATABASE db2;
|
||||
CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
|
||||
CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
|
||||
# Create base backup
|
||||
DROP DATABASE db1;
|
||||
# Create incremental backup
|
||||
# Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
|
||||
# Prepare base backup, apply incremental one
|
||||
# shutdown server
|
||||
# remove datadir
|
||||
# xtrabackup move back
|
||||
# restart
|
||||
# Expect no 'db1' in the output, because it was really dropped.
|
||||
# Expect 'db2' in the output, because it was not dropped!
|
||||
# (its incremental directory was emptied only)
|
||||
SHOW DATABASES LIKE 'db%';
|
||||
Database (db%)
|
||||
db2
|
||||
DROP DATABASE db2;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
mysql-test/suite/mariabackup/incremental_drop_db.test (new file, 68 lines)
|
@ -0,0 +1,68 @@
|
|||
--source include/have_innodb.inc
|
||||
call mtr.add_suppression("InnoDB: New log files created");
|
||||
|
||||
--echo #
|
||||
--echo # Start of 10.3 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
|
||||
--echo #
|
||||
|
||||
--let $datadir=`SELECT @@datadir`
|
||||
--let $basedir=$MYSQLTEST_VARDIR/tmp/backup
|
||||
--let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1
|
||||
|
||||
# Create two databases:
|
||||
# - db1 is dropped normally below
|
||||
# - db2 is used to cover a corner case: its db.opt file is removed
|
||||
|
||||
# Incremental backup contains:
|
||||
# - no directory for db1
|
||||
# - an empty directory for db2 (after we remove db2/db.opt)
|
||||
|
||||
|
||||
CREATE DATABASE db1;
|
||||
CREATE DATABASE db2;
|
||||
|
||||
# Add some tables to db1
|
||||
CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
|
||||
CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
|
||||
|
||||
--echo # Create base backup
|
||||
--disable_result_log
|
||||
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir
|
||||
--enable_result_log
|
||||
|
||||
DROP DATABASE db1;
|
||||
|
||||
--echo # Create incremental backup
|
||||
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir
|
||||
|
||||
--echo # Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
|
||||
--remove_file $incremental_dir/db2/db.opt
|
||||
|
||||
|
||||
--echo # Prepare base backup, apply incremental one
|
||||
--disable_result_log
|
||||
--exec $XTRABACKUP --prepare --target-dir=$basedir
|
||||
--exec $XTRABACKUP --prepare --target-dir=$basedir --incremental-dir=$incremental_dir
|
||||
--enable_result_log
|
||||
|
||||
--let $targetdir=$basedir
|
||||
--source include/restart_and_restore.inc
|
||||
--enable_result_log
|
||||
|
||||
--echo # Expect no 'db1' in the output, because it was really dropped.
|
||||
--echo # Expect 'db2' in the output, because it was not dropped!
|
||||
--echo # (its incremental directory was emptied only)
|
||||
|
||||
SHOW DATABASES LIKE 'db%';
|
||||
DROP DATABASE db2;
|
||||
|
||||
--rmdir $basedir
|
||||
--rmdir $incremental_dir
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
--echo #
|
mysql-test/suite/rpl/include/create_or_drop_sync_func.inc (new file, 75 lines)
|
@ -0,0 +1,75 @@
|
|||
# Creates or drops a stored function as a part of debug-sync based
|
||||
# synchronization mechanism between replication servers.
|
||||
#
|
||||
# Parameters:
|
||||
# $create_or_drop= [create]
|
||||
# $server_master = [master]
|
||||
# $server_slave = [slave]
|
||||
if (!$create_or_drop)
|
||||
{
|
||||
--let $create_or_drop=create
|
||||
}
|
||||
|
||||
if (`select strcmp('$create_or_drop', 'create') = 0`)
|
||||
{
|
||||
if (!$server_master)
|
||||
{
|
||||
--let $server_master=master
|
||||
}
|
||||
if (!$server_slave)
|
||||
{
|
||||
--let $server_slave=slave
|
||||
}
|
||||
|
||||
--connection $server_master
|
||||
# Use a stored function to inject a debug_sync into the appropriate THD.
|
||||
# The function does nothing on the master, and on the slave it injects the
|
||||
# desired debug_sync action(s).
|
||||
SET sql_log_bin=0;
|
||||
--delimiter ||
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
--delimiter ;
|
||||
SET sql_log_bin=1;
|
||||
|
||||
--connection $server_slave
|
||||
|
||||
SET sql_log_bin=0;
|
||||
--delimiter ||
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
IF d1 != '' THEN
|
||||
SET debug_sync = d1;
|
||||
END IF;
|
||||
IF d2 != '' THEN
|
||||
SET debug_sync = d2;
|
||||
END IF;
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
--delimiter ;
|
||||
SET sql_log_bin=1;
|
||||
}
|
||||
|
||||
if (`select strcmp('$create_or_drop', 'drop') = 0`)
|
||||
{
|
||||
if (!$server_slave)
|
||||
{
|
||||
--let $server_slave=slave
|
||||
}
|
||||
if (!$server_master)
|
||||
{
|
||||
--let $server_master=master
|
||||
}
|
||||
--connection $server_slave
|
||||
SET DEBUG_SYNC='RESET';
|
||||
|
||||
--connection $server_master
|
||||
SET DEBUG_SYNC='RESET';
|
||||
DROP FUNCTION foo;
|
||||
}
|
mysql-test/suite/rpl/r/rpl_delayed_parallel_slave_sbm.result (new file, 60 lines)
|
@ -0,0 +1,60 @@
|
|||
include/master-slave.inc
|
||||
[connection master]
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
change master to master_delay=3, master_use_gtid=Slave_Pos;
|
||||
set @@GLOBAL.slave_parallel_threads=2;
|
||||
include/start_slave.inc
|
||||
connection master;
|
||||
create table t1 (a int);
|
||||
include/sync_slave_sql_with_master.inc
|
||||
#
|
||||
# Pt 1) Ensure SBM is updated immediately upon arrival of the next event
|
||||
# Lock t1 on slave so the first received transaction does not complete/commit
|
||||
connection slave;
|
||||
LOCK TABLES t1 WRITE;
|
||||
connection master;
|
||||
# Sleep 2 to allow a buffer between events for SBM check
|
||||
insert into t1 values (0);
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
# Waiting for transaction to arrive on slave and begin SQL Delay..
|
||||
# Validating SBM is updated on event arrival..
|
||||
# ..done
|
||||
connection slave;
|
||||
UNLOCK TABLES;
|
||||
include/sync_with_master_gtid.inc
|
||||
#
|
||||
# Pt 2) If the SQL thread has not entered an idle state, ensure
|
||||
# following events do not update SBM
|
||||
# Stop slave IO thread so it receives both events together on restart
|
||||
connection slave;
|
||||
include/stop_slave_io.inc
|
||||
connection master;
|
||||
# Sleep 2 to allow a buffer between events for SBM check
|
||||
insert into t1 values (1);
|
||||
# Sleep 3 to create gap between events
|
||||
insert into t1 values (2);
|
||||
connection slave;
|
||||
LOCK TABLES t1 WRITE;
|
||||
START SLAVE IO_THREAD;
|
||||
# Wait for first transaction to complete SQL delay and begin execution..
|
||||
# Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
|
||||
# ..and that SBM wasn't calculated using prior committed transactions
|
||||
# ..done
|
||||
connection slave;
|
||||
UNLOCK TABLES;
|
||||
#
|
||||
# Cleanup
|
||||
# Reset master_delay
|
||||
include/stop_slave.inc
|
||||
CHANGE MASTER TO master_delay=0;
|
||||
set @@GLOBAL.slave_parallel_threads=4;
|
||||
include/start_slave.inc
|
||||
connection master;
|
||||
DROP TABLE t1;
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/rpl_end.inc
|
||||
# End of rpl_delayed_parallel_slave_sbm.test
|
mysql-test/suite/rpl/r/rpl_parallel_analyze.result (new file, 76 lines)
|
@ -0,0 +1,76 @@
|
|||
include/master-slave.inc
|
||||
[connection master]
|
||||
# Initialize
|
||||
connection slave;
|
||||
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
|
||||
# Setup data
|
||||
connection master;
|
||||
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
|
||||
CREATE TABLE ta (a int);
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
connection master;
|
||||
SET sql_log_bin=0;
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
SET sql_log_bin=1;
|
||||
connection slave;
|
||||
SET sql_log_bin=0;
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
IF d1 != '' THEN
|
||||
SET debug_sync = d1;
|
||||
END IF;
|
||||
IF d2 != '' THEN
|
||||
SET debug_sync = d2;
|
||||
END IF;
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
SET sql_log_bin=1;
|
||||
include/stop_slave.inc
|
||||
SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
|
||||
SET @old_parallel_mode =@@GLOBAL.slave_parallel_mode;
|
||||
SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
|
||||
SET GLOBAL slave_parallel_threads=10;
|
||||
SET GLOBAL slave_parallel_mode=conservative;
|
||||
SET GLOBAL gtid_strict_mode=ON;
|
||||
include/start_slave.inc
|
||||
connection master;
|
||||
SET @old_format= @@SESSION.binlog_format;
|
||||
SET binlog_format=statement;
|
||||
INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));
|
||||
ANALYZE TABLE ta;
|
||||
Table Op Msg_type Msg_text
|
||||
test.ta analyze status Engine-independent statistics collected
|
||||
test.ta analyze status Table is already up to date
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
|
||||
info
|
||||
ANALYZE TABLE ta
|
||||
set @@debug_sync="now signal sig_go";
|
||||
include/sync_with_master_gtid.inc
|
||||
# Cleanup
|
||||
connection master;
|
||||
DROP TABLE t1,ta;
|
||||
connection slave;
|
||||
SET DEBUG_SYNC='RESET';
|
||||
connection master;
|
||||
SET DEBUG_SYNC='RESET';
|
||||
DROP FUNCTION foo;
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
|
||||
SET @@GLOBAL.slave_parallel_mode =@old_parallel_mode;
|
||||
SET @@GLOBAL.gtid_strict_mode =@old_gtid_strict_mode;
|
||||
include/start_slave.inc
|
||||
include/rpl_end.inc
|
|
@ -0,0 +1 @@
|
|||
--slave-parallel-threads=4
|
mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm.test (new file, 133 lines)
|
@ -0,0 +1,133 @@
|
|||
#
|
||||
# This test ensures that after a delayed parallel slave has idled, i.e.
|
||||
# executed everything in its relay log, the next event group that the SQL
|
||||
# thread reads from the relay log will immediately be used in the
|
||||
# Seconds_Behind_Master. In particular, it ensures that the calculation for
|
||||
# Seconds_Behind_Master is based on the timestamp of the new transaction,
|
||||
# rather than the last committed transaction.
|
||||
#
|
||||
# References:
|
||||
# MDEV-29639: Seconds_Behind_Master is incorrect for Delayed, Parallel
|
||||
# Replicas
|
||||
#
|
||||
|
||||
--source include/master-slave.inc
|
||||
|
||||
--connection slave
|
||||
--source include/stop_slave.inc
|
||||
--let $master_delay= 3
|
||||
--eval change master to master_delay=$master_delay, master_use_gtid=Slave_Pos
|
||||
--let $old_slave_threads= `SELECT @@GLOBAL.slave_parallel_threads`
|
||||
set @@GLOBAL.slave_parallel_threads=2;
|
||||
--source include/start_slave.inc
|
||||
|
||||
--connection master
|
||||
create table t1 (a int);
|
||||
--source include/sync_slave_sql_with_master.inc
|
||||
|
||||
--echo #
|
||||
--echo # Pt 1) Ensure SBM is updated immediately upon arrival of the next event
|
||||
|
||||
--echo # Lock t1 on slave so the first received transaction does not complete/commit
|
||||
--connection slave
|
||||
LOCK TABLES t1 WRITE;
|
||||
|
||||
--connection master
|
||||
--echo # Sleep 2 to allow a buffer between events for SBM check
sleep 2;

--let $ts_trx_before_ins= `SELECT UNIX_TIMESTAMP()`
--let insert_ctr= 0
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--source include/save_master_gtid.inc

--connection slave

--echo # Waiting for transaction to arrive on slave and begin SQL Delay..
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting until MASTER_DELAY seconds after master executed event';
--source include/wait_condition.inc

--echo # Validating SBM is updated on event arrival..
--let $sbm_trx1_arrive= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
--let $seconds_since_idling= `SELECT UNIX_TIMESTAMP() - $ts_trx_before_ins`
if (`SELECT $sbm_trx1_arrive > ($seconds_since_idling + 1)`)
{
  --echo # SBM was $sbm_trx1_arrive yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
  --die Seconds_Behind_Master should reset after idling
}
--echo # ..done

--connection slave
UNLOCK TABLES;
--source include/sync_with_master_gtid.inc

--echo #
--echo # Pt 2) If the SQL thread has not entered an idle state, ensure
--echo # following events do not update SBM

--echo # Stop slave IO thread so it receives both events together on restart
--connection slave
--source include/stop_slave_io.inc

--connection master

--echo # Sleep 2 to allow a buffer between events for SBM check
sleep 2;
--let $ts_trxpt2_before_ins= `SELECT UNIX_TIMESTAMP()`
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--echo # Sleep 3 to create gap between events
sleep 3;
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--let $ts_trx_after_ins= `SELECT UNIX_TIMESTAMP()`

--connection slave
LOCK TABLES t1 WRITE;

START SLAVE IO_THREAD;

--echo # Wait for first transaction to complete SQL delay and begin execution..
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting for table metadata lock%' AND command LIKE 'Slave_Worker';
--source include/wait_condition.inc

--echo # Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
--let $sbm_after_trx_no_idle= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
--let $timestamp_trxpt2_arrive= `SELECT UNIX_TIMESTAMP()`
if (`SELECT $sbm_after_trx_no_idle < $timestamp_trxpt2_arrive - $ts_trx_after_ins`)
{
  --let $cmpv= `SELECT $timestamp_trxpt2_arrive - $ts_trx_after_ins`
  --echo # SBM $sbm_after_trx_no_idle was more recent than time since last transaction ($cmpv seconds)
  --die Seconds_Behind_Master should not have used second transaction timestamp
}
--let $seconds_since_idling= `SELECT ($timestamp_trxpt2_arrive - $ts_trxpt2_before_ins)`
--echo # ..and that SBM wasn't calculated using prior committed transactions
if (`SELECT $sbm_after_trx_no_idle > ($seconds_since_idling + 1)`)
{
  --echo # SBM was $sbm_after_trx_no_idle yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
  --die Seconds_Behind_Master calculation should not have used prior committed transaction
}
--echo # ..done

--connection slave
UNLOCK TABLES;

--echo #
--echo # Cleanup

--echo # Reset master_delay
--source include/stop_slave.inc
--eval CHANGE MASTER TO master_delay=0
--eval set @@GLOBAL.slave_parallel_threads=$old_slave_threads
--source include/start_slave.inc

--connection master
DROP TABLE t1;
--source include/save_master_gtid.inc

--connection slave
--source include/sync_with_master_gtid.inc

--source include/rpl_end.inc
--echo # End of rpl_delayed_parallel_slave_sbm.test
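The checks above revolve around how Seconds_Behind_Master behaves on a delayed parallel replica. As a plain-SQL sketch of the same moving parts outside the mysqltest harness (the 1234-second delay and the table names are illustrative only):

-- On the replica, which already has a configured master connection:
STOP SLAVE;
CHANGE MASTER TO master_delay=1234;   -- hold each transaction for 1234 seconds
START SLAVE;

-- While a transaction sits out its delay, Seconds_Behind_Master should track
-- the age of that waiting transaction; it should neither reset to 0 nor jump
-- ahead because of later, not-yet-executed transactions:
SHOW SLAVE STATUS;                    -- inspect the Seconds_Behind_Master column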
84 mysql-test/suite/rpl/t/rpl_parallel_analyze.test Normal file
@@ -0,0 +1,84 @@
# The test file is created to prove fixes to
# MDEV-30323 Some DDLs like ANALYZE can complete on parallel slave out of order
# Debug-sync tests aiming at parallel replication of ADMIN commands
# are welcome here.

--source include/have_innodb.inc
--source include/have_debug_sync.inc
--source include/master-slave.inc

--echo # Initialize
--connection slave
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;

--echo # Setup data
--connection master
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE ta (a int);
--let $pre_load_gtid=`SELECT @@last_gtid`
--source include/save_master_gtid.inc

--connection slave
--source include/sync_with_master_gtid.inc

--source suite/rpl/include/create_or_drop_sync_func.inc

# configure MDEV-30323 slave
--source include/stop_slave.inc
SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode    =@@GLOBAL.slave_parallel_mode;
SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
SET GLOBAL slave_parallel_threads=10;
SET GLOBAL slave_parallel_mode=conservative;
SET GLOBAL gtid_strict_mode=ON;
--source include/start_slave.inc

# MDEV-30323 setup needs two groups of events, the first of which is a DML
# and ANALYZE is the 2nd.
# The latter is made to race in slave execution over the DML thanks
# to a DML latency simulation.
# In the fixed case the race-over should not be a problem: ultimately
# ANALYZE must wait for its turn to update slave@@global.gtid_binlog_pos.
# Otherwise the reported OOO error must be issued.

--connection master
SET @old_format= @@SESSION.binlog_format;
SET binlog_format=statement;
INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));

ANALYZE TABLE ta;
--source include/save_master_gtid.inc

--connection slave
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
--source include/wait_condition.inc

SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
if (`select strcmp(@@global.gtid_binlog_pos, '$pre_load_gtid') <> 0 or strcmp(@@global.gtid_slave_pos, '$pre_load_gtid') <> 0`)
{
  --let $bs=`SELECT @@global.gtid_binlog_pos`
  --let $es=`SELECT @@global.gtid_slave_pos`
  --echo Mismatch between expected $pre_load_gtid state and the actual binlog state " @@global.gtid_binlog_pos = $bs or/and slave execution state @@global.gtid_slave_pos = $es.
  --die
}

set @@debug_sync="now signal sig_go";
--source include/sync_with_master_gtid.inc

--echo # Cleanup
--connection master
DROP TABLE t1,ta;
--let $create_or_drop=drop
--source suite/rpl/include/create_or_drop_sync_func.inc

--source include/save_master_gtid.inc

--connection slave
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
SET @@GLOBAL.slave_parallel_mode   =@old_parallel_mode;
SET @@GLOBAL.gtid_strict_mode      =@old_gtid_strict_mode;
--source include/start_slave.inc

--source include/rpl_end.inc
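The comment block in the test above describes the race it provokes: with several parallel applier threads, the ANALYZE group can reach its commit step before the artificially delayed INSERT. A rough sketch of the symptom the fix rules out, checked while the INSERT is still blocked on the replica (the GTID values are placeholders):

-- Neither position may have advanced past the state recorded before the
-- blocked INSERT; if ANALYZE had committed out of order, gtid_binlog_pos
-- would already contain its sequence number (e.g. '0-1-7' instead of '0-1-5'):
SELECT @@GLOBAL.gtid_binlog_pos, @@GLOBAL.gtid_slave_pos;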
@@ -10691,7 +10691,7 @@ table_map Item_direct_view_ref::used_tables() const
  table_map used= (*ref)->used_tables();
  return (used ?
          used :
          ((null_ref_table != NO_NULL_TABLE) ?
          (null_ref_table != NO_NULL_TABLE && !null_ref_table->const_table ?
           null_ref_table->map :
           (table_map)0 ));
}

@@ -366,7 +366,14 @@ public:
  int8 aggr_level;        /* nesting level of the aggregating subquery */
  int8 max_arg_level;     /* max level of unbound column references */
  int8 max_sum_func_level;/* max level of aggregation for embedded functions */
  bool quick_group;       /* If incremental update of fields */

  /*
    true (the default value) means this aggregate function can be computed
    with TemporaryTableWithPartialSums algorithm (see end_update()).
    false means this aggregate function needs OrderedGroupBy algorithm (see
    end_write_group()).
  */
  bool quick_group;
  /*
    This list is used by the check for mixing non aggregated fields and
    sum functions in the ONLY_FULL_GROUP_BY_MODE. We save all outer fields

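The new comment separates aggregates that can be folded into a temporary-table row incrementally from those that need their group's rows delivered in order. As a rough SQL illustration (exactly which functions clear quick_group is an implementation detail, but GROUP_CONCAT with its own ORDER BY is a typical candidate):

-- SUM can be accumulated per group in any row order, so the
-- TemporaryTableWithPartialSums strategy is sufficient:
SELECT a, SUM(b) FROM t1 GROUP BY a;

-- An aggregate whose result depends on the order of its input rows,
-- such as GROUP_CONCAT(... ORDER BY ...), calls for OrderedGroupBy:
SELECT a, GROUP_CONCAT(b ORDER BY b) FROM t1 GROUP BY a;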
@@ -263,7 +263,7 @@ static char *get_plugindir()
{
  static char plugin_dir[2*MAX_PATH];
  get_basedir(plugin_dir, sizeof(plugin_dir), mysqld_path);
  strcat(plugin_dir, "/" STR(INSTALL_PLUGINDIR));
  safe_strcat(plugin_dir, sizeof(plugin_dir), "/" STR(INSTALL_PLUGINDIR));

  if (access(plugin_dir, 0) == 0)
    return plugin_dir;

@@ -5152,12 +5152,11 @@ static int init_server_components()
  else // full wsrep initialization
  {
    // add basedir/bin to PATH to resolve wsrep script names
    char* const tmp_path= (char*)my_alloca(strlen(mysql_home) +
                                           strlen("/bin") + 1);
    size_t tmp_path_size= strlen(mysql_home) + 5; /* including "/bin" */
    char* const tmp_path= (char*)my_alloca(tmp_path_size);
    if (tmp_path)
    {
      strcpy(tmp_path, mysql_home);
      strcat(tmp_path, "/bin");
      snprintf(tmp_path, tmp_path_size, "%s/bin", mysql_home);
      wsrep_prepend_PATH(tmp_path);
    }
    else

@@ -5907,8 +5906,9 @@ int mysqld_main(int argc, char **argv)
    char real_server_version[2 * SERVER_VERSION_LENGTH + 10];

    set_server_version(real_server_version, sizeof(real_server_version));
    strcat(real_server_version, "' as '");
    strcat(real_server_version, server_version);
    safe_strcat(real_server_version, sizeof(real_server_version), "' as '");
    safe_strcat(real_server_version, sizeof(real_server_version),
                server_version);

    sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname,
                          real_server_version,

@@ -8256,7 +8256,8 @@ static int mysql_init_variables(void)
    }
    else
      my_path(prg_dev, my_progname, "mysql/bin");
    strcat(prg_dev,"/../");     // Remove 'bin' to get base dir
    // Remove 'bin' to get base dir
    safe_strcat(prg_dev, sizeof(prg_dev), "/../");
    cleanup_dirname(mysql_home,prg_dev);
  }
#else

@@ -56,8 +56,7 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev,
  rgi->event_relay_log_pos= qev->event_relay_log_pos;
  rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos;
  strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name);
  if (!(ev->is_artificial_event() || ev->is_relay_log_event() ||
        (ev->when == 0)))
  if (event_can_update_last_master_timestamp(ev))
    rgi->last_master_timestamp= ev->when + (time_t)ev->exec_time;
  err= apply_event_and_update_pos_for_parallel(ev, thd, rgi);

31 sql/slave.cc
@@ -4148,10 +4148,10 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
    the user might be surprised to see a claim that the slave is up to date
    long before those queued events are actually executed.
  */
  if (!rli->mi->using_parallel() &&
      !(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
  if ((!rli->mi->using_parallel()) && event_can_update_last_master_timestamp(ev))
  {
    rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
    rli->sql_thread_caught_up= false;
    DBUG_ASSERT(rli->last_master_timestamp >= 0);
  }

@@ -4203,6 +4203,17 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,

  if (rli->mi->using_parallel())
  {
    if (unlikely((rli->last_master_timestamp == 0 ||
                  rli->sql_thread_caught_up) &&
                 event_can_update_last_master_timestamp(ev)))
    {
      if (rli->last_master_timestamp < ev->when)
      {
        rli->last_master_timestamp= ev->when;
        rli->sql_thread_caught_up= false;
      }
    }

    int res= rli->parallel.do_event(serial_rgi, ev, event_size);
    /*
      In parallel replication, we need to update the relay log position

@@ -4223,7 +4234,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
    This is the case for pre-10.0 events without GTID, and for handling
    slave_skip_counter.
  */
  if (!(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
  if (event_can_update_last_master_timestamp(ev))
  {
    /*
      Ignore FD's timestamp as it does not reflect the slave execution

@@ -4231,7 +4242,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
      data modification event execution last long all this time
      Seconds_Behind_Master is zero.
    */
    if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT)
    if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT &&
        rli->last_master_timestamp < ev->when)
      rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;

    DBUG_ASSERT(rli->last_master_timestamp >= 0);

@@ -7590,7 +7602,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)

      if (hot_log)
        mysql_mutex_unlock(log_lock);
      rli->sql_thread_caught_up= false;
      DBUG_RETURN(ev);
    }
    if (opt_reckless_slave) // For mysql-test

@@ -7754,7 +7765,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
        rli->relay_log.wait_for_update_relay_log(rli->sql_driver_thd);
        // re-acquire data lock since we released it earlier
        mysql_mutex_lock(&rli->data_lock);
        rli->sql_thread_caught_up= false;
        continue;
      }
      /*

@@ -7945,12 +7955,19 @@ event(errno: %d cur_log->error: %d)",
  {
    sql_print_information("Error reading relay log event: %s",
                          "slave SQL thread was killed");
    DBUG_RETURN(0);
    goto end;
  }

err:
  if (errmsg)
    sql_print_error("Error reading relay log event: %s", errmsg);

end:
  /*
    Set that we are not caught up so if there is a hang/problem on restart,
    Seconds_Behind_Master will still grow.
  */
  rli->sql_thread_caught_up= false;
  DBUG_RETURN(0);
}
#ifdef WITH_WSREP

12 sql/slave.h
@@ -49,6 +49,7 @@
#include "rpl_filter.h"
#include "rpl_tblmap.h"
#include "rpl_gtid.h"
#include "log_event.h"

#define SLAVE_NET_TIMEOUT 60

@@ -293,6 +294,17 @@ extern char *report_host, *report_password;

extern I_List<THD> threads;

/*
  Check that a binlog event (read from the relay log) is valid to update
  last_master_timestamp. That is, a valid event is one with a consistent
  timestamp which originated from a primary server.
*/
static inline bool event_can_update_last_master_timestamp(Log_event *ev)
{
  return ev && !(ev->is_artificial_event() || ev->is_relay_log_event() ||
                 (ev->when == 0));
}

#else
#define close_active_mi() /* no-op */
#endif /* HAVE_REPLICATION */

@@ -1271,6 +1271,8 @@ send_result_message:
      goto err;
    DEBUG_SYNC(thd, "admin_command_kill_after_modify");
  }
  thd->resume_subsequent_commits(suspended_wfc);
  DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););
  if (is_table_modified && is_cmd_replicated &&
      (!opt_readonly || thd->slave_thread) && !thd->lex->no_write_to_binlog)
  {

@@ -1280,10 +1282,8 @@ send_result_message:
    if (res)
      goto err;
  }

  my_eof(thd);
  thd->resume_subsequent_commits(suspended_wfc);
  DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););

  DBUG_RETURN(FALSE);

err:

@@ -5776,6 +5776,12 @@ public:
  uint  sum_func_count;
  uint  hidden_field_count;
  uint  group_parts,group_length,group_null_parts;

  /*
    If we're doing a GROUP BY operation, shows which one is used:
      true  TemporaryTableWithPartialSums algorithm (see end_update()).
      false OrderedGroupBy algorithm (see end_write_group()).
  */
  uint  quick_group;
  /**
    Enabled when we have atleast one outer_sum_func. Needed when used

@@ -99,49 +99,6 @@ bool LEX::check_dependencies_in_with_clauses()
}


/**
  @brief
    Resolve references to CTE in specification of hanging CTE

  @details
    A CTE to which there are no references in the query is called hanging CTE.
    Although such CTE is not used for execution, its specification must be
    subject to context analysis. All errors concerning references to
    non-existing tables or fields occurring in the specification must be
    reported as well as all other errors caught at the prepare stage.
    The specification of a hanging CTE might contain references to other
    CTE outside of the specification and within it if the specification
    contains a with clause. This function resolves all such references for
    all hanging CTEs encountered in the processed query.

  @retval
    false on success
    true on failure
*/

bool
LEX::resolve_references_to_cte_in_hanging_cte()
{
  for (With_clause *with_clause= with_clauses_list;
       with_clause; with_clause= with_clause->next_with_clause)
  {
    for (With_element *with_elem= with_clause->with_list.first;
         with_elem; with_elem= with_elem->next)
    {
      if (!with_elem->is_referenced())
      {
        TABLE_LIST *first_tbl=
          with_elem->spec->first_select()->table_list.first;
        TABLE_LIST **with_elem_end_pos= with_elem->head->tables_pos.end_pos;
        if (first_tbl && resolve_references_to_cte(first_tbl, with_elem_end_pos))
          return true;
      }
    }
  }
  return false;
}


/**
  @brief
    Resolve table references to CTE from a sub-chain of table references

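The doc comment above defines a hanging CTE as one that no part of the query references, yet whose specification must still be resolved. A small illustration (table and column names are made up):

-- cte_unused is never referenced by the main query ("hanging"),
-- but its body still goes through name resolution at prepare time:
WITH cte_unused AS (SELECT a FROM t1)
SELECT 1;

-- Consequently a broken specification must still raise an error,
-- even though the CTE would never be executed:
WITH cte_unused AS (SELECT no_such_column FROM t1)
SELECT 1;    -- expected to fail with an unknown-column error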
@ -288,8 +245,6 @@ LEX::check_cte_dependencies_and_resolve_references()
|
|||
return false;
|
||||
if (resolve_references_to_cte(query_tables, query_tables_last))
|
||||
return true;
|
||||
if (resolve_references_to_cte_in_hanging_cte())
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -488,47 +443,33 @@ With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl,
|
|||
st_unit_ctxt_elem *ctxt)
|
||||
{
|
||||
With_element *found= 0;
|
||||
st_select_lex_unit *top_unit= 0;
|
||||
for (st_unit_ctxt_elem *unit_ctxt_elem= ctxt;
|
||||
unit_ctxt_elem;
|
||||
unit_ctxt_elem= unit_ctxt_elem->prev)
|
||||
{
|
||||
st_select_lex_unit *unit= unit_ctxt_elem->unit;
|
||||
With_clause *with_clause= unit->with_clause;
|
||||
/*
|
||||
First look for the table definition in the with clause attached to 'unit'
|
||||
if there is any such clause.
|
||||
*/
|
||||
if (with_clause)
|
||||
{
|
||||
found= with_clause->find_table_def(tbl, NULL);
|
||||
/*
|
||||
If the reference to tbl that has to be resolved belongs to
|
||||
the FROM clause of a descendant of top_unit->with_element
|
||||
and this with element belongs to with_clause then this
|
||||
element must be used as the barrier for the search in the
|
||||
the list of CTEs from with_clause unless the clause contains
|
||||
RECURSIVE.
|
||||
*/
|
||||
With_element *barrier= 0;
|
||||
if (top_unit && !with_clause->with_recursive &&
|
||||
top_unit->with_element &&
|
||||
top_unit->with_element->get_owner() == with_clause)
|
||||
barrier= top_unit->with_element;
|
||||
found= with_clause->find_table_def(tbl, barrier);
|
||||
if (found)
|
||||
break;
|
||||
}
|
||||
/*
|
||||
If 'unit' is the unit that defines a with element then reset 'unit'
|
||||
to the unit whose attached with clause contains this with element.
|
||||
*/
|
||||
With_element *with_elem= unit->with_element;
|
||||
if (with_elem)
|
||||
{
|
||||
if (!(unit_ctxt_elem= unit_ctxt_elem->prev))
|
||||
break;
|
||||
unit= unit_ctxt_elem->unit;
|
||||
}
|
||||
with_clause= unit->with_clause;
|
||||
/*
|
||||
Now look for the table definition in this with clause. If the with clause
|
||||
contains RECURSIVE the search is performed through all CTE definitions in
|
||||
clause, otherwise up to the definition of 'with_elem' unless it is NULL.
|
||||
*/
|
||||
if (with_clause)
|
||||
{
|
||||
found= with_clause->find_table_def(tbl,
|
||||
with_clause->with_recursive ?
|
||||
NULL : with_elem);
|
||||
if (found)
|
||||
break;
|
||||
}
|
||||
top_unit= unit;
|
||||
}
|
||||
return found;
|
||||
}
|
||||
|
|
|
@ -323,8 +323,6 @@ public:
|
|||
friend
|
||||
bool LEX::resolve_references_to_cte(TABLE_LIST *tables,
|
||||
TABLE_LIST **tables_last);
|
||||
friend
|
||||
bool LEX::resolve_references_to_cte_in_hanging_cte();
|
||||
};
|
||||
|
||||
const uint max_number_of_elements_in_with_clause= sizeof(table_map)*8;
|
||||
|
@ -438,9 +436,6 @@ public:
|
|||
|
||||
friend
|
||||
bool LEX::check_dependencies_in_with_clauses();
|
||||
|
||||
friend
|
||||
bool LEX::resolve_references_to_cte_in_hanging_cte();
|
||||
};
|
||||
|
||||
inline
|
||||
|
|
|
@ -774,8 +774,6 @@ void LEX::start(THD *thd_arg)
|
|||
stmt_var_list.empty();
|
||||
proc_list.elements=0;
|
||||
|
||||
save_group_list.empty();
|
||||
save_order_list.empty();
|
||||
win_ref= NULL;
|
||||
win_frame= NULL;
|
||||
frame_top_bound= NULL;
|
||||
|
|
|
@ -1085,6 +1085,7 @@ public:
|
|||
group_list_ptrs, and re-establish the original list before each execution.
|
||||
*/
|
||||
SQL_I_List<ORDER> group_list;
|
||||
SQL_I_List<ORDER> save_group_list;
|
||||
Group_list_ptrs *group_list_ptrs;
|
||||
|
||||
List<Item> item_list; /* list of fields & expressions */
|
||||
|
@ -1150,6 +1151,7 @@ public:
|
|||
const char *type; /* type of select for EXPLAIN */
|
||||
|
||||
SQL_I_List<ORDER> order_list; /* ORDER clause */
|
||||
SQL_I_List<ORDER> save_order_list;
|
||||
SQL_I_List<ORDER> gorder_list;
|
||||
Item *select_limit, *offset_limit; /* LIMIT clause parameters */
|
||||
bool is_set_query_expr_tail;
|
||||
|
@ -3486,8 +3488,6 @@ public:
|
|||
}
|
||||
|
||||
|
||||
SQL_I_List<ORDER> save_group_list;
|
||||
SQL_I_List<ORDER> save_order_list;
|
||||
LEX_CSTRING *win_ref;
|
||||
Window_frame *win_frame;
|
||||
Window_frame_bound *frame_top_bound;
|
||||
|
@ -4636,12 +4636,11 @@ public:
|
|||
select_stack[0]->is_service_select);
|
||||
}
|
||||
|
||||
|
||||
bool check_dependencies_in_with_clauses();
|
||||
bool resolve_references_to_cte_in_hanging_cte();
|
||||
bool check_cte_dependencies_and_resolve_references();
|
||||
bool resolve_references_to_cte(TABLE_LIST *tables,
|
||||
TABLE_LIST **tables_last);
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ public:
|
|||
{
|
||||
elements= tmp.elements;
|
||||
first= tmp.first;
|
||||
next= tmp.next;
|
||||
next= elements ? tmp.next : &first;;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
|
|
@ -8792,8 +8792,8 @@ TABLE_LIST *st_select_lex::convert_right_join()
|
|||
void st_select_lex::prepare_add_window_spec(THD *thd)
|
||||
{
|
||||
LEX *lex= thd->lex;
|
||||
lex->save_group_list= group_list;
|
||||
lex->save_order_list= order_list;
|
||||
save_group_list= group_list;
|
||||
save_order_list= order_list;
|
||||
lex->win_ref= NULL;
|
||||
lex->win_frame= NULL;
|
||||
lex->frame_top_bound= NULL;
|
||||
|
@ -8820,8 +8820,8 @@ bool st_select_lex::add_window_def(THD *thd,
|
|||
win_part_list_ptr,
|
||||
win_order_list_ptr,
|
||||
win_frame);
|
||||
group_list= thd->lex->save_group_list;
|
||||
order_list= thd->lex->save_order_list;
|
||||
group_list= save_group_list;
|
||||
order_list= save_order_list;
|
||||
if (parsing_place != SELECT_LIST)
|
||||
{
|
||||
fields_in_window_functions+= win_part_list_ptr->elements +
|
||||
|
@ -8847,8 +8847,8 @@ bool st_select_lex::add_window_spec(THD *thd,
|
|||
win_part_list_ptr,
|
||||
win_order_list_ptr,
|
||||
win_frame);
|
||||
group_list= thd->lex->save_group_list;
|
||||
order_list= thd->lex->save_order_list;
|
||||
group_list= save_group_list;
|
||||
order_list= save_order_list;
|
||||
if (parsing_place != SELECT_LIST)
|
||||
{
|
||||
fields_in_window_functions+= win_part_list_ptr->elements +
|
||||
|
|
|
@@ -3541,15 +3541,26 @@ bool JOIN::make_aggr_tables_info()

    /*
      If we have different sort & group then we must sort the data by group
      and copy it to another tmp table
      and copy it to another tmp table.

      This code is also used if we are using distinct something
      we haven't been able to store in the temporary table yet
      like SEC_TO_TIME(SUM(...)).

      3. Also, this is used when
         - the query has Window functions,
         - the GROUP BY operation is done with OrderedGroupBy algorithm.
      In this case, the first temptable will contain pre-GROUP-BY data. Force
      the creation of the second temporary table. Post-GROUP-BY dataset will be
      written there, and then Window Function processing code will be able to
      process it.
    */
    if ((group_list &&
         (!test_if_subpart(group_list, order) || select_distinct)) ||
        (select_distinct && tmp_table_param.using_outer_summary_function))
    { /* Must copy to another table */
        (select_distinct && tmp_table_param.using_outer_summary_function) ||
        (group_list && !tmp_table_param.quick_group &&      // (3)
         select_lex->have_window_funcs()))                  // (3)
    { /* Must copy to another table */
      DBUG_PRINT("info",("Creating group table"));

      calc_group_buffer(this, group_list);

@@ -22081,11 +22092,17 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),

/*
  @brief
    Perform a GROUP BY operation over a stream of rows ordered by their group. The
    result is sent into join->result.
    Perform OrderedGroupBy operation and write the output into join->result.

  @detail
    Also applies HAVING, etc.
    The input stream is ordered by the GROUP BY expression, so groups come
    one after another. We only need to accumulate the aggregate value, when
    a GROUP BY group ends, check the HAVING and send the group.

    Note that the output comes in the GROUP BY order, which is required by
    the MySQL's GROUP BY semantics. No further sorting is needed.

  @seealso end_write_group() also implements SortAndGroup
*/

enum_nested_loop_state

@@ -22273,13 +22290,26 @@ end:

/*
  @brief
    Perform a GROUP BY operation over rows coming in arbitrary order.

    This is done by looking up the group in a temp.table and updating group
    values.
    Perform GROUP BY operation over rows coming in arbitrary order: use
    TemporaryTableWithPartialSums algorithm.

  @detail
    The TemporaryTableWithPartialSums algorithm is:

      CREATE TEMPORARY TABLE tmp (
        group_by_columns PRIMARY KEY,
        partial_sum
      );

      for each row R in join output {
        INSERT INTO tmp (R.group_by_columns, R.sum_value)
        ON DUPLICATE KEY UPDATE partial_sum=partial_sum + R.sum_value;
      }

  @detail
    Also applies HAVING, etc.

  @seealso end_unique_update()
*/

static enum_nested_loop_state

@@ -22427,13 +22457,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),

/*
  @brief
    Perform a GROUP BY operation over a stream of rows ordered by their group.
    Write the result into a temporary table.
    Perform OrderedGroupBy operation and write the output into the temporary
    table (join_tab->table).

  @detail
    Also applies HAVING, etc.
    The input stream is ordered by the GROUP BY expression, so groups come
    one after another. We only need to accumulate the aggregate value, when
    a GROUP BY group ends, check the HAVING and write the group.

    The rows are written into temptable so e.g. filesort can read them.
  @seealso end_send_group() also implements OrderedGroupBy
*/

enum_nested_loop_state

@ -1420,12 +1420,30 @@ public:
|
|||
(set in make_join_statistics())
|
||||
*/
|
||||
bool impossible_where;
|
||||
List<Item> all_fields; ///< to store all fields that used in query
|
||||
|
||||
/*
|
||||
All fields used in the query processing.
|
||||
|
||||
Initially this is a list of fields from the query's SQL text.
|
||||
|
||||
Then, ORDER/GROUP BY and Window Function code add columns that need to
|
||||
be saved to be available in the post-group-by context. These extra columns
|
||||
are added to the front, because this->all_fields points to the suffix of
|
||||
this list.
|
||||
*/
|
||||
List<Item> all_fields;
|
||||
///Above list changed to use temporary table
|
||||
List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3;
|
||||
///Part, shared with list above, emulate following list
|
||||
List<Item> tmp_fields_list1, tmp_fields_list2, tmp_fields_list3;
|
||||
List<Item> &fields_list; ///< hold field list passed to mysql_select
|
||||
|
||||
/*
|
||||
The original field list as it was passed to mysql_select(). This refers
|
||||
to select_lex->item_list.
|
||||
CAUTION: this list is a suffix of this->all_fields list, that is, it shares
|
||||
elements with that list!
|
||||
*/
|
||||
List<Item> &fields_list;
|
||||
List<Item> procedure_fields_list;
|
||||
int error;
|
||||
|
||||
|
|
|
@ -975,13 +975,13 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g)
|
|||
|
||||
xtrc(1, "Arraylist: len=%d\n", len);
|
||||
p = (char *)PlugSubAlloc(g, NULL, len);
|
||||
strcpy(p, "(");
|
||||
safe_strcpy(p, len, "(");
|
||||
|
||||
for (i = 0; i < Nval;) {
|
||||
Value->SetValue_pvblk(Vblp, i);
|
||||
Value->Prints(g, tp, z);
|
||||
strcat(p, tp);
|
||||
strcat(p, (++i == Nval) ? ")" : ",");
|
||||
safe_strcat(p, len, tp);
|
||||
safe_strcat(p, len, (++i == Nval) ? ")" : ",");
|
||||
} // enfor i
|
||||
|
||||
xtrc(1, "Arraylist: newlen=%d\n", strlen(p));
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
/* Include relevant sections of the MariaDB header file. */
|
||||
/***********************************************************************/
|
||||
#include <my_global.h>
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Include application header files: */
|
||||
|
@ -83,7 +84,7 @@ BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL)
|
|||
PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng)
|
||||
{
|
||||
size_t i;
|
||||
bool b = false, ptyp = (bool *)pty;
|
||||
bool b = false;
|
||||
PBVAL bvp = NULL;
|
||||
|
||||
s = js;
|
||||
|
@ -598,7 +599,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
|
|||
|
||||
try {
|
||||
if (!bvp) {
|
||||
strcpy(g->Message, "Null json tree");
|
||||
safe_strcpy(g->Message, sizeof(g->Message), "Null json tree");
|
||||
throw 1;
|
||||
} else if (!fn) {
|
||||
// Serialize to a string
|
||||
|
@ -606,9 +607,8 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
|
|||
b = pretty == 1;
|
||||
} else {
|
||||
if (!(fs = fopen(fn, "wb"))) {
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR),
|
||||
"w", (int)errno, fn);
|
||||
strcat(strcat(g->Message, ": "), strerror(errno));
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR) ": %s",
|
||||
"w", (int)errno, fn, strerror(errno));
|
||||
throw 2;
|
||||
} else if (pretty >= 2) {
|
||||
// Serialize to a pretty file
|
||||
|
|
|
@ -4908,7 +4908,7 @@ char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
} // endfor i
|
||||
|
||||
if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) {
|
||||
strcat(bsp->Msg, " array");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
|
||||
|
||||
// Keep result of constant function
|
||||
g->Xchk = (initid->const_item) ? bsp : NULL;
|
||||
|
@ -5106,8 +5106,9 @@ char *bbin_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
|
|||
PUSH_WARNING("Result truncated to json_grp_size values");
|
||||
|
||||
if (arp)
|
||||
if ((bsp = BbinAlloc(g, initid->max_length, arp)))
|
||||
strcat(bsp->Msg, " array");
|
||||
if ((bsp = BbinAlloc(g, initid->max_length, arp))) {
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
|
||||
}
|
||||
|
||||
if (!bsp) {
|
||||
*res_length = 0;
|
||||
|
@ -5153,8 +5154,9 @@ char *bbin_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
|
|||
PUSH_WARNING("Result truncated to json_grp_size values");
|
||||
|
||||
if (bop)
|
||||
if ((bsp = BbinAlloc(g, initid->max_length, bop)))
|
||||
strcat(bsp->Msg, " object");
|
||||
if ((bsp = BbinAlloc(g, initid->max_length, bop))) {
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
|
||||
}
|
||||
|
||||
if (!bsp) {
|
||||
*res_length = 0;
|
||||
|
@ -5198,7 +5200,7 @@ char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i));
|
||||
|
||||
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
|
||||
strcat(bsp->Msg, " object");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
|
||||
|
||||
// Keep result of constant function
|
||||
g->Xchk = (initid->const_item) ? bsp : NULL;
|
||||
|
@ -5253,7 +5255,7 @@ char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i));
|
||||
|
||||
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
|
||||
strcat(bsp->Msg, " object");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
|
||||
|
||||
// Keep result of constant function
|
||||
g->Xchk = (initid->const_item) ? bsp : NULL;
|
||||
|
@ -5312,7 +5314,7 @@ char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i));
|
||||
|
||||
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
|
||||
strcat(bsp->Msg, " object");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
|
||||
|
||||
// Keep result of constant function
|
||||
g->Xchk = (initid->const_item) ? bsp : NULL;
|
||||
|
@ -6075,7 +6077,7 @@ char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
// pretty = pty;
|
||||
|
||||
if ((bsp = BbinAlloc(bnx.G, len, jsp))) {
|
||||
strcat(bsp->Msg, " file");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " file");
|
||||
bsp->Filename = fn;
|
||||
bsp->Pretty = pretty;
|
||||
} else {
|
||||
|
|
|
@ -442,7 +442,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info)
|
|||
hp->Headlen, hp->Filedate[0], hp->Filedate[1],
|
||||
hp->Filedate[2]);
|
||||
|
||||
strcat(g->Message, buf);
|
||||
safe_strcat(g->Message, sizeof(g->Message), buf);
|
||||
} // endif info
|
||||
#endif // 0
|
||||
|
||||
|
|
|
@ -36,6 +36,8 @@
|
|||
#include <fcntl.h>
|
||||
#endif // !_WIN32
|
||||
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Include application header files: */
|
||||
/* global.h is header containing all global declarations. */
|
||||
|
@ -883,7 +885,6 @@ bool BGXFAM::OpenTableFile(PGLOBAL g)
|
|||
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
|
||||
(LPTSTR)filename, sizeof(filename), NULL);
|
||||
strcat(g->Message, filename);
|
||||
} else
|
||||
rc = 0;
|
||||
|
||||
|
@ -1004,7 +1005,7 @@ int BGXFAM::Cardinality(PGLOBAL g)
|
|||
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
|
||||
(LPTSTR)filename, sizeof(filename), NULL);
|
||||
strcat(g->Message, filename);
|
||||
safe_strcat(g->Message, sizeof(g->Message), filename);
|
||||
return -1;
|
||||
} else
|
||||
return 0; // File does not exist
|
||||
|
@ -1384,7 +1385,8 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
|
|||
/*********************************************************************/
|
||||
tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
|
||||
PlugSetPath(tempname, To_File, Tdbp->GetPath());
|
||||
strcat(PlugRemoveType(tempname, tempname), ".t");
|
||||
PlugRemoveType(tempname, tempname);
|
||||
safe_strcat(tempname, _MAX_PATH, ".t");
|
||||
remove(tempname); // Be sure it does not exist yet
|
||||
|
||||
#if defined(_WIN32)
|
||||
|
@ -1393,11 +1395,12 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
|
|||
|
||||
if (Tfile == INVALID_HANDLE_VALUE) {
|
||||
DWORD rc = GetLastError();
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname);
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT,
|
||||
tempname);
|
||||
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
|
||||
(LPTSTR)tempname, _MAX_PATH, NULL);
|
||||
strcat(g->Message, tempname);
|
||||
safe_strcat(g->Message, sizeof(g->Message), tempname);
|
||||
return true;
|
||||
} // endif Tfile
|
||||
#else // UNIX
|
||||
|
@ -1405,8 +1408,8 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
|
|||
|
||||
if (Tfile == INVALID_HANDLE_VALUE) {
|
||||
int rc = errno;
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname);
|
||||
strcat(g->Message, strerror(errno));
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR)" %s", rc,
|
||||
MODE_INSERT, tempname, strerror(errno));
|
||||
return true;
|
||||
} //endif Tfile
|
||||
#endif // UNIX
|
||||
|
|
|
@ -33,6 +33,8 @@
|
|||
#include <fcntl.h>
|
||||
#endif // !_WIN32
|
||||
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Include application header files: */
|
||||
/* global.h is header containing all global declarations. */
|
||||
|
@ -128,12 +130,13 @@ int GZFAM::GetFileLength(PGLOBAL g)
|
|||
/***********************************************************************/
|
||||
bool GZFAM::OpenTableFile(PGLOBAL g)
|
||||
{
|
||||
char opmode[4], filename[_MAX_PATH];
|
||||
MODE mode = Tdbp->GetMode();
|
||||
const char *opmode;
|
||||
char filename[_MAX_PATH];
|
||||
MODE mode = Tdbp->GetMode();
|
||||
|
||||
switch (mode) {
|
||||
case MODE_READ:
|
||||
strcpy(opmode, "r");
|
||||
opmode = "rb";
|
||||
break;
|
||||
case MODE_UPDATE:
|
||||
/*****************************************************************/
|
||||
|
@ -147,7 +150,7 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
|
|||
DelRows = Cardinality(g);
|
||||
|
||||
// This will erase the entire file
|
||||
strcpy(opmode, "w");
|
||||
opmode = "wb";
|
||||
// Block = 0; // For ZBKFAM
|
||||
// Last = Nrec; // For ZBKFAM
|
||||
Tdbp->ResetSize();
|
||||
|
@ -158,7 +161,7 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
|
|||
|
||||
break;
|
||||
case MODE_INSERT:
|
||||
strcpy(opmode, "a+");
|
||||
opmode = "a+b";
|
||||
break;
|
||||
default:
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(BAD_OPEN_MODE), mode);
|
||||
|
@ -170,13 +173,11 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
|
|||
/* Use specific zlib functions. */
|
||||
/* Treat files as binary. */
|
||||
/*********************************************************************/
|
||||
strcat(opmode, "b");
|
||||
Zfile = gzopen(PlugSetPath(filename, To_File, Tdbp->GetPath()), opmode);
|
||||
|
||||
if (Zfile == NULL) {
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(GZOPEN_ERROR),
|
||||
opmode, (int)errno, filename);
|
||||
strcat(strcat(g->Message, ": "), strerror(errno));
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(GZOPEN_ERROR) ": %s",
|
||||
opmode, (int)errno, filename, strerror(errno));
|
||||
return (mode == MODE_READ && errno == ENOENT)
|
||||
? PushWarning(g, Tdbp) : true;
|
||||
} // endif Zfile
|
||||
|
|
|
@ -38,6 +38,8 @@
|
|||
#include <fcntl.h>
|
||||
#endif // !_WIN32
|
||||
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Include application header files: */
|
||||
/* global.h is header containing all global declarations. */
|
||||
|
@ -593,7 +595,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g)
|
|||
} // endswitch Mode
|
||||
|
||||
// For blocked I/O or for moving lines, open the table in binary
|
||||
strcat(opmode, (Bin) ? "b" : "t");
|
||||
safe_strcat(opmode, sizeof(opmode), (Bin) ? "b" : "t");
|
||||
|
||||
// Now open the file stream
|
||||
PlugSetPath(filename, To_File, Tdbp->GetPath());
|
||||
|
@ -1081,7 +1083,8 @@ bool DOSFAM::OpenTempFile(PGLOBAL g)
|
|||
/* Open the temporary file, Spos is at the beginning of file. */
|
||||
/*********************************************************************/
|
||||
PlugSetPath(tempname, To_File, Tdbp->GetPath());
|
||||
strcat(PlugRemoveType(tempname, tempname), ".t");
|
||||
PlugRemoveType(tempname, tempname);
|
||||
safe_strcat(tempname, sizeof(tempname), ".t");
|
||||
|
||||
if (!(T_Stream = PlugOpenFile(g, tempname, "wb"))) {
|
||||
if (trace(1))
|
||||
|
@ -1170,7 +1173,8 @@ int DOSFAM::RenameTempFile(PGLOBAL g)
|
|||
|
||||
if (!Abort) {
|
||||
PlugSetPath(filename, To_File, Tdbp->GetPath());
|
||||
strcat(PlugRemoveType(filetemp, filename), ".ttt");
|
||||
PlugRemoveType(filetemp, filename);
|
||||
safe_strcat(filetemp, sizeof(filetemp), ".ttt");
|
||||
remove(filetemp); // May still be there from previous error
|
||||
|
||||
if (rename(filename, filetemp)) { // Save file for security
|
||||
|
|
|
@ -42,6 +42,8 @@
|
|||
#include <fcntl.h>
|
||||
#endif // !_WIN32
|
||||
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Include application header files: */
|
||||
/* global.h is header containing all global declarations. */
|
||||
|
@ -194,7 +196,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g)
|
|||
if (Header == 2)
|
||||
{
|
||||
PlugRemoveType(filename, filename);
|
||||
strncat(filename, ".blk", _MAX_PATH - strlen(filename));
|
||||
safe_strcat(filename, sizeof(filename), ".blk");
|
||||
}
|
||||
|
||||
if ((h = global_open(g, MSGID_CANNOT_OPEN, filename, O_RDONLY)) == -1
|
||||
|
@ -251,7 +253,7 @@ bool VCTFAM::SetBlockInfo(PGLOBAL g)
|
|||
|
||||
} else { // Header == 2
|
||||
PlugRemoveType(filename, filename);
|
||||
strncat(filename, ".blk", _MAX_PATH - strlen(filename));
|
||||
safe_strcat(filename, sizeof(filename), ".blk");
|
||||
s= global_fopen(g, MSGID_CANNOT_OPEN, filename, "wb");
|
||||
} // endif Header
|
||||
|
||||
|
@ -587,7 +589,7 @@ bool VCTFAM::InitInsert(PGLOBAL g)
|
|||
htrc("Exception %d: %s\n", n, g->Message);
|
||||
rc = true;
|
||||
} catch (const char *msg) {
|
||||
strncpy(g->Message, msg, sizeof(g->Message));
|
||||
safe_strcpy(g->Message, sizeof(msg), msg);
|
||||
rc = true;
|
||||
} // end catch
|
||||
|
||||
|
@ -891,8 +893,7 @@ bool VCTFAM::OpenTempFile(PGLOBAL g)
|
|||
/*********************************************************************/
|
||||
PlugSetPath(tempname, To_File, Tdbp->GetPath());
|
||||
PlugRemoveType(tempname, tempname);
|
||||
strncat(tempname, ".t", _MAX_PATH - strlen(tempname));
|
||||
|
||||
safe_strcat(tempname, sizeof(tempname), ".t");
|
||||
if (MaxBlk) {
|
||||
if (MakeEmptyFile(g, tempname))
|
||||
return true;
|
||||
|
@ -1563,7 +1564,7 @@ bool VCMFAM::InitInsert(PGLOBAL g)
|
|||
htrc("Exception %d: %s\n", n, g->Message);
|
||||
rc = true;
|
||||
} catch (const char *msg) {
|
||||
strncpy(g->Message, msg, sizeof(g->Message));
|
||||
safe_strcpy(g->Message, sizeof(g->Message), msg);
|
||||
rc = true;
|
||||
} // end catch
|
||||
|
||||
|
@ -2083,10 +2084,10 @@ bool VECFAM::AllocateBuffer(PGLOBAL g)
|
|||
// Allocate all that is needed to move lines and make Temp
|
||||
if (UseTemp) {
|
||||
Tempat = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
|
||||
strcpy(Tempat, Colfn);
|
||||
safe_strcpy(Tempat, _MAX_PATH, Colfn);
|
||||
PlugSetPath(Tempat, Tempat, Tdbp->GetPath());
|
||||
PlugRemoveType(Tempat, Tempat);
|
||||
strncat(Tempat, ".t", _MAX_PATH - strlen(Tempat));
|
||||
safe_strcat(Tempat, _MAX_PATH, ".t");
|
||||
T_Fbs = (PFBLOCK *)PlugSubAlloc(g, NULL, Ncol * sizeof(PFBLOCK));
|
||||
} // endif UseTemp
|
||||
|
||||
|
@ -2461,7 +2462,7 @@ int VECFAM::RenameTempFile(PGLOBAL g)
|
|||
snprintf(filename, _MAX_PATH, Colfn, i+1);
|
||||
PlugSetPath(filename, filename, Tdbp->GetPath());
|
||||
PlugRemoveType(filetemp, filename);
|
||||
strncat(filetemp, ".ttt", _MAX_PATH - strlen(filetemp));
|
||||
safe_strcat(filetemp, sizeof(filetemp), ".ttt");
|
||||
remove(filetemp); // May still be there from previous error
|
||||
|
||||
if (rename(filename, filetemp)) { // Save file for security
|
||||
|
@ -3222,7 +3223,7 @@ int BGVFAM::GetBlockInfo(PGLOBAL g)
|
|||
if (Header == 2)
|
||||
{
|
||||
PlugRemoveType(filename, filename);
|
||||
strncat(filename, ".blk", _MAX_PATH - strlen(filename));
|
||||
safe_strcat(filename, sizeof(filename), ".blk");
|
||||
}
|
||||
|
||||
#if defined(_WIN32)
|
||||
|
@ -3301,7 +3302,7 @@ bool BGVFAM::SetBlockInfo(PGLOBAL g)
|
|||
} else // Header == 2
|
||||
{
|
||||
PlugRemoveType(filename, filename);
|
||||
strncat(filename, ".blk", _MAX_PATH - strlen(filename));
|
||||
safe_strcat(filename, sizeof(filename), ".blk");
|
||||
}
|
||||
|
||||
if (h == INVALID_HANDLE_VALUE) {
|
||||
|
@ -3399,7 +3400,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn)
|
|||
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
|
||||
(LPTSTR)filename, sizeof(filename), NULL);
|
||||
strncat(g->Message, filename, sizeof(g->Message) - strlen(g->Message));
|
||||
safe_strcat(g->Message, sizeof(g->Message), filename);
|
||||
|
||||
if (h != INVALID_HANDLE_VALUE)
|
||||
CloseHandle(h);
|
||||
|
@ -3535,7 +3536,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g)
|
|||
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
|
||||
(LPTSTR)filename, sizeof(filename), NULL);
|
||||
strncat(g->Message, filename, sizeof(g->Message) - strlen(g->Message));
|
||||
safe_strcat(g->Message, sizeof(g->Message), filename);
|
||||
} // endif Hfile
|
||||
|
||||
if (trace(1))
|
||||
|
@ -3623,8 +3624,8 @@ bool BGVFAM::OpenTableFile(PGLOBAL g)
|
|||
|
||||
if (Hfile == INVALID_HANDLE_VALUE) {
|
||||
rc = errno;
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, mode, filename);
|
||||
strncat(g->Message, strerror(errno), sizeof(g->Message) - strlen(g->Message));
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR)"%s", rc, mode,
|
||||
filename, strerror(errno));
|
||||
} // endif Hfile
|
||||
|
||||
if (trace(1))
|
||||
|
@ -3968,7 +3969,7 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
|
|||
tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
|
||||
PlugSetPath(tempname, To_File, Tdbp->GetPath());
|
||||
PlugRemoveType(tempname, tempname);
|
||||
strncat(tempname, ".t", _MAX_PATH - strlen(tempname));
|
||||
safe_strcat(tempname, _MAX_PATH, ".t");
|
||||
|
||||
if (!MaxBlk)
|
||||
remove(tempname); // Be sure it does not exist yet
|
||||
|
@ -3987,7 +3988,7 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
|
|||
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
|
||||
(LPTSTR)tempname, _MAX_PATH, NULL);
|
||||
strncat(g->Message, tempname, sizeof(g->Message) - strlen(g->Message));
|
||||
safe_strcat(g->Message, sizeof(g->Message), tempname);
|
||||
return true;
|
||||
} // endif Tfile
|
||||
#else // UNIX
|
||||
|
@ -3997,8 +3998,8 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
|
|||
|
||||
if (Tfile == INVALID_HANDLE_VALUE) {
|
||||
int rc = errno;
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname);
|
||||
strncat(g->Message, strerror(errno), sizeof(g->Message) - strlen(g->Message));
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR) "%s", rc, MODE_INSERT,
|
||||
tempname, strerror(errno));
|
||||
return true;
|
||||
} //endif Tfile
|
||||
#endif // UNIX
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <fcntl.h>
|
||||
#endif // !_WIN32
|
||||
#include <time.h>
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Include application header files: */
|
||||
|
@ -181,7 +182,8 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
|
|||
|
||||
while (true) {
|
||||
if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
|
||||
strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName);
|
||||
snprintf(filename, sizeof(filename), "%s%s%s",
|
||||
drive, direc, FileData.cFileName);
|
||||
|
||||
if (ZipFile(g, zutp, filename, FileData.cFileName, buf)) {
|
||||
FindClose(hSearch);
|
||||
|
@ -217,7 +219,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
|
|||
struct dirent *entry;
|
||||
|
||||
_splitpath(filename, NULL, direc, pattern, ftype);
|
||||
strcat(pattern, ftype);
|
||||
safe_strcat(pattern, sizeof(pattern), ftype);
|
||||
|
||||
// Start searching files in the target directory.
|
||||
if (!(dir = opendir(direc))) {
|
||||
|
@ -226,7 +228,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
|
|||
} // endif dir
|
||||
|
||||
while ((entry = readdir(dir))) {
|
||||
strcat(strcpy(fn, direc), entry->d_name);
|
||||
snprintf(fn, sizeof(fn), "%s%s", direc, entry->d_name);
|
||||
|
||||
if (lstat(fn, &fileinfo) < 0) {
|
||||
snprintf(g->Message, sizeof(g->Message), "%s: %s", fn, strerror(errno));
|
||||
|
@ -240,7 +242,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
|
|||
if (fnmatch(pattern, entry->d_name, 0))
|
||||
continue; // Not a match
|
||||
|
||||
strcat(strcpy(filename, direc), entry->d_name);
|
||||
snprintf(filename, sizeof(filename), "%s%s", direc, entry->d_name);
|
||||
|
||||
if (ZipFile(g, zutp, filename, entry->d_name, buf)) {
|
||||
closedir(dir);
|
||||
|
|
|
@ -33,6 +33,8 @@
|
|||
#define NODW
|
||||
#endif // !_WIN32
|
||||
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Required objects includes. */
|
||||
/***********************************************************************/
|
||||
|
@ -231,15 +233,16 @@ bool JAVAConn::GetJVM(PGLOBAL g)
|
|||
#if defined(_WIN32)
|
||||
for (ntry = 0; !LibJvm && ntry < 3; ntry++) {
|
||||
if (!ntry && JvmPath) {
|
||||
strcat(strcpy(soname, JvmPath), "\\jvm.dll");
|
||||
snprintf(soname, sizeof(soname), "%s\\jvm.dll", JvmPath);
|
||||
|
||||
ntry = 3; // No other try
|
||||
} else if (ntry < 2 && getenv("JAVA_HOME")) {
|
||||
strcpy(soname, getenv("JAVA_HOME"));
|
||||
safe_strcpy(soname, sizeof(soname), getenv("JAVA_HOME"));
|
||||
|
||||
if (ntry == 1)
|
||||
strcat(soname, "\\jre");
|
||||
safe_strcat(soname, sizeof(soname), "\\jre");
|
||||
|
||||
strcat(soname, "\\bin\\client\\jvm.dll");
|
||||
safe_strcat(soname, sizeof(soname), "\\bin\\client\\jvm.dll");
|
||||
} else {
|
||||
// Try to find it through the registry
|
||||
char version[16];
|
||||
|
@ -247,11 +250,12 @@ bool JAVAConn::GetJVM(PGLOBAL g)
|
|||
LONG rc;
|
||||
DWORD BufferSize = 16;
|
||||
|
||||
strcpy(soname, "jvm.dll"); // In case it fails
|
||||
safe_strcpy(soname, sizeof(soname), "jvm.dll"); // In case it fails
|
||||
|
||||
if ((rc = RegGetValue(HKEY_LOCAL_MACHINE, javaKey, "CurrentVersion",
|
||||
RRF_RT_ANY, NULL, (PVOID)&version, &BufferSize)) == ERROR_SUCCESS) {
|
||||
strcat(strcat(javaKey, "\\"), version);
|
||||
safe_strcat(javaKey, sizeof(javaKey), "\\");
|
||||
safe_strcat(javaKey, sizeof(javaKey), version);
|
||||
BufferSize = sizeof(soname);
|
||||
|
||||
if ((rc = RegGetValue(HKEY_LOCAL_MACHINE, javaKey, "RuntimeLib",
|
||||
|
@ -272,11 +276,11 @@ bool JAVAConn::GetJVM(PGLOBAL g)
|
|||
char buf[256];
|
||||
DWORD rc = GetLastError();
|
||||
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(DLL_LOAD_ERROR), rc, soname);
|
||||
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
|
||||
(LPTSTR)buf, sizeof(buf), NULL);
|
||||
strcat(strcat(g->Message, ": "), buf);
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(DLL_LOAD_ERROR)": %s", rc,
|
||||
soname, buf);
|
||||
} else if (!(CreateJavaVM = (CRTJVM)GetProcAddress((HINSTANCE)LibJvm,
|
||||
"JNI_CreateJavaVM"))) {
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(PROCADD_ERROR), GetLastError(), "JNI_CreateJavaVM");
|
||||
|
@ -301,13 +305,14 @@ bool JAVAConn::GetJVM(PGLOBAL g)
|
|||
|
||||
for (ntry = 0; !LibJvm && ntry < 2; ntry++) {
|
||||
if (!ntry && JvmPath) {
|
||||
strcat(strcpy(soname, JvmPath), "/libjvm.so");
|
||||
snprintf(soname, sizeof(soname), "%s/libjvm.so", JvmPath);
|
||||
ntry = 2;
|
||||
} else if (!ntry && getenv("JAVA_HOME")) {
|
||||
// TODO: Replace i386 by a better guess
|
||||
strcat(strcpy(soname, getenv("JAVA_HOME")), "/jre/lib/i386/client/libjvm.so");
|
||||
snprintf(soname, sizeof(soname), "%s/jre/lib/i386/client/libjvm.so",
|
||||
getenv("JAVA_HOME"));
|
||||
} else { // Will need LD_LIBRARY_PATH to be set
|
||||
strcpy(soname, "libjvm.so");
|
||||
safe_strcpy(soname, sizeof(soname), "libjvm.so");
|
||||
ntry = 2;
|
||||
} // endelse
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
/* Include relevant sections of the MariaDB header file. */
|
||||
/***********************************************************************/
|
||||
#include <my_global.h>
|
||||
#include <m_string.h>
|
||||
|
||||
/***********************************************************************/
|
||||
/* Include application header files: */
|
||||
|
@ -270,7 +271,7 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) {
|
|||
jdp->dfp = GetDefaultPrec();
|
||||
|
||||
if (!jsp) {
|
||||
strcpy(g->Message, "Null json tree");
|
||||
safe_strcpy(g->Message, sizeof(g->Message), "Null json tree");
|
||||
throw 1;
|
||||
} else if (!fn) {
|
||||
// Serialize to a string
|
||||
|
@ -278,9 +279,8 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) {
|
|||
b = pretty == 1;
|
||||
} else {
|
||||
if (!(fs = fopen(fn, "wb"))) {
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR),
|
||||
"w", (int)errno, fn);
|
||||
strcat(strcat(g->Message, ": "), strerror(errno));
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR) ": %s",
|
||||
"w", (int)errno, fn, strerror(errno));
|
||||
throw 2;
|
||||
} else if (pretty >= 2) {
|
||||
// Serialize to a pretty file
|
||||
|
|
|
@ -4755,7 +4755,7 @@ char *jbin_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
|
||||
if ((arp = (PJAR)JsonNew(g, TYPE_JAR)) &&
|
||||
(bsp = JbinAlloc(g, args, initid->max_length, arp))) {
|
||||
strcat(bsp->Msg, " array");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
|
||||
|
||||
for (uint i = 0; i < args->arg_count; i++)
|
||||
arp->AddArrayValue(g, MakeValue(g, args, i));
|
||||
|
@ -4832,7 +4832,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
arp->InitArray(gb);
|
||||
|
||||
if ((bsp = JbinAlloc(g, args, initid->max_length, top))) {
|
||||
strcat(bsp->Msg, " array");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
|
||||
bsp->Jsp = arp;
|
||||
} // endif bsp
|
||||
|
||||
|
@ -5053,7 +5053,7 @@ char *jbin_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
|
||||
|
||||
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
|
||||
strcat(bsp->Msg, " object");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
|
||||
|
||||
} else
|
||||
bsp = NULL;
|
||||
|
@ -5109,7 +5109,7 @@ char *jbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
objp->SetKeyValue(g, jvp, MakeKey(g, args, i));
|
||||
|
||||
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
|
||||
strcat(bsp->Msg, " object");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
|
||||
|
||||
} else
|
||||
bsp = NULL;
|
||||
|
@ -5168,7 +5168,7 @@ char *jbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
|
||||
|
||||
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
|
||||
strcat(bsp->Msg, " object");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
|
||||
|
||||
} else
|
||||
bsp = NULL;
|
||||
|
@ -5390,7 +5390,7 @@ char *jbin_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
} // endif CheckMemory
|
||||
|
||||
if ((bsp = JbinAlloc(g, args, initid->max_length, jarp)))
|
||||
strcat(bsp->Msg, " array");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
|
||||
|
||||
// Keep result of constant function
|
||||
g->Xchk = (initid->const_item) ? bsp : NULL;
|
||||
|
@ -5465,7 +5465,7 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_JVAL, jvp->GetValue(g));
|
||||
|
||||
if ((bsp = JbinAlloc(g, args, initid->max_length, jsp)))
|
||||
strcat(bsp->Msg, " item");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " item");
|
||||
else
|
||||
*error = 1;
|
||||
|
||||
|
@ -5825,7 +5825,7 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
|
|||
pretty = pty;
|
||||
|
||||
if ((bsp = JbinAlloc(g, args, len, jsp))) {
|
||||
strcat(bsp->Msg, " file");
|
||||
safe_strcat(bsp->Msg, sizeof(bsp->Msg), " file");
|
||||
bsp->Filename = fn;
|
||||
bsp->Pretty = pretty;
|
||||
} else {
|
||||
|
@ -6161,9 +6161,8 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
|
|||
/* Parse the json file and allocate its tree structure. */
|
||||
/*********************************************************************************/
|
||||
if (!(fs = fopen(outfn, "wb"))) {
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR),
|
||||
"w", (int)errno, outfn);
|
||||
strcat(strcat(g->Message, ": "), strerror(errno));
|
||||
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR)": %s",
|
||||
"w", (int)errno, outfn, strerror(errno));
|
||||
CloseMemMap(mm.memory, len);
|
||||
return NULL;
|
||||
} // endif fs
|
||||
|
|
|
@ -405,18 +405,20 @@ PQRYRES SrcColumns(PGLOBAL g, const char *host, const char *db,
|
|||
port = mysqld_port;
|
||||
|
||||
if (!strnicmp(srcdef, "select ", 7) || strstr(srcdef, "%s")) {
|
||||
query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 10);
|
||||
size_t query_sz = strlen(srcdef) + 10;
|
||||
query = (char *)PlugSubAlloc(g, NULL, query_sz);
|
||||
|
||||
if ((p= strstr(srcdef, "%s")))
|
||||
{
|
||||
/* Replace %s with 1=1 */
|
||||
sprintf(query, "%.*s1=1%s", (int) (p - srcdef), srcdef, p + 2); // dummy where clause
|
||||
snprintf(query, query_sz, "%.*s1=1%s",
|
||||
(int) (p - srcdef), srcdef, p + 2); // dummy where clause
|
||||
}
|
||||
else
|
||||
strcpy(query, srcdef);
|
||||
else
|
||||
safe_strcpy(query, query_sz, srcdef);
|
||||
|
||||
if (!strnicmp(srcdef, "select ", 7))
|
||||
strcat(query, " LIMIT 0");
|
||||
safe_strcat(query, query_sz, " LIMIT 0");
|
||||
|
||||
} else
|
||||
query = (char *)srcdef;
|
||||
|
|
2 storage/mroonga/vendor/groonga/lib/alloc.c vendored
@@ -828,7 +828,7 @@ grn_free_default(grn_ctx *ctx, void *ptr,
  if (ptr) {
    GRN_ADD_ALLOC_COUNT(-1);
  } else {
    GRN_LOG(ctx, GRN_LOG_ALERT, "free fail (%s:%d) <%d>",
    GRN_LOG(ctx, GRN_LOG_ALERT, "free fail (nullptr) (%s:%d) <%d>",
            file, line, alloc_count);
  }
}