MDEV-20057 Distinct SUM on CROSS JOIN and grouped returns wrong result

SELECT DISTINCT did not work with expressions containing sum functions.
Distinct was applied only to the values stored in the intermediate temporary
tables, which stored only the value of each sum function.

In other words:
SELECT DISTINCT sum(a),sum(b),avg(c) ... worked.
SELECT DISTINCT sum(a),sum(b) > 2,sum(c)+sum(d) would not work.

The latter query would apply DISTINCT only to the sum(a) part.

Reviewer: Sergei Petrunia <sergey@mariadb.com>


This was fixed by extending remove_dup_with_hash_index() and
remove_dup_with_compare() to take into account the columns in the result
list that were not stored in the temporary table.
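
The idea behind the extension, as a minimal standalone sketch (plain C++
with illustrative names, not the server code): the duplicate-detection key
is built from the fields stored in the temporary table plus a key computed
from the result-list expressions that are not stored there, so rows that
differ only in such an expression are no longer collapsed.

  #include <iostream>
  #include <string>
  #include <unordered_set>
  #include <vector>

  // Each result row has a part stored in the temporary table (the plain sum
  // columns) and a part that exists only in the result list (e.g. sum(b) > 2).
  struct Row
  {
    std::string stored_key;    // key bytes built from fields in the temp table
    std::string computed_key;  // key bytes built from result-only expressions
  };

  int main()
  {
    // Three rows that agree on the stored part but differ in the computed part
    std::vector<Row> rows= {{"A", "1"}, {"A", "0"}, {"A", "1"}};
    std::unordered_set<std::string> seen;
    size_t kept= 0;
    for (const Row &r : rows)
    {
      // Before the fix only stored_key took part in duplicate detection, so
      // the second row would wrongly be treated as a duplicate of the first.
      std::string key= r.stored_key + r.computed_key;
      if (seen.insert(key).second)
        kept++;
    }
    std::cout << kept << " distinct rows\n";   // prints 2, not 1
    return 0;
  }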

Note that in many cases the above duplicate removal functions are not used,
as the optimizer may either remove duplicates early or discover that
duplicate removal is not needed. The latter happens, for example, when the
GROUP BY fields are part of the result.

Other things:
- Backported from 11.0 the change of Sort_param.tmp_buffer from char* to
  String.
- Changed Type_handler::make_sort_key() to take a String as a parameter
  instead of Sort_param. This was done to allow make_sort_key() functions
  to be reused by the distinct elimination functions (see the sketch after
  this list).
  This makes Type_handler_string_result::make_sort_key() similar to the code
  in 11.0.
- Simplified error handling in remove_dup_with_compare() to remove code
  duplication.
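
A hedged sketch of why the make_sort_key() signature change matters (plain
C++ with illustrative names, not the server API): a key builder that needs
only the column attributes and a reusable scratch buffer can be shared by
the sort path and by the distinct elimination code, whereas one tied to the
whole filesort parameter block cannot.

  #include <iostream>
  #include <string>

  // Illustrative stand-in for SORT_FIELD_ATTR; not the real server type.
  struct SortFieldAttr
  {
    size_t length;                            // fixed key length for the column
  };

  // After the change: the builder takes only the attribute and a reusable
  // scratch buffer, so any caller that has those can use it.
  static void make_sort_key(std::string &to, long long value,
                            const SortFieldAttr &attr, std::string &scratch)
  {
    scratch.assign(reinterpret_cast<const char *>(&value), sizeof(value));
    scratch.resize(attr.length, '\0');        // pad to the fixed key length
    to.append(scratch);                       // append the key bytes
  }

  int main()
  {
    SortFieldAttr attr{8};
    std::string key, scratch;
    make_sort_key(key, 42, attr, scratch);    // called from the sort path
    make_sort_key(key, 43, attr, scratch);    // reused for duplicate elimination
    std::cout << key.size() << " key bytes\n";  // prints 16
    return 0;
  }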
Monty 2023-02-16 14:19:33 +02:00
parent bd0d7ea540
commit 476b24d084
6 changed files with 245 additions and 74 deletions

View file

@ -1070,3 +1070,40 @@ UNION
1
drop table t1;
End of 5.5 tests
#
# MDEV-20057 Distinct SUM on CROSS JOIN and grouped returns wrong result
#
create table t1 (c int, d int);
insert into t1 values (5, 1), (0, 3);
select distinct sum(distinct 1), sum(t1.d) > 2 from (t1 e join t1) group by t1.c;
sum(distinct 1) sum(t1.d) > 2
1 1
1 0
select distinct sum(distinct 1), sum(t1.d) > 2, t1.c from (t1 e join t1) group by t1.c;
sum(distinct 1) sum(t1.d) > 2 c
1 1 0
1 0 5
insert into t1 values (6,6);
select distinct sum(distinct 1), sum(t1.d) > 5 from (t1 e join t1) group by t1.c;
sum(distinct 1) sum(t1.d) > 5
1 1
1 0
select distinct sum(distinct 1), sum(t1.d) > 5, t1.c from (t1 e join t1) group by t1.c;
sum(distinct 1) sum(t1.d) > 5 c
1 1 0
1 0 5
1 1 6
set @@sort_buffer_size=1024;
insert into t1 select -seq,-seq from seq_1_to_100;
select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ;
sum(distinct 1) sum(t1.d) > 2 length(group_concat(t1.d)) > 1000
1 0 0
1 1 0
select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000,t1.c from (t1 e join t1) group by t1.c having t1.c > -2;
sum(distinct 1) sum(t1.d) > 2 length(group_concat(t1.d)) > 1000 c
1 0 0 -1
1 1 0 0
1 1 0 5
1 1 0 6
drop table t1;
# End of 10.4 tests

View file

@ -4,6 +4,7 @@
#
--source include/default_optimizer_switch.inc
--source include/have_sequence.inc
--disable_warnings
drop table if exists t1,t2,t3;
--enable_warnings
@ -818,3 +819,25 @@ UNION
drop table t1;
--echo End of 5.5 tests
--echo #
--echo # MDEV-20057 Distinct SUM on CROSS JOIN and grouped returns wrong result
--echo #
create table t1 (c int, d int);
insert into t1 values (5, 1), (0, 3);
select distinct sum(distinct 1), sum(t1.d) > 2 from (t1 e join t1) group by t1.c;
select distinct sum(distinct 1), sum(t1.d) > 2, t1.c from (t1 e join t1) group by t1.c;
insert into t1 values (6,6);
select distinct sum(distinct 1), sum(t1.d) > 5 from (t1 e join t1) group by t1.c;
select distinct sum(distinct 1), sum(t1.d) > 5, t1.c from (t1 e join t1) group by t1.c;
# Force usage of remove_dup_with_compare() algorithm
set @@sort_buffer_size=1024;
insert into t1 select -seq,-seq from seq_1_to_100;
select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ;
select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000,t1.c from (t1 e join t1) group by t1.c having t1.c > -2;
drop table t1;
--echo # End of 10.4 tests

View file

@ -159,7 +159,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str);
DEBUG_SYNC(thd, "filesort_start");
if (!(sort= new SORT_INFO))
if (!(sort= new SORT_INFO)) // Note that this is not automatically freed!
return 0;
if (subselect && subselect->filesort_buffer.is_allocated())
@ -186,10 +186,6 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
sort->addon_buf= param.addon_buf;
sort->addon_field= param.addon_field;
sort->unpack= unpack_addon_fields;
if (multi_byte_charset &&
!(param.tmp_buffer= (char*) my_malloc(param.sort_length,
MYF(MY_WME | MY_THREAD_SPECIFIC))))
goto err;
if (select && select->quick)
thd->inc_status_sort_range();
@ -254,6 +250,9 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
tracker->report_sort_buffer_size(sort->sort_buffer_size());
}
if (param.tmp_buffer.alloc(param.sort_length))
goto err;
if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX,
DISK_BUFFER_SIZE, MYF(MY_WME)))
goto err;
@ -337,7 +336,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
error= 0;
err:
my_free(param.tmp_buffer);
param.tmp_buffer.free();
if (!subselect || !subselect->is_uncacheable())
{
sort->free_sort_buffer();
@ -977,17 +976,15 @@ static inline void store_length(uchar *to, uint length, uint pack_length)
void
Type_handler_string_result::make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const
String *tmp_buffer) const
{
CHARSET_INFO *cs= item->collation.collation;
bool maybe_null= item->maybe_null;
if (maybe_null)
*to++= 1;
char *tmp_buffer= param->tmp_buffer ? param->tmp_buffer : (char*) to;
String tmp(tmp_buffer, param->tmp_buffer ? param->sort_length :
sort_field->length, cs);
String *res= item->str_result(&tmp);
Binary_string *res= item->str_result(tmp_buffer);
if (!res)
{
if (maybe_null)
@ -1015,11 +1012,11 @@ Type_handler_string_result::make_sort_key(uchar *to, Item *item,
size_t tmp_length=
#endif
cs->coll->strnxfrm(cs, to, sort_field->length,
item->max_char_length() *
cs->strxfrm_multiply,
(uchar*) res->ptr(), res->length(),
MY_STRXFRM_PAD_WITH_SPACE |
MY_STRXFRM_PAD_TO_MAXLEN);
item->max_char_length() *
cs->strxfrm_multiply,
(uchar*) res->ptr(), res->length(),
MY_STRXFRM_PAD_WITH_SPACE |
MY_STRXFRM_PAD_TO_MAXLEN);
DBUG_ASSERT(tmp_length == sort_field->length);
}
else
@ -1050,7 +1047,7 @@ Type_handler_string_result::make_sort_key(uchar *to, Item *item,
void
Type_handler_int_result::make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const
String *tmp_buffer) const
{
longlong value= item->val_int_result();
make_sort_key_longlong(to, item->maybe_null, item->null_value,
@ -1061,7 +1058,7 @@ Type_handler_int_result::make_sort_key(uchar *to, Item *item,
void
Type_handler_temporal_result::make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const
String *tmp_buffer) const
{
MYSQL_TIME buf;
// This is a temporal type. No nanoseconds. Rounding mode is not important.
@ -1083,7 +1080,7 @@ Type_handler_temporal_result::make_sort_key(uchar *to, Item *item,
void
Type_handler_timestamp_common::make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const
String *tmp_buffer) const
{
THD *thd= current_thd;
uint binlen= my_timestamp_binary_length(item->decimals);
@ -1147,7 +1144,7 @@ Type_handler::make_sort_key_longlong(uchar *to,
void
Type_handler_decimal_result::make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const
String *tmp_buffer) const
{
my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf);
if (item->maybe_null)
@ -1167,7 +1164,7 @@ Type_handler_decimal_result::make_sort_key(uchar *to, Item *item,
void
Type_handler_real_result::make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const
String *tmp_buffer) const
{
double value= item->val_result();
if (item->maybe_null)
@ -1205,7 +1202,8 @@ static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos)
else
{ // Item
sort_field->item->type_handler()->make_sort_key(to, sort_field->item,
sort_field, param);
sort_field,
&param->tmp_buffer);
if ((maybe_null= sort_field->item->maybe_null))
to++;
}

View file

@ -244,10 +244,12 @@ static bool find_field_in_item_list (Field *field, void *data);
static bool find_field_in_order_list (Field *field, void *data);
int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
Item *having);
SORT_FIELD *sortorder, ulong keylength,
Item *having);
static int remove_dup_with_hash_index(THD *thd,TABLE *table,
uint field_count, Field **first_field,
ulong key_length,Item *having);
uint field_count, Field **first_field,
SORT_FIELD *sortorder,
ulong key_length,Item *having);
static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref);
static bool setup_new_fields(THD *thd, List<Item> &fields,
List<Item> &all_fields, ORDER *new_order);
@ -24208,39 +24210,70 @@ JOIN_TAB::remove_duplicates()
{
bool error;
ulong keylength= 0;
uint field_count;
ulong keylength= 0, sort_field_keylength= 0;
uint field_count, item_count;
List<Item> *fields= (this-1)->fields;
Item *item;
THD *thd= join->thd;
SORT_FIELD *sortorder, *sorder;
DBUG_ENTER("remove_duplicates");
DBUG_ASSERT(join->aggr_tables > 0 && table->s->tmp_table != NO_TMP_TABLE);
THD_STAGE_INFO(join->thd, stage_removing_duplicates);
//join->explain->ops_tracker.report_duplicate_removal();
table->reginfo.lock_type=TL_WRITE;
if (!(sortorder= (SORT_FIELD*) my_malloc((fields->elements+1) *
sizeof(SORT_FIELD),
MYF(MY_WME))))
DBUG_RETURN(TRUE);
/* Calculate how many saved fields there is in list */
field_count=0;
List_iterator<Item> it(*fields);
Item *item;
while ((item=it++))
{
if (item->get_tmp_table_field() && ! item->const_item())
field_count++;
}
field_count= item_count= 0;
if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having)
{ // only const items with no OPTION_FOUND_ROWS
List_iterator<Item> it(*fields);
for (sorder= sortorder ; (item=it++) ;)
{
if (!item->const_item())
{
if (item->get_tmp_table_field())
{
/* Field is stored in temporary table, skip */
field_count++;
}
else
{
/* Item is not stored in temporary table, remember it */
sorder->field= 0; // Safety, not used
sorder->item= item;
/* Calculate sorder->length */
item->type_handler()->sortlength(thd, item, sorder);
sorder++;
item_count++;
}
}
}
sorder->item= 0; // End marker
if ((field_count + item_count == 0) && ! having &&
!(join->select_options & OPTION_FOUND_ROWS))
{
// only const items with no OPTION_FOUND_ROWS
join->unit->select_limit_cnt= 1; // Only send first row
my_free(sortorder);
DBUG_RETURN(false);
}
/*
The table contains first fields that will be in the output, then
temporary results pointed to by the fields list.
Example: SELECT DISTINCT sum(a), sum(d) > 2 FROM ...
In this case the temporary table contains sum(a), sum(d).
*/
Field **first_field=table->field+table->s->fields - field_count;
for (Field **ptr=first_field; *ptr; ptr++)
keylength+= (*ptr)->sort_length() + (*ptr)->maybe_null();
for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++)
sort_field_keylength+= ptr->length + (ptr->item->maybe_null ? 1 : 0);
/*
Disable LIMIT ROWS EXAMINED in order to avoid interrupting prematurely
@ -24251,30 +24284,79 @@ JOIN_TAB::remove_duplicates()
thd->reset_killed();
table->file->info(HA_STATUS_VARIABLE);
table->reginfo.lock_type=TL_WRITE;
if (table->s->db_type() == heap_hton ||
(!table->s->blob_fields &&
((ALIGN_SIZE(keylength) + HASH_OVERHEAD) * table->file->stats.records <
thd->variables.sortbuff_size)))
error=remove_dup_with_hash_index(join->thd, table, field_count, first_field,
keylength, having);
error= remove_dup_with_hash_index(join->thd, table, field_count,
first_field, sortorder,
keylength + sort_field_keylength, having);
else
error=remove_dup_with_compare(join->thd, table, first_field, having);
error=remove_dup_with_compare(join->thd, table, first_field, sortorder,
sort_field_keylength, having);
if (join->select_lex != join->select_lex->master_unit()->fake_select_lex)
thd->lex->set_limit_rows_examined();
free_blobs(first_field);
my_free(sortorder);
DBUG_RETURN(error);
}
/*
Create a sort/compare key from items
Key is of fixed length and binary comparable
*/
static uchar *make_sort_key(SORT_FIELD *sortorder, uchar *key_buffer,
String *tmp_value)
{
for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++)
{
ptr->item->type_handler()->make_sort_key(key_buffer,
ptr->item,
ptr, tmp_value);
key_buffer+= (ptr->item->maybe_null ? 1 : 0) + ptr->length;
}
return key_buffer;
}
/*
Remove duplicates by comparing all rows with all other rows
@param thd THD
@param table Temporary table
@param first_field Pointer to fields in temporary table that are part of
distinct, ends with null pointer
@param sortorder An array of Items that are part of DISTINCT. Terminated
with an element N with sortorder[N]->item=NULL.
@param keylength Length of key produced by sortorder
@param having Having expression (NULL if no having)
*/
static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
SORT_FIELD *sortorder, ulong keylength,
Item *having)
{
handler *file=table->file;
uchar *record=table->record[0];
uchar *record=table->record[0], *key_buffer, *key_buffer2;
char *tmp_buffer;
int error;
String tmp_value;
DBUG_ENTER("remove_dup_with_compare");
if (unlikely(!my_multi_malloc(MYF(MY_WME),
&key_buffer, keylength,
&key_buffer2, keylength,
&tmp_buffer, keylength+1,
NullS)))
DBUG_RETURN(1);
tmp_value.set(tmp_buffer, keylength, &my_charset_bin);
if (unlikely(file->ha_rnd_init_with_error(1)))
DBUG_RETURN(1);
@ -24283,8 +24365,8 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
{
if (unlikely(thd->check_killed()))
{
error=0;
goto err;
error= 1;
goto end;
}
if (unlikely(error))
{
@ -24303,9 +24385,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
{
my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY),
MYF(ME_FATAL));
error=0;
goto err;
error= 1;
goto end;
}
make_sort_key(sortorder, key_buffer, &tmp_value);
store_record(table,record[1]);
/* Read through rest of file and mark duplicated rows deleted */
@ -24318,7 +24401,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
break;
goto err;
}
if (compare_record(table, first_field) == 0)
make_sort_key(sortorder, key_buffer2, &tmp_value);
if (compare_record(table, first_field) == 0 &&
(!keylength ||
memcmp(key_buffer, key_buffer2, keylength) == 0))
{
if (unlikely((error= file->ha_delete_row(record))))
goto err;
@ -24337,38 +24423,52 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
goto err;
}
error= 0;
end:
my_free(key_buffer);
file->extra(HA_EXTRA_NO_CACHE);
(void) file->ha_rnd_end();
DBUG_RETURN(0);
DBUG_RETURN(error);
err:
file->extra(HA_EXTRA_NO_CACHE);
(void) file->ha_rnd_end();
if (error)
file->print_error(error,MYF(0));
DBUG_RETURN(1);
DBUG_ASSERT(error);
file->print_error(error,MYF(0));
goto end;
}
/**
Generate a hash index for each row to quickly find duplicate rows.
Generate a hash index for each row to quickly find duplicate rows.
@note
Note that this will not work on tables with blobs!
@param thd THD
@param table Temporary table
@param field_count Number of fields part of distinct
@param first_field Pointer to fields in temporary table that are part of
distinct, ends with null pointer
@param sortorder An array of Items that are part of DISTINCT. Terminated
with an element N with sortorder[N]->item=NULL.
@param keylength Length of hash key
@param having Having expression (NULL if no having)
@note
Note that this will not work on tables with blobs!
*/
static int remove_dup_with_hash_index(THD *thd, TABLE *table,
uint field_count,
Field **first_field,
SORT_FIELD *sortorder,
ulong key_length,
Item *having)
{
uchar *key_buffer, *key_pos, *record=table->record[0];
char *tmp_buffer;
int error;
handler *file= table->file;
ulong extra_length= ALIGN_SIZE(key_length)-key_length;
uint *field_lengths, *field_length;
HASH hash;
Field **ptr;
String tmp_value;
DBUG_ENTER("remove_dup_with_hash_index");
if (unlikely(!my_multi_malloc(MYF(MY_WME),
@ -24376,11 +24476,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
(uint) ((key_length + extra_length) *
(long) file->stats.records),
&field_lengths,
(uint) (field_count*sizeof(*field_lengths)),
(uint) (field_count * sizeof(*field_lengths)),
&tmp_buffer, key_length+1,
NullS)))
DBUG_RETURN(1);
for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
tmp_value.set(tmp_buffer, key_length, &my_charset_bin);
field_length= field_lengths;
for (Field **ptr= first_field ; *ptr ; ptr++)
(*field_length++)= (*ptr)->sort_length();
if (unlikely(my_hash_init(&hash, &my_charset_bin,
@ -24394,7 +24497,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (unlikely((error= file->ha_rnd_init(1))))
goto err;
key_pos=key_buffer;
key_pos= key_buffer;
for (;;)
{
uchar *org_key_pos;
@ -24419,11 +24522,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
/* copy fields to key buffer */
org_key_pos= key_pos;
field_length=field_lengths;
for (ptr= first_field ; *ptr ; ptr++)
for (Field **ptr= first_field ; *ptr ; ptr++)
{
(*ptr)->make_sort_key(key_pos, *field_length);
key_pos+= (*ptr)->maybe_null() + *field_length++;
}
/* Copy result fields not stored in table to key buffer */
key_pos= make_sort_key(sortorder, key_pos, &tmp_value);
/* Check if it exists before */
if (my_hash_search(&hash, org_key_pos, key_length))
{

View file

@ -19,6 +19,7 @@
#include "my_base.h" /* ha_rows */
#include <my_sys.h> /* qsort2_cmp */
#include "queues.h"
#include "sql_string.h"
typedef struct st_buffpek BUFFPEK;
@ -82,14 +83,20 @@ public:
uchar *unique_buff;
bool not_killable;
char* tmp_buffer;
String tmp_buffer;
// The fields below are used only by Unique class.
qsort2_cmp compare;
BUFFPEK_COMPARE_CONTEXT cmp_context;
Sort_param()
{
memset(this, 0, sizeof(*this));
memset(reinterpret_cast<void*>(this), 0, sizeof(*this));
tmp_buffer.set_thread_specific();
/*
Fix memset() clearing the charset.
TODO: The constructor should be eventually rewritten not to use memset().
*/
tmp_buffer.set_charset(&my_charset_bin);
}
void init_for_filesort(uint sortlen, TABLE *table,
ha_rows maxrows, bool sort_positions);

View file

@ -3734,7 +3734,7 @@ public:
virtual void make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const= 0;
String *tmp) const= 0;
virtual void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const= 0;
@ -4120,7 +4120,7 @@ public:
const Bit_addr &bit,
const Column_definition_attributes *attr,
uint32 flags) const override;
void make_sort_key(uchar *, Item *, const SORT_FIELD_ATTR *, Sort_param *)
void make_sort_key(uchar *, Item *, const SORT_FIELD_ATTR *, String *tmp)
const override
{
MY_ASSERT_UNREACHABLE();
@ -4431,7 +4431,7 @@ public:
const Item *outer,
bool is_in_predicate) const;
void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const;
String *tmp) const;
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
@ -4519,7 +4519,7 @@ public:
bool is_in_predicate) const;
Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const;
void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const;
String *tmp) const;
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
@ -4745,7 +4745,7 @@ public:
bool is_in_predicate) const;
Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const;
void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const;
String *tmp) const;
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
@ -4834,7 +4834,7 @@ public:
Item_result cmp_type() const { return TIME_RESULT; }
virtual ~Type_handler_temporal_result() = default;
void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const;
String *tmp) const;
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
@ -4921,7 +4921,7 @@ public:
type_handler_adjusted_to_max_octet_length(uint max_octet_length,
CHARSET_INFO *cs) const;
void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const;
String *tmp) const;
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
@ -5953,7 +5953,7 @@ public:
cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const;
in_vector *make_in_vector(THD *thd, const Item_func_in *f, uint nargs) const;
void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const;
String *tmp) const;
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;