mariadb/sql/multi_range_read.h

/*
  This file contains declarations for the Disk-Sweep MultiRangeRead (DS-MRR)
  implementation.
*/

/**
  A Disk-Sweep implementation of the MRR interface (DS-MRR for short)

  This is a "plugin"(*) for storage engines that allows one to
    1. When doing index scans, read table rows in rowid order;
    2. When making many index lookups, do them in key order and don't
       look up the same key value multiple times;
    3. Do both #1 and #2, when applicable.

  These changes are expected to speed up query execution for disk-based
  storage engines running IO-bound loads and "big" queries (i.e. queries
  that do joins and enumerate lots of records).

  (*) - only conceptually. No dynamic loading or binary compatibility of
  any kind.

  General scheme of things:

      SQL Layer code
       |   |   |
       v   v   v
  -----|---|---|-------- handler->multi_range_read_XXX() function calls
       |   |   |
    ___v___v___v______________________________
   / DS-MRR module                            \
   | (order/de-duplicate lookup keys,         |
   |  scan indexes in key order,              |
   |  order/de-duplicate rowids,              |
   |  read full records in rowid order)       |
   \__________________________________________/
       |   |   |
  -----|---|---|-------- handler->read_range_first()/read_range_next(),
       |   |   |         handler->index_read(), handler->rnd_pos() calls.
       |   |   |
       v   v   v
    Storage engine internals

  Currently DS-MRR is used by the MyISAM, InnoDB/XtraDB and Maria storage
  engines. Potentially it can be used with any table handler that has
  disk-based data storage and has better performance when reading data in
  rowid order.
*/
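
/*
  A sketch (illustrative only, with hypothetical helper names such as
  setup_ranges() and process_record()) of the call pattern the SQL layer
  uses against the MRR interface; the handler::multi_range_read_* signatures
  assumed here mirror the dsmrr_* declarations further below:

    RANGE_SEQ_IF seq_funcs;          // callbacks that enumerate the ranges
    HANDLER_BUFFER buf;              // memory MRR may use for sorting
    char *range_info;
    setup_ranges(&seq_funcs, &buf);  // hypothetical

    h->multi_range_read_init(&seq_funcs, seq_init_param, n_ranges,
                             mode, &buf);
    while (!h->multi_range_read_next(&range_info))
      process_record(table->record[0], range_info);  // hypothetical
*/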

class Forward_lifo_buffer;
class Backward_lifo_buffer;

/* A LIFO buffer. Described in detail in the comment further below. */
class Lifo_buffer
{
protected:
  /*
    Data to be written. The write() call will assume that (*write_ptr1)
    points to size1 bytes of data to be written.
    If write_ptr2 != NULL, then the buffer stores pairs, and (*write_ptr2)
    points to size2 bytes of data that form the second component.
  */
  uchar **write_ptr1;
  size_t size1;
  uchar **write_ptr2;
  size_t size2;
  /*
    read() will store a pointer to the read data into *read_ptr1 (if the
    buffer stores atomic elements), or into {*read_ptr1, *read_ptr2} (if
    the buffer stores pairs).
  */
  uchar **read_ptr1;
  uchar **read_ptr2;

  uchar *start; /* points to start of buffer space */
  uchar *end;   /* points to just beyond the end of buffer space */
public:
  enum enum_direction {
    BACKWARD=-1, /* buffer is filled/read from bigger to smaller memory addresses */
    FORWARD=1    /* buffer is filled/read from smaller to bigger memory addresses */
  };
  virtual enum_direction type() = 0;

  /* Buffer space control functions */
  void set_buffer_space(uchar *start_arg, uchar *end_arg)
  {
    start= start_arg;
    end= end_arg;
    TRASH(start, end - start);
    reset_for_writing();
  }
  void setup_writing(uchar **data1, size_t len1, uchar **data2, size_t len2)
  {
    write_ptr1= data1;
    size1= len1;
    write_ptr2= data2;
    size2= len2;
  }
  void setup_reading(uchar **data1, size_t len1, uchar **data2, size_t len2)
  {
    read_ptr1= data1;
    DBUG_ASSERT(len1 == size1);
    read_ptr2= data2;
    DBUG_ASSERT(len2 == size2);
  }

  //virtual void write_bytes(const uchar *data, size_t bytes)= 0;
  virtual bool read() = 0;
  virtual void write() = 0;

  bool can_write()
  {
    return have_space_for(size1 + (write_ptr2 ? size2 : 0));
  }
  bool is_empty() { return used_size() == 0; }
  virtual size_t used_size() = 0;

  void sort(qsort2_cmp cmp_func, void *cmp_func_arg)
  {
    uint elem_size= size1 + (write_ptr2 ? size2 : 0);
    uint n_elements= used_size() / elem_size;
    my_qsort2(used_area(), n_elements, elem_size, cmp_func, cmp_func_arg);
  }

  virtual void reset_for_writing() = 0;
  virtual uchar *end_of_space() = 0;

  bool have_data(size_t bytes)
  {
    return (used_size() >= bytes);
  }
  virtual bool have_space_for(size_t bytes) = 0;
  //virtual uchar *read_bytes(size_t bytes) = 0;

  virtual void remove_unused_space(uchar **unused_start, uchar **unused_end)= 0;
  virtual uchar *used_area() = 0;

  class Iterator
  {
  public:
    virtual void init(Lifo_buffer *buf) = 0;
    /*
      Read the next value. The calling convention is the same as that of
      buf->read().

      RETURN
        FALSE - Ok
        TRUE  - EOF, reached the end of the buffer
    */
    virtual bool read_next()= 0;
    virtual ~Iterator() {}
  protected:
    Lifo_buffer *buf;
    virtual uchar *get_next(size_t nbytes)= 0;
  };
  virtual ~Lifo_buffer() {}

  friend class Forward_iterator;
  friend class Backward_iterator;
};

class Forward_lifo_buffer: public Lifo_buffer
{
  uchar *pos;
public:
  enum_direction type() { return FORWARD; }
  size_t used_size()
  {
    return pos - start;
  }
  void reset_for_writing()
  {
    pos= start;
  }
  uchar *end_of_space() { return pos; }
  bool have_space_for(size_t bytes)
  {
    return (pos + bytes < end);
  }
  void write()
  {
    write_bytes(*write_ptr1, size1);
    if (write_ptr2)
      write_bytes(*write_ptr2, size2);
  }
  void write_bytes(const uchar *data, size_t bytes)
  {
    DBUG_ASSERT(have_space_for(bytes));
    memcpy(pos, data, bytes);
    pos += bytes;
  }
  uchar *read_bytes(size_t bytes)
  {
    DBUG_ASSERT(have_data(bytes));
    pos= pos - bytes;
    return pos;
  }
  bool read()
  {
    if (!have_data(size1 + (read_ptr2 ? size2 : 0)))
      return TRUE;
    if (read_ptr2)
      *read_ptr2= read_bytes(size2);
    *read_ptr1= read_bytes(size1);
    return FALSE;
  }
  /*
    Stop using/return the unneeded space (the space that we have already
    written to and have read from).
  */
  void remove_unused_space(uchar **unused_start, uchar **unused_end)
  {
    DBUG_ASSERT(0); /* Don't need this yet */
  }
  void grow(uchar *unused_start, uchar *unused_end)
  {
    /*
      The passed memory area can be meaningfully used for growing the
      buffer if:
      - it is adjacent to buffer space we're using
      - it is on the end towards which we grow.
    */
    DBUG_ASSERT(unused_end >= unused_start);
    TRASH(unused_start, unused_end - unused_start);
    DBUG_ASSERT(end == unused_start);
    end= unused_end;
  }
  /* Return pointer to start of the memory area that is occupied by the data */
  uchar *used_area() { return start; }
  friend class Forward_iterator;
};
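
/*
  A minimal sketch (illustrative only) of the LIFO contract: elements come
  back in reverse order of writing. This assumes single-component elements
  (write_ptr2 == NULL); "space"/"space_end" and "elem" are hypothetical:

    Forward_lifo_buffer buf;
    uchar *elem;                             // write source and read target
    buf.set_buffer_space(space, space_end);
    buf.setup_writing(&elem, 4, NULL, 0);
    elem= (uchar*)"AAAA"; buf.write();
    elem= (uchar*)"BBBB"; buf.write();
    buf.setup_reading(&elem, 4, NULL, 0);
    buf.read();                              // elem points at "BBBB" now
    buf.read();                              // elem points at "AAAA" now
*/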

class Forward_iterator : public Lifo_buffer::Iterator
{
  uchar *pos;

  /* Return pointer to the next chunk of nbytes bytes and advance over it */
  uchar *get_next(size_t nbytes)
  {
    if (pos - nbytes < ((Forward_lifo_buffer*)buf)->start)
      return NULL;
    pos -= nbytes;
    return pos;
  }
public:
  bool read_next()
  {
    uchar *res;
    if (buf->read_ptr2)
    {
      if ((res= get_next(buf->size2)))
      {
        *(buf->read_ptr2)= res;
        *buf->read_ptr1= get_next(buf->size1);
        return FALSE;
      }
    }
    else
    {
      if ((res= get_next(buf->size1)))
      {
        *(buf->read_ptr1)= res;
        return FALSE;
      }
    }
    return TRUE; /* EOF */
  }
  void init(Lifo_buffer *buf_arg)
  {
    DBUG_ASSERT(buf_arg->type() == Lifo_buffer::FORWARD);
    buf= buf_arg;
    pos= ((Forward_lifo_buffer*)buf)->pos;
  }
};

class Backward_lifo_buffer: public Lifo_buffer
{
  uchar *pos;
public:
  enum_direction type() { return BACKWARD; }
  size_t used_size()
  {
    return end - pos;
  }
  void reset_for_writing()
  {
    pos= end;
  }
  uchar *end_of_space() { return end; }
  bool have_space_for(size_t bytes)
  {
    return (pos - bytes >= start);
  }
  void write()
  {
    if (write_ptr2)
      write_bytes(*write_ptr2, size2);
    write_bytes(*write_ptr1, size1);
  }
  void write_bytes(const uchar *data, size_t bytes)
  {
    DBUG_ASSERT(have_space_for(bytes));
    pos -= bytes;
    memcpy(pos, data, bytes);
  }
  bool read()
  {
    if (!have_data(size1 + (read_ptr2 ? size2 : 0)))
      return TRUE;
    *read_ptr1= read_bytes(size1);
    if (read_ptr2)
      *read_ptr2= read_bytes(size2);
    return FALSE;
  }
  uchar *read_bytes(size_t bytes)
  {
    DBUG_ASSERT(have_data(bytes));
    uchar *ret= pos;
    pos= pos + bytes;
    return ret;
  }
  /*
    Stop using/return the unneeded space (the space that we have already
    written to and have read from).
  */
  void remove_unused_space(uchar **unused_start, uchar **unused_end)
  {
    *unused_start= start;
    *unused_end= pos;
    start= pos;
  }
  void grow(uchar *unused_start, uchar *unused_end)
  {
    /*
      The passed memory area can be meaningfully used for growing the
      buffer if:
      - it is adjacent to buffer space we're using
      - it is on the end towards which we grow.
    */
    /*
    DBUG_ASSERT(unused_end >= unused_start);
    TRASH(unused_start, unused_end - unused_start);
    DBUG_ASSERT(start == unused_end);
    start= unused_start;
    */
    DBUG_ASSERT(0); /* Not used */
  }
  /* Return pointer to start of the memory area that is occupied by the data */
  uchar *used_area() { return pos; }
  friend class Backward_iterator;
};

class Backward_iterator : public Lifo_buffer::Iterator
{
  uchar *pos;

  /* Return pointer to next chunk of nbytes bytes and advance over it */
  uchar *get_next(size_t nbytes)
  {
    if (pos + nbytes > ((Backward_lifo_buffer*)buf)->end)
      return NULL;
    uchar *res= pos;
    pos += nbytes;
    return res;
  }
public:
  bool read_next()
  {
    /*
      Always read the first component first (if the buffer is backwards, we
      have written the second component first).
    */
    uchar *res;
    if ((res= get_next(buf->size1)))
    {
      *(buf->read_ptr1)= res;
      if (buf->read_ptr2)
        *buf->read_ptr2= get_next(buf->size2);
      return FALSE;
    }
    return TRUE; /* EOF */
  }
  void init(Lifo_buffer *buf_arg)
  {
    DBUG_ASSERT(buf_arg->type() == Lifo_buffer::BACKWARD);
    buf= buf_arg;
    pos= ((Backward_lifo_buffer*)buf)->pos;
  }
};

/*
  An in-memory buffer used by the DS-MRR implementation.
  - The buffer contains fixed-size elements. The elements are either atomic
    byte sequences or pairs.
  - The buffer resides in memory provided by the user. It is possible to
    = dynamically (i.e. between write operations) add adjacent memory space
      to the buffer
    = dynamically remove unused space from the buffer.
  - The buffer can be set to be either "forward" or "backward".

  The intent of the last two properties is to allow two buffers to live on
  adjacent memory spaces: one is being read from (and so its space shrinks),
  while the other is being written to (and so it needs more and more space).

  Illustration of forward buffer operation:

                 +-- next read will read from here
                 |
                 |               +-- next write will write to here
                 v               v
    *------------*===============*------------------*
    ^            ^               ^                  ^
    |            |               |                  |
  start       read_pos       write_pos             end

      unused space   user data

  For a backward buffer, start/end have the same meaning, but reading and
  writing are done from end to start.
*/
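
/*
  A sketch (illustrative only, with hypothetical names such as space_start,
  get_next_pair() and rowid_cmp()) of the intended write/read cycle for a
  pair-storing buffer, e.g. {rowid, range_id} pairs. Note that reads are
  LIFO: read() returns elements in reverse order of writing, so code that
  sorts and then reads must pick the comparison direction accordingly
  (cf. key_tuple_cmp_reverse() below):

    Forward_lifo_buffer buf;
    uchar *rowid, *range_id;          // pair components
    buf.set_buffer_space(space_start, space_end);
    buf.setup_writing(&rowid, rowid_size, &range_id, sizeof(char*));
    while (get_next_pair(&rowid, &range_id) && buf.can_write())
      buf.write();                    // copies *rowid, then *range_id
    buf.sort(rowid_cmp, NULL);        // qsort2-style compare function
    buf.setup_reading(&rowid, rowid_size, &range_id, sizeof(char*));
    while (!buf.read())               // FALSE <=> a pair was read
      consume(rowid, range_id);       // both now point into the buffer
*/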

/*
  DS-MRR implementation for one table. Create/use one object of this class
  for each ha_{myisam/innobase/etc} object. That object will be further
  referred to as "the handler".

  DsMrr_impl has the following execution strategies:
  - Bypass DS-MRR, pass all calls to the default MRR implementation, which
    is an MRR-to-non-MRR call converter.
  - Key-Ordered Retrieval
  - Rowid-Ordered Retrieval

  DsMrr_impl will use one of the above strategies, or a combination of them,
  according to the following diagram:

            (mrr function calls)
                   |
          +--------+------------------------------+
          |                                       |
   _______v__________________      ______________v_________________
  / default: use lookup keys \    / KEY-ORDERED RETRIEVAL:         \
  | (or ranges) in whatever  |    | sort lookup keys and then make |
  | order they are supplied  |    | index lookups in index order   |
  \__________________________/    \________________________________/
      |             |                 |                     |
      |       +-----|-----------------+                     |
      |       |     +------------------+                    |
      |       |                        |                    |
   ___v_______v_____             ______v____________________v___
  / default: read   \           / ROWID-ORDERED RETRIEVAL:      \
  | table records   |           | Before reading table records, |
  | in random order |           | sort their rowids and then    |
  \_________________/           | read them in rowid order      |
           |                    \_______________________________/
           |                                    |
           +--------------------+---------------+
                                |
                                v
                  (table records and range_ids)

  The choice of strategy depends on the MRR scan properties, table
  properties (whether we're scanning a clustered primary key), and
  @@optimizer_switch settings.

  Key-Ordered Retrieval
  ---------------------
  The idea is: if an MRR scan is essentially a series of lookups on

    tbl.key=value1 OR tbl.key=value2 OR ... OR tbl.key=valueN

  then it makes sense to collect and order the set of lookup values, i.e.

    sort(value1, value2, .. valueN)

  and then do index lookups in index order. This results in fewer index page
  fetch operations, and we can also avoid making multiple index lookups for
  the same value. That is, if value1=valueN, we can easily discover that
  after sorting and make a single index lookup for them instead of two.
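
  An illustrative sketch of the lookup phase (sort() and key_cmp() are
  hypothetical stand-ins; index_read() is shown with its usual handler
  signature):

    sort(keys, n_keys, key_cmp);
    for (uint i= 0; i < n_keys; i++)
    {
      if (i > 0 && !key_cmp(keys[i], keys[i-1]))
        continue;                  // same as previous key: reuse its result
      h->index_read(table->record[0], keys[i], key_len, HA_READ_KEY_EXACT);
    }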

  Rowid-Ordered Retrieval
  -----------------------
  If we do a regular index scan or a series of index lookups, we'll be
  hitting table records at random. For disk-based engines, this is much
  slower than reading the same records in disk order. We assume that the
  disk ordering of rows is the same as the ordering of their rowids (which
  is provided by handler::cmp_ref()).

  In order to retrieve records in a different order, we must separate index
  scanning and record fetching; that is, an MRR scan uses the following
  steps:

    1. Scan the index (and only the index, that is, with HA_EXTRA_KEYREAD
       on) and fill a buffer with {rowid, range_id} pairs
    2. Sort the buffer by rowid value
    3. For each {rowid, range_id} pair in the buffer:
       get the record by rowid and return the {record, range_id} pair
    4. Repeat the above steps until we've exhausted the list of ranges
       we're scanning.
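
  An illustrative sketch of one iteration of the above (index_scan_at_end(),
  read_next_index_entry() and rowid_cmp() are hypothetical helpers; the real
  code lives in dsmrr_fill_rowid_buffer() and dsmrr_next(), and assumes the
  buffer has been prepared with setup_writing()/setup_reading()):

    h2->extra(HA_EXTRA_KEYREAD);               // 1. scan the index only
    while (rowid_buffer.can_write() && !index_scan_at_end())
    {
      read_next_index_entry();                 //    points rowid/range_id
      rowid_buffer.write();                    //    store the pair
    }
    rowid_buffer.sort(rowid_cmp, NULL);        // 2. sort by rowid
    while (!rowid_buffer.read())               // 3. for each {rowid, range_id}
      h->rnd_pos(table->record[0], rowid);     //    fetch the full record
                                               // 4. refill and repeat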
*/

class DsMrr_impl
{
public:
  typedef void (handler::*range_check_toggle_func_t)(bool on);

  DsMrr_impl()
    : h2(NULL) {}

  void init(handler *h_arg, TABLE *table_arg)
  {
    h= h_arg;
    table= table_arg;
  }
  int dsmrr_init(handler *h, RANGE_SEQ_IF *seq_funcs, void *seq_init_param,
                 uint n_ranges, uint mode, HANDLER_BUFFER *buf);
  void dsmrr_close();
  int dsmrr_next(char **range_info);
  ha_rows dsmrr_info(uint keyno, uint n_ranges, uint keys, uint key_parts,
                     uint *bufsz, uint *flags, COST_VECT *cost);
  ha_rows dsmrr_info_const(uint keyno, RANGE_SEQ_IF *seq,
                           void *seq_init_param, uint n_ranges, uint *bufsz,
                           uint *flags, COST_VECT *cost);
private:
  /*
    The "owner" handler object (the one that is expected to "own" this
    object and call its functions).
  */
  handler *h;
  TABLE *table; /* Always equal to h->table */

  /*
    Secondary handler object. (Created when needed; we need it when we need
    to run both an index scan and rnd_pos() calls at the same time.)
  */
  handler *h2;

  /** Properties of the current MRR scan **/

  uint keyno; /* index we're running the scan on */
  bool use_default_impl; /* TRUE <=> shortcut all calls to default MRR impl */
  /* TRUE <=> need range association, buffers hold {rowid, range_id} pairs */
  bool is_mrr_assoc;
  /* TRUE <=> sort the keys before making index lookups */
  bool do_sort_keys;
  /* TRUE <=> sort rowids and use rnd_pos() to get and return full records */
  bool do_rndpos_scan;
  /*
    (if do_sort_keys==TRUE) Don't copy key values, use pointers to them
    instead.
  */
  bool use_key_pointers;

  /* The whole buffer space that we're using */
  uchar *full_buf;
  uchar *full_buf_end;

  /*
    When using both rowid and key buffers: the boundary between the key and
    rowid parts of the buffer. This is the "original" value; the actual
    memory ranges used by the key and rowid parts may be different because
    of dynamic space reallocation between them.
  */
  uchar *rowid_buffer_end;

  /** Index scanning and key buffer-related members **/

  /* TRUE <=> We can get at most one index tuple for a lookup key */
  bool index_ranges_unique;
  /* TRUE <=> we're in the middle of enumerating records for a key range */
  bool in_index_range;

  /*
    One of the following two is used for the key buffer: forward is used
    when we only need a key buffer, backward is used when we need both key
    and rowid buffers.
  */
  Forward_lifo_buffer forward_key_buf;
  Forward_iterator forward_key_it;
  Backward_lifo_buffer backward_key_buf;
  Backward_iterator backward_key_it;

  /* Buffer to store (key, range_id) pairs */
  Lifo_buffer *key_buffer;

  /* key_buffer.read() will set this to point to the current lookup tuple */
  uchar *cur_index_tuple;
  /* if in_index_range==TRUE: range_id of the range we're enumerating */
  char *cur_range_info;

  /*
    TRUE <=> we've got index tuples/rowids for all keys (we need this flag
    because we may have a situation where we've read everything from the
    key buffer but haven't finished getting index tuples for the last key)
  */
  bool key_eof;

  /* Initially FALSE, becomes TRUE when we've set the key_tuple_xxx members */
  bool know_key_tuple_params;

  uint key_tuple_length; /* Length of index lookup tuple, in bytes */
  key_part_map key_tuple_map; /* keyparts used in index lookup tuples */

  /*
    This is
      = key_tuple_length   if we copy keys to the buffer
      = sizeof(void*)      if we're using pointers to materialized keys.
  */
  uint key_size_in_keybuf;

  /* = key_size_in_keybuf [ + sizeof(range_assoc_info) ] */
  uint key_buff_elem_size;

  /*
    TRUE <=> we're doing a key-ordered index scan, and right now several
    subsequent key values are the same as the one we've already retrieved
    and returned an index tuple for.
  */
  bool in_identical_keys_range;

  /* range_id of the first of the identical keys */
  char *first_identical_range_info;
  /* Pointer to the last of the identical key values */
  uchar *last_identical_key_ptr;

  /*
    key_buffer iterator for walking the identical key range (we need to
    enumerate the set of (identical_key, range_id) pairs multiple times,
    and we do that by walking from the current buffer read position until
    we reach last_identical_key_ptr).
  */
  Lifo_buffer::Iterator *identical_key_it;

  /** rnd_pos() scan and rowid buffer-related members **/

  /*
    Buffer to store (rowid, range_id) pairs, or just rowids if
    is_mrr_assoc==FALSE
  */
  Forward_lifo_buffer rowid_buffer;

  /* rowid_buffer.read() will set the following: */
  uchar *rowid;
  uchar *rowids_range_id;

  /*
    Not NULL: we're traversing a group of (rowid, range_id) pairs with
    identical rowid values, and this is the pointer to the last one.
    NULL: we're not in a group of identical rowids.
  */
  uchar *last_identical_rowid;

  bool dsmrr_eof; /* TRUE <=> We have reached EOF when reading index tuples */

  /* = h->ref_length [ + sizeof(range_assoc_info) ] */
  uint rowid_buff_elem_size;

  bool choose_mrr_impl(uint keyno, ha_rows rows, uint *flags, uint *bufsz,
                       COST_VECT *cost);
  bool get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
                               uint *buffer_size, COST_VECT *cost);
  bool check_cpk_scan(THD *thd, uint keyno, uint mrr_flags);
  static int key_tuple_cmp(void* arg, uchar* key1, uchar* key2);
  static int key_tuple_cmp_reverse(void* arg, uchar* key1, uchar* key2);

  int dsmrr_fill_rowid_buffer();
  void dsmrr_fill_key_buffer();
  int dsmrr_next_from_index(char **range_info);

  void setup_buffer_sizes(key_range *sample_key);
  void reallocate_buffer_space();

  static range_seq_t key_buf_seq_init(void *init_param, uint n_ranges,
                                      uint flags);
  static uint key_buf_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range);
};