Merge mronstrom@bk-internal.mysql.com:/home/bk/mysql-5.1
into mysql.com:/Users/mikron/wl1682

Auto merged: sql/field.cc, sql/field.h, sql/ha_innodb.cc, sql/ha_innodb.h,
sql/ha_ndbcluster.cc, sql/ha_ndbcluster.h, sql/handler.cc, sql/handler.h,
sql/item.cc, sql/mysql_priv.h, sql/opt_range.cc, sql/sql_acl.cc,
sql/sql_base.cc, sql/sql_bitmap.h, sql/sql_class.h, sql/sql_insert.cc,
sql/sql_select.cc, sql/sql_table.cc, sql/sql_update.cc, sql/table.cc,
sql/unireg.cc
Commit 3e6b7f2b65
30 changed files with 1737 additions and 513 deletions
|
@ -21,10 +21,13 @@
|
|||
|
||||
#define MY_BIT_NONE (~(uint) 0)
|
||||
|
||||
|
||||
typedef struct st_bitmap
|
||||
{
|
||||
uchar *bitmap;
|
||||
uint bitmap_size; /* number of bytes occupied by the above */
|
||||
uint bitmap_size; /* number of bits occupied by the above */
|
||||
uint32 last_word_mask;
|
||||
uint32 *last_word_ptr;
|
||||
/*
|
||||
mutex will be acquired for the duration of each bitmap operation if
|
||||
thread_safe flag in bitmap_init was set. Otherwise, we optimize by not
|
||||
|
@ -38,30 +41,60 @@ typedef struct st_bitmap
|
|||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
extern my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2);
|
||||
extern my_bool bitmap_init(MY_BITMAP *map, uchar *buf, uint bitmap_size, my_bool thread_safe);
|
||||
extern my_bool bitmap_is_clear_all(const MY_BITMAP *map);
|
||||
extern my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size);
|
||||
extern my_bool bitmap_is_set(const MY_BITMAP *map, uint bitmap_bit);
|
||||
extern my_bool bitmap_is_set_all(const MY_BITMAP *map);
|
||||
extern my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2);
|
||||
extern uint bitmap_set_next(MY_BITMAP *map);
|
||||
extern uint bitmap_get_first(const MY_BITMAP *map);
|
||||
extern uint bitmap_get_first_set(const MY_BITMAP *map);
|
||||
extern uint bitmap_bits_set(const MY_BITMAP *map);
|
||||
extern void bitmap_clear_all(MY_BITMAP *map);
|
||||
extern void bitmap_clear_bit(MY_BITMAP *map, uint bitmap_bit);
|
||||
extern void bitmap_free(MY_BITMAP *map);
|
||||
extern void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
extern void bitmap_set_all(MY_BITMAP *map);
|
||||
extern void bitmap_set_bit(MY_BITMAP *map, uint bitmap_bit);
|
||||
extern void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size);
|
||||
extern void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
extern void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
extern void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
|
||||
extern uint bitmap_lock_set_next(MY_BITMAP *map);
|
||||
extern void bitmap_lock_clear_bit(MY_BITMAP *map, uint bitmap_bit);
|
||||
#ifdef NOT_USED
|
||||
extern uint bitmap_lock_bits_set(const MY_BITMAP *map);
|
||||
extern my_bool bitmap_lock_is_set_all(const MY_BITMAP *map);
|
||||
extern uint bitmap_lock_get_first(const MY_BITMAP *map);
|
||||
extern uint bitmap_lock_get_first_set(const MY_BITMAP *map);
|
||||
extern my_bool bitmap_lock_is_subset(const MY_BITMAP *map1,
|
||||
const MY_BITMAP *map2);
|
||||
extern my_bool bitmap_lock_is_prefix(const MY_BITMAP *map, uint prefix_size);
|
||||
extern my_bool bitmap_lock_is_set(const MY_BITMAP *map, uint bitmap_bit);
|
||||
extern my_bool bitmap_lock_is_clear_all(const MY_BITMAP *map);
|
||||
extern my_bool bitmap_lock_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2);
|
||||
extern void bitmap_lock_set_all(MY_BITMAP *map);
|
||||
extern void bitmap_lock_clear_all(MY_BITMAP *map);
|
||||
extern void bitmap_lock_set_bit(MY_BITMAP *map, uint bitmap_bit);
|
||||
extern void bitmap_lock_flip_bit(MY_BITMAP *map, uint bitmap_bit);
|
||||
extern void bitmap_lock_set_prefix(MY_BITMAP *map, uint prefix_size);
|
||||
extern void bitmap_lock_intersect(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
extern void bitmap_lock_subtract(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
extern void bitmap_lock_union(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
extern void bitmap_lock_xor(MY_BITMAP *map, const MY_BITMAP *map2);
|
||||
extern void bitmap_lock_invert(MY_BITMAP *map);
|
||||
#endif
|
||||
/* Fast, not thread safe, bitmap functions */
|
||||
#define bitmap_fast_set_bit(MAP, BIT) (MAP)->bitmap[(BIT) / 8] |= (1 << ((BIT) & 7))
|
||||
#define bitmap_fast_clear_bit(MAP, BIT) (MAP)->bitmap[(BIT) / 8] &= ~ (1 << ((BIT) & 7))
|
||||
#define bitmap_fast_is_set(MAP, BIT) (MAP)->bitmap[(BIT) / 8] & (1 << ((BIT) & 7))
|
||||
#define no_bytes_in_map(map) ((map->bitmap_size + 7)/8)
|
||||
#define no_words_in_map(map) ((map->bitmap_size + 31)/32)
|
||||
#define bytes_word_aligned(bytes) (4*((bytes + 3)/4))
|
||||
#define bitmap_set_bit(MAP, BIT) ((MAP)->bitmap[(BIT) / 8] |= (1 << ((BIT) & 7)))
|
||||
#define bitmap_flip_bit(MAP, BIT) ((MAP)->bitmap[(BIT) / 8] ^= (1 << ((BIT) & 7)))
|
||||
#define bitmap_clear_bit(MAP, BIT) ((MAP)->bitmap[(BIT) / 8] &= ~ (1 << ((BIT) & 7)))
|
||||
#define bitmap_is_set(MAP, BIT) ((MAP)->bitmap[(BIT) / 8] & (1 << ((BIT) & 7)))
|
||||
#define bitmap_cmp(MAP1, MAP2) \
|
||||
(memcmp((MAP1)->bitmap, (MAP2)->bitmap, 4*no_words_in_map((MAP1)))==0)
|
||||
#define bitmap_clear_all(MAP) \
|
||||
memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); \
|
||||
*(MAP)->last_word_ptr|= (MAP)->last_word_mask
|
||||
#define bitmap_set_all(MAP) \
|
||||
(memset((MAP)->bitmap, 0xFF, 4*no_words_in_map((MAP))))
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
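The fast macros above come down to plain byte/bit arithmetic: bit N lives in byte N/8 and is selected by the mask 1 << (N & 7), while no_bytes_in_map and bytes_word_aligned round the size up to whole bytes and whole 32-bit words. A minimal standalone sketch of that arithmetic (illustrative helper names, not the server code):

#include <cstdio>

/* Illustrative only: the same byte/bit arithmetic as the macros above. */
static void set_bit(unsigned char *buf, unsigned bit)
{ buf[bit / 8]|= (unsigned char)(1u << (bit & 7)); }
static void clear_bit(unsigned char *buf, unsigned bit)
{ buf[bit / 8]&= (unsigned char)~(1u << (bit & 7)); }
static bool is_set(const unsigned char *buf, unsigned bit)
{ return (buf[bit / 8] >> (bit & 7)) & 1u; }

int main()
{
  unsigned char map[4]= {0, 0, 0, 0};            /* room for 32 bits */
  set_bit(map, 13);
  set_bit(map, 0);
  clear_bit(map, 0);
  printf("bit 13 = %d, bit 0 = %d\n", is_set(map, 13), is_set(map, 0));
  return 0;
}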
|
||||
|
(File diff suppressed because it is too large)
@ -91,6 +91,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
|
|||
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
|
||||
slave.cc sql_repl.cc rpl_filter.cc \
|
||||
sql_union.cc sql_derived.cc \
|
||||
bitvector.cc \
|
||||
client.c sql_client.cc mini_client_errors.c pack.c\
|
||||
stacktrace.c repl_failsafe.h repl_failsafe.cc \
|
||||
sql_olap.cc sql_view.cc \
|
||||
|
|
sql/bitvector.cc (new file, 347 lines)
|
@ -0,0 +1,347 @@
|
|||
/* -*- Mode: C++ -*-
|
||||
|
||||
Copyright (C) 2005 MySQL AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
#include "mysql_priv.h"
|
||||
#include <bitvector.h>
|
||||
|
||||
void bitvector::create_last_word_mask()
|
||||
{
|
||||
|
||||
/* Get the number of used bits (1..8) in the last byte */
|
||||
unsigned int const used= 1U + ((size()-1U) & 0x7U);
|
||||
|
||||
/*
|
||||
* Create a mask with the upper 'unused' bits set and the lower 'used'
|
||||
* bits clear. The bits within each byte are stored in big-endian order.
|
||||
*/
|
||||
unsigned char const mask= (~((1 << used) - 1)) & 255;
|
||||
unsigned int byte_no= ((bytes()-1)) & ~3U;
|
||||
last_word_ptr= (uint32*)&m_data[byte_no];
|
||||
|
||||
/*
|
||||
The first bytes are to be set to zero since they represent real bits
|
||||
in the bitvector. The last bytes are set to 0xFF since they represent
|
||||
bytes not used by the bitvector. Finally the last byte contains bits
|
||||
as set by the mask above.
|
||||
*/
|
||||
|
||||
unsigned char *ptr= (unsigned char*)&last_word_mask;
|
||||
switch (bytes()&3)
|
||||
{
|
||||
case 1:
|
||||
last_word_mask= ~0U;
|
||||
ptr[0]= mask;
|
||||
return;
|
||||
case 2:
|
||||
last_word_mask= ~0U;
|
||||
ptr[0]= 0;
|
||||
ptr[1]= mask;
|
||||
return;
|
||||
case 3:
|
||||
last_word_mask= 0U;
|
||||
ptr[2]= mask;
|
||||
ptr[3]= 0xFFU;
|
||||
return;
|
||||
case 0:
|
||||
last_word_mask= 0U;
|
||||
ptr[3]= mask;
|
||||
return;
|
||||
}
|
||||
}
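Viewed as a single 32-bit quantity, the mask built above simply has a 1 in every bit of the last word that lies beyond the bitmap's size. A rough standalone sketch of that arithmetic; it matches the byte-wise construction above only on a little-endian layout, which is an assumption here:

#include <cstdio>

/*
  Illustrative sketch (not the server code): for a bitmap of 'bits' bits,
  which bits of the last 32-bit word do NOT belong to the bitmap?  Those
  are the bits create_last_word_mask() forces to 1.
*/
static unsigned last_word_mask_for(unsigned bits)
{
  unsigned used_in_word= ((bits - 1) & 31) + 1;  /* 1..32 real bits in last word */
  if (used_in_word == 32)
    return 0;                                    /* the whole word is real data */
  return ~((1u << used_in_word) - 1);            /* upper (32 - used) bits set */
}

int main()
{
  printf("bits=35 -> mask=0x%08x\n", last_word_mask_for(35));  /* 0xfffffff8 */
  printf("bits=64 -> mask=0x%08x\n", last_word_mask_for(64));  /* 0x00000000 */
  return 0;
}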
|
||||
|
||||
int bitvector::init(size_t size)
|
||||
{
|
||||
DBUG_ASSERT(size < MYSQL_NO_BIT_FOUND);
|
||||
DBUG_ASSERT(size > 0);
|
||||
m_size= size;
|
||||
m_data= (uchar*)sql_alloc(byte_size_word_aligned(size));
|
||||
if (m_data)
|
||||
{
|
||||
create_last_word_mask();
|
||||
clear_all();
|
||||
return FALSE;
|
||||
}
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
uint bitvector::no_bits_set()
|
||||
{
|
||||
uint no_bytes= bytes(), res=0, i;
|
||||
uchar *ptr= m_data;
|
||||
*last_word_ptr^=last_word_mask; //Reset last bits to zero
|
||||
for (i=0; i< no_bytes; i++, ptr++)
|
||||
res+=my_count_bits_ushort(*ptr);
|
||||
*last_word_ptr^=last_word_mask; //Set last bits to one again
|
||||
return res;
|
||||
}
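Because the padding bits of the last word are kept at 1, no_bits_set() XORs the last word with last_word_mask before counting and XORs it back afterwards, so only real bits are counted. A standalone sketch of that trick with made-up values (the byte popcount stands in for my_count_bits_ushort):

#include <cstdio>

/* Portable per-byte popcount, a stand-in for my_count_bits_ushort(). */
static unsigned count_bits(unsigned char b)
{
  unsigned n= 0;
  for (; b; b>>= 1)
    n+= b & 1;
  return n;
}

int main()
{
  unsigned last_word_mask= 0xFFFFFF00;        /* pretend only 8 real bits */
  unsigned last_word= last_word_mask | 0x05;  /* two real bits set */

  last_word^= last_word_mask;                 /* temporarily clear the padding */
  unsigned real_bits= 0;
  unsigned char *p= (unsigned char*)&last_word;
  for (unsigned i= 0; i < 4; i++)
    real_bits+= count_bits(p[i]);
  last_word^= last_word_mask;                 /* restore the padding bits */

  printf("real bits set in last word = %u\n", real_bits);   /* prints 2 */
  return 0;
}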
|
||||
|
||||
uint bitvector::get_first_bit_set()
|
||||
{
|
||||
uchar *byte_ptr;
|
||||
uint32 *data_ptr= (uint32*)data(), bit_found,i,j,k;
|
||||
for (i=0; data_ptr <= last_word_ptr; data_ptr++, i++)
|
||||
{
|
||||
if (*data_ptr)
|
||||
{
|
||||
byte_ptr= (uchar*)data_ptr;
|
||||
for (j=0; j < 4; j++, byte_ptr++)
|
||||
{
|
||||
if (*byte_ptr)
|
||||
{
|
||||
for (k=0; k < 8; k++)
|
||||
{
|
||||
if (*byte_ptr & (1 << k))
|
||||
{
|
||||
bit_found= (i << 5) + (j << 3) + k;
|
||||
if (bit_found == m_size)
|
||||
return MYSQL_NO_BIT_FOUND;
|
||||
else
|
||||
return bit_found;
|
||||
}
|
||||
}
|
||||
DBUG_ASSERT(1);
|
||||
}
|
||||
}
|
||||
DBUG_ASSERT(1);
|
||||
}
|
||||
}
|
||||
return MYSQL_NO_BIT_FOUND;
|
||||
}
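The scan above narrows the search in three steps: skip all-zero 32-bit words, then all-zero bytes, then test individual bits, and it reconstructs the position as word*32 + byte*8 + bit, i.e. (i << 5) + (j << 3) + k. A standalone sketch of the same idea on a plain array (illustrative, not the server code):

#include <cstdio>
#include <cstdint>

/* Rough sketch of the same word -> byte -> bit scan. */
static unsigned find_first_set(const uint32_t *words, unsigned n_words)
{
  for (unsigned i= 0; i < n_words; i++)
  {
    if (!words[i])
      continue;                                 /* skip all-zero words fast */
    const unsigned char *bytes= (const unsigned char*)&words[i];
    for (unsigned j= 0; j < 4; j++)
      if (bytes[j])
        for (unsigned k= 0; k < 8; k++)
          if (bytes[j] & (1u << k))
            return (i << 5) + (j << 3) + k;     /* word*32 + byte*8 + bit */
  }
  return ~0u;                                   /* nothing set */
}

int main()
{
  uint32_t words[3]= {0, 0, 0};
  ((unsigned char*)words)[9]|= 0x20;            /* bit 77 = word 2, byte 1, bit 5 */
  printf("first set bit = %u\n", find_first_set(words, 3));   /* prints 77 */
  return 0;
}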
|
||||
|
||||
uint bitvector::get_first_bit_clear()
|
||||
{
|
||||
uchar *byte_ptr;
|
||||
uint32 *data_ptr= (uint32*)data(), bit_found,i,j,k;
|
||||
for (i=0; data_ptr <= last_word_ptr; data_ptr++, i++)
|
||||
{
|
||||
if (*data_ptr != 0xFFFFFFFF)
|
||||
{
|
||||
byte_ptr= (uchar*)data_ptr;
|
||||
for (j=0; j < 4; j++, byte_ptr++)
|
||||
{
|
||||
if (*byte_ptr != 0xFF)
|
||||
{
|
||||
for (k=0; k < 8; k++)
|
||||
{
|
||||
if (!(*byte_ptr & (1 << k)))
|
||||
{
|
||||
bit_found= (i << 5) + (j << 3) + k;
|
||||
if (bit_found == m_size)
|
||||
return MYSQL_NO_BIT_FOUND;
|
||||
else
|
||||
return bit_found;
|
||||
}
|
||||
}
|
||||
DBUG_ASSERT(1);
|
||||
}
|
||||
}
|
||||
DBUG_ASSERT(1);
|
||||
}
|
||||
}
|
||||
return MYSQL_NO_BIT_FOUND;
|
||||
}
|
||||
|
||||
#ifdef TEST_BITVECTOR
|
||||
uint get_rand_bit(uint bitsize)
|
||||
{
|
||||
return (rand() % bitsize);
|
||||
}
|
||||
|
||||
bool test_set_get_clear_bit(bitvector *bv, uint bitsize)
|
||||
{
|
||||
uint i, test_bit;
|
||||
uint no_loops= bitsize > 128 ? 128 : bitsize;
|
||||
for (i=0; i < no_loops; i++)
|
||||
{
|
||||
test_bit= get_rand_bit(bitsize);
|
||||
bv->set_bit(test_bit);
|
||||
if (!bv->get_bit(test_bit))
|
||||
goto error1;
|
||||
bv->clear_bit(test_bit);
|
||||
if (bv->get_bit(test_bit))
|
||||
goto error2;
|
||||
}
|
||||
return FALSE;
|
||||
error1:
|
||||
printf("Error in set bit, bit %u, bitsize = %u", test_bit, bitsize);
|
||||
return TRUE;
|
||||
error2:
|
||||
printf("Error in clear bit, bit %u, bitsize = %u", test_bit, bitsize);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool test_flip_bit(bitvector *bv, uint bitsize)
|
||||
{
|
||||
uint i, test_bit;
|
||||
uint no_loops= bitsize > 128 ? 128 : bitsize;
|
||||
for (i=0; i < no_loops; i++)
|
||||
{
|
||||
test_bit= get_rand_bit(bitsize);
|
||||
bv->flip_bit(test_bit);
|
||||
if (!bv->get_bit(test_bit))
|
||||
goto error1;
|
||||
bv->flip_bit(test_bit);
|
||||
if (bv->get_bit(test_bit))
|
||||
goto error2;
|
||||
}
|
||||
return FALSE;
|
||||
error1:
|
||||
printf("Error in flip bit 1, bit %u, bitsize = %u", test_bit, bitsize);
|
||||
return TRUE;
|
||||
error2:
|
||||
printf("Error in flip bit 2, bit %u, bitsize = %u", test_bit, bitsize);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool test_operators(bitvector *bv, uint bitsize)
|
||||
{
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
bool test_get_all_bits(bitvector *bv, uint bitsize)
|
||||
{
|
||||
uint i;
|
||||
bv->set_all();
|
||||
if (!bv->get_all_bits_set())
|
||||
goto error1;
|
||||
bv->clear_all();
|
||||
if (!bv->get_all_bits_clear())
|
||||
goto error2;
|
||||
for (i=0; i<bitsize;i++)
|
||||
bv->set_bit(i);
|
||||
if (!bv->get_all_bits_set())
|
||||
goto error3;
|
||||
for (i=0; i<bitsize;i++)
|
||||
bv->clear_bit(i);
|
||||
if (!bv->get_all_bits_clear())
|
||||
goto error4;
|
||||
return FALSE;
|
||||
error1:
|
||||
printf("Error in set_all, bitsize = %u", bitsize);
|
||||
return TRUE;
|
||||
error2:
|
||||
printf("Error in clear_all, bitsize = %u", bitsize);
|
||||
return TRUE;
|
||||
error3:
|
||||
printf("Error in bitwise set all, bitsize = %u", bitsize);
|
||||
return TRUE;
|
||||
error4:
|
||||
printf("Error in bitwise clear all, bitsize = %u", bitsize);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool test_compare_operators(bitvector *bv, uint bitsize)
|
||||
{
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
bool test_count_bits_set(bitvector *bv, uint bitsize)
|
||||
{
|
||||
uint i, bit_count=0, test_bit;
|
||||
uint no_loops= bitsize > 128 ? 128 : bitsize;
|
||||
for (i=0; i < no_loops; i++)
|
||||
{
|
||||
test_bit=get_rand_bit(bitsize);
|
||||
if (!bv->get_bit(test_bit))
|
||||
{
|
||||
bv->set_bit(test_bit);
|
||||
bit_count++;
|
||||
}
|
||||
}
|
||||
if (bit_count==0 && bitsize > 0)
|
||||
goto error1;
|
||||
if (bv->no_bits_set() != bit_count)
|
||||
goto error2;
|
||||
return FALSE;
|
||||
error1:
|
||||
printf("No bits set bitsize = %u", bitsize);
|
||||
return TRUE;
|
||||
error2:
|
||||
printf("Wrong count of bits set, bitsize = %u", bitsize);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool test_get_first_bit(bitvector *bv, uint bitsize)
|
||||
{
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
bool test_get_next_bit(bitvector *bv, uint bitsize)
|
||||
{
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
bool do_test(uint bitsize)
|
||||
{
|
||||
bitvector *bv;
|
||||
bv = new bitvector;
|
||||
bv->init(bitsize);
|
||||
if (test_set_get_clear_bit(bv,bitsize))
|
||||
goto error;
|
||||
bv->clear_all();
|
||||
if (test_flip_bit(bv,bitsize))
|
||||
goto error;
|
||||
bv->clear_all();
|
||||
if (test_operators(bv,bitsize))
|
||||
goto error;
|
||||
bv->clear_all();
|
||||
if (test_get_all_bits(bv, bitsize))
|
||||
goto error;
|
||||
bv->clear_all();
|
||||
if (test_compare_operators(bv,bitsize))
|
||||
goto error;
|
||||
bv->clear_all();
|
||||
if (test_count_bits_set(bv,bitsize))
|
||||
goto error;
|
||||
bv->clear_all();
|
||||
if (test_get_first_bit(bv,bitsize))
|
||||
goto error;
|
||||
bv->clear_all();
|
||||
if (test_get_next_bit(bv,bitsize))
|
||||
goto error;
|
||||
delete bv;
|
||||
return FALSE;
|
||||
error:
|
||||
delete bv;
|
||||
printf("\n");
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
int i;
|
||||
for (i= 1; i < 4096; i++)
|
||||
if (do_test(i))
|
||||
return -1;
|
||||
printf("OK\n");
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
Compile by using the below on a compiled clone
|
||||
|
||||
g++ -DHAVE_CONFIG_H -I. -I. -I.. -I../include -I../regex -I. -I../include
|
||||
-g -fno-omit-frame-pointer -fno-common -felide-constructors -fno-exceptions
|
||||
-fno-rtti -fno-implicit-templates -fno-exceptions -fno-rtti
|
||||
-DUSE_MYSYS_NEW -DDEFINE_CXA_PURE_VIRTUAL -DHAVE_DARWIN_THREADS
|
||||
-D_P1003_1B_VISIBLE -DTEST_BITVECTOR -DSIGNAL_WITH_VIO_CLOSE
|
||||
-DSIGNALS_DONT_BREAK_READ -DIGNORE_SIGHUP_SIGQUIT -o bitvector.o
|
||||
-c bitvector.cc
|
||||
g++ -o bitvector bitvector.o -L../mysys -lmysys -L../dbug -L../strings
|
||||
-lmystrings -ldbug
|
||||
*/
|
||||
#endif
|
sql/bitvector.h (205 lines changed)
|
@ -68,64 +68,28 @@ namespace
|
|||
inlining code.
|
||||
*/
|
||||
|
||||
class bitvector
|
||||
/* Number returned when no bit found */
|
||||
#define MYSQL_NO_BIT_FOUND 1 << 20
|
||||
class bitvector :public Sql_alloc
|
||||
{
|
||||
private:
|
||||
/* Helper classes */
|
||||
struct flip_bit_op
|
||||
{
|
||||
void operator()(byte* p, byte m) { *p^= m; }
|
||||
};
|
||||
|
||||
struct set_bit_op
|
||||
{
|
||||
void operator()(byte* p, byte m) { *p|= m; }
|
||||
};
|
||||
|
||||
struct clear_bit_op
|
||||
{
|
||||
void operator()(byte* p, byte m) { *p&= ~m; }
|
||||
};
|
||||
|
||||
struct test_bit_op
|
||||
{
|
||||
bool operator()(byte* p, byte m) { return *p & m; }
|
||||
};
|
||||
|
||||
/* Compute the number of bytes required to store 'bits' bits in an array. */
|
||||
static inline size_t byte_size(size_t bits)
|
||||
{
|
||||
int const byte_bits = sizeof(byte) * CHAR_BIT;
|
||||
uint const byte_bits = sizeof(byte) * CHAR_BIT;
|
||||
return (bits + (byte_bits-1)) / byte_bits;
|
||||
}
|
||||
|
||||
/* Tidy the last byte (by clearing the unused bits) of the bitvector to make
|
||||
* comparison easy. This code is assuming that we're working with 8-bit
|
||||
* bytes.
|
||||
*/
|
||||
void tidy_last_byte()
|
||||
static inline size_t byte_size_word_aligned(size_t bits)
|
||||
{
|
||||
byte* const last_byte= m_data + bytes() - 1;
|
||||
|
||||
/* Get the number of used bits (1..8) in the last byte */
|
||||
unsigned int const used= 1U + ((size()-1U) & 0x7U);
|
||||
|
||||
/* Create a mask with the upper 'unused' bits clear and the lower 'used'
|
||||
* bits set. The bits within each byte is stored in big-endian order.
|
||||
*/
|
||||
unsigned int const mask= ((1 << used) - 1);
|
||||
|
||||
/* Mask the last byte */
|
||||
*last_byte&= mask;
|
||||
return ((bits + 31) >> 5) << 2;
|
||||
}
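byte_size_word_aligned() rounds the bit count up to a whole number of 32-bit words and returns that size in bytes, which is what the constructors and init() allocate. A small sketch of the two size helpers as hypothetical standalone functions:

#include <cstdio>

/* Hypothetical standalone versions of the two size helpers. */
static unsigned byte_size(unsigned bits)              { return (bits + 7) / 8; }
static unsigned byte_size_word_aligned(unsigned bits) { return ((bits + 31) >> 5) << 2; }

int main()
{
  unsigned sizes[]= {1, 8, 9, 32, 33, 64};
  for (unsigned i= 0; i < 6; i++)
    printf("bits=%2u  bytes=%u  word-aligned bytes=%u\n",
           sizes[i], byte_size(sizes[i]), byte_size_word_aligned(sizes[i]));
  return 0;
}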
|
||||
|
||||
template <class ReturnType, class Func>
|
||||
inline ReturnType apply_to_byte(size_t const pos, Func op) const
|
||||
void create_last_word_mask();
|
||||
|
||||
inline void tidy_last_word()
|
||||
{
|
||||
/* Here I'm assuming that we're working with 8-bit bytes. */
|
||||
ptrdiff_t const byte_pos= pos >> 3;
|
||||
byte const mask= (1 << (pos & 0x7U));
|
||||
return op(&m_data[byte_pos], mask);
|
||||
*last_word_ptr|= last_word_mask;
|
||||
}
|
||||
|
||||
public:
|
||||
|
@ -135,8 +99,11 @@ public:
|
|||
}
|
||||
|
||||
explicit bitvector(size_t size, bool value= false)
|
||||
: m_size(size), m_data(my_malloc(byte_size(size), MYF(0)))
|
||||
: m_size(size),
|
||||
m_data((uchar*)sql_alloc(byte_size_word_aligned(size)))
|
||||
{
|
||||
DBUG_ASSERT(size < MYSQL_NO_BIT_FOUND);
|
||||
create_last_word_mask();
|
||||
if (value)
|
||||
set_all();
|
||||
else
|
||||
|
@ -147,33 +114,39 @@ public:
|
|||
* number of *bits* in the bitvector.
|
||||
*/
|
||||
explicit bitvector(byte const* data, size_t size)
|
||||
: m_size(size), m_data(my_malloc(byte_size(size), MYF(0)))
|
||||
: m_size(size),
|
||||
m_data((uchar*)sql_alloc(byte_size_word_aligned(size)))
|
||||
{
|
||||
/* std::copy(data, data + byte_size(size), m_data); */
|
||||
DBUG_ASSERT(size < MYSQL_NO_BIT_FOUND);
|
||||
create_last_word_mask();
|
||||
memcpy(m_data, data, byte_size(size));
|
||||
tidy_last_byte();
|
||||
tidy_last_word();
|
||||
}
|
||||
|
||||
bitvector(bitvector const& other)
|
||||
: m_size(other.size()), m_data(my_malloc(other.bytes(), MYF(0)))
|
||||
: m_size(other.size()),
|
||||
m_data((uchar*)sql_alloc(other.bytes()))
|
||||
{
|
||||
/* std::copy(other.m_data, other.m_data + other.bytes(), m_data); */
|
||||
DBUG_ASSERT(m_size < MYSQL_NO_BIT_FOUND);
|
||||
create_last_word_mask();
|
||||
memcpy(m_data, other.data(), other.bytes());
|
||||
tidy_last_byte(); /* Just a precaution */
|
||||
tidy_last_word();
|
||||
}
|
||||
|
||||
/* Assignment operator */
|
||||
bitvector& operator=(bitvector other)
|
||||
{
|
||||
swap(other);
|
||||
return *this;
|
||||
}
|
||||
~bitvector() {}
|
||||
|
||||
~bitvector()
|
||||
{
|
||||
if (m_data)
|
||||
my_free(m_data, MYF(0));
|
||||
}
|
||||
/*
|
||||
Allocate memory to the bitvector and create last word mask
|
||||
and clear all bits in the bitvector.
|
||||
*/
|
||||
int init(size_t size);
|
||||
|
||||
/* Get number of bits set in the bitvector */
|
||||
uint no_bits_set();
|
||||
/* Get first bit set/clear in bitvector */
|
||||
uint get_first_bit_set();
|
||||
uint get_first_bit_clear();
|
||||
|
||||
|
||||
/* Swap the guts of this instance with another instance. */
|
||||
void swap(bitvector& other)
|
||||
|
@ -182,8 +155,35 @@ public:
|
|||
my_swap(m_data, other.m_data);
|
||||
}
|
||||
|
||||
bitvector &operator=(const bitvector &rhs)
|
||||
{
|
||||
DBUG_ASSERT(rhs.size() == size());
|
||||
memcpy(m_data, rhs.data(), byte_size_word_aligned(m_size));
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool get_all_bits_set()
|
||||
{
|
||||
uint32 *data_ptr= (uint32*)&m_data[0];
|
||||
for (; data_ptr <= last_word_ptr; data_ptr++)
|
||||
if (*data_ptr != 0xFFFFFFFF)
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool get_all_bits_clear()
|
||||
{
|
||||
uint32 *data_ptr= (uint32*)m_data;
|
||||
if (*last_word_ptr != last_word_mask)
|
||||
return FALSE;
|
||||
for (; data_ptr < last_word_ptr; data_ptr++)
|
||||
if (*data_ptr)
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
/* A pointer to the bytes representing the bits */
|
||||
byte const *data() const { return m_data; }
|
||||
uchar const *data() const { return m_data; }
|
||||
|
||||
/* The size of the data in *bytes* */
|
||||
size_t bytes() const { return byte_size(m_size); }
|
||||
|
@ -194,47 +194,55 @@ public:
|
|||
/* Set all bits in the vector */
|
||||
void set_all()
|
||||
{
|
||||
/* std::fill_n(m_data, bytes(), 255); */
|
||||
memset(m_data, 255, bytes());
|
||||
tidy_last_byte();
|
||||
memset(m_data, 255, byte_size_word_aligned(m_size));
|
||||
}
|
||||
|
||||
/* Set a bit to a value */
|
||||
void set_bit(size_t pos)
|
||||
{
|
||||
apply_to_byte<void>(pos, set_bit_op());
|
||||
DBUG_ASSERT(pos < m_size);
|
||||
m_data[pos>>3]|= (uchar)(1 << (pos & 0x7U));
|
||||
}
|
||||
|
||||
/* Reset (clear) all bits in the vector */
|
||||
void clear_all()
|
||||
{
|
||||
/* std::fill_n(m_data, bytes(), 0); */
|
||||
memset(m_data, 0, bytes());
|
||||
tidy_last_byte();
|
||||
tidy_last_word();
|
||||
}
|
||||
|
||||
/* Reset one bit in the vector */
|
||||
void clear_bit(size_t pos)
|
||||
{
|
||||
apply_to_byte<void>(pos, clear_bit_op());
|
||||
DBUG_ASSERT(pos < m_size);
|
||||
m_data[pos>>3]&= ~(uchar)(1 << (pos & 0x7U));
|
||||
}
|
||||
|
||||
void flip_bit(size_t pos)
|
||||
{
|
||||
apply_to_byte<void>(pos, flip_bit_op());
|
||||
DBUG_ASSERT(pos < m_size);
|
||||
m_data[pos>>3]^= (uchar)(1 << (pos & 0x7U));
|
||||
}
|
||||
|
||||
bool get_bit(size_t pos) const
|
||||
{
|
||||
return apply_to_byte<bool>(pos, test_bit_op());
|
||||
DBUG_ASSERT(pos < m_size);
|
||||
/*
|
||||
!! provides the most effective implementation of conversion to
|
||||
bool
|
||||
*/
|
||||
uchar *byte_word= m_data + (pos >> 3);
|
||||
uchar mask= 1 << (pos & 0x7U);
|
||||
bool ret_value= !!(*byte_word & mask);
|
||||
return ret_value;
|
||||
};
|
||||
|
||||
bool operator==(bitvector const& rhs) const
|
||||
{
|
||||
if (size() != rhs.size())
|
||||
return false;
|
||||
/* This works since I have ensured that the last byte of the array contain
|
||||
* sensible data.
|
||||
/* This works since I have ensured that the last byte of the array
|
||||
* contain sensible data.
|
||||
*/
|
||||
if (memcmp(data(), rhs.data(), bytes()) != 0)
|
||||
return false;
|
||||
|
@ -246,9 +254,52 @@ public:
|
|||
return !(*this == rhs);
|
||||
}
|
||||
|
||||
bitvector &operator&=(bitvector const& rhs)
|
||||
{
|
||||
DBUG_ASSERT(size() == rhs.size());
|
||||
uint32 *data_ptr=(uint32*)data(), *rhs_data_ptr=(uint32*)rhs.data();
|
||||
uint32 *last_ptr= last_word_ptr;
|
||||
for (; data_ptr <= last_ptr; data_ptr++, rhs_data_ptr++)
|
||||
*data_ptr&=*rhs_data_ptr;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bitvector &operator|=(bitvector const& rhs)
|
||||
{
|
||||
DBUG_ASSERT(size() == rhs.size());
|
||||
uint32 *data_ptr=(uint32*)data(), *rhs_data_ptr=(uint32*)rhs.data();
|
||||
uint32 *last_ptr= last_word_ptr;
|
||||
for (; data_ptr <= last_ptr; data_ptr++, rhs_data_ptr++)
|
||||
*data_ptr|=*rhs_data_ptr;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bitvector &operator^=(bitvector const& rhs)
|
||||
{
|
||||
DBUG_ASSERT(size() == rhs.size());
|
||||
uint32 *data_ptr=(uint32*)data(), *rhs_data_ptr=(uint32*)rhs.data();
|
||||
uint32 *last_ptr= last_word_ptr;
|
||||
for (; data_ptr <= last_ptr; data_ptr++, rhs_data_ptr++)
|
||||
*data_ptr^=*rhs_data_ptr;
|
||||
tidy_last_word();
|
||||
return *this;
|
||||
}
|
||||
|
||||
bitvector &operator~()
|
||||
{
|
||||
uint32 *data_ptr= (uint32*)data();
|
||||
uint32 *last_ptr= last_word_ptr;
|
||||
for (; data_ptr <= last_ptr; data_ptr++)
|
||||
*data_ptr^=0xFFFFFFFF;
|
||||
tidy_last_word();
|
||||
return *this;
|
||||
}
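Note the asymmetry between the operators: &= and |= leave the padding bits at 1 on their own (1 & 1 and 1 | 1 are both 1), while ^= and ~ would flip them to 0, so only the latter two call tidy_last_word(). A small sketch with made-up values:

#include <cstdio>

/* Made-up values: the top 24 bits of the word stand in for padding bits. */
int main()
{
  unsigned pad_mask= 0xFFFFFF00;
  unsigned a= pad_mask | 0xF0;
  unsigned b= pad_mask | 0x0F;

  printf("a & b keeps padding set:   0x%08x\n", a & b);
  printf("a | b keeps padding set:   0x%08x\n", a | b);
  printf("a ^ b clears the padding:  0x%08x\n", a ^ b);
  printf("after tidy_last_word:      0x%08x\n", (a ^ b) | pad_mask);
  return 0;
}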
|
||||
|
||||
private:
|
||||
size_t m_size;
|
||||
byte *m_data;
|
||||
uint32 last_word_mask;
|
||||
uchar *m_data;
|
||||
uint32 *last_word_ptr;
|
||||
};
|
||||
|
||||
#endif /* BITVECTOR_H */
|
||||
|
|
|
@ -1198,6 +1198,7 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
|
|||
flags=null_ptr ? 0: NOT_NULL_FLAG;
|
||||
comment.str= (char*) "";
|
||||
comment.length=0;
|
||||
fieldnr= 0;
|
||||
}
|
||||
|
||||
uint Field::offset()
|
||||
|
|
|
@ -86,6 +86,10 @@ public:
|
|||
utype unireg_check;
|
||||
uint32 field_length; // Length of field
|
||||
uint16 flags;
|
||||
/* fieldnr is the id of the field (first field = 1) as is also
|
||||
used in key_part.
|
||||
*/
|
||||
uint16 fieldnr;
|
||||
uchar null_bit; // Bit used to test null bit
|
||||
|
||||
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
|
||||
|
|
|
@ -1075,11 +1075,8 @@ inline uint field_in_record_is_null(TABLE *table,
|
|||
|
||||
int ha_federated::write_row(byte *buf)
|
||||
{
|
||||
uint x= 0, num_fields= 0;
|
||||
uint x, num_fields;
|
||||
Field **field;
|
||||
ulong current_query_id= 1;
|
||||
ulong tmp_query_id= 1;
|
||||
uint all_fields_have_same_query_id= 1;
|
||||
|
||||
char insert_buffer[IO_SIZE];
|
||||
char values_buffer[IO_SIZE], insert_field_value_buffer[IO_SIZE];
|
||||
|
@ -1104,14 +1101,6 @@ int ha_federated::write_row(byte *buf)
|
|||
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
|
||||
table->timestamp_field->set_time();
|
||||
|
||||
/*
|
||||
get the current query id - the fields that we add to the insert
|
||||
statement to send to the foreign will not be appended unless they match
|
||||
this query id
|
||||
*/
|
||||
current_query_id= table->in_use->query_id;
|
||||
DBUG_PRINT("info", ("current query id %d", current_query_id));
|
||||
|
||||
/* start off our string */
|
||||
insert_string.append("INSERT INTO `");
|
||||
insert_string.append(share->table_base_name);
|
||||
|
@ -1119,47 +1108,29 @@ int ha_federated::write_row(byte *buf)
|
|||
/* start both our field and field values strings */
|
||||
insert_string.append(" (");
|
||||
values_string.append(" VALUES (");
|
||||
|
||||
/*
|
||||
Even if one field is different, all_fields_same_query_id can't remain
|
||||
0 if it remains 0, then that means no fields were specified in the query
|
||||
such as in the case of INSERT INTO table VALUES (val1, val2, valN)
|
||||
*/
|
||||
for (field= table->field; *field; field++, x++)
|
||||
{
|
||||
if (x > 0 && tmp_query_id != (*field)->query_id)
|
||||
all_fields_have_same_query_id= 0;
|
||||
|
||||
tmp_query_id= (*field)->query_id;
|
||||
}
|
||||
/*
|
||||
loop through the field pointer array, add any fields to both the values
|
||||
list and the fields list that match the current query id
|
||||
list and the fields list that is part of the write set
|
||||
*/
|
||||
x=0;
|
||||
for (field= table->field; *field; field++, x++)
|
||||
for (num_fields= 0, field= table->field; *field; field++)
|
||||
{
|
||||
/* if there is a query id and if it's equal to the current query id */
|
||||
if (((*field)->query_id && (*field)->query_id == current_query_id)
|
||||
|| all_fields_have_same_query_id)
|
||||
if (ha_get_bit_in_write_set((*field)->fieldnr))
|
||||
{
|
||||
num_fields++;
|
||||
|
||||
if ((*field)->is_null())
|
||||
{
|
||||
DBUG_PRINT("info",
|
||||
("column %d current query id %d field is_null query id %d",
|
||||
x, current_query_id, (*field)->query_id));
|
||||
DBUG_PRINT("info", ("column %d field is_null", num_fields-1));
|
||||
insert_field_value_string.append("NULL");
|
||||
}
|
||||
else
|
||||
{
|
||||
DBUG_PRINT("info",
|
||||
("column %d current query id %d field is not null query ID %d",
|
||||
x, current_query_id, (*field)->query_id));
|
||||
DBUG_PRINT("info", ("column %d field is not null", num_fields-1));
|
||||
(*field)->val_str(&insert_field_value_string);
|
||||
/* quote these fields if they require it */
|
||||
(*field)->quote_data(&insert_field_value_string); }
|
||||
(*field)->quote_data(&insert_field_value_string);
|
||||
}
|
||||
/* append the field name */
|
||||
insert_string.append((*field)->field_name);
|
||||
|
||||
|
|
|
@ -2950,7 +2950,8 @@ build_template(
|
|||
(!(fetch_all_in_key && index_contains_field) &&
|
||||
!(fetch_primary_key_cols &&
|
||||
dict_table_col_in_clustered_key(index->table, i)) &&
|
||||
thd->query_id != field->query_id))) {
|
||||
(!(table->file->ha_get_bit_in_read_set(i+1) ||
|
||||
table->file->ha_get_bit_in_write_set(i+1)))))) {
|
||||
|
||||
/* This field is not needed in the query, skip it */
|
||||
|
||||
|
|
|
@ -165,6 +165,16 @@ class ha_innobase: public handler
|
|||
int transactional_table_lock(THD *thd, int lock_type);
|
||||
int start_stmt(THD *thd);
|
||||
|
||||
int ha_retrieve_all_cols()
|
||||
{
|
||||
ha_set_all_bits_in_read_set();
|
||||
return extra(HA_EXTRA_RETRIEVE_ALL_COLS);
|
||||
}
|
||||
int ha_retrieve_all_pk()
|
||||
{
|
||||
ha_set_primary_key_in_read_set();
|
||||
return extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
|
||||
}
|
||||
void position(byte *record);
|
||||
ha_rows records_in_range(uint inx, key_range *min_key, key_range
|
||||
*max_key);
|
||||
|
|
|
@ -860,12 +860,10 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
|
|||
/*
|
||||
Check if any set or get of blob value in current query.
|
||||
*/
|
||||
bool ha_ndbcluster::uses_blob_value(bool all_fields)
|
||||
bool ha_ndbcluster::uses_blob_value()
|
||||
{
|
||||
if (table->s->blob_fields == 0)
|
||||
return FALSE;
|
||||
if (all_fields)
|
||||
return TRUE;
|
||||
{
|
||||
uint no_fields= table->s->fields;
|
||||
int i;
|
||||
|
@ -874,7 +872,8 @@ bool ha_ndbcluster::uses_blob_value(bool all_fields)
|
|||
for (i= no_fields - 1; i >= 0; i--)
|
||||
{
|
||||
Field *field= table->field[i];
|
||||
if (thd->query_id == field->query_id)
|
||||
if ((m_write_op && ha_get_bit_in_write_set(i+1)) ||
|
||||
(!m_write_op && ha_get_bit_in_read_set(i+1)))
|
||||
{
|
||||
return TRUE;
|
||||
}
|
||||
|
@ -1150,7 +1149,7 @@ int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
|
|||
{
|
||||
if (type >= TL_WRITE_ALLOW_WRITE)
|
||||
return NdbOperation::LM_Exclusive;
|
||||
else if (uses_blob_value(m_retrieve_all_fields))
|
||||
else if (uses_blob_value())
|
||||
return NdbOperation::LM_Read;
|
||||
else
|
||||
return NdbOperation::LM_CommittedRead;
|
||||
|
@ -1328,9 +1327,8 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
|
|||
for (i= 0; i < table->s->fields; i++)
|
||||
{
|
||||
Field *field= table->field[i];
|
||||
if ((thd->query_id == field->query_id) ||
|
||||
((field->flags & PRI_KEY_FLAG)) ||
|
||||
m_retrieve_all_fields)
|
||||
if (ha_get_bit_in_read_set(i+1) ||
|
||||
((field->flags & PRI_KEY_FLAG)))
|
||||
{
|
||||
if (get_ndb_value(op, field, i, buf))
|
||||
ERR_RETURN(op->getNdbError());
|
||||
|
@ -1371,6 +1369,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
|
|||
DBUG_ENTER("pk_read");
|
||||
DBUG_PRINT("enter", ("key_len: %u", key_len));
|
||||
DBUG_DUMP("key", (char*)key, key_len);
|
||||
m_write_op= FALSE;
|
||||
|
||||
NdbOperation::LockMode lm=
|
||||
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
|
||||
|
@ -1422,10 +1421,13 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
|
|||
NdbOperation *op;
|
||||
THD *thd= current_thd;
|
||||
DBUG_ENTER("complemented_pk_read");
|
||||
m_write_op= FALSE;
|
||||
|
||||
if (m_retrieve_all_fields)
|
||||
if (ha_get_all_bit_in_read_set())
|
||||
{
|
||||
// We have already retrieved all fields, nothing to complement
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
NdbOperation::LockMode lm=
|
||||
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
|
||||
|
@ -1442,7 +1444,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
|
|||
{
|
||||
Field *field= table->field[i];
|
||||
if (!((field->flags & PRI_KEY_FLAG) ||
|
||||
(thd->query_id == field->query_id)))
|
||||
(ha_get_bit_in_read_set(i+1))))
|
||||
{
|
||||
if (get_ndb_value(op, field, i, new_data))
|
||||
ERR_RETURN(trans->getNdbError());
|
||||
|
@ -1466,7 +1468,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
|
|||
{
|
||||
Field *field= table->field[i];
|
||||
if (!((field->flags & PRI_KEY_FLAG) ||
|
||||
(thd->query_id == field->query_id)))
|
||||
(ha_get_bit_in_read_set(i+1))))
|
||||
{
|
||||
m_value[i].ptr= NULL;
|
||||
}
|
||||
|
@ -1844,6 +1846,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
|
|||
DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d",
|
||||
active_index, sorted, descending));
|
||||
DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname));
|
||||
m_write_op= FALSE;
|
||||
|
||||
// Check that sorted seems to be initialised
|
||||
DBUG_ASSERT(sorted == 0 || sorted == 1);
|
||||
|
@ -1903,6 +1906,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
|
|||
|
||||
DBUG_ENTER("full_table_scan");
|
||||
DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
|
||||
m_write_op= FALSE;
|
||||
|
||||
NdbOperation::LockMode lm=
|
||||
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
|
||||
|
@ -1932,6 +1936,7 @@ int ha_ndbcluster::write_row(byte *record)
|
|||
NdbOperation *op;
|
||||
int res;
|
||||
THD *thd= current_thd;
|
||||
m_write_op= TRUE;
|
||||
|
||||
DBUG_ENTER("write_row");
|
||||
|
||||
|
@ -2120,13 +2125,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
|
|||
NdbOperation *op;
|
||||
uint i;
|
||||
DBUG_ENTER("update_row");
|
||||
m_write_op= TRUE;
|
||||
|
||||
statistic_increment(thd->status_var.ha_update_count, &LOCK_status);
|
||||
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
|
||||
{
|
||||
table->timestamp_field->set_time();
|
||||
// Set query_id so that field is really updated
|
||||
table->timestamp_field->query_id= thd->query_id;
|
||||
ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
|
||||
}
|
||||
|
||||
/* Check for update of primary key for special handling */
|
||||
|
@ -2186,7 +2191,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
|
|||
if (!(op= cursor->updateCurrentTuple()))
|
||||
ERR_RETURN(trans->getNdbError());
|
||||
m_ops_pending++;
|
||||
if (uses_blob_value(FALSE))
|
||||
if (uses_blob_value())
|
||||
m_blobs_pending= TRUE;
|
||||
}
|
||||
else
|
||||
|
@ -2224,7 +2229,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
|
|||
for (i= 0; i < table->s->fields; i++)
|
||||
{
|
||||
Field *field= table->field[i];
|
||||
if (((thd->query_id == field->query_id) || m_retrieve_all_fields) &&
|
||||
if (ha_get_bit_in_write_set(i+1) &&
|
||||
(!(field->flags & PRI_KEY_FLAG)) &&
|
||||
set_ndb_value(op, field, i))
|
||||
ERR_RETURN(op->getNdbError());
|
||||
|
@ -2251,6 +2256,7 @@ int ha_ndbcluster::delete_row(const byte *record)
|
|||
NdbScanOperation* cursor= m_active_cursor;
|
||||
NdbOperation *op;
|
||||
DBUG_ENTER("delete_row");
|
||||
m_write_op= TRUE;
|
||||
|
||||
statistic_increment(thd->status_var.ha_delete_count,&LOCK_status);
|
||||
m_rows_changed++;
|
||||
|
@ -2515,6 +2521,7 @@ int ha_ndbcluster::index_read(byte *buf,
|
|||
int error;
|
||||
ndb_index_type type= get_index_type(active_index);
|
||||
const KEY* key_info= table->key_info+active_index;
|
||||
m_write_op= FALSE;
|
||||
switch (type){
|
||||
case PRIMARY_KEY_ORDERED_INDEX:
|
||||
case PRIMARY_KEY_INDEX:
|
||||
|
@ -2681,6 +2688,7 @@ int ha_ndbcluster::read_range_first(const key_range *start_key,
|
|||
{
|
||||
byte* buf= table->record[0];
|
||||
DBUG_ENTER("ha_ndbcluster::read_range_first");
|
||||
m_write_op= FALSE;
|
||||
|
||||
DBUG_RETURN(read_range_first_to_buf(start_key,
|
||||
end_key,
|
||||
|
@ -2902,83 +2910,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
|
|||
{
|
||||
DBUG_ENTER("extra");
|
||||
switch (operation) {
|
||||
case HA_EXTRA_NORMAL: /* Optimize for space (def) */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NORMAL"));
|
||||
break;
|
||||
case HA_EXTRA_QUICK: /* Optimize for speed */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_QUICK"));
|
||||
break;
|
||||
case HA_EXTRA_RESET: /* Reset database to after open */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_RESET"));
|
||||
DBUG_PRINT("info", ("Clearing condition stack"));
|
||||
cond_clear();
|
||||
break;
|
||||
case HA_EXTRA_CACHE: /* Cache record in HA_rrnd() */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_CACHE"));
|
||||
break;
|
||||
case HA_EXTRA_NO_CACHE: /* End cacheing of records (def) */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_CACHE"));
|
||||
break;
|
||||
case HA_EXTRA_NO_READCHECK: /* No readcheck on update */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_READCHECK"));
|
||||
break;
|
||||
case HA_EXTRA_READCHECK: /* Use readcheck (def) */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_READCHECK"));
|
||||
break;
|
||||
case HA_EXTRA_KEYREAD: /* Read only key to database */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD"));
|
||||
break;
|
||||
case HA_EXTRA_NO_KEYREAD: /* Normal read of records (def) */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_KEYREAD"));
|
||||
break;
|
||||
case HA_EXTRA_NO_USER_CHANGE: /* No user is allowed to write */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_USER_CHANGE"));
|
||||
break;
|
||||
case HA_EXTRA_KEY_CACHE:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_KEY_CACHE"));
|
||||
break;
|
||||
case HA_EXTRA_NO_KEY_CACHE:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_KEY_CACHE"));
|
||||
break;
|
||||
case HA_EXTRA_WAIT_LOCK: /* Wait until file is available (def) */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_WAIT_LOCK"));
|
||||
break;
|
||||
case HA_EXTRA_NO_WAIT_LOCK: /* If file is locked, return quickly */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_WAIT_LOCK"));
|
||||
break;
|
||||
case HA_EXTRA_WRITE_CACHE: /* Use write cache in ha_write() */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CACHE"));
|
||||
break;
|
||||
case HA_EXTRA_FLUSH_CACHE: /* flush write_record_cache */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_FLUSH_CACHE"));
|
||||
break;
|
||||
case HA_EXTRA_NO_KEYS: /* Remove all update of keys */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_KEYS"));
|
||||
break;
|
||||
case HA_EXTRA_KEYREAD_CHANGE_POS: /* Keyread, but change pos */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_CHANGE_POS")); /* xxxxchk -r must be used */
|
||||
break;
|
||||
case HA_EXTRA_REMEMBER_POS: /* Remember pos for next/prev */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_REMEMBER_POS"));
|
||||
break;
|
||||
case HA_EXTRA_RESTORE_POS:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_RESTORE_POS"));
|
||||
break;
|
||||
case HA_EXTRA_REINIT_CACHE: /* init cache from current record */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_REINIT_CACHE"));
|
||||
break;
|
||||
case HA_EXTRA_FORCE_REOPEN: /* Datafile have changed on disk */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_FORCE_REOPEN"));
|
||||
break;
|
||||
case HA_EXTRA_FLUSH: /* Flush tables to disk */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_FLUSH"));
|
||||
break;
|
||||
case HA_EXTRA_NO_ROWS: /* Don't write rows */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_NO_ROWS"));
|
||||
break;
|
||||
case HA_EXTRA_RESET_STATE: /* Reset positions */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_RESET_STATE"));
|
||||
break;
|
||||
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
|
||||
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
|
||||
if (current_thd->lex->sql_command == SQLCOM_REPLACE)
|
||||
|
@ -2997,34 +2933,6 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
|
|||
m_use_write= FALSE;
|
||||
m_ignore_dup_key= FALSE;
|
||||
break;
|
||||
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
|
||||
where field->query_id is the same as
|
||||
the current query id */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS"));
|
||||
m_retrieve_all_fields= TRUE;
|
||||
break;
|
||||
case HA_EXTRA_PREPARE_FOR_DELETE:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_DELETE"));
|
||||
break;
|
||||
case HA_EXTRA_PREPARE_FOR_UPDATE: /* Remove read cache if problems */
|
||||
DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_UPDATE"));
|
||||
break;
|
||||
case HA_EXTRA_PRELOAD_BUFFER_SIZE:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_PRELOAD_BUFFER_SIZE"));
|
||||
break;
|
||||
case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
|
||||
m_retrieve_primary_key= TRUE;
|
||||
break;
|
||||
case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
|
||||
break;
|
||||
case HA_EXTRA_CHANGE_KEY_TO_DUP:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP"));
|
||||
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
|
||||
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS"));
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
DBUG_RETURN(0);
|
||||
|
@ -3296,8 +3204,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
|
|||
DBUG_ASSERT(m_active_trans);
|
||||
// Start of transaction
|
||||
m_rows_changed= 0;
|
||||
m_retrieve_all_fields= FALSE;
|
||||
m_retrieve_primary_key= FALSE;
|
||||
m_ops_pending= 0;
|
||||
{
|
||||
NDBDICT *dict= ndb->getDictionary();
|
||||
|
@ -3433,8 +3339,6 @@ int ha_ndbcluster::start_stmt(THD *thd)
|
|||
m_active_trans= trans;
|
||||
|
||||
// Start of statement
|
||||
m_retrieve_all_fields= FALSE;
|
||||
m_retrieve_primary_key= FALSE;
|
||||
m_ops_pending= 0;
|
||||
|
||||
DBUG_RETURN(error);
|
||||
|
@ -4224,8 +4128,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
|
|||
m_use_write(FALSE),
|
||||
m_ignore_dup_key(FALSE),
|
||||
m_primary_key_update(FALSE),
|
||||
m_retrieve_all_fields(FALSE),
|
||||
m_retrieve_primary_key(FALSE),
|
||||
m_rows_to_insert((ha_rows) 1),
|
||||
m_rows_inserted((ha_rows) 0),
|
||||
m_bulk_insert_rows((ha_rows) 1024),
|
||||
|
@ -5546,6 +5448,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
|
|||
HANDLER_BUFFER *buffer)
|
||||
{
|
||||
DBUG_ENTER("ha_ndbcluster::read_multi_range_first");
|
||||
m_write_op= FALSE;
|
||||
|
||||
int res;
|
||||
KEY* key_info= table->key_info + active_index;
|
||||
|
@ -5553,7 +5456,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
|
|||
ulong reclength= table->s->reclength;
|
||||
NdbOperation* op;
|
||||
|
||||
if (uses_blob_value(m_retrieve_all_fields))
|
||||
if (uses_blob_value())
|
||||
{
|
||||
/**
|
||||
* blobs can't be batched currently
|
||||
|
|
|
@ -560,7 +560,7 @@ private:
|
|||
ulonglong get_auto_increment();
|
||||
void invalidate_dictionary_cache(bool global);
|
||||
int ndb_err(NdbTransaction*);
|
||||
bool uses_blob_value(bool all_fields);
|
||||
bool uses_blob_value();
|
||||
|
||||
char *update_table_comment(const char * comment);
|
||||
|
||||
|
@ -611,8 +611,7 @@ private:
|
|||
bool m_use_write;
|
||||
bool m_ignore_dup_key;
|
||||
bool m_primary_key_update;
|
||||
bool m_retrieve_all_fields;
|
||||
bool m_retrieve_primary_key;
|
||||
bool m_write_op;
|
||||
ha_rows m_rows_to_insert;
|
||||
ha_rows m_rows_inserted;
|
||||
ha_rows m_bulk_insert_rows;
|
||||
|
|
sql/handler.cc (132 lines changed)
|
@ -194,54 +194,67 @@ enum db_type ha_checktype(enum db_type database_type)
|
|||
|
||||
handler *get_new_handler(TABLE *table, enum db_type db_type)
|
||||
{
|
||||
handler *file;
|
||||
switch (db_type) {
|
||||
#ifndef NO_HASH
|
||||
case DB_TYPE_HASH:
|
||||
return new ha_hash(table);
|
||||
file= new ha_hash(table);
|
||||
#endif
|
||||
#ifdef HAVE_ISAM
|
||||
case DB_TYPE_MRG_ISAM:
|
||||
return new ha_isammrg(table);
|
||||
file= new ha_isammrg(table);
|
||||
break;
|
||||
case DB_TYPE_ISAM:
|
||||
return new ha_isam(table);
|
||||
file= new ha_isam(table);
|
||||
break;
|
||||
#else
|
||||
case DB_TYPE_MRG_ISAM:
|
||||
return new ha_myisammrg(table);
|
||||
file= new ha_myisammrg(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_BERKELEY_DB
|
||||
case DB_TYPE_BERKELEY_DB:
|
||||
return new ha_berkeley(table);
|
||||
file= new ha_berkeley(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_INNOBASE_DB
|
||||
case DB_TYPE_INNODB:
|
||||
return new ha_innobase(table);
|
||||
file= new ha_innobase(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_EXAMPLE_DB
|
||||
case DB_TYPE_EXAMPLE_DB:
|
||||
return new ha_example(table);
|
||||
file= new ha_example(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_ARCHIVE_DB
|
||||
case DB_TYPE_ARCHIVE_DB:
|
||||
return new ha_archive(table);
|
||||
file= new ha_archive(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_BLACKHOLE_DB
|
||||
case DB_TYPE_BLACKHOLE_DB:
|
||||
return new ha_blackhole(table);
|
||||
file= new ha_blackhole(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_FEDERATED_DB
|
||||
case DB_TYPE_FEDERATED_DB:
|
||||
return new ha_federated(table);
|
||||
file= new ha_federated(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_CSV_DB
|
||||
case DB_TYPE_CSV_DB:
|
||||
return new ha_tina(table);
|
||||
file= new ha_tina(table);
|
||||
break;
|
||||
#endif
|
||||
#ifdef HAVE_NDBCLUSTER_DB
|
||||
case DB_TYPE_NDBCLUSTER:
|
||||
return new ha_ndbcluster(table);
|
||||
file= new ha_ndbcluster(table);
|
||||
break;
|
||||
#endif
|
||||
case DB_TYPE_HEAP:
|
||||
return new ha_heap(table);
|
||||
file= new ha_heap(table);
|
||||
break;
|
||||
default: // should never happen
|
||||
{
|
||||
enum db_type def=(enum db_type) current_thd->variables.table_type;
|
||||
|
@ -251,10 +264,21 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
|
|||
}
|
||||
/* Fall back to MyISAM */
|
||||
case DB_TYPE_MYISAM:
|
||||
return new ha_myisam(table);
|
||||
file= new ha_myisam(table);
|
||||
break;
|
||||
case DB_TYPE_MRG_MYISAM:
|
||||
return new ha_myisammrg(table);
|
||||
file= new ha_myisammrg(table);
|
||||
break;
|
||||
}
|
||||
if (file)
|
||||
{
|
||||
if (file->ha_initialise())
|
||||
{
|
||||
delete file;
|
||||
file=0;
|
||||
}
|
||||
}
|
||||
return file;
|
||||
}
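The refactor above replaces the per-case early returns with a single exit point, so every freshly constructed handler passes through ha_initialise() and is deleted again if that fails. A minimal sketch of the pattern (illustrative types, not the server code; ha_initialise() returning false means success, as in the patch):

#include <cstdio>

/* Illustrative types only; false from ha_initialise() means success. */
struct Handler
{
  bool ha_initialise() { return false; }
};

static Handler *get_new_handler_sketch()
{
  Handler *file= new Handler();          /* previously: return new Handler(); */
  if (file && file->ha_initialise())
  {
    delete file;                         /* initialisation failed: don't leak */
    file= 0;
  }
  return file;
}

int main()
{
  Handler *h= get_new_handler_sketch();
  printf("handler %s\n", h ? "created" : "not created");
  delete h;
  return 0;
}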
|
||||
|
||||
/*
|
||||
|
@ -1320,6 +1344,84 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
|
|||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
int handler::ha_initialise()
|
||||
{
|
||||
DBUG_ENTER("ha_initialise");
|
||||
if (table && table->s->fields &&
|
||||
ha_allocate_read_write_set(table->s->fields))
|
||||
{
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
int handler::ha_allocate_read_write_set(ulong no_fields)
|
||||
{
|
||||
uint bitmap_size= 4*(((no_fields+1)+31)/32);
|
||||
uchar *read_buf, *write_buf;
|
||||
DBUG_ENTER("ha_allocate_read_write_set");
|
||||
DBUG_PRINT("info", ("no_fields = %d", no_fields));
|
||||
read_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
|
||||
write_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
|
||||
read_buf= (uchar*)sql_alloc(bitmap_size);
|
||||
write_buf= (uchar*)sql_alloc(bitmap_size);
|
||||
DBUG_ASSERT(!bitmap_init(read_set, read_buf, (no_fields+1), FALSE));
|
||||
DBUG_ASSERT(!bitmap_init(write_set, write_buf, (no_fields+1), FALSE));
|
||||
if (!read_set || !write_set || !read_buf || !write_buf)
|
||||
{
|
||||
ha_deallocate_read_write_set();
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
ha_clear_all_set();
|
||||
DBUG_RETURN(FALSE);
|
||||
}
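Field numbers start at 1 (see the fieldnr comment added to sql/field.h above), so the sets need no_fields + 1 bits; the allocation rounds that up to whole 32-bit words. A sketch of the size calculation:

#include <cstdio>

/* Fields are numbered from 1, so the sets cover no_fields + 1 bits. */
static unsigned read_write_set_bytes(unsigned no_fields)
{
  return 4 * (((no_fields + 1) + 31) / 32);
}

int main()
{
  unsigned field_counts[]= {1, 31, 32, 100};
  for (unsigned i= 0; i < 4; i++)
    printf("fields=%3u -> %u bytes per set\n",
           field_counts[i], read_write_set_bytes(field_counts[i]));
  return 0;
}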
|
||||
|
||||
void handler::ha_deallocate_read_write_set()
|
||||
{
|
||||
DBUG_ENTER("ha_deallocate_read_write_set");
|
||||
read_set=write_set=0;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
void handler::ha_clear_all_set()
|
||||
{
|
||||
DBUG_ENTER("ha_clear_all_set");
|
||||
bitmap_clear_all(read_set);
|
||||
bitmap_clear_all(write_set);
|
||||
bitmap_set_bit(read_set, 0);
|
||||
bitmap_set_bit(write_set, 0);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
int handler::ha_retrieve_all_cols()
|
||||
{
|
||||
DBUG_ENTER("handler::ha_retrieve_all_cols");
|
||||
bitmap_set_all(read_set);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
int handler::ha_retrieve_all_pk()
|
||||
{
|
||||
DBUG_ENTER("ha_retrieve_all_pk");
|
||||
ha_set_primary_key_in_read_set();
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
void handler::ha_set_primary_key_in_read_set()
|
||||
{
|
||||
ulong prim_key= table->s->primary_key;
|
||||
DBUG_ENTER("handler::ha_set_primary_key_in_read_set");
|
||||
DBUG_PRINT("info", ("Primary key = %d", prim_key));
|
||||
if (prim_key != MAX_KEY)
|
||||
{
|
||||
KEY_PART_INFO *key_part= table->key_info[prim_key].key_part;
|
||||
KEY_PART_INFO *key_part_end= key_part +
|
||||
table->key_info[prim_key].key_parts;
|
||||
for (;key_part != key_part_end; ++key_part)
|
||||
ha_set_bit_in_read_set(key_part->fieldnr);
|
||||
}
|
||||
DBUG_VOID_RETURN;
|
||||
}
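The loop above just turns a "retrieve primary key" request into ordinary read_set bits, one per key part. A simplified sketch (types reduced to the bare minimum, field numbers hypothetical):

#include <cstdio>

/* Types reduced to the bare minimum; field numbers are hypothetical. */
struct KeyPart { unsigned fieldnr; };

int main()
{
  KeyPart pk_parts[]= {{1}, {3}};        /* a hypothetical two-part primary key */
  bool read_set[8]= {false};
  for (unsigned i= 0; i < 2; i++)
    read_set[pk_parts[i].fieldnr]= true; /* ha_set_bit_in_read_set(fieldnr) */
  for (unsigned fieldnr= 0; fieldnr < 8; fieldnr++)
    if (read_set[fieldnr])
      printf("field %u will be read\n", fieldnr);
  return 0;
}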
|
||||
/*
|
||||
Read first row (only) from a table
|
||||
This is never called for InnoDB or BDB tables, as these table types
|
||||
|
|
sql/handler.h (142 lines changed)
|
@ -23,6 +23,7 @@
|
|||
|
||||
#include <ft_global.h>
|
||||
#include <keycache.h>
|
||||
#include <bitvector.h>
|
||||
|
||||
#ifndef NO_HASH
|
||||
#define NO_HASH /* Not yet implemented */
|
||||
|
@ -442,6 +443,8 @@ class handler :public Sql_alloc
|
|||
virtual int rnd_init(bool scan) =0;
|
||||
virtual int rnd_end() { return 0; }
|
||||
|
||||
private:
|
||||
virtual int reset() { return extra(HA_EXTRA_RESET); }
|
||||
public:
|
||||
byte *ref; /* Pointer to current row */
|
||||
byte *dupp_ref; /* Pointer to dupp row */
|
||||
|
@ -483,6 +486,8 @@ public:
|
|||
bool auto_increment_column_changed;
|
||||
bool implicit_emptied; /* Can be !=0 only if HEAP */
|
||||
const COND *pushed_cond;
|
||||
MY_BITMAP *read_set;
|
||||
MY_BITMAP *write_set;
|
||||
|
||||
handler(TABLE *table_arg) :table(table_arg),
|
||||
ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
|
||||
|
@ -494,7 +499,12 @@ public:
|
|||
raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0),
|
||||
pushed_cond(NULL)
|
||||
{}
|
||||
virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
|
||||
virtual ~handler(void)
|
||||
{
|
||||
ha_deallocate_read_write_set();
|
||||
/* TODO: DBUG_ASSERT(inited == NONE); */
|
||||
}
|
||||
virtual int ha_initialise();
|
||||
int ha_open(const char *name, int mode, int test_if_locked);
|
||||
void update_auto_increment();
|
||||
virtual void print_error(int error, myf errflag);
|
||||
|
@ -554,11 +564,140 @@ public:
|
|||
inited=NONE;
|
||||
DBUG_RETURN(rnd_end());
|
||||
}
|
||||
int ha_reset()
|
||||
{
|
||||
DBUG_ENTER("ha_reset");
|
||||
ha_clear_all_set();
|
||||
DBUG_RETURN(reset());
|
||||
}
|
||||
|
||||
/* this is necessary in many places, e.g. in HANDLER command */
|
||||
int ha_index_or_rnd_end()
|
||||
{
|
||||
return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
|
||||
}
|
||||
/*
|
||||
These are a set of routines used to enable handlers to only read/write
|
||||
partial lists of the fields in the table. The bit vector is maintained
|
||||
by the server part and is used by the handler at calls to read/write
|
||||
data in the table.
|
||||
It replaces the use of query id's for this purpose. The benefit is that
|
||||
the handler can also set bits in the read/write set if it has special
|
||||
needs and it is also easy for other parts of the server to interact
|
||||
with the handler (e.g. the replication part for row-level logging).
|
||||
The routines are all part of the general handler and are not possible
|
||||
to override by a handler. A handler can however set/reset bits by
|
||||
calling these routines.
|
||||
|
||||
The methods ha_retrieve_all_cols and ha_retrieve_all_pk are made
|
||||
virtual to handle InnoDB specifics. If InnoDB doesn't need the
|
||||
extra parameters HA_EXTRA_RETRIEVE_ALL_COLS and
|
||||
HA_EXTRA_RETRIEVE_PRIMARY_KEY anymore then these methods need not be
|
||||
virtual anymore.
|
||||
*/
|
||||
virtual int ha_retrieve_all_cols();
|
||||
virtual int ha_retrieve_all_pk();
|
||||
void ha_set_all_bits_in_read_set()
|
||||
{
|
||||
DBUG_ENTER("ha_set_all_bits_in_read_set");
|
||||
bitmap_set_all(read_set);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_set_all_bits_in_write_set()
|
||||
{
|
||||
DBUG_ENTER("ha_set_all_bits_in_write_set");
|
||||
bitmap_set_all(write_set);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_set_bit_in_read_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_set_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_set_bit(read_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_clear_bit_in_read_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_clear_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_clear_bit(read_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_set_bit_in_write_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_set_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_set_bit(write_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_clear_bit_in_write_set(uint fieldnr)
|
||||
{
|
||||
DBUG_ENTER("ha_clear_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
|
||||
bitmap_clear_bit(write_set, fieldnr);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
void ha_set_bit_in_rw_set(uint fieldnr, bool write_op)
|
||||
{
|
||||
DBUG_ENTER("ha_set_bit_in_rw_set");
|
||||
DBUG_PRINT("info", ("Set bit %u in read set", fieldnr));
|
||||
bitmap_set_bit(read_set, fieldnr);
|
||||
if (!write_op) {
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
else
|
||||
{
|
||||
DBUG_PRINT("info", ("Set bit %u in read and write set", fieldnr));
|
||||
bitmap_set_bit(write_set, fieldnr);
|
||||
}
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
bool ha_get_bit_in_read_set(uint fieldnr)
|
||||
{
|
||||
bool bit_set=bitmap_is_set(read_set,fieldnr);
|
||||
DBUG_ENTER("ha_get_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
|
||||
DBUG_RETURN(bit_set);
|
||||
}
|
||||
bool ha_get_bit_in_write_set(uint fieldnr)
|
||||
{
|
||||
bool bit_set=bitmap_is_set(write_set,fieldnr);
|
||||
DBUG_ENTER("ha_get_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
|
||||
DBUG_RETURN(bit_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_read_set()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_set_all(read_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_read_set");
|
||||
DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_read_clear()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_clear_all(read_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_read_clear");
|
||||
DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_write_set()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_set_all(write_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_write_set");
|
||||
DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
bool ha_get_all_bit_in_write_clear()
|
||||
{
|
||||
bool all_bits_set= bitmap_is_clear_all(write_set);
|
||||
DBUG_ENTER("ha_get_all_bit_in_write_clear");
|
||||
DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
|
||||
DBUG_RETURN(all_bits_set);
|
||||
}
|
||||
void ha_set_primary_key_in_read_set();
|
||||
int ha_allocate_read_write_set(ulong no_fields);
|
||||
void ha_deallocate_read_write_set();
|
||||
void ha_clear_all_set();
|
||||
uint get_index(void) const { return active_index; }
|
||||
virtual int open(const char *name, int mode, uint test_if_locked)=0;
|
||||
virtual int close(void)=0;
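As a usage sketch of the read/write set routines declared above: a storage engine asks ha_get_bit_in_write_set(fieldnr) to decide which columns it must actually send, as the federated and NDB changes in this commit do. The snippet below imitates that with a stand-in bitmap (all names hypothetical, not server code):

#include <cstdio>

/* All names hypothetical; a plain bool array stands in for the MY_BITMAP. */
struct FakeHandler
{
  bool write_set[8];
  bool ha_get_bit_in_write_set(unsigned fieldnr) { return write_set[fieldnr]; }
};

int main()
{
  /* Bit 0 reserved, fields numbered from 1: only "id" and "email" were assigned. */
  FakeHandler h= {{false, true, false, true, false, false, false, false}};
  const char *names[]= {"", "id", "name", "email"};
  for (unsigned fieldnr= 1; fieldnr <= 3; fieldnr++)
    if (h.ha_get_bit_in_write_set(fieldnr))
      printf("send column %s in the INSERT/UPDATE\n", names[fieldnr]);
  return 0;
}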
|
||||
|
@ -696,7 +835,6 @@ public:
|
|||
{ return 0; }
|
||||
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
|
||||
{ return extra(operation); }
|
||||
virtual int reset() { return extra(HA_EXTRA_RESET); }
|
||||
virtual int external_lock(THD *thd, int lock_type) { return 0; }
|
||||
virtual void unlock_row() {}
|
||||
virtual int start_stmt(THD *thd) {return 0;}
|
||||
|
|
sql/item.cc (17 lines changed)
|
@@ -3000,13 +3000,18 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference)

      set_field(from_field);
    }
  else if (thd->set_query_id && field->query_id != thd->query_id)
  else if (thd->set_query_id)
  {
    /* We only come here in unions */
    TABLE *table=field->table;
    field->query_id=thd->query_id;
    table->used_fields++;
    table->used_keys.intersect(field->part_of_key);
    TABLE *table= field->table;
    table->file->ha_set_bit_in_rw_set(field->fieldnr,
                                      (bool)(thd->set_query_id-1));
    if (field->query_id != thd->query_id)
    {
      /* We only come here in unions */
      field->query_id=thd->query_id;
      table->used_fields++;
      table->used_keys.intersect(field->part_of_key);
    }
  }
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  if (any_privileges)
@@ -180,7 +180,6 @@ static int lock_external(THD *thd, TABLE **tables, uint count)
        ((*tables)->reginfo.lock_type >= TL_READ &&
         (*tables)->reginfo.lock_type <= TL_READ_NO_INSERT))
      lock_type=F_RDLCK;

    if ((error=(*tables)->file->external_lock(thd,lock_type)))
    {
      print_lock_error(error, (*tables)->file->table_type());
@@ -889,7 +889,7 @@ bool setup_tables(THD *thd, TABLE_LIST *tables, Item **conds,
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
               List<Item> *sum_func_list, uint wild_num);
bool setup_fields(THD *thd, Item** ref_pointer_array, TABLE_LIST *tables,
                  List<Item> &item, bool set_query_id,
                  List<Item> &item, ulong set_query_id,
                  List<Item> *sum_func_list, bool allow_sum_func);
int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
                COND **conds);
@@ -778,9 +778,10 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
    {
      DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file,
                          free_file));
      file->reset();
      file->ha_reset();
      file->external_lock(current_thd, F_UNLCK);
      file->close();
      delete file;
    }
  }
  delete_dynamic(&ranges); /* ranges are allocated in alloc */

@@ -916,7 +917,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
  {
    DBUG_PRINT("info", ("Reusing handler %p", file));
    if (file->extra(HA_EXTRA_KEYREAD) ||
        file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
        file->ha_retrieve_all_pk() ||
        init() || reset())
    {
      DBUG_RETURN(1);

@@ -944,7 +945,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
    goto failure;

  if (file->extra(HA_EXTRA_KEYREAD) ||
      file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
      file->ha_retrieve_all_pk() ||
      init() || reset())
  {
    file->external_lock(thd, F_UNLCK);

@@ -956,6 +957,8 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
  DBUG_RETURN(0);

failure:
  if (file)
    delete file;
  file= save_file;
  DBUG_RETURN(1);
}
@@ -1563,7 +1566,8 @@ static int fill_used_fields_bitmap(PARAM *param)
  param->fields_bitmap_size= (table->s->fields/8 + 1);
  uchar *tmp;
  uint pk;
  if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) ||
  if (!(tmp= (uchar*)alloc_root(param->mem_root,
                                bytes_word_aligned(param->fields_bitmap_size))) ||
      bitmap_init(&param->needed_fields, tmp, param->fields_bitmap_size*8,
                  FALSE))
    return 1;
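The hunk above switches the bitmap buffer allocation to a word-aligned size so that the bitmap code can safely operate on whole 32-bit words (the new last_word_ptr/last_word_mask handling). A minimal standalone sketch of that rounding; the helper name mirrors the bytes_word_aligned() call used in the diff but is written here as an illustrative assumption, not the server's definition:

#include <cassert>
#include <cstdio>

// Round a byte count up to a multiple of 4 so the buffer can be accessed
// as whole 32-bit words.
static unsigned bytes_word_aligned(unsigned bytes)
{
  return 4 * ((bytes + 3) / 4);
}

int main()
{
  // A table with 17 fields needs 17/8 + 1 = 3 bytes of bitmap,
  // which is rounded up to 4 bytes (one 32-bit word).
  unsigned fields= 17;
  unsigned bitmap_bytes= fields / 8 + 1;
  std::printf("%u fields -> %u bytes -> %u aligned\n",
              fields, bitmap_bytes, bytes_word_aligned(bitmap_bytes));
  assert(bytes_word_aligned(3) == 4 && bytes_word_aligned(8) == 8);
  return 0;
}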
@@ -2318,7 +2322,7 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
  ror_scan->records= param->table->quick_rows[keynr];

  if (!(bitmap_buf= (uchar*)alloc_root(param->mem_root,
                                       param->fields_bitmap_size)))
                                       bytes_word_aligned(param->fields_bitmap_size))))
    DBUG_RETURN(NULL);

  if (bitmap_init(&ror_scan->covered_fields, bitmap_buf,

@@ -2438,7 +2442,8 @@ ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
                                       sizeof(ROR_INTERSECT_INFO))))
    return NULL;
  info->param= param;
  if (!(buf= (uchar*)alloc_root(param->mem_root, param->fields_bitmap_size)))
  if (!(buf= (uchar*)alloc_root(param->mem_root,
                                bytes_word_aligned(param->fields_bitmap_size))))
    return NULL;
  if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8,
                  FALSE))

@@ -2995,7 +3000,8 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
  /*I=set of all covering indexes */
  ror_scan_mark= tree->ror_scans;

  uchar buf[MAX_KEY/8+1];
  uint32 int_buf[MAX_KEY/32+1];
  uchar *buf= (uchar*)&int_buf;
  MY_BITMAP covered_fields;
  if (bitmap_init(&covered_fields, buf, nbits, FALSE))
    DBUG_RETURN(0);

@@ -5643,7 +5649,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
    (This also creates a deficiency - it is possible that we will retrieve
    parts of key that are not used by current query at all.)
  */
  if (head->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY))
  if (head->file->ha_retrieve_all_pk())
    DBUG_RETURN(1);

  cur_quick_it.rewind();
@@ -1525,7 +1525,7 @@ static bool update_user_table(THD *thd, const char *host, const char *user,
  key_copy((byte *) user_key, table->record[0], table->key_info,
           table->key_info->key_length);

  table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  table->file->ha_retrieve_all_cols();
  if (table->file->index_read_idx(table->record[0], 0,
                                  (byte *) user_key, table->key_info->key_length,
                                  HA_READ_KEY_EXACT))

@@ -1618,7 +1618,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
  key_copy(user_key, table->record[0], table->key_info,
           table->key_info->key_length);

  table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  table->file->ha_retrieve_all_cols();
  if (table->file->index_read_idx(table->record[0], 0,
                                  user_key, table->key_info->key_length,
                                  HA_READ_KEY_EXACT))

@@ -1751,7 +1751,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
      We should NEVER delete from the user table, as a user can still
      use mysqld even if he doesn't have any privileges in the user table!
    */
    table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
    table->file->ha_retrieve_all_cols();
    if (cmp_record(table,record[1]) &&
        (error=table->file->update_row(table->record[1],table->record[0])))
    { // This should never happen

@@ -1833,7 +1833,7 @@ static int replace_db_table(TABLE *table, const char *db,
  key_copy(user_key, table->record[0], table->key_info,
           table->key_info->key_length);

  table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  table->file->ha_retrieve_all_cols();
  if (table->file->index_read_idx(table->record[0],0,
                                  user_key, table->key_info->key_length,
                                  HA_READ_KEY_EXACT))

@@ -1869,7 +1869,7 @@ static int replace_db_table(TABLE *table, const char *db,
    /* update old existing row */
    if (rights)
    {
      table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
      table->file->ha_retrieve_all_cols();
      if ((error=table->file->update_row(table->record[1],table->record[0])))
        goto table_error; /* purecov: deadcode */
    }

@@ -2206,7 +2206,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
  key_copy(user_key, table->record[0], table->key_info,
           table->key_info->key_length);

  table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  table->file->ha_retrieve_all_cols();
  if (table->file->index_read(table->record[0], user_key,
                              table->key_info->key_length,
                              HA_READ_KEY_EXACT))

@@ -2284,7 +2284,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
    key_copy(user_key, table->record[0], table->key_info,
             key_prefix_length);

    table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
    table->file->ha_retrieve_all_cols();
    if (table->file->index_read(table->record[0], user_key,
                                key_prefix_length,
                                HA_READ_KEY_EXACT))

@@ -2382,7 +2382,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
  key_copy(user_key, table->record[0], table->key_info,
           table->key_info->key_length);

  table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  table->file->ha_retrieve_all_cols();
  if (table->file->index_read_idx(table->record[0], 0,
                                  user_key, table->key_info->key_length,
                                  HA_READ_KEY_EXACT))
@@ -563,7 +563,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
    else
    {
      // Free memory and reset for next loop
      table->file->reset();
      table->file->ha_reset();
    }
    table->in_use=0;
    if (unused_tables)

@@ -2589,6 +2589,8 @@ Field *find_field_in_real_table(THD *thd, TABLE *table,

  if (thd->set_query_id)
  {
    table->file->ha_set_bit_in_rw_set(field->fieldnr,
                                      (bool)(thd->set_query_id-1));
    if (field->query_id != thd->query_id)
    {
      field->query_id=thd->query_id;

@@ -3110,7 +3112,7 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
****************************************************************************/

bool setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
                  List<Item> &fields, bool set_query_id,
                  List<Item> &fields, ulong set_query_id,
                  List<Item> *sum_func_list, bool allow_sum_func)
{
  reg2 Item *item;

@@ -3559,7 +3561,10 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name,
        fields marked in setup_tables during fix_fields of view columns
      */
      if (table)
      {
        table->used_fields= table->s->fields;
        table->file->ha_set_all_bits_in_read_set();
      }
    }
  }
  if (found)

@@ -3711,12 +3716,14 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, COND **conds)
          goto err;
        /* Mark field used for table cache */
        t2_field->query_id= thd->query_id;
        t2->file->ha_set_bit_in_read_set(t2_field->fieldnr);
        t2->used_keys.intersect(t2_field->part_of_key);
      }
      if ((t1_field= iterator->field()))
      {
        /* Mark field used for table cache */
        t1_field->query_id= thd->query_id;
        t1->file->ha_set_bit_in_read_set(t1_field->fieldnr);
        t1->used_keys.intersect(t1_field->part_of_key);
      }
      Item_func_eq *tmp= new Item_func_eq(iterator->item(thd),
@@ -25,7 +25,7 @@
template <uint default_width> class Bitmap
{
  MY_BITMAP map;
  uchar buffer[(default_width+7)/8];
  uint32 buffer[(default_width+31)/32];
public:
  Bitmap() { init(); }
  Bitmap(Bitmap& from) { *this=from; }

@@ -62,17 +62,18 @@ public:
  char *print(char *buf) const
  {
    char *s=buf; int i;
    uchar *uchar_buffer= (uchar*)&buffer;
    for (i=sizeof(buffer)-1; i>=0 ; i--)
    {
      if ((*s=_dig_vec_upper[buffer[i] >> 4]) != '0')
      if ((*s=_dig_vec_upper[uchar_buffer[i] >> 4]) != '0')
        break;
      if ((*s=_dig_vec_upper[buffer[i] & 15]) != '0')
      if ((*s=_dig_vec_upper[uchar_buffer[i] & 15]) != '0')
        break;
    }
    for (s++, i-- ; i>=0 ; i--)
    {
      *s++=_dig_vec_upper[buffer[i] >> 4];
      *s++=_dig_vec_upper[buffer[i] & 15];
      *s++=_dig_vec_upper[uchar_buffer[i] >> 4];
      *s++=_dig_vec_upper[uchar_buffer[i] & 15];
    }
    *s=0;
    return buf;
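The Bitmap template now stores its bits in an array of 32-bit words instead of raw bytes, and the hex-printing code views that word array through a byte pointer. A minimal standalone sketch of the same layout; the ToyBitmap class is an illustrative stand-in, not the server's Bitmap<default_width>:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Fixed-width bitmap whose storage is rounded up to whole 32-bit words.
template <unsigned width> class ToyBitmap
{
  uint32_t buffer[(width + 31) / 32];
public:
  ToyBitmap() { std::memset(buffer, 0, sizeof(buffer)); }
  void set_bit(unsigned n)        { buffer[n / 32] |= 1U << (n % 32); }
  bool is_set(unsigned n) const   { return (buffer[n / 32] >> (n % 32)) & 1U; }

  // Print as hex by viewing the word array as bytes, mirroring the
  // uchar_buffer cast introduced in the diff.
  char *print(char *out) const
  {
    const unsigned char *bytes= (const unsigned char*) buffer;
    char *s= out;
    for (int i= (int) sizeof(buffer) - 1; i >= 0; i--)
      s+= std::sprintf(s, "%02X", bytes[i]);
    return out;
  }
};

int main()
{
  ToyBitmap<40> map;               // 40 bits -> two 32-bit words of storage
  map.set_bit(0);
  map.set_bit(33);
  char buf[2 * sizeof(map) + 1];   // two hex digits per byte, plus NUL
  std::printf("%s set(33)=%d\n", map.print(buf), (int) map.is_set(33));
  return 0;
}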
@@ -761,8 +761,15 @@ public:
  /*
    - if set_query_id=1, we set field->query_id for all fields. In that case
      field list can not contain duplicates.
    0: Means query_id is not set and no indicator to handler of fields used
       is set
    1: Means query_id is set for fields in list and bit in read set is set
       to inform handler that the field is to be read
    2: Means query_id is set for fields in list and bit in write set is set
       to inform handler that it needs to update this field in write_row
       and update_row
  */
  bool set_query_id;
  ulong set_query_id;
  /*
    This variable is used in post-parse stage to declare that sum-functions,
    or functions which have sense only if GROUP BY is present, are allowed.
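The hunk above turns set_query_id from a boolean into a three-valued flag (0/1/2). A minimal standalone sketch of how such a flag can drive read/write set marking, including the (bool)(set_query_id-1) conversion used elsewhere in this commit; the enum names and mark_field() helper are illustrative assumptions, not the server's code:

#include <cstdio>

enum { MARK_NONE= 0, MARK_COLUMNS_READ= 1, MARK_COLUMNS_WRITE= 2 };

struct FieldSets
{
  unsigned long read_set;      // one bit per field, toy-sized
  unsigned long write_set;
};

static void mark_field(FieldSets *sets, unsigned fieldnr, unsigned long mark)
{
  if (mark == MARK_NONE)
    return;                              // 0: nothing is recorded
  sets->read_set|= 1UL << fieldnr;       // 1 and 2 both record the field as read
  if (mark == MARK_COLUMNS_WRITE)        // 2: additionally mark it for update,
    sets->write_set|= 1UL << fieldnr;    //    as (bool)(set_query_id-1) selects
}

int main()
{
  FieldSets sets= { 0, 0 };
  mark_field(&sets, 3, MARK_COLUMNS_READ);   // e.g. a field in a SELECT list
  mark_field(&sets, 5, MARK_COLUMNS_WRITE);  // e.g. a field in an UPDATE SET list
  std::printf("read=%#lx write=%#lx\n", sets.read_set, sets.write_set);
  return 0;
}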
@@ -105,6 +105,11 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
#endif
      clear_timestamp_auto_bits(table->timestamp_field_type,
                                TIMESTAMP_AUTO_SET_ON_INSERT);
    /*
      No fields are provided so all fields must be provided in the values.
      Thus we set all bits in the write set.
    */
    table->file->ha_set_all_bits_in_write_set();
  }
  else
  { // Part field list

@@ -120,7 +125,11 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
    thd->lex->select_lex.no_wrap_view_item= 1;
    save_next= table_list->next_local; // fields only from first table
    table_list->next_local= 0;
    res= setup_fields(thd, 0, table_list, fields, 1, 0, 0);
    /*
      Indicate that the fields in the list are to be updated by setting the
      set_query_id parameter to 2. This sets the bit in the write_set for
      each field.
    */
    res= setup_fields(thd, 0, table_list, fields, 2, 0, 0);
    table_list->next_local= save_next;
    thd->lex->select_lex.no_wrap_view_item= 0;
    if (res)

@@ -209,9 +218,10 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,

  /*
    Check the fields we are going to modify. This will set the query_id
    of all used fields to the thread's query_id.
    of all used fields to the thread's query_id. It will also set all
    fields into the write set of this table.
  */
  if (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0))
  if (setup_fields(thd, 0, insert_table_list, update_fields, 2, 0, 0))
    return -1;

  if (table->timestamp_field)

@@ -221,7 +231,10 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
      clear_timestamp_auto_bits(table->timestamp_field_type,
                                TIMESTAMP_AUTO_SET_ON_UPDATE);
    else
    {
      table->timestamp_field->query_id= timestamp_query_id;
      table->file->ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
    }
  }

  return 0;

@@ -788,7 +801,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table,
    DBUG_RETURN(TRUE);
  }
  if (duplic == DUP_UPDATE || duplic == DUP_REPLACE)
    table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
    table->file->ha_retrieve_all_pk();
  thd->lex->select_lex.first_execution= 0;
  DBUG_RETURN(FALSE);
}

@@ -1985,7 +1998,7 @@ select_insert::~select_insert()
  if (table)
  {
    table->next_number_field=0;
    table->file->reset();
    table->file->ha_reset();
  }
  thd->count_cuted_fields= CHECK_FIELD_IGNORE;
  thd->abort_on_warning= 0;
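The check_insert_fields() hunks above follow a simple rule: with no column list every column will be written, so all write-set bits are raised; with an explicit column list only the listed columns are marked (via setup_fields(..., 2, ...) in the server). A minimal standalone sketch of that branching, with illustrative types and helpers:

#include <cstdio>
#include <vector>

struct ToyTable
{
  unsigned fields;
  std::vector<bool> write_set;
  explicit ToyTable(unsigned n) : fields(n), write_set(n, false) {}
  void set_all_bits_in_write_set()             { write_set.assign(fields, true); }
  void set_bit_in_write_set(unsigned fieldnr)  { write_set[fieldnr]= true; }
};

static void mark_insert_fields(ToyTable *table,
                               const std::vector<unsigned> &field_list)
{
  if (field_list.empty())
    table->set_all_bits_in_write_set();        // INSERT INTO t VALUES (...)
  else
    for (unsigned fieldnr : field_list)        // INSERT INTO t (a, c) VALUES (...)
      table->set_bit_in_write_set(fieldnr);
}

int main()
{
  ToyTable t(4);
  std::vector<unsigned> field_list= {0, 2};
  mark_insert_fields(&t, field_list);
  for (unsigned i= 0; i < t.fields; i++)
    std::printf("field %u: %s\n", i, t.write_set[i] ? "write" : "-");
  return 0;
}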
@@ -163,7 +163,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,

    The main thing to fix to remove this restriction is to ensure that the
    table is marked to be 'used for insert' in which case we should never
    mark this table as as 'const table' (ie, one that has only one row).
    mark this table as 'const table' (ie, one that has only one row).
  */
  if (unique_table(table_list, table_list->next_global))
  {

@@ -179,6 +179,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
    Field **field;
    for (field=table->field; *field ; field++)
      fields_vars.push_back(new Item_field(*field));
    /*
      Since all fields are set we set all bits in the write set
    */
    table->file->ha_set_all_bits_in_write_set();
    table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
    /*
      Let us also prepare SET clause, although it is probably empty

@@ -191,8 +195,15 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
  else
  { // Part field list
    /* TODO: use this conds for 'WITH CHECK OPTIONS' */
    if (setup_fields(thd, 0, table_list, fields_vars, 1, 0, 0) ||
        setup_fields(thd, 0, table_list, set_fields, 1, 0, 0) ||
    /*
      Indicate that both the variables in the field list and the fields in
      the update_list are to be included in the write set of the table. We
      do however set all bits in the write set anyway since it is not
      allowed to specify NULLs in LOAD DATA
    */
    table->file->ha_set_all_bits_in_write_set();
    if (setup_fields(thd, 0, table_list, fields_vars, 2, 0, 0) ||
        setup_fields(thd, 0, table_list, set_fields, 2, 0, 0) ||
        check_that_all_fields_are_given_values(thd, table))
      DBUG_RETURN(TRUE);
    /*
@@ -936,23 +936,19 @@ JOIN::optimize()

  }
  /*
    Need to tell Innobase that to play it safe, it should fetch all
    columns of the tables: this is because MySQL may build row
    pointers for the rows, and for all columns of the primary key the
    field->query_id has not necessarily been set to thd->query_id by
    MySQL.
    Need to tell handlers that to play it safe, they should fetch all
    columns of the primary key of the tables: this is because MySQL may
    build row pointers for the rows, and for all columns of the primary key
    the read set has not necessarily been set by the server code.
  */

#ifdef HAVE_INNOBASE_DB
  if (need_tmp || select_distinct || group_list || order)
  {
    for (uint i_h = const_tables; i_h < tables; i_h++)
    {
      TABLE* table_h = join_tab[i_h].table;
      table_h->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
      table_h->file->ha_retrieve_all_pk();
    }
  }
#endif

  DBUG_EXECUTE("info",TEST_join(this););
  /*
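What the new ha_retrieve_all_pk() call boils down to is marking every primary-key column in the read set so the engine returns enough of the row to rebuild row pointers. A minimal standalone sketch of that idea; the ToyTable layout and pk_columns member are illustrative, not the server's structures:

#include <cstdio>
#include <utility>
#include <vector>

struct ToyTable
{
  unsigned fields;
  std::vector<unsigned> pk_columns;   // field numbers belonging to the PK
  std::vector<bool> read_set;
  ToyTable(unsigned n, std::vector<unsigned> pk)
    : fields(n), pk_columns(std::move(pk)), read_set(n, false) {}
};

static void retrieve_all_pk(ToyTable *table)
{
  for (unsigned fieldnr : table->pk_columns)
    table->read_set[fieldnr]= true;   // analogous to ha_set_bit_in_read_set()
}

int main()
{
  ToyTable t(5, {0, 3});              // composite primary key on fields 0 and 3
  retrieve_all_pk(&t);
  for (unsigned i= 0; i < t.fields; i++)
    std::printf("field %u: %s\n", i, t.read_set[i] ? "read" : "-");
  return 0;
}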
@@ -7927,7 +7923,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
  uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
  uint blob_count,group_null_items, string_count;
  uint temp_pool_slot=MY_BIT_NONE;
  ulong reclength, string_total_length;
  ulong reclength, string_total_length, fieldnr= 0;
  bool using_unique_constraint= 0;
  bool use_packed_rows= 0;
  bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);

@@ -7950,7 +7946,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
  statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status);

  if (use_temp_pool)
    temp_pool_slot = bitmap_set_next(&temp_pool);
    temp_pool_slot = bitmap_lock_set_next(&temp_pool);

  if (temp_pool_slot != MY_BIT_NONE) // we got a slot
    sprintf(path, "%s_%lx_%i", tmp_file_prefix,

@@ -8002,12 +7998,12 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
                       param->group_length : 0,
                       NullS))
  {
    bitmap_clear_bit(&temp_pool, temp_pool_slot);
    bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
    DBUG_RETURN(NULL); /* purecov: inspected */
  }
  if (!(param->copy_field=copy=new Copy_field[field_count]))
  {
    bitmap_clear_bit(&temp_pool, temp_pool_slot);
    bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
    my_free((gptr) table,MYF(0)); /* purecov: inspected */
    DBUG_RETURN(NULL); /* purecov: inspected */
  }

@@ -8038,6 +8034,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
  table->s->tmp_table= TMP_TABLE;
  table->s->db_low_byte_first=1; // True for HEAP and MyISAM
  table->s->table_charset= param->table_charset;
  table->s->primary_key= MAX_KEY; //Indicate no primary key
  table->s->keys_for_keyread.init();
  table->s->keys_in_use.init();
  /* For easier error reporting */

@@ -8111,6 +8108,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
          (*argp)->maybe_null=1;
        }
        new_field->query_id= thd->query_id;
        new_field->fieldnr= ++fieldnr;
      }
    }
  }

@@ -8158,6 +8156,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
        new_field->flags|= GROUP_FLAG;
      }
      new_field->query_id= thd->query_id;
      new_field->fieldnr= ++fieldnr;
      *(reg_field++) =new_field;
    }
    if (!--hidden_field_count)

@@ -8166,6 +8165,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
  DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
  field_count= (uint) (reg_field - table->field);
  *blob_field= 0; // End marker
  table->s->fields= field_count;

  /* If result table is small; use a heap */
  if (blob_count || using_unique_constraint ||

@@ -8182,7 +8182,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
  {
    table->file=get_new_handler(table,table->s->db_type= DB_TYPE_HEAP);
  }

  if (table->s->fields)
  {
    table->file->ha_set_all_bits_in_read_set();
    table->file->ha_set_all_bits_in_write_set();
  }
  if (!using_unique_constraint)
    reclength+= group_null_items; // null flag is stored separately

@@ -8208,7 +8212,6 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
       string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS))
    use_packed_rows= 1;

  table->s->fields= field_count;
  table->s->reclength= reclength;
  {
    uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);

@@ -8446,7 +8449,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,

err:
  free_tmp_table(thd,table); /* purecov: inspected */
  bitmap_clear_bit(&temp_pool, temp_pool_slot);
  bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
  DBUG_RETURN(NULL); /* purecov: inspected */
}

@@ -8720,7 +8723,7 @@ free_tmp_table(THD *thd, TABLE *entry)
  my_free((gptr) entry->record[0],MYF(0));
  free_io_cache(entry);

  bitmap_clear_bit(&temp_pool, entry->temp_pool_slot);
  bitmap_lock_clear_bit(&temp_pool, entry->temp_pool_slot);

  my_free((gptr) entry,MYF(0));
  thd->proc_info=save_proc_info;

@@ -8816,8 +8819,8 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
  (void) new_table.file->close();
err1:
  new_table.file->delete_table(new_table.s->table_name);
  delete new_table.file;
err2:
  delete new_table.file;
  thd->proc_info=save_proc_info;
  DBUG_RETURN(1);
}
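The create_tmp_table()/free_tmp_table() hunks above swap the plain temp_pool bitmap helpers for the new bitmap_lock_* variants, since the pool of temporary-table name slots is shared between threads. A minimal standalone sketch of a mutex-guarded "claim the first free slot" helper; the SlotPool class is illustrative, not the server's MY_BITMAP-based implementation:

#include <cstdio>
#include <mutex>
#include <vector>

static const unsigned NO_SLOT= ~0U;             // analogous to MY_BIT_NONE

class SlotPool
{
  std::mutex lock;
  std::vector<bool> used;
public:
  explicit SlotPool(unsigned n) : used(n, false) {}

  // Like bitmap_lock_set_next(): find and claim a free slot under the mutex.
  unsigned lock_set_next()
  {
    std::lock_guard<std::mutex> guard(lock);
    for (unsigned i= 0; i < used.size(); i++)
      if (!used[i]) { used[i]= true; return i; }
    return NO_SLOT;
  }

  // Like bitmap_lock_clear_bit(): release a slot under the mutex.
  void lock_clear_bit(unsigned slot)
  {
    std::lock_guard<std::mutex> guard(lock);
    if (slot != NO_SLOT)
      used[slot]= false;
  }
};

int main()
{
  SlotPool temp_pool(4);
  unsigned slot= temp_pool.lock_set_next();     // claim a slot for a tmp table
  if (slot != NO_SLOT)
    std::printf("using tmp table slot %u\n", slot);
  temp_pool.lock_clear_bit(slot);               // released again on free
  return 0;
}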
@@ -1635,6 +1635,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
end:
  VOID(pthread_mutex_unlock(&LOCK_open));
  start_waiting_global_read_lock(thd);
  delete file;
  thd->proc_info="After create";
  DBUG_RETURN(error);
}

@@ -3836,7 +3837,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
    this function does not set field->query_id in the columns to the
    current query id
  */
  from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  to->file->ha_set_all_bits_in_write_set();
  from->file->ha_retrieve_all_cols();
  init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
  if (ignore ||
      handle_duplicates == DUP_REPLACE)

@@ -3999,10 +4001,11 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
      /* calculating table's checksum */
      ha_checksum crc= 0;

      /* InnoDB must be told explicitly to retrieve all columns, because
         this function does not set field->query_id in the columns to the
         current query id */
      t->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
      /*
        Set all bits in read set and inform InnoDB that we are reading all
        fields
      */
      t->file->ha_retrieve_all_cols();

      if (t->file->ha_rnd_init(1))
        protocol->store_null();
@@ -526,7 +526,7 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
  if (!(table = open_ltable(thd,&tables,TL_WRITE)))
    goto err;
  table->field[0]->store(udf_name->str, udf_name->length, system_charset_info);
  table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  table->file->ha_retrieve_all_cols();
  if (!table->file->index_read_idx(table->record[0], 0,
                                   (byte*) table->field[0]->ptr,
                                   table->key_info[0].key_length,
@@ -188,7 +188,11 @@ int mysql_update(THD *thd,
  {
    bool res;
    select_lex->no_wrap_view_item= 1;
    res= setup_fields(thd, 0, table_list, fields, 1, 0, 0);
    /*
      Indicate that the set of fields is to be updated by passing 2 for
      set_query_id.
    */
    res= setup_fields(thd, 0, table_list, fields, 2, 0, 0);
    select_lex->no_wrap_view_item= 0;
    if (res)
      DBUG_RETURN(1); /* purecov: inspected */

@@ -208,7 +212,10 @@ int mysql_update(THD *thd,
    if (table->timestamp_field->query_id == thd->query_id)
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
    else
    {
      table->timestamp_field->query_id=timestamp_query_id;
      table->file->ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
    }
  }

#ifndef NO_EMBEDDED_ACCESS_CHECKS

@@ -268,7 +275,7 @@ int mysql_update(THD *thd,
    We can't update table directly; We must first search after all
    matching rows before updating the table!
  */
  table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  table->file->ha_retrieve_all_cols();
  if (used_index < MAX_KEY && old_used_keys.is_set(used_index))
  {
    table->key_read=1;

@@ -739,7 +746,7 @@ bool mysql_multi_update_prepare(THD *thd)
  leaves= lex->select_lex.leaf_tables;

  if ((lex->select_lex.no_wrap_view_item= 1,
       res= setup_fields(thd, 0, table_list, *fields, 1, 0, 0),
       res= setup_fields(thd, 0, table_list, *fields, 2, 0, 0),
       lex->select_lex.no_wrap_view_item= 0,
       res))
    DBUG_RETURN(TRUE);

@@ -856,7 +863,7 @@ bool mysql_multi_update_prepare(THD *thd)
  if (setup_tables(thd, table_list, &lex->select_lex.where,
                   &lex->select_lex.leaf_tables, FALSE, FALSE) ||
      (lex->select_lex.no_wrap_view_item= 1,
       res= setup_fields(thd, 0, table_list, *fields, 1, 0, 0),
       res= setup_fields(thd, 0, table_list, *fields, 2, 0, 0),
       lex->select_lex.no_wrap_view_item= 0,
       res))
    DBUG_RETURN(TRUE);
@@ -572,6 +572,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
        error= 4;
        goto err; /* purecov: inspected */
      }
      reg_field->fieldnr= i+1; //Set field number
      reg_field->comment=comment;
      if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag))
      {

@@ -801,6 +802,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
          (*save++)= i;
      }
    }
    if (outparam->file->ha_allocate_read_write_set(share->fields))
      goto err;

    /* The table struct is now initialized; Open the table */
    error=2;
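The openfrm() hunks above give every field a 1-based fieldnr and ask the handler to allocate read/write bitmaps sized from share->fields before the table is opened. A minimal standalone sketch of that setup step; the ToyShare/ToyHandler types and method names are illustrative assumptions, not the server's structures:

#include <cstdio>
#include <vector>

struct ToyShare
{
  unsigned fields;
};

struct ToyHandler
{
  std::vector<bool> read_set, write_set;
  // Mirrors ha_allocate_read_write_set(share->fields); returns non-zero on error.
  int allocate_read_write_set(unsigned long no_fields)
  {
    // fieldnr is 1-based in this sketch, so slot 0 is left unused.
    read_set.assign(no_fields + 1, false);
    write_set.assign(no_fields + 1, false);
    return 0;
  }
};

int main()
{
  ToyShare share= { 3 };
  std::vector<unsigned> fieldnr(share.fields);
  for (unsigned i= 0; i < share.fields; i++)
    fieldnr[i]= i + 1;                          // reg_field->fieldnr= i+1

  ToyHandler handler;
  if (handler.allocate_read_write_set(share.fields))
    return 1;                                   // corresponds to "goto err"
  std::printf("allocated sets for %u fields, first fieldnr=%u\n",
              share.fields, fieldnr[0]);
  return 0;
}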
@@ -721,6 +721,7 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
    {
      my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name);
      error= 1;
      delete regfield; //To avoid memory leak
      goto err;
    }
  }