/*
  Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
*/

#include "sql_priv.h"
#include "unireg.h"         // REQUIRED: for other includes
#include "sql_show.h"
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
#include "ha_ndbcluster.h"

#ifdef HAVE_NDB_BINLOG
#include "rpl_injector.h"
#include "rpl_filter.h"
#include "slave.h"
#include "ha_ndbcluster_binlog.h"
#include "NdbDictionary.hpp"
#include "ndb_cluster_connection.hpp"
#include <util/NdbAutoPtr.hpp>

#include "sql_base.h"       // close_thread_tables
#include "sql_table.h"      // build_table_filename
#include "table.h"          // open_table_from_share
#include "discover.h"       // readfrm, writefrm
#include "lock.h"           // MYSQL_LOCK_IGNORE_FLUSH,
                            // mysql_unlock_tables
#include "sql_parse.h"      // mysql_parse
#include "transaction.h"

#ifdef ndb_dynamite
#undef assert
#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0)
#endif

extern my_bool opt_ndb_log_binlog_index;
extern ulong opt_ndb_extra_logging;

/*
  defines for cluster replication table names
*/
#include "ha_ndbcluster_tables.h"
#define NDB_APPLY_TABLE_FILE "./" NDB_REP_DB "/" NDB_APPLY_TABLE
#define NDB_SCHEMA_TABLE_FILE "./" NDB_REP_DB "/" NDB_SCHEMA_TABLE
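
/*
  Illustrative note (an assumption based on the usual values of the
  table-name macros in ha_ndbcluster_tables.h, not stated here): with
  NDB_REP_DB "mysql", the two file paths expand to e.g.

    NDB_APPLY_TABLE_FILE  -> "./mysql/ndb_apply_status"
    NDB_SCHEMA_TABLE_FILE -> "./mysql/ndb_schema"
*/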

/*
  Timeout for syncing schema events between
  mysql servers, and between mysql server and the binlog
*/
static const int DEFAULT_SYNC_TIMEOUT= 120;

/*
  Flag showing if the ndb injector thread is running, if so == 1
  -1 if it was started but later stopped for some reason
   0 if never started
*/
static int ndb_binlog_thread_running= 0;

/*
  Flag showing if the ndb binlog should be created, if so == TRUE
  FALSE if not
*/
my_bool ndb_binlog_running= FALSE;
my_bool ndb_binlog_tables_inited= FALSE;

/*
  Global reference to the ndb injector thread THD object

  Has one sole purpose, for setting the in_use table member variable
  in get_share(...)
*/
THD *injector_thd= 0;

/*
  Global reference to ndb injector thd object.

  Used mainly by the binlog index thread, but exposed to the client sql
  thread for one reason: to set up the event operations for a table so
  that the ndb injector thread can receive its events.

  Must therefore always be used with a surrounding
  mysql_mutex_lock(&injector_mutex) when doing create/dropEventOperation.
*/
static Ndb *injector_ndb= 0;
static Ndb *schema_ndb= 0;
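
/*
  Illustrative sketch (not from the original source; event_name and op
  are hypothetical): the locking pattern the comment above prescribes
  when a client thread uses injector_ndb.

    mysql_mutex_lock(&injector_mutex);
    if (injector_ndb)
    {
      NdbEventOperation *op= injector_ndb->createEventOperation(event_name);
      ...
    }
    mysql_mutex_unlock(&injector_mutex);
*/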

static int ndbcluster_binlog_inited= 0;
/*
  Flag "ndbcluster_binlog_terminating" set when shutting down mysqld.
  Server main loop should call handlerton function:

  ndbcluster_hton->binlog_func ==
  ndbcluster_binlog_func(...,BFN_BINLOG_END,...) ==
  ndbcluster_binlog_end

  at shutdown, which sets the flag. The server then needs to wait for
  the binlog thread to complete; otherwise the binlog will not be
  complete.

  ndbcluster_hton->panic == ndbcluster_end() will not return until
  ndb binlog is completed
*/
static int ndbcluster_binlog_terminating= 0;
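
/*
  Illustrative sketch (an assumption about the shutdown handshake the
  comment above describes, not code from the original source):

    mysql_mutex_lock(&injector_mutex);
    ndbcluster_binlog_terminating= 1;    // ask the binlog thread to stop
    mysql_cond_signal(&injector_cond);
    while (ndb_binlog_thread_running > 0)
      mysql_cond_wait(&injector_cond, &injector_mutex); // wait until done
    mysql_mutex_unlock(&injector_mutex);
*/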

/*
  Mutex and condition used for interacting between client sql thread
  and injector thread
*/
pthread_t ndb_binlog_thread;
mysql_mutex_t injector_mutex;
mysql_cond_t injector_cond;

/* NDB Injector thread (used for binlog creation) */
static ulonglong ndb_latest_applied_binlog_epoch= 0;
static ulonglong ndb_latest_handled_binlog_epoch= 0;
static ulonglong ndb_latest_received_binlog_epoch= 0;

NDB_SHARE *ndb_apply_status_share= 0;
NDB_SHARE *ndb_schema_share= 0;
mysql_mutex_t ndb_schema_share_mutex;

extern my_bool opt_log_slave_updates;
static my_bool g_ndb_log_slave_updates;

/* Schema object distribution handling */
HASH ndb_schema_objects;
typedef struct st_ndb_schema_object {
  mysql_mutex_t mutex;
  char *key;
  uint key_length;
  uint use_count;
  MY_BITMAP slock_bitmap;
  uint32 slock[256/32]; // 256 bits for lock status of table
} NDB_SCHEMA_OBJECT;
static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key,
                                                my_bool create_if_not_exists,
                                                my_bool have_lock);
static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object,
                                   bool have_lock);
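
/*
  Illustrative sketch (an assumption about intended use, not from the
  original source): slock_bitmap presumably overlays the fixed slock
  array so the 256 lock bits can be handled with the my_bitmap API,
  e.g. to track which participants have yet to acknowledge a schema
  operation.

    NDB_SCHEMA_OBJECT *obj= ...;
    bitmap_init(&obj->slock_bitmap, obj->slock,
                sizeof(obj->slock) * 8, FALSE);
    bitmap_set_all(&obj->slock_bitmap);            // all still pending
    bitmap_clear_bit(&obj->slock_bitmap, node_id); // one node has acked
*/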

static Uint64 *p_latest_trans_gci= 0;

/*
  Global variables for holding the ndb_binlog_index table reference
*/
static TABLE *ndb_binlog_index= 0;
static TABLE_LIST binlog_tables;

/*
  Helper functions
*/

#ifndef DBUG_OFF
/* purecov: begin deadcode */
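/* Debug helper: hex-dumps the leading bytes of every field in a record */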
static void print_records(TABLE *table, const uchar *record)
{
  for (uint j= 0; j < table->s->fields; j++)
  {
    char buf[40];
    int pos= 0;
    Field *field= table->field[j];
    const uchar* field_ptr= field->ptr - table->record[0] + record;
    int pack_len= field->pack_length();
    int n= pack_len < 10 ? pack_len : 10;

    for (int i= 0; i < n && pos < 20; i++)
    {
      pos+= sprintf(&buf[pos]," %x", (int) (uchar) field_ptr[i]);
    }
    buf[pos]= 0;
    DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf));
  }
}
/* purecov: end */
#else
#define print_records(a,b)
#endif

#ifndef DBUG_OFF
static void dbug_print_table(const char *info, TABLE *table)
{
  if (table == 0)
  {
    DBUG_PRINT("info",("%s: (null)", info));
    return;
  }
  DBUG_PRINT("info",
             ("%s: %s.%s s->fields: %d "
              "reclength: %lu rec_buff_length: %u record[0]: 0x%lx "
              "record[1]: 0x%lx",
              info,
              table->s->db.str,
              table->s->table_name.str,
              table->s->fields,
              table->s->reclength,
              table->s->rec_buff_length,
              (long) table->record[0],
              (long) table->record[1]));

  for (unsigned int i= 0; i < table->s->fields; i++)
  {
    Field *f= table->field[i];
    DBUG_PRINT("info",
               ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d pack_length: %d "
                "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]",
                i,
                f->field_name,
                (long) f->flags,
                (f->flags & PRI_KEY_FLAG)  ? "pri"       : "attr",
                (f->flags & NOT_NULL_FLAG) ? ""          : ",nullable",
                (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed",
                (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "",
                (f->flags & BLOB_FLAG)     ? ",blob"     : "",
                (f->flags & BINARY_FLAG)   ? ",binary"   : "",
                f->real_type(),
                f->pack_length(),
                (long) f->ptr, (int) (f->ptr - table->record[0]),
                f->null_bit,
                (long) f->null_ptr,
                (int) ((uchar*) f->null_ptr - table->record[0])));
    if (f->type() == MYSQL_TYPE_BIT)
    {
      Field_bit *g= (Field_bit*) f;
      DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] "
                                   "bit_ofs: %d bit_len: %u",
                                   g->field_length, (long) g->bit_ptr,
                                   (int) ((uchar*) g->bit_ptr -
                                          table->record[0]),
                                   g->bit_ofs, g->bit_len));
    }
  }
}
#else
#define dbug_print_table(a,b)
#endif

/*
  Run a query through mysql_parse

  Used to:
  - purge the ndb_binlog_index
  - create the ndb_apply_status table
*/
static void run_query(THD *thd, char *buf, char *end,
                      const int *no_print_error, my_bool disable_binlog)
{
  ulong save_thd_query_length= thd->query_length();
  char *save_thd_query= thd->query();
  ulong save_thread_id= thd->variables.pseudo_thread_id;
  struct system_status_var save_thd_status_var= thd->status_var;
  THD_TRANS save_thd_transaction_all= thd->transaction.all;
  THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt;
  ulonglong save_thd_options= thd->variables.option_bits;
  DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->variables.option_bits));
  NET save_thd_net= thd->net;

  bzero((char*) &thd->net, sizeof(NET));
  thd->set_query(buf, (uint) (end - buf));
  thd->variables.pseudo_thread_id= thread_id;
  thd->transaction.stmt.modified_non_trans_table= FALSE;
  if (disable_binlog)
    thd->variables.option_bits&= ~OPTION_BIN_LOG;

  DBUG_PRINT("query", ("%s", thd->query()));

  DBUG_ASSERT(!thd->in_sub_stmt);
  DBUG_ASSERT(!thd->locked_tables_mode);

  {
    Parser_state parser_state;
    if (!parser_state.init(thd, thd->query(), thd->query_length()))
      mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
  }

  if (no_print_error && thd->is_slave_error)
  {
    int i;
    Thd_ndb *thd_ndb= get_thd_ndb(thd);
    for (i= 0; no_print_error[i]; i++)
      if ((thd_ndb->m_error_code == no_print_error[i]) ||
          (thd->stmt_da->sql_errno() == (unsigned) no_print_error[i]))
        break;
    if (!no_print_error[i])
      sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d",
                      buf,
                      thd->stmt_da->message(),
                      thd->stmt_da->sql_errno(),
                      thd_ndb->m_error_code,
                      (int) thd->is_error(), thd->is_slave_error);
  }
  /*
    XXX: this code is broken. mysql_parse()/mysql_reset_thd_for_next_command()
    can not be called from within a statement, and
    run_query() can be called from anywhere, including from within
    a sub-statement.
    This particular reset is a temporary hack to avoid an assert
    for double assignment of the diagnostics area when run_query()
    is called from ndbcluster_reset_logs(), which is called from
    mysql_flush().
  */
  thd->stmt_da->reset_diagnostics_area();

  thd->variables.option_bits= save_thd_options;
  thd->set_query(save_thd_query, save_thd_query_length);
  thd->variables.pseudo_thread_id= save_thread_id;
  thd->status_var= save_thd_status_var;
  thd->transaction.all= save_thd_transaction_all;
  thd->transaction.stmt= save_thd_transaction_stmt;
  thd->net= save_thd_net;
  thd->set_current_stmt_binlog_format_row();

  if (thd == injector_thd)
  {
    /*
      running the query will close all tables, including the ndb_binlog_index
      used in injector_thd
    */
    ndb_binlog_index= 0;
  }
}
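
/*
  Illustrative sketch (not from the original source; the statement text
  and error list are hypothetical): how a caller drives run_query() with
  a query buffer, its end pointer, and a 0-terminated list of error
  codes that should not be printed.

    char buf[1024];
    char *end= strmov(buf, "DELETE FROM mysql.ndb_binlog_index");
    const int no_print_error[]= { ER_NO_SUCH_TABLE, 0 };
    run_query(thd, buf, end, no_print_error, TRUE); // TRUE: skip binlogging
*/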

static void
ndbcluster_binlog_close_table(THD *thd, NDB_SHARE *share)
{
  DBUG_ENTER("ndbcluster_binlog_close_table");
  if (share->table_share)
  {
    closefrm(share->table, 1);
    share->table_share= 0;
    share->table= 0;
  }
  DBUG_ASSERT(share->table == 0);
  DBUG_VOID_RETURN;
}


/*
  Creates a TABLE object for the ndb cluster table

  NOTES
    This does not open the underlying table
*/
static int
ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
                             TABLE_SHARE *table_share, TABLE *table,
                             int reopen)
{
  int error;
  DBUG_ENTER("ndbcluster_binlog_open_table");

  init_tmp_table_share(thd, table_share, share->db, 0, share->table_name,
                       share->key);
  if ((error= open_table_def(thd, table_share, 0)))
  {
    DBUG_PRINT("error", ("open_table_def failed: %d my_errno: %d", error, my_errno));
    free_table_share(table_share);
    DBUG_RETURN(error);
  }
  if ((error= open_table_from_share(thd, table_share, "", 0 /* don't allocate buffers */,
                                    (uint) READ_ALL, 0, table, FALSE)))
  {
    DBUG_PRINT("error", ("open_table_from_share failed %d my_errno: %d", error, my_errno));
    free_table_share(table_share);
    DBUG_RETURN(error);
  }
  mysql_mutex_lock(&LOCK_open);
  assign_new_table_id(table_share);
  mysql_mutex_unlock(&LOCK_open);

  if (!reopen)
  {
    // allocate memory on ndb share so it can be reused after online alter table
    (void)multi_alloc_root(&share->mem_root,
                           &(share->record[0]), table->s->rec_buff_length,
                           &(share->record[1]), table->s->rec_buff_length,
                           NULL);
  }
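  /*
    Point the table's fields and record buffers at the share's
    persistent record buffers instead of the TABLE-local ones.
  */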
  {
    my_ptrdiff_t row_offset= share->record[0] - table->record[0];
    Field **p_field;
    for (p_field= table->field; *p_field; p_field++)
      (*p_field)->move_field_offset(row_offset);
    table->record[0]= share->record[0];
    table->record[1]= share->record[1];
  }

  table->in_use= injector_thd;

  table->s->db.str= share->db;
  table->s->db.length= strlen(share->db);
  table->s->table_name.str= share->table_name;
  table->s->table_name.length= strlen(share->table_name);

  DBUG_ASSERT(share->table_share == 0);
  share->table_share= table_share;
  DBUG_ASSERT(share->table == 0);
  share->table= table;
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that were found necessary while testing the handler changes
Changes that require code changes in other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
do statement-specific cleanups.
(The only case it's not called is if we force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only need to read these
columns.
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only need to update these
columns.
The above bitmaps should now be up to date in all contexts
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. As the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIEVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, dbug_tmp_use_all_columns() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler).
- If one needs to temporarily set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable renames in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS) is true.
(stats.records is not supposed to be an exact value. It only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precautions
in remembering any hidden primary key to be able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns were marked in read_set and only updated
columns were marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has an 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as used in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_update() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if the handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell the handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell the handler we are going to update
columns that are part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that are part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to the handler to allow
it to quickly know that it only needs to read columns that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handlers can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to, in addition to other columns,
also mark all columns that are used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all indexes.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map(),
tmp_use_all_columns() and tmp_restore_column_map() functions to temporarily
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field::store() & Field::val()
functions, but doesn't need the column maps set for any other usage.
(i.e. bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that return
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on the old column maps or field->set_query_id being correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get a duplicate row on insert, change the column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handlers that don't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields had been
automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key would fail if the field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declarations to the start of functions for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of whether the timestamp field was set by the statement.
- Removed calls to free_io_cache() as this is now done automatically in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparisons with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly).
Lars has promised to do this.
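To make the reworked handler contract above concrete, here is a minimal
sketch (hypothetical engine "ha_example", not part of this changeset) that
restates the pieces described in this message:

/* sketch only: mirrors the WL#3281 handler API described above */
class ha_example : public handler
{
public:
  /* constructor omitted; mirrors existing engines such as ha_myisam */
  /* has_transactions() is gone; capabilities are reported via flags */
  ulonglong table_flags() const
  { return HA_NO_TRANSACTIONS | HA_PARTIAL_COLUMN_READ; }
  /* called after every DML statement; code that used to live in
     extra(HA_EXTRA_RESET) moves here */
  int reset() { /* drop per-statement caches */ return 0; }
};

/* factory gets the MEM_ROOT to allocate the handler in the right area */
static handler *example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
  return new (mem_root) ha_example(table);
}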
2006-06-04 17:52:22 +02:00
|
|
|
/* We can't use 'use_all_columns()' as the file object is not setup yet */
|
|
|
|
table->column_bitmaps_set_no_signal(&table->s->all_set, &table->s->all_set);
|
2006-02-16 10:07:31 +01:00
|
|
|
#ifndef DBUG_OFF
|
|
|
|
dbug_print_table("table", table);
|
|
|
|
#endif
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
|
|
|
Initialize the binlog part of the NDB_SHARE
|
|
|
|
*/
|
2007-08-30 11:46:30 +02:00
|
|
|
int ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
THD *thd= current_thd;
|
|
|
|
MEM_ROOT *mem_root= &share->mem_root;
|
2006-02-16 00:30:56 +01:00
|
|
|
int do_event_op= ndb_binlog_running;
|
2007-08-30 11:46:30 +02:00
|
|
|
int error= 0;
|
2006-02-26 15:03:43 +01:00
|
|
|
DBUG_ENTER("ndbcluster_binlog_init_share");
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2007-02-05 07:21:18 +01:00
|
|
|
share->connect_count= g_ndb_cluster_connection->get_connect_count();
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
share->op= 0;
|
|
|
|
share->table= 0;
|
2006-02-16 00:30:56 +01:00
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_schema_share &&
|
2006-02-16 00:30:56 +01:00
|
|
|
strcmp(share->db, NDB_REP_DB) == 0 &&
|
|
|
|
strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
|
|
|
|
do_event_op= 1;
|
2007-02-06 06:40:26 +01:00
|
|
|
else if (!ndb_apply_status_share &&
|
|
|
|
strcmp(share->db, NDB_REP_DB) == 0 &&
|
|
|
|
strcmp(share->table_name, NDB_APPLY_TABLE) == 0)
|
|
|
|
do_event_op= 1;
|
2006-02-16 00:30:56 +01:00
|
|
|
|
|
|
|
{
|
|
|
|
int i, no_nodes= g_ndb_cluster_connection->no_db_nodes();
|
|
|
|
share->subscriber_bitmap= (MY_BITMAP*)
|
|
|
|
alloc_root(mem_root, no_nodes * sizeof(MY_BITMAP));
|
|
|
|
for (i= 0; i < no_nodes; i++)
|
|
|
|
{
|
|
|
|
bitmap_init(&share->subscriber_bitmap[i],
|
|
|
|
(Uint32*)alloc_root(mem_root, max_ndb_nodes/8),
|
2006-06-04 17:52:22 +02:00
|
|
|
max_ndb_nodes, FALSE);
|
2006-02-16 00:30:56 +01:00
|
|
|
bitmap_clear_all(&share->subscriber_bitmap[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!do_event_op)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-01-31 15:40:26 +01:00
|
|
|
if (_table)
|
|
|
|
{
|
|
|
|
if (_table->s->primary_key == MAX_KEY)
|
|
|
|
share->flags|= NSF_HIDDEN_PK;
|
|
|
|
if (_table->s->blob_fields != 0)
|
|
|
|
share->flags|= NSF_BLOB_FLAG;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
share->flags|= NSF_NO_BINLOG;
|
|
|
|
}
|
2007-08-30 11:46:30 +02:00
|
|
|
DBUG_RETURN(error);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
while (1)
|
|
|
|
{
|
2006-02-16 10:07:31 +01:00
|
|
|
int error;
|
2006-05-19 15:44:46 +02:00
|
|
|
TABLE_SHARE *table_share= (TABLE_SHARE *) alloc_root(mem_root, sizeof(*table_share));
|
|
|
|
TABLE *table= (TABLE*) alloc_root(mem_root, sizeof(*table));
|
2006-05-19 17:34:50 +02:00
|
|
|
if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table, 0)))
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
2006-03-09 15:50:26 +01:00
|
|
|
/*
|
|
|
|
! do not touch the contents of the table
|
|
|
|
it may be in use by the injector thread
|
|
|
|
*/
|
|
|
|
MEM_ROOT *mem_root= &share->mem_root;
|
|
|
|
share->ndb_value[0]= (NdbValue*)
|
|
|
|
alloc_root(mem_root, sizeof(NdbValue) *
|
|
|
|
(table->s->fields + 2 /*extra for hidden key and part key*/));
|
|
|
|
share->ndb_value[1]= (NdbValue*)
|
|
|
|
alloc_root(mem_root, sizeof(NdbValue) *
|
|
|
|
(table->s->fields + 2 /*extra for hidden key and part key*/));
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
if (table->s->primary_key == MAX_KEY)
|
|
|
|
share->flags|= NSF_HIDDEN_PK;
|
2006-01-25 22:22:50 +01:00
|
|
|
if (table->s->blob_fields != 0)
|
|
|
|
share->flags|= NSF_BLOB_FLAG;
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
}
|
2007-08-30 11:46:30 +02:00
|
|
|
DBUG_RETURN(error);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*****************************************************************
|
|
|
|
functions called from master sql client threads
|
|
|
|
****************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
called in mysql_show_binlog_events and reset_logs to make sure we wait for
|
|
|
|
all events originating from this mysql server to arrive in the binlog
|
|
|
|
|
|
|
|
Wait for the last epoch that the last transaction is part of.
|
|
|
|
|
|
|
|
Wait a maximum of 30 seconds.
|
|
|
|
*/
|
|
|
|
static void ndbcluster_binlog_wait(THD *thd)
|
|
|
|
{
|
2006-02-01 01:12:11 +01:00
|
|
|
if (ndb_binlog_running)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_binlog_wait");
|
|
|
|
const char *save_info= thd ? thd->proc_info : 0;
|
2006-08-30 11:41:21 +02:00
|
|
|
ulonglong wait_epoch= *p_latest_trans_gci;
|
2006-01-12 19:51:02 +01:00
|
|
|
int count= 30;
|
|
|
|
if (thd)
|
|
|
|
thd->proc_info= "Waiting for ndbcluster binlog update to "
|
|
|
|
"reach current position";
|
2006-02-01 01:12:11 +01:00
|
|
|
while (count && ndb_binlog_running &&
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_latest_handled_binlog_epoch < wait_epoch)
|
|
|
|
{
|
|
|
|
count--;
|
|
|
|
sleep(1);
|
|
|
|
}
|
|
|
|
if (thd)
|
|
|
|
thd->proc_info= save_info;
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-05-05 08:45:58 +02:00
|
|
|
Called from MYSQL_BIN_LOG::reset_logs in log.cc when binlog is emptied
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
|
|
|
static int ndbcluster_reset_logs(THD *thd)
|
|
|
|
{
|
2006-02-01 01:12:11 +01:00
|
|
|
if (!ndb_binlog_running)
|
2006-01-12 19:51:02 +01:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
DBUG_ENTER("ndbcluster_reset_logs");
|
|
|
|
|
|
|
|
/*
|
|
|
|
Wait until all events originating from this mysql server have
|
|
|
|
reached the binlog before continuing with the reset
|
|
|
|
*/
|
|
|
|
ndbcluster_binlog_wait(thd);
|
|
|
|
|
|
|
|
char buf[1024];
|
|
|
|
char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_REP_TABLE);
|
|
|
|
|
2007-06-17 19:47:20 +02:00
|
|
|
run_query(thd, buf, end, NULL, TRUE);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-05-05 08:45:58 +02:00
|
|
|
Called from MYSQL_BIN_LOG::purge_logs in log.cc when the binlog "file"
|
2006-01-12 19:51:02 +01:00
|
|
|
is removed
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
ndbcluster_binlog_index_purge_file(THD *thd, const char *file)
|
|
|
|
{
|
2007-03-29 18:42:00 +02:00
|
|
|
if (!ndb_binlog_running || thd->slave_thread)
|
2006-01-12 19:51:02 +01:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
DBUG_ENTER("ndbcluster_binlog_index_purge_file");
|
|
|
|
DBUG_PRINT("enter", ("file: %s", file));
|
|
|
|
|
|
|
|
char buf[1024];
|
|
|
|
char *end= strmov(strmov(strmov(buf,
|
|
|
|
"DELETE FROM "
|
|
|
|
NDB_REP_DB "." NDB_REP_TABLE
|
|
|
|
" WHERE File='"), file), "'");
|
|
|
|
|
2007-06-17 19:47:20 +02:00
|
|
|
run_query(thd, buf, end, NULL, TRUE);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2006-09-30 21:49:46 +02:00
|
|
|
ndbcluster_binlog_log_query(handlerton *hton, THD *thd, enum_binlog_command binlog_command,
|
2006-01-12 19:51:02 +01:00
|
|
|
const char *query, uint query_length,
|
|
|
|
const char *db, const char *table_name)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_binlog_log_query");
|
|
|
|
DBUG_PRINT("enter", ("db: %s table_name: %s query: %s",
|
|
|
|
db, table_name, query));
|
2006-02-01 01:12:11 +01:00
|
|
|
enum SCHEMA_OP_TYPE type;
|
|
|
|
int log= 0;
|
|
|
|
switch (binlog_command)
|
|
|
|
{
|
|
|
|
case LOGCOM_CREATE_TABLE:
|
|
|
|
type= SOT_CREATE_TABLE;
|
2006-05-31 01:52:14 +02:00
|
|
|
DBUG_ASSERT(FALSE);
|
2006-02-01 01:12:11 +01:00
|
|
|
break;
|
|
|
|
case LOGCOM_ALTER_TABLE:
|
|
|
|
type= SOT_ALTER_TABLE;
|
2006-02-06 11:47:12 +01:00
|
|
|
log= 1;
|
2006-02-01 01:12:11 +01:00
|
|
|
break;
|
|
|
|
case LOGCOM_RENAME_TABLE:
|
|
|
|
type= SOT_RENAME_TABLE;
|
2006-05-31 01:52:14 +02:00
|
|
|
DBUG_ASSERT(FALSE);
|
2006-02-01 01:12:11 +01:00
|
|
|
break;
|
|
|
|
case LOGCOM_DROP_TABLE:
|
|
|
|
type= SOT_DROP_TABLE;
|
2006-05-31 01:52:14 +02:00
|
|
|
DBUG_ASSERT(FALSE);
|
2006-02-01 01:12:11 +01:00
|
|
|
break;
|
|
|
|
case LOGCOM_CREATE_DB:
|
|
|
|
type= SOT_CREATE_DB;
|
|
|
|
log= 1;
|
|
|
|
break;
|
|
|
|
case LOGCOM_ALTER_DB:
|
|
|
|
type= SOT_ALTER_DB;
|
|
|
|
log= 1;
|
|
|
|
break;
|
|
|
|
case LOGCOM_DROP_DB:
|
|
|
|
type= SOT_DROP_DB;
|
2006-05-31 01:52:14 +02:00
|
|
|
DBUG_ASSERT(FALSE);
|
2006-02-01 01:12:11 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (log)
|
2006-02-06 11:47:12 +01:00
|
|
|
{
|
2006-02-01 01:12:11 +01:00
|
|
|
ndbcluster_log_schema_op(thd, 0, query, query_length,
|
2006-05-31 01:52:14 +02:00
|
|
|
db, table_name, 0, 0, type,
|
2010-08-09 20:33:47 +02:00
|
|
|
0, 0);
|
2006-02-06 11:47:12 +01:00
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2006-12-20 22:57:23 +01:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
2006-12-20 22:57:23 +01:00
|
|
|
End use of the NDB Cluster binlog
|
|
|
|
- wait for binlog thread to shutdown
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
static int ndbcluster_binlog_end(THD *thd)
|
|
|
|
{
|
2006-12-20 22:57:23 +01:00
|
|
|
DBUG_ENTER("ndbcluster_binlog_end");
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-12-20 22:57:23 +01:00
|
|
|
if (!ndbcluster_binlog_inited)
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(0);
|
2006-12-20 22:57:23 +01:00
|
|
|
ndbcluster_binlog_inited= 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
#ifdef HAVE_NDB_BINLOG
|
2007-04-16 16:08:29 +02:00
|
|
|
if (ndb_util_thread_running > 0)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
Wait for util thread to die (as this uses the injector mutex)
|
|
|
|
There is a very small chance that ndb_util_thread dies and the
|
|
|
|
following mutex is freed before it's accessed. This shouldn't
|
|
|
|
however be a likely case as the ndbcluster_binlog_end is supposed to
|
|
|
|
be called before ndb_cluster_end().
|
|
|
|
*/
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&LOCK_ndb_util_thread);
|
2007-04-16 16:08:29 +02:00
|
|
|
/* Ensure mutex are not freed if ndb_cluster_end is running at same time */
|
|
|
|
ndb_util_thread_running++;
|
|
|
|
ndbcluster_terminating= 1;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_signal(&COND_ndb_util_thread);
|
2007-04-16 16:08:29 +02:00
|
|
|
while (ndb_util_thread_running > 1)
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_wait(&COND_ndb_util_ready, &LOCK_ndb_util_thread);
|
2007-04-16 16:08:29 +02:00
|
|
|
ndb_util_thread_running--;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&LOCK_ndb_util_thread);
|
2007-04-16 16:08:29 +02:00
|
|
|
}
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/* wait for injector thread to finish */
|
2007-02-06 22:06:13 +01:00
|
|
|
ndbcluster_binlog_terminating= 1;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2007-02-06 22:06:13 +01:00
|
|
|
while (ndb_binlog_thread_running > 0)
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_wait(&injector_cond, &injector_mutex);
|
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-12-20 22:57:23 +01:00
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_destroy(&injector_mutex);
|
|
|
|
mysql_cond_destroy(&injector_cond);
|
|
|
|
mysql_mutex_destroy(&ndb_schema_share_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
#endif
|
2007-02-06 22:06:13 +01:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*****************************************************************
|
|
|
|
functions called from slave sql client threads
|
|
|
|
****************************************************************/
|
|
|
|
static void ndbcluster_reset_slave(THD *thd)
|
|
|
|
{
|
2006-02-01 01:12:11 +01:00
|
|
|
if (!ndb_binlog_running)
|
2006-01-12 19:51:02 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
DBUG_ENTER("ndbcluster_reset_slave");
|
|
|
|
char buf[1024];
|
|
|
|
char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_APPLY_TABLE);
|
2007-06-17 19:47:20 +02:00
|
|
|
run_query(thd, buf, end, NULL, TRUE);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Initialize the binlog part of the ndb handlerton
|
|
|
|
*/
|
2008-02-20 12:52:04 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
Upon the SQL command FLUSH LOGS, we need to ensure that all outstanding
|
|
|
|
ndb data to be logged has made it to the binary log to get a deterministic
|
|
|
|
behavior on the rotation of the log.
|
|
|
|
*/
|
|
|
|
static bool ndbcluster_flush_logs(handlerton *hton)
|
|
|
|
{
|
|
|
|
ndbcluster_binlog_wait(current_thd);
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
|
2006-09-30 21:49:46 +02:00
|
|
|
static int ndbcluster_binlog_func(handlerton *hton, THD *thd,
|
|
|
|
enum_binlog_func fn,
|
|
|
|
void *arg)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
switch(fn)
|
|
|
|
{
|
|
|
|
case BFN_RESET_LOGS:
|
|
|
|
ndbcluster_reset_logs(thd);
|
|
|
|
break;
|
|
|
|
case BFN_RESET_SLAVE:
|
|
|
|
ndbcluster_reset_slave(thd);
|
|
|
|
break;
|
|
|
|
case BFN_BINLOG_WAIT:
|
|
|
|
ndbcluster_binlog_wait(thd);
|
|
|
|
break;
|
|
|
|
case BFN_BINLOG_END:
|
|
|
|
ndbcluster_binlog_end(thd);
|
|
|
|
break;
|
|
|
|
case BFN_BINLOG_PURGE_FILE:
|
|
|
|
ndbcluster_binlog_index_purge_file(thd, (const char *)arg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void ndbcluster_binlog_init_handlerton()
|
|
|
|
{
|
2006-09-15 19:28:00 +02:00
|
|
|
handlerton *h= ndbcluster_hton;
|
2008-02-20 12:52:04 +01:00
|
|
|
h->flush_logs= ndbcluster_flush_logs;
|
2006-09-15 19:28:00 +02:00
|
|
|
h->binlog_func= ndbcluster_binlog_func;
|
|
|
|
h->binlog_log_query= ndbcluster_binlog_log_query;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2006-12-01 15:49:07 +01:00
|
|
|
check the availability of the ndb_apply_status share
|
2006-01-12 19:51:02 +01:00
|
|
|
- return share, but do not increase refcount
|
|
|
|
- return 0 if there is no share
|
|
|
|
*/
|
2006-12-01 15:49:07 +01:00
|
|
|
static NDB_SHARE *ndbcluster_check_ndb_apply_status_share()
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndbcluster_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2009-10-14 18:37:38 +02:00
|
|
|
void *share= my_hash_search(&ndbcluster_open_tables,
|
|
|
|
(uchar*) NDB_APPLY_TABLE_FILE,
|
|
|
|
sizeof(NDB_APPLY_TABLE_FILE) - 1);
|
2006-12-01 15:49:07 +01:00
|
|
|
DBUG_PRINT("info",("ndbcluster_check_ndb_apply_status_share %s 0x%lx",
|
2006-11-27 00:47:38 +01:00
|
|
|
NDB_APPLY_TABLE_FILE, (long) share));
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndbcluster_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
return (NDB_SHARE*) share;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-03-01 13:31:21 +01:00
|
|
|
check the availability of the schema share
|
2006-01-12 19:51:02 +01:00
|
|
|
- return share, but do not increase refcount
|
|
|
|
- return 0 if there is no share
|
|
|
|
*/
|
2006-12-01 15:49:07 +01:00
|
|
|
static NDB_SHARE *ndbcluster_check_ndb_schema_share()
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndbcluster_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2009-10-14 18:37:38 +02:00
|
|
|
void *share= my_hash_search(&ndbcluster_open_tables,
|
|
|
|
(uchar*) NDB_SCHEMA_TABLE_FILE,
|
|
|
|
sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
|
2006-12-01 15:49:07 +01:00
|
|
|
DBUG_PRINT("info",("ndbcluster_check_ndb_schema_share %s 0x%lx",
|
2006-11-27 00:47:38 +01:00
|
|
|
NDB_SCHEMA_TABLE_FILE, (long) share));
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndbcluster_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
return (NDB_SHARE*) share;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-12-01 15:49:07 +01:00
|
|
|
Create the ndb_apply_status table
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
2006-12-01 15:49:07 +01:00
|
|
|
static int ndbcluster_create_ndb_apply_status_table(THD *thd)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
DBUG_ENTER("ndbcluster_create_ndb_apply_status_table");
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
Check if we already have the apply status table.
|
|
|
|
If so it should have been discovered at startup
|
|
|
|
and thus have a share
|
|
|
|
*/
|
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndbcluster_check_ndb_apply_status_share())
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
|
|
|
|
if (g_ndb_cluster_connection->get_no_ready() <= 0)
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
|
2009-06-19 10:24:43 +02:00
|
|
|
char buf[1024 + 1], *end;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-01-12 19:51:02 +01:00
|
|
|
sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_APPLY_TABLE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
Check if apply status table exists in MySQL "dictionary"
|
|
|
|
if so, remove it since there is none in Ndb
|
|
|
|
*/
|
|
|
|
{
|
2009-06-19 10:24:43 +02:00
|
|
|
build_table_filename(buf, sizeof(buf) - 1,
|
2006-08-02 17:57:06 +02:00
|
|
|
NDB_REP_DB, NDB_APPLY_TABLE, reg_ext, 0);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_file_delete(key_file_frm, buf, MYF(0));
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Note, updating this table schema must be reflected in ndb_restore
|
|
|
|
*/
|
|
|
|
end= strmov(buf, "CREATE TABLE IF NOT EXISTS "
|
|
|
|
NDB_REP_DB "." NDB_APPLY_TABLE
|
|
|
|
" ( server_id INT UNSIGNED NOT NULL,"
|
|
|
|
" epoch BIGINT UNSIGNED NOT NULL, "
|
2007-03-07 19:39:45 +01:00
|
|
|
" log_name VARCHAR(255) BINARY NOT NULL, "
|
|
|
|
" start_pos BIGINT UNSIGNED NOT NULL, "
|
|
|
|
" end_pos BIGINT UNSIGNED NOT NULL, "
|
2007-12-14 17:11:49 +01:00
|
|
|
" PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB CHARACTER SET latin1");
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2008-04-09 18:42:05 +02:00
|
|
|
const int no_print_error[6]= {ER_TABLE_EXISTS_ERROR,
|
2007-08-30 10:41:19 +02:00
|
|
|
701,
|
2007-11-02 23:44:17 +01:00
|
|
|
702,
|
2008-04-09 18:42:05 +02:00
|
|
|
721, // Table already exists
|
2007-08-30 10:41:19 +02:00
|
|
|
4009,
|
|
|
|
0}; // do not print error 701 etc
|
2007-06-17 19:47:20 +02:00
|
|
|
run_query(thd, buf, end, no_print_error, TRUE);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2006-03-01 13:31:21 +01:00
|
|
|
Create the schema table
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
|
|
|
static int ndbcluster_create_schema_table(THD *thd)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_create_schema_table");
|
|
|
|
|
|
|
|
/*
|
|
|
|
Check if we already have the schema table.
|
|
|
|
If so it should have been discovered at startup
|
|
|
|
and thus have a share
|
|
|
|
*/
|
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndbcluster_check_ndb_schema_share())
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
|
|
|
|
if (g_ndb_cluster_connection->get_no_ready() <= 0)
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
|
2009-06-19 10:24:43 +02:00
|
|
|
char buf[1024 + 1], *end;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-01-12 19:51:02 +01:00
|
|
|
sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_SCHEMA_TABLE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
Check if schema table exists in MySQL "dictionary"
|
|
|
|
if so, remove it since there is none in Ndb
|
|
|
|
*/
|
|
|
|
{
|
2009-06-19 10:24:43 +02:00
|
|
|
build_table_filename(buf, sizeof(buf) - 1,
|
2006-08-02 17:57:06 +02:00
|
|
|
NDB_REP_DB, NDB_SCHEMA_TABLE, reg_ext, 0);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_file_delete(key_file_frm, buf, MYF(0));
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Update the defines below to reflect the table schema
|
|
|
|
*/
|
|
|
|
end= strmov(buf, "CREATE TABLE IF NOT EXISTS "
|
|
|
|
NDB_REP_DB "." NDB_SCHEMA_TABLE
|
2006-03-23 04:59:14 +01:00
|
|
|
" ( db VARBINARY(63) NOT NULL,"
|
|
|
|
" name VARBINARY(63) NOT NULL,"
|
2006-01-12 19:51:02 +01:00
|
|
|
" slock BINARY(32) NOT NULL,"
|
2006-03-23 04:59:14 +01:00
|
|
|
" query BLOB NOT NULL,"
|
2006-01-12 19:51:02 +01:00
|
|
|
" node_id INT UNSIGNED NOT NULL,"
|
|
|
|
" epoch BIGINT UNSIGNED NOT NULL,"
|
|
|
|
" id INT UNSIGNED NOT NULL,"
|
|
|
|
" version INT UNSIGNED NOT NULL,"
|
|
|
|
" type INT UNSIGNED NOT NULL,"
|
2007-12-14 17:11:49 +01:00
|
|
|
" PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB CHARACTER SET latin1");
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2008-04-09 18:42:05 +02:00
|
|
|
const int no_print_error[6]= {ER_TABLE_EXISTS_ERROR,
|
2007-08-30 10:41:19 +02:00
|
|
|
701,
|
2007-11-02 23:44:17 +01:00
|
|
|
702,
|
2008-04-09 18:42:05 +02:00
|
|
|
721, // Table already exists
|
2007-08-30 10:41:19 +02:00
|
|
|
4009,
|
|
|
|
0}; // do not print error 701 etc
|
2007-06-17 19:47:20 +02:00
|
|
|
run_query(thd, buf, end, no_print_error, TRUE);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
2006-04-10 16:08:40 +02:00
|
|
|
int ndbcluster_setup_binlog_table_shares(THD *thd)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_schema_share &&
|
|
|
|
ndbcluster_check_ndb_schema_share() == 0)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-04-10 16:08:40 +02:00
|
|
|
ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_SCHEMA_TABLE);
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_schema_share)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-04-10 16:08:40 +02:00
|
|
|
ndbcluster_create_schema_table(thd);
|
|
|
|
// always make sure we create the 'schema' first
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_schema_share)
|
2006-04-10 16:08:40 +02:00
|
|
|
return 1;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_apply_status_share &&
|
|
|
|
ndbcluster_check_ndb_apply_status_share() == 0)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-04-10 16:08:40 +02:00
|
|
|
ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_APPLY_TABLE);
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_apply_status_share)
|
2006-04-10 16:08:40 +02:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
ndbcluster_create_ndb_apply_status_table(thd);
|
|
|
|
if (!ndb_apply_status_share)
|
2006-04-10 16:08:40 +02:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!ndbcluster_find_all_files(thd))
|
|
|
|
{
|
|
|
|
ndb_binlog_tables_inited= TRUE;
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2008-02-28 18:55:46 +01:00
|
|
|
sql_print_information("NDB Binlog: ndb tables writable");
|
2010-08-12 15:50:23 +02:00
|
|
|
close_cached_tables(NULL, NULL, FALSE, LONG_TIMEOUT);
|
2006-04-10 16:08:40 +02:00
|
|
|
/* Signal injector thread that all is setup */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-04-10 16:08:40 +02:00
|
|
|
return 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Defines and struct for schema table.
|
|
|
|
Should reflect table definition above.
|
|
|
|
*/
|
|
|
|
#define SCHEMA_DB_I 0u
|
|
|
|
#define SCHEMA_NAME_I 1u
|
|
|
|
#define SCHEMA_SLOCK_I 2u
|
|
|
|
#define SCHEMA_QUERY_I 3u
|
|
|
|
#define SCHEMA_NODE_ID_I 4u
|
|
|
|
#define SCHEMA_EPOCH_I 5u
|
|
|
|
#define SCHEMA_ID_I 6u
|
|
|
|
#define SCHEMA_VERSION_I 7u
|
|
|
|
#define SCHEMA_TYPE_I 8u
|
|
|
|
#define SCHEMA_SIZE 9u
|
|
|
|
#define SCHEMA_SLOCK_SIZE 32u
|
|
|
|
|
2006-03-21 16:54:56 +01:00
|
|
|
struct Cluster_schema
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-06-19 11:45:34 +02:00
|
|
|
uchar db_length;
|
2006-01-12 19:51:02 +01:00
|
|
|
char db[64];
|
2006-06-19 11:45:34 +02:00
|
|
|
uchar name_length;
|
2006-01-12 19:51:02 +01:00
|
|
|
char name[64];
|
2006-06-19 11:45:34 +02:00
|
|
|
uchar slock_length;
|
2006-01-12 19:51:02 +01:00
|
|
|
uint32 slock[SCHEMA_SLOCK_SIZE/4];
|
|
|
|
unsigned short query_length;
|
2006-03-23 04:59:14 +01:00
|
|
|
char *query;
|
2006-01-12 19:51:02 +01:00
|
|
|
Uint64 epoch;
|
|
|
|
uint32 node_id;
|
|
|
|
uint32 id;
|
|
|
|
uint32 version;
|
|
|
|
uint32 type;
|
2007-04-18 16:02:20 +02:00
|
|
|
uint32 any_value;
|
2006-01-12 19:51:02 +01:00
|
|
|
};
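As a hedged sketch (hypothetical helper, not the real transfer function,
which follows below), the SCHEMA_*_I defines index table->field directly
when unpacking a row of the schema table into this struct:

/* sketch only: hypothetical helper using the index defines above */
static void example_read_schema_ids(TABLE *table, Cluster_schema *s)
{
  Field **field= table->field;
  /* the numeric columns can be read with Field::val_int() */
  s->node_id= (uint32) field[SCHEMA_NODE_ID_I]->val_int();
  s->id=      (uint32) field[SCHEMA_ID_I]->val_int();
  s->version= (uint32) field[SCHEMA_VERSION_I]->val_int();
  s->type=    (uint32) field[SCHEMA_TYPE_I]->val_int();
}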
|
|
|
|
|
2009-09-10 11:18:29 +02:00
|
|
|
static void print_could_not_discover_error(THD *thd,
|
|
|
|
const Cluster_schema *schema)
|
|
|
|
{
|
|
|
|
sql_print_error("NDB Binlog: Could not discover table '%s.%s' from "
|
|
|
|
"binlog schema event '%s' from node %d. "
|
|
|
|
"my_errno: %d",
|
|
|
|
schema->db, schema->name, schema->query,
|
|
|
|
schema->node_id, my_errno);
|
|
|
|
List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
|
|
|
|
MYSQL_ERROR *err;
|
|
|
|
while ((err= it++))
|
|
|
|
sql_print_warning("NDB Binlog: (%d)%s", err->get_sql_errno(),
|
|
|
|
err->get_message_text());
|
|
|
|
}
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
|
|
|
Transfer schema table data into the corresponding struct
|
|
|
|
*/
|
2006-03-23 04:59:14 +01:00
|
|
|
static void ndbcluster_get_schema(NDB_SHARE *share,
|
2006-03-21 16:54:56 +01:00
|
|
|
Cluster_schema *s)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-03-23 04:59:14 +01:00
|
|
|
TABLE *table= share->table;
|
2006-01-12 19:51:02 +01:00
|
|
|
Field **field;
|
2006-03-23 04:59:14 +01:00
|
|
|
/* unpack blob values */
|
2007-05-10 11:59:39 +02:00
|
|
|
uchar* blobs_buffer= 0;
|
2006-03-23 04:59:14 +01:00
|
|
|
uint blobs_buffer_size= 0;
|
2006-06-04 17:52:22 +02:00
|
|
|
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
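/*
  In debug builds Field::val()/store() assert that the corresponding bit
  is set in table->read_set/write_set, so install a temporary
  all-columns-used map while unpacking columns not marked by the
  statement; the map is restored with dbug_tmp_restore_column_map()
  once unpacking is done.
*/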
|
2006-03-23 04:59:14 +01:00
|
|
|
{
|
|
|
|
ptrdiff_t ptrdiff= 0;
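/* ptrdiff == 0: unpack blob values relative to the record buffer
   the fields already point at */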
|
|
|
|
int ret= get_ndb_blobs_value(table, share->ndb_value[0],
|
|
|
|
blobs_buffer, blobs_buffer_size,
|
|
|
|
ptrdiff);
|
|
|
|
if (ret != 0)
|
|
|
|
{
|
2010-07-08 23:20:08 +02:00
|
|
|
my_free(blobs_buffer);
|
2006-03-23 04:59:14 +01:00
|
|
|
DBUG_PRINT("info", ("blob read error"));
|
2006-06-04 17:52:22 +02:00
|
|
|
DBUG_ASSERT(FALSE);
|
2006-03-23 04:59:14 +01:00
|
|
|
}
|
|
|
|
}
|
2007-05-10 11:59:39 +02:00
|
|
|
/* db varchar 1 length uchar */
|
2006-01-12 19:51:02 +01:00
|
|
|
field= table->field;
|
|
|
|
s->db_length= *(uint8*)(*field)->ptr;
|
|
|
|
DBUG_ASSERT(s->db_length <= (*field)->field_length);
|
|
|
|
DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->db));
|
|
|
|
memcpy(s->db, (*field)->ptr + 1, s->db_length);
|
|
|
|
s->db[s->db_length]= 0;
|
2007-05-10 11:59:39 +02:00
|
|
|
/* name varchar 1 length uchar */
|
2006-01-12 19:51:02 +01:00
|
|
|
field++;
|
|
|
|
s->name_length= *(uint8*)(*field)->ptr;
|
|
|
|
DBUG_ASSERT(s->name_length <= (*field)->field_length);
|
|
|
|
DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->name));
|
|
|
|
memcpy(s->name, (*field)->ptr + 1, s->name_length);
|
|
|
|
s->name[s->name_length]= 0;
|
|
|
|
/* slock fixed length */
|
|
|
|
field++;
|
|
|
|
s->slock_length= (*field)->field_length;
|
|
|
|
DBUG_ASSERT((*field)->field_length == sizeof(s->slock));
|
|
|
|
memcpy(s->slock, (*field)->ptr, s->slock_length);
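/* copied verbatim as a fixed 32-byte block (SCHEMA_SLOCK_SIZE);
   the slock bitmap is used by schema distribution to track
   acknowledgements */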
|
2006-03-23 04:59:14 +01:00
|
|
|
/* query blob */
|
2006-01-12 19:51:02 +01:00
|
|
|
field++;
|
2006-03-23 04:59:14 +01:00
|
|
|
{
|
|
|
|
Field_blob *field_blob= (Field_blob*)(*field);
|
|
|
|
uint blob_len= field_blob->get_length((*field)->ptr);
|
2007-05-10 11:59:39 +02:00
|
|
|
uchar *blob_ptr= 0;
|
2006-03-23 04:59:14 +01:00
|
|
|
field_blob->get_ptr(&blob_ptr);
|
2008-02-06 11:44:07 +01:00
|
|
|
DBUG_ASSERT(blob_len == 0 || blob_ptr != 0);
|
2006-03-23 04:59:14 +01:00
|
|
|
s->query_length= blob_len;
|
2007-05-10 11:59:39 +02:00
|
|
|
s->query= sql_strmake((char*) blob_ptr, blob_len);
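/* sql_strmake() copies the query out of blobs_buffer into statement
   memory, so freeing blobs_buffer below leaves s->query valid */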
|
2006-03-23 04:59:14 +01:00
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
/* node_id */
|
|
|
|
field++;
|
|
|
|
s->node_id= ((Field_long *)*field)->val_int();
|
|
|
|
/* epoch */
|
|
|
|
field++;
|
|
|
|
s->epoch= ((Field_long *)*field)->val_int();
|
|
|
|
/* id */
|
|
|
|
field++;
|
|
|
|
s->id= ((Field_long *)*field)->val_int();
|
|
|
|
/* version */
|
|
|
|
field++;
|
|
|
|
s->version= ((Field_long *)*field)->val_int();
|
|
|
|
/* type */
|
|
|
|
field++;
|
|
|
|
s->type= ((Field_long *)*field)->val_int();
|
2006-03-23 04:59:14 +01:00
|
|
|
/* free blobs buffer */
|
Bug#34043: Server loops excessively in _checkchunk() when safemalloc is enabled
Essentially, the problem is that safemalloc is excruciatingly
slow, as it checks all allocated blocks for overrun at each
memory management primitive, yielding an almost exponential
slowdown for the memory management functions (malloc, realloc,
free). The overrun check basically consists of verifying some
bytes of a block for certain magic keys, which catches some
simple forms of overrun. Other minor problems are a violation
of aliasing rules and that its own internal list of blocks
is prone to corruption.
Another issue with safemalloc is the maintenance cost,
as the tool has a significant impact on the server code.
Given the number of memory debuggers available nowadays,
especially those that are provided with the platform malloc
implementation, maintenance of an in-house and largely obsolete
memory debugger becomes a burden that is not worth the effort,
given its slowness and lack of support for detecting the more
common forms of heap corruption.
Since third-party tools can provide the same functionality at a
lower or comparable performance cost, the solution is to simply
remove safemalloc.
The removal of safemalloc also allows a simplification of the
malloc wrappers, removing quite a bit of kludge: the redefinition
of my_malloc and my_free, and the removal of the unused second
argument of my_free. Since free() always checks whether the
supplied pointer is null, redundant checks are also removed.
Also, this patch adds unit testing for my_malloc and moves
the my_realloc implementation into the same file as the other
memory allocation primitives.
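A before/after sketch of the wrapper simplification (the call-site shape is illustrative; the second argument shown is the old myf flags parameter):
  /* Before: two-argument my_free(), often guarded by a null check */
  if (blobs_buffer)
    my_free(blobs_buffer, MYF(0));
  /* After: my_free() takes a single argument and is safe on NULL */
  my_free(blobs_buffer);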
2010-07-08 23:20:08 +02:00
|
|
|
my_free(blobs_buffer);
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that were found necessary while testing the handler changes.
Changes that require code changes in other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite.)
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
do statement-specific cleanups.
(The only case it's not called is if we force the file to be closed.)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only need to read these
columns.
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only need to update these
columns.
The above bitmaps should now be up to date in all contexts
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this call. As the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one uses the normal bitmap functions in my_bitmap.c instead
of handler-dedicated bitmap functions.)
- field->query_id is removed. One should instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIEVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, dbug_tmp_use_all_columns() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler.)
- If one needs to temporarily set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This required some trivial variable name changes in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS) is true.
(stats.records is not supposed to be an exact value. It only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path.)
- Non-virtual handler::init() function added for caching of virtual
constants from the engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precautions
in remembering any hidden primary key, to be able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed unneeded functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed unused variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is set at engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns were marked in read_set and only updated
columns were marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of being set to an integer value, it now takes the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
for doing the sort and choosing the rows.
- The TABLE_SHARE object has an 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object also has two pointers to bitmaps, read_set and write_set,
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added an extra argument to Item::walk() to indicate if we should also
traverse subqueries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for the few cases where we need
to set up new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as used in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_update() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if the handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell the handler we are going to update
columns that are part of an auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that are part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to the handler to allow
it to quickly know that it only needs to read columns that are part
of the key. (The handler can also use the column map for detecting this,
but a simpler/faster handler can just monitor the extra() call.)
- table->mark_columns_used_by_index_no_reset() to, in addition to other columns,
also mark all columns that are used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys the set of all keys that are used in the query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered, which is like Field->part_of_key
but a field in the clustered key is not assumed to be part of all indexes.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporarily
mark all columns as usable. The 'dbug_' version is primarily intended for use
inside a handler when it wants to just call the Field::store() & Field::val()
functions, but doesn't need the column maps set for any other usage.
(i.e. bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that return
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() no longer sets up a temporary MEM_ROOT
object as a thread-specific variable for the handler. Instead we
send the to-be-used MEM_ROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases
(ALTER TABLE, when using triggers, auto_increment fields etc).
(Could potentially result in wrong values inserted in table handlers
relying on the old column maps or field->query_id being correct.)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE), which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back".
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get a duplicate row on insert, change the column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handlers that don't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields had been
automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key would fail if the field was not
declared as NOT NULL.
Cleanups:
- Removed the unused condition argument to setup_tables
- Removed the unneeded item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up lines found to be too long
- Moved some variable declarations to the start of functions for better code
readability.
- Removed some unused arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of whether the timestamp field was set by the statement.
- Removed calls to free_io_cache() as this is now done automatically in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparison with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly.)
Lars has promised to do this.
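As a rough sketch of the records()/HA_HAS_RECORDS interplay described above (ha_example is a hypothetical engine, not part of this changeset):
  /* Engine that can produce an exact row count cheaply */
  ulonglong ha_example::table_flags() const
  {
    return HA_HAS_RECORDS;      /* tells MySQL that ::records() works */
  }

  ha_rows ha_example::records()
  {
    return stats.records;       /* exact value, used to answer COUNT(*) */
  }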
2006-06-04 17:52:22 +02:00
|
|
|
dbug_tmp_restore_column_map(table->read_set, old_map);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
helper function to pack an ndb varchar
|
|
|
|
*/
|
2007-04-03 07:20:55 +02:00
|
|
|
char *ndb_pack_varchar(const NDBCOL *col, char *buf,
|
|
|
|
const char *str, int sz)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
switch (col->getArrayType())
|
|
|
|
{
|
|
|
|
case NDBCOL::ArrayTypeFixed:
|
|
|
|
memcpy(buf, str, sz);
|
|
|
|
break;
|
|
|
|
case NDBCOL::ArrayTypeShortVar:
|
2006-06-19 11:45:34 +02:00
|
|
|
*(uchar*)buf= (uchar)sz;
|
2006-01-12 19:51:02 +01:00
|
|
|
memcpy(buf + 1, str, sz);
|
|
|
|
break;
|
|
|
|
case NDBCOL::ArrayTypeMediumVar:
|
|
|
|
int2store(buf, sz);
|
|
|
|
memcpy(buf + 2, str, sz);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
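For reference, a hedged sketch of the buffer layouts the helper produces ('col' is a hypothetical NDBCOL*; the buffer size is an assumption):
  /* ArrayTypeFixed:     buf[0..sz-1] = data            (no length prefix)
     ArrayTypeShortVar:  buf[0]       = length byte, data follows
     ArrayTypeMediumVar: buf[0..1]    = little-endian length, data follows */
  char buf[256 + 2];                        /* column length + prefix bytes */
  ndb_pack_varchar(col, buf, "mysql", 5);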
|
|
|
|
|
2006-05-31 16:16:03 +02:00
|
|
|
/*
|
|
|
|
acknowledge handling of schema operation
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ndbcluster_update_slock(THD *thd,
|
|
|
|
const char *db,
|
|
|
|
const char *table_name)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_update_slock");
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_schema_share)
|
2006-05-31 16:16:03 +02:00
|
|
|
{
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
const NdbError *ndb_error= 0;
|
|
|
|
uint32 node_id= g_ndb_cluster_connection->node_id();
|
|
|
|
Ndb *ndb= check_ndb_in_thd(thd);
|
|
|
|
char save_db[FN_HEADLEN];
|
|
|
|
strcpy(save_db, ndb->getDatabaseName());
|
|
|
|
|
|
|
|
char tmp_buf[FN_REFLEN];
|
|
|
|
NDBDICT *dict= ndb->getDictionary();
|
|
|
|
ndb->setDatabaseName(NDB_REP_DB);
|
|
|
|
Ndb_table_guard ndbtab_g(dict, NDB_SCHEMA_TABLE);
|
|
|
|
const NDBTAB *ndbtab= ndbtab_g.get_table();
|
|
|
|
NdbTransaction *trans= 0;
|
|
|
|
int retries= 100;
|
2007-04-25 15:25:23 +02:00
|
|
|
int retry_sleep= 10; /* 10 milliseconds, transaction */
|
2006-05-31 16:16:03 +02:00
|
|
|
const NDBCOL *col[SCHEMA_SIZE];
|
|
|
|
unsigned sz[SCHEMA_SIZE];
|
|
|
|
|
|
|
|
MY_BITMAP slock;
|
|
|
|
uint32 bitbuf[SCHEMA_SLOCK_SIZE/4];
|
|
|
|
bitmap_init(&slock, bitbuf, sizeof(bitbuf)*8, false);
|
|
|
|
|
|
|
|
if (ndbtab == 0)
|
|
|
|
{
|
|
|
|
abort();
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
uint i;
|
|
|
|
for (i= 0; i < SCHEMA_SIZE; i++)
|
|
|
|
{
|
|
|
|
col[i]= ndbtab->getColumn(i);
|
|
|
|
if (i != SCHEMA_QUERY_I)
|
|
|
|
{
|
|
|
|
sz[i]= col[i]->getLength();
|
|
|
|
DBUG_ASSERT(sz[i] <= sizeof(tmp_buf));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
while (1)
|
|
|
|
{
|
|
|
|
if ((trans= ndb->startTransaction()) == 0)
|
|
|
|
goto err;
|
|
|
|
{
|
|
|
|
NdbOperation *op= 0;
|
|
|
|
int r= 0;
|
|
|
|
|
|
|
|
/* read the bitmap exclusively */
|
|
|
|
r|= (op= trans->getNdbOperation(ndbtab)) == 0;
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
r|= op->readTupleExclusive();
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
|
|
|
|
/* db */
|
|
|
|
ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
|
|
|
|
r|= op->equal(SCHEMA_DB_I, tmp_buf);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* name */
|
|
|
|
ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
|
|
|
|
strlen(table_name));
|
|
|
|
r|= op->equal(SCHEMA_NAME_I, tmp_buf);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* slock */
|
|
|
|
r|= op->getValue(SCHEMA_SLOCK_I, (char*)slock.bitmap) == 0;
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
}
|
|
|
|
if (trans->execute(NdbTransaction::NoCommit))
|
|
|
|
goto err;
|
|
|
|
bitmap_clear_bit(&slock, node_id);
|
|
|
|
{
|
|
|
|
NdbOperation *op= 0;
|
|
|
|
int r= 0;
|
|
|
|
|
|
|
|
/* now update the tuple */
|
|
|
|
r|= (op= trans->getNdbOperation(ndbtab)) == 0;
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
r|= op->updateTuple();
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
|
|
|
|
/* db */
|
|
|
|
ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
|
|
|
|
r|= op->equal(SCHEMA_DB_I, tmp_buf);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* name */
|
|
|
|
ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
|
|
|
|
strlen(table_name));
|
|
|
|
r|= op->equal(SCHEMA_NAME_I, tmp_buf);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* slock */
|
|
|
|
r|= op->setValue(SCHEMA_SLOCK_I, (char*)slock.bitmap);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* node_id */
|
|
|
|
r|= op->setValue(SCHEMA_NODE_ID_I, node_id);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* type */
|
|
|
|
r|= op->setValue(SCHEMA_TYPE_I, (uint32)SOT_CLEAR_SLOCK);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
}
|
|
|
|
if (trans->execute(NdbTransaction::Commit) == 0)
|
|
|
|
{
|
|
|
|
dict->forceGCPWait();
|
|
|
|
DBUG_PRINT("info", ("node %d cleared lock on '%s.%s'",
|
|
|
|
node_id, db, table_name));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
err:
|
|
|
|
const NdbError *this_error= trans ?
|
|
|
|
&trans->getNdbError() : &ndb->getNdbError();
|
|
|
|
if (this_error->status == NdbError::TemporaryError)
|
|
|
|
{
|
|
|
|
if (retries--)
|
|
|
|
{
|
|
|
|
if (trans)
|
|
|
|
ndb->closeTransaction(trans);
|
2007-04-25 15:25:23 +02:00
|
|
|
my_sleep(retry_sleep);
|
2006-05-31 16:16:03 +02:00
|
|
|
continue; // retry
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ndb_error= this_error;
|
|
|
|
break;
|
|
|
|
}
|
2007-01-29 00:47:35 +01:00
|
|
|
|
2006-05-31 16:16:03 +02:00
|
|
|
if (ndb_error)
|
Fix for BUG#11755168 '46895: test "outfile_loaddata" fails (reproducible)'.
In sql_class.cc, 'row_count', of type 'ha_rows', was used as last argument for
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD which is
"Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld".
So 'ha_rows' was used as 'long'.
On SPARC32 Solaris builds, 'long' is 4 bytes and 'ha_rows' is 'longlong' i.e. 8 bytes.
So the printf-like code was reading only the first 4 bytes.
Because the CPU is big-endian, 1LL is 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x01
so the first four bytes yield 0. So the warning message had "row 0" instead of
"row 1" in test outfile_loaddata.test:
-Warning 1366 Incorrect string value: '\xE1\xE2\xF7' for column 'b' at row 1
+Warning 1366 Incorrect string value: '\xE1\xE2\xF7' for column 'b' at row 0
All error-messaging functions which internally invoke some printf-like function
are potential candidates for such mistakes.
One apparently easy way to catch such mistakes is to use
ATTRIBUTE_FORMAT (from my_attribute.h).
But this works only when the call site has both:
a) the format as a string literal
b) the types of arguments.
So:
func(ER(ER_BLAH), 10);
will silently not be checked, because ER(ER_BLAH) is not known at
compile time (it is known at run-time, and depends on the chosen
language).
And
func("%s", a va_list argument);
has the same problem, as the *real* type of arguments is not
known at this site at compile time (it's known in some caller).
Moreover,
func(ER(ER_BLAH));
though possibly correct (if ER(ER_BLAH) has no '%' markers), will not
compile (gcc says "error: format not a string literal and no format
arguments").
Consequences:
1) ATTRIBUTE_FORMAT is here added only to functions which in practice
take "string literal" formats: "my_error_reporter" and "print_admin_msg".
2) it cannot be added to the other functions: my_error(),
push_warning_printf(), Table_check_intact::report_error(),
general_log_print().
To do a one-time check of functions listed in (2), the following
"static code analysis" has been done:
1) replace
my_error(ER_xxx, arguments for substitution in format)
with the equivalent
my_printf_error(ER_xxx,ER(ER_xxx), arguments for substitution in
format),
so that we have ER(ER_xxx) and the arguments *in the same call site*
2) add ATTRIBUTE_FORMAT to push_warning_printf(),
Table_check_intact::report_error(), general_log_print()
3) replace ER(xxx) with the hard-coded English text found in
errmsg.txt (like: ER(ER_UNKNOWN_ERROR) is replaced with
"Unknown error"), so that a call site has the format as string literal
4) this way, ATTRIBUTE_FORMAT can effectively do its job
5) compile, fix errors detected by ATTRIBUTE_FORMAT
6) revert steps 1-2-3.
The present patch has no compiler error when submitted again to the
static code analysis above.
It cannot catch all problems though: see Field::set_warning(), in
which a call to push_warning_printf() has a variable error
(thus, not replaceable by a string literal); I checked set_warning() calls
by hand though.
See also WL 5883 for one proposal to avoid such bugs from appearing
again in the future.
The issues fixed in the patch are:
a) mismatch in types (like 'int' passed to '%ld')
b) more arguments passed than specified in the format.
This patch resolves mismatches by changing the type/number of arguments,
not by changing error messages of sql/share/errmsg.txt. The latter would be wrong,
per the following old rule: errmsg.txt must be as stable as possible; no insertions
or deletions of messages, no changes of type or number of printf-like format specifiers,
are allowed, as long as the change impacts a message already released in a GA version.
If this rule is not followed:
- Connectors, which use error message numbers, will be confused (by insertions/deletions
of messages)
- using errmsg.sys of MySQL 5.1.n with mysqld of MySQL 5.1.(n+1)
could produce wrong messages or crash; such usage can easily happen if
installing 5.1.(n+1) while /etc/my.cnf still has --language=/path/to/5.1.n/xxx;
or if copying mysqld from 5.1.(n+1) into a 5.1.n installation.
When fixing b), I have verified that the superfluous arguments were not used in the format
in the first 5.1 GA (5.1.30 'bteam@astra04-20081114162938-z8mctjp6st27uobm').
Had they been used, then passing them today, even if the message doesn't use them
anymore, would have been necessary, as explained above.
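A sketch of the attribute usage described above (the prototype is illustrative, not the exact declaration used in the patch):
  /* Format string is parameter 2; variadic arguments start at 3 */
  void print_admin_msg(THD *thd, const char *fmt, ...)
    ATTRIBUTE_FORMAT(printf, 2, 3);

  /* With this in place, gcc rejects e.g. passing a ha_rows (longlong)
     where the format says '%ld'; the fix is to adjust the argument
     (for instance casting to (long)), never to edit errmsg.txt. */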
2011-05-16 22:04:01 +02:00
|
|
|
{
|
|
|
|
char buf[1024];
|
|
|
|
my_snprintf(buf, sizeof(buf), "Could not release lock on '%s.%s'",
|
|
|
|
db, table_name);
|
2009-09-10 11:18:29 +02:00
|
|
|
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
2006-05-31 16:16:03 +02:00
|
|
|
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
|
Fix for BUG#11755168 '46895: test "outfile_loaddata" fails (reproducible)'.
2011-05-16 22:04:01 +02:00
|
|
|
ndb_error->code, ndb_error->message, buf);
|
|
|
|
}
|
2006-05-31 16:16:03 +02:00
|
|
|
if (trans)
|
|
|
|
ndb->closeTransaction(trans);
|
|
|
|
ndb->setDatabaseName(save_db);
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
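The loop above follows the usual NDB API temporary-error retry pattern; its error path, condensed into a sketch over the same locals:
  /* Classify the failure; retry only temporary errors, with back-off */
  const NdbError *err= trans ? &trans->getNdbError() : &ndb->getNdbError();
  if (err->status == NdbError::TemporaryError && retries--)
  {
    if (trans)
      ndb->closeTransaction(trans);
    my_sleep(retry_sleep);      /* then loop and start a fresh transaction */
  }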
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
|
|
|
log query in schema table
|
|
|
|
*/
|
2006-04-10 16:08:40 +02:00
|
|
|
static void ndb_report_waiting(const char *key,
|
|
|
|
int the_time,
|
|
|
|
const char *op,
|
|
|
|
const char *obj)
|
|
|
|
{
|
|
|
|
ulonglong ndb_latest_epoch= 0;
|
|
|
|
const char *proc_info= "<no info>";
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-04-10 16:08:40 +02:00
|
|
|
if (injector_ndb)
|
|
|
|
ndb_latest_epoch= injector_ndb->getLatestGCI();
|
|
|
|
if (injector_thd)
|
|
|
|
proc_info= injector_thd->proc_info;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-04-10 16:08:40 +02:00
|
|
|
sql_print_information("NDB %s:"
|
|
|
|
" waiting max %u sec for %s %s."
|
|
|
|
" epochs: (%u,%u,%u)"
|
|
|
|
" injector proc_info: %s"
|
|
|
|
,key, the_time, op, obj
|
|
|
|
,(uint)ndb_latest_handled_binlog_epoch
|
|
|
|
,(uint)ndb_latest_received_binlog_epoch
|
|
|
|
,(uint)ndb_latest_epoch
|
|
|
|
,proc_info
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
|
|
|
|
const char *query, int query_length,
|
|
|
|
const char *db, const char *table_name,
|
|
|
|
uint32 ndb_table_id,
|
|
|
|
uint32 ndb_table_version,
|
2006-03-01 18:23:00 +01:00
|
|
|
enum SCHEMA_OP_TYPE type,
|
2010-08-09 20:33:47 +02:00
|
|
|
const char *new_db, const char *new_table_name)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_log_schema_op");
|
|
|
|
Thd_ndb *thd_ndb= get_thd_ndb(thd);
|
|
|
|
if (!thd_ndb)
|
|
|
|
{
|
|
|
|
if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
|
|
|
|
{
|
|
|
|
sql_print_error("Could not allocate Thd_ndb object");
|
|
|
|
DBUG_RETURN(1);
|
|
|
|
}
|
|
|
|
set_thd_ndb(thd, thd_ndb);
|
|
|
|
}
|
|
|
|
|
|
|
|
DBUG_PRINT("enter",
|
|
|
|
("query: %s db: %s table_name: %s thd_ndb->options: %d",
|
|
|
|
query, db, table_name, thd_ndb->options));
|
2006-12-01 15:49:07 +01:00
|
|
|
if (!ndb_schema_share || thd_ndb->options & TNO_NO_LOG_SCHEMA_OP)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
char tmp_buf2[FN_REFLEN];
|
2006-04-10 16:08:40 +02:00
|
|
|
const char *type_str;
|
2006-01-12 19:51:02 +01:00
|
|
|
switch (type)
|
|
|
|
{
|
|
|
|
case SOT_DROP_TABLE:
|
|
|
|
/* drop database command, do not log at drop table */
|
|
|
|
if (thd->lex->sql_command == SQLCOM_DROP_DB)
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
/* redo the drop table query as it may contain several tables */
|
|
|
|
query= tmp_buf2;
|
|
|
|
query_length= (uint) (strxmov(tmp_buf2, "drop table `",
|
2006-02-06 11:47:12 +01:00
|
|
|
table_name, "`", NullS) - tmp_buf2);
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "drop table";
|
2006-03-01 18:23:00 +01:00
|
|
|
break;
|
2006-01-12 19:51:02 +01:00
|
|
|
case SOT_RENAME_TABLE:
|
2006-03-01 18:23:00 +01:00
|
|
|
/* redo the rename table query as it may contain several tables */
|
|
|
|
query= tmp_buf2;
|
|
|
|
query_length= (uint) (strxmov(tmp_buf2, "rename table `",
|
2006-05-04 13:58:17 +02:00
|
|
|
db, ".", table_name, "` to `",
|
|
|
|
new_db, ".", new_table_name, "`", NullS) - tmp_buf2);
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "rename table";
|
2006-03-01 18:23:00 +01:00
|
|
|
break;
|
|
|
|
case SOT_CREATE_TABLE:
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "create table";
|
|
|
|
break;
|
2006-01-12 19:51:02 +01:00
|
|
|
case SOT_ALTER_TABLE:
|
2006-07-08 03:26:13 +02:00
|
|
|
type_str= "alter table";
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
case SOT_DROP_DB:
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "drop db";
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
case SOT_CREATE_DB:
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "create db";
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
case SOT_ALTER_DB:
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "alter db";
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
2006-02-01 01:12:11 +01:00
|
|
|
case SOT_TABLESPACE:
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "tablespace";
|
2006-02-01 01:12:11 +01:00
|
|
|
break;
|
|
|
|
case SOT_LOGFILE_GROUP:
|
2006-04-10 16:08:40 +02:00
|
|
|
type_str= "logfile group";
|
2006-02-01 01:12:11 +01:00
|
|
|
break;
|
2006-06-12 14:23:21 +02:00
|
|
|
case SOT_TRUNCATE_TABLE:
|
|
|
|
type_str= "truncate table";
|
|
|
|
break;
|
2006-01-12 19:51:02 +01:00
|
|
|
default:
|
|
|
|
abort(); /* should not happen, programming error */
|
|
|
|
}
|
|
|
|
|
2006-04-03 19:11:20 +02:00
|
|
|
NDB_SCHEMA_OBJECT *ndb_schema_object;
|
2006-02-06 11:47:12 +01:00
|
|
|
{
|
2009-06-19 10:24:43 +02:00
|
|
|
char key[FN_REFLEN + 1];
|
|
|
|
build_table_filename(key, sizeof(key) - 1, db, table_name, "", 0);
|
2006-04-03 19:11:20 +02:00
|
|
|
ndb_schema_object= ndb_get_schema_object(key, TRUE, FALSE);
|
2006-02-06 11:47:12 +01:00
|
|
|
}
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
const NdbError *ndb_error= 0;
|
|
|
|
uint32 node_id= g_ndb_cluster_connection->node_id();
|
|
|
|
Uint64 epoch= 0;
|
|
|
|
MY_BITMAP schema_subscribers;
|
2006-04-03 19:11:20 +02:00
|
|
|
uint32 bitbuf[sizeof(ndb_schema_object->slock)/4];
|
2006-05-31 16:16:03 +02:00
|
|
|
char bitbuf_e[sizeof(bitbuf)];
|
|
|
|
bzero(bitbuf_e, sizeof(bitbuf_e));
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-04-19 14:54:39 +02:00
|
|
|
int i, updated= 0;
|
|
|
|
int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that were found necessary while testing the handler changes.
2006-06-04 17:52:22 +02:00
|
|
|
bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, FALSE);
|
2006-01-12 19:51:02 +01:00
|
|
|
bitmap_set_all(&schema_subscribers);
|
2007-02-05 06:04:36 +01:00
|
|
|
|
|
|
|
/* begin protect ndb_schema_share */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
if (ndb_schema_share == 0)
|
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
if (ndb_schema_object)
|
|
|
|
ndb_free_schema_object(&ndb_schema_object, FALSE);
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_share->mutex);
|
2006-04-19 14:54:39 +02:00
|
|
|
for (i= 0; i < no_storage_nodes; i++)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
MY_BITMAP *table_subscribers= &ndb_schema_share->subscriber_bitmap[i];
|
2006-01-12 19:51:02 +01:00
|
|
|
if (!bitmap_is_clear_all(table_subscribers))
|
2006-04-19 14:54:39 +02:00
|
|
|
{
|
2006-01-12 19:51:02 +01:00
|
|
|
bitmap_intersect(&schema_subscribers,
|
|
|
|
table_subscribers);
|
2006-04-19 14:54:39 +02:00
|
|
|
updated= 1;
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_share->mutex);
|
|
|
|
mysql_mutex_unlock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
/* end protect ndb_schema_share */
|
|
|
|
|
2006-04-19 14:54:39 +02:00
|
|
|
if (updated)
|
2006-05-31 16:16:03 +02:00
|
|
|
{
|
2006-04-19 14:54:39 +02:00
|
|
|
bitmap_clear_bit(&schema_subscribers, node_id);
|
2006-05-31 16:16:03 +02:00
|
|
|
/*
|
|
|
|
if setting our own acknowledge bit, it is important that
|
|
|
|
no other mysqld's are registered, as subsequent code
|
|
|
|
will cause the original event to be hidden (by blob
|
|
|
|
merge event code)
|
|
|
|
*/
|
|
|
|
if (bitmap_is_clear_all(&schema_subscribers))
|
|
|
|
bitmap_set_bit(&schema_subscribers, node_id);
|
|
|
|
}
|
2006-04-19 14:54:39 +02:00
|
|
|
else
|
|
|
|
bitmap_clear_all(&schema_subscribers);
|
|
|
|
|
2006-04-03 19:11:20 +02:00
|
|
|
if (ndb_schema_object)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_object->mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
memcpy(ndb_schema_object->slock, schema_subscribers.bitmap,
|
|
|
|
sizeof(ndb_schema_object->slock));
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_object->mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions were done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
The following function parameter changes were made:
- All string functions in mysys/strings were changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length were changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) the following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrm(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new casts required by other changes
- Added some casts to my_multi_malloc() arguments for safety (as string lengths
need to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly, as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffer and memory region types to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not depend on uint size
(portability fix)
- Removed Windows-specific code to restore the cursor position as this
causes slowdown on Windows, and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
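To illustrate the shape of these conversions (an invented example, not a
function from this patch, assuming the uchar typedef from my_global.h): a
mysys-style routine that previously took byte* and uint would now take
uchar* and size_t, with MY_FILE_ERROR, i.e. (size_t) -1, as the error value:

  #include <string.h>

  /* hypothetical helper, shown only to illustrate the type changes */
  static size_t copy_region(uchar *dst, const uchar *src, size_t len)
  {
    if (dst == NULL || src == NULL)
      return (size_t) -1;          /* errors are now (size_t) -1 */
    memcpy(dst, src, len);         /* lengths are size_t, not uint */
    return len;
  }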
2007-05-10 11:59:39 +02:00
|
|
|
DBUG_DUMP("schema_subscribers", (uchar*)schema_subscribers.bitmap,
|
2006-01-12 19:51:02 +01:00
|
|
|
no_bytes_in_map(&schema_subscribers));
|
|
|
|
DBUG_PRINT("info", ("bitmap_is_clear_all(&schema_subscribers): %d",
|
|
|
|
bitmap_is_clear_all(&schema_subscribers)));
|
|
|
|
}
|
|
|
|
|
|
|
|
Ndb *ndb= thd_ndb->ndb;
|
2006-03-01 18:23:00 +01:00
|
|
|
char save_db[FN_REFLEN];
|
|
|
|
strcpy(save_db, ndb->getDatabaseName());
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-03-23 04:59:14 +01:00
|
|
|
char tmp_buf[FN_REFLEN];
|
2006-01-12 19:51:02 +01:00
|
|
|
NDBDICT *dict= ndb->getDictionary();
|
|
|
|
ndb->setDatabaseName(NDB_REP_DB);
|
2006-05-04 13:58:17 +02:00
|
|
|
Ndb_table_guard ndbtab_g(dict, NDB_SCHEMA_TABLE);
|
|
|
|
const NDBTAB *ndbtab= ndbtab_g.get_table();
|
2006-01-12 19:51:02 +01:00
|
|
|
NdbTransaction *trans= 0;
|
|
|
|
int retries= 100;
|
2007-04-25 15:25:23 +02:00
|
|
|
int retry_sleep= 10; /* 10 milliseconds, transaction */
|
2006-01-12 19:51:02 +01:00
|
|
|
const NDBCOL *col[SCHEMA_SIZE];
|
|
|
|
unsigned sz[SCHEMA_SIZE];
|
|
|
|
|
|
|
|
if (ndbtab == 0)
|
|
|
|
{
|
|
|
|
if (strcmp(NDB_REP_DB, db) != 0 ||
|
|
|
|
strcmp(NDB_SCHEMA_TABLE, table_name))
|
|
|
|
{
|
|
|
|
ndb_error= &dict->getNdbError();
|
|
|
|
}
|
2006-02-06 11:47:12 +01:00
|
|
|
goto end;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
uint i;
|
|
|
|
for (i= 0; i < SCHEMA_SIZE; i++)
|
|
|
|
{
|
|
|
|
col[i]= ndbtab->getColumn(i);
|
2006-03-23 04:59:14 +01:00
|
|
|
if (i != SCHEMA_QUERY_I)
|
|
|
|
{
|
|
|
|
sz[i]= col[i]->getLength();
|
|
|
|
DBUG_ASSERT(sz[i] <= sizeof(tmp_buf));
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
while (1)
|
|
|
|
{
|
2006-05-04 13:58:17 +02:00
|
|
|
const char *log_db= db;
|
|
|
|
const char *log_tab= table_name;
|
|
|
|
const char *log_subscribers= (char*)schema_subscribers.bitmap;
|
|
|
|
uint32 log_type= (uint32)type;
|
2006-01-12 19:51:02 +01:00
|
|
|
if ((trans= ndb->startTransaction()) == 0)
|
|
|
|
goto err;
|
2006-05-04 13:58:17 +02:00
|
|
|
while (1)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
NdbOperation *op= 0;
|
|
|
|
int r= 0;
|
|
|
|
r|= (op= trans->getNdbOperation(ndbtab)) == 0;
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
r|= op->writeTuple();
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
|
|
|
|
/* db */
|
2006-05-04 13:58:17 +02:00
|
|
|
ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, strlen(log_db));
|
2006-01-12 19:51:02 +01:00
|
|
|
r|= op->equal(SCHEMA_DB_I, tmp_buf);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* name */
|
2006-05-04 13:58:17 +02:00
|
|
|
ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, log_tab,
|
|
|
|
strlen(log_tab));
|
2006-01-12 19:51:02 +01:00
|
|
|
r|= op->equal(SCHEMA_NAME_I, tmp_buf);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* slock */
|
|
|
|
DBUG_ASSERT(sz[SCHEMA_SLOCK_I] == sizeof(bitbuf));
|
2006-05-04 13:58:17 +02:00
|
|
|
r|= op->setValue(SCHEMA_SLOCK_I, log_subscribers);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* query */
|
2006-03-23 04:59:14 +01:00
|
|
|
{
|
|
|
|
NdbBlob *ndb_blob= op->getBlobHandle(SCHEMA_QUERY_I);
|
|
|
|
DBUG_ASSERT(ndb_blob != 0);
|
|
|
|
uint blob_len= query_length;
|
|
|
|
const char* blob_ptr= query;
|
|
|
|
r|= ndb_blob->setValue(blob_ptr, blob_len);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
/* node_id */
|
|
|
|
r|= op->setValue(SCHEMA_NODE_ID_I, node_id);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* epoch */
|
|
|
|
r|= op->setValue(SCHEMA_EPOCH_I, epoch);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* id */
|
|
|
|
r|= op->setValue(SCHEMA_ID_I, ndb_table_id);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* version */
|
|
|
|
r|= op->setValue(SCHEMA_VERSION_I, ndb_table_version);
|
|
|
|
DBUG_ASSERT(r == 0);
|
|
|
|
/* type */
|
2006-05-04 13:58:17 +02:00
|
|
|
r|= op->setValue(SCHEMA_TYPE_I, log_type);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_ASSERT(r == 0);
|
2007-04-18 16:02:20 +02:00
|
|
|
/* any value */
|
2009-12-22 10:35:56 +01:00
|
|
|
if (!(thd->variables.option_bits & OPTION_BIN_LOG))
|
2007-04-18 16:02:20 +02:00
|
|
|
r|= op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
|
|
|
|
else
|
|
|
|
r|= op->setAnyValue(thd->server_id);
|
|
|
|
DBUG_ASSERT(r == 0);
|
2006-05-04 13:58:17 +02:00
|
|
|
if (log_db != new_db && new_db && new_table_name)
|
|
|
|
{
|
|
|
|
log_db= new_db;
|
|
|
|
log_tab= new_table_name;
|
2006-05-31 16:16:03 +02:00
|
|
|
log_subscribers= bitbuf_e; // no ack expected on this
|
2006-05-04 13:58:17 +02:00
|
|
|
log_type= (uint32)SOT_RENAME_TABLE_NEW;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
break;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
if (trans->execute(NdbTransaction::Commit) == 0)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("logged: %s", query));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
err:
|
2006-02-01 23:55:04 +01:00
|
|
|
const NdbError *this_error= trans ?
|
|
|
|
&trans->getNdbError() : &ndb->getNdbError();
|
|
|
|
if (this_error->status == NdbError::TemporaryError)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
if (retries--)
|
|
|
|
{
|
2006-02-01 23:55:04 +01:00
|
|
|
if (trans)
|
|
|
|
ndb->closeTransaction(trans);
|
2007-04-25 15:25:23 +02:00
|
|
|
my_sleep(retry_sleep);
|
2006-01-12 19:51:02 +01:00
|
|
|
continue; // retry
|
|
|
|
}
|
|
|
|
}
|
2006-02-01 23:55:04 +01:00
|
|
|
ndb_error= this_error;
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
end:
|
|
|
|
if (ndb_error)
|
2009-09-10 11:18:29 +02:00
|
|
|
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
2006-01-12 19:51:02 +01:00
|
|
|
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
|
|
|
|
ndb_error->code,
|
|
|
|
ndb_error->message,
|
|
|
|
"Could not log query '%s' on other mysqld's");
|
|
|
|
|
|
|
|
if (trans)
|
|
|
|
ndb->closeTransaction(trans);
|
2006-03-01 18:23:00 +01:00
|
|
|
ndb->setDatabaseName(save_db);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
Wait for other mysqld's to acknowledge the table operation
|
|
|
|
*/
|
|
|
|
if (ndb_error == 0 &&
|
|
|
|
!bitmap_is_clear_all(&schema_subscribers))
|
|
|
|
{
|
2006-05-31 16:16:03 +02:00
|
|
|
/*
|
|
|
|
if own nodeid is set we are a single mysqld registered
|
|
|
|
as an optimization we update the slock directly
|
|
|
|
*/
|
|
|
|
if (bitmap_is_set(&schema_subscribers, node_id))
|
|
|
|
ndbcluster_update_slock(thd, db, table_name);
|
|
|
|
else
|
|
|
|
dict->forceGCPWait();
|
|
|
|
|
2009-12-22 10:35:56 +01:00
|
|
|
int max_timeout= DEFAULT_SYNC_TIMEOUT;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_object->mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
while (1)
|
|
|
|
{
|
|
|
|
struct timespec abstime;
|
|
|
|
int i;
|
2006-04-19 14:54:39 +02:00
|
|
|
int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
|
2006-01-12 19:51:02 +01:00
|
|
|
set_timespec(abstime, 1);
|
2010-01-07 06:42:07 +01:00
|
|
|
int ret= mysql_cond_timedwait(&injector_cond,
|
|
|
|
&ndb_schema_object->mutex,
|
|
|
|
&abstime);
|
2006-05-31 16:16:03 +02:00
|
|
|
if (thd->killed)
|
|
|
|
break;
|
2007-02-05 06:04:36 +01:00
|
|
|
|
|
|
|
/* begin protect ndb_schema_share */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
if (ndb_schema_share == 0)
|
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
break;
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_share->mutex);
|
2006-04-19 14:54:39 +02:00
|
|
|
for (i= 0; i < no_storage_nodes; i++)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
/* remove any unsubscribed from schema_subscribers */
|
2006-12-01 15:49:07 +01:00
|
|
|
MY_BITMAP *tmp= &ndb_schema_share->subscriber_bitmap[i];
|
2006-01-12 19:51:02 +01:00
|
|
|
if (!bitmap_is_clear_all(tmp))
|
|
|
|
bitmap_intersect(&schema_subscribers, tmp);
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_share->mutex);
|
|
|
|
mysql_mutex_unlock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
/* end protect ndb_schema_share */
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-04-03 19:11:20 +02:00
|
|
|
/* remove any unsubscribed from ndb_schema_object->slock */
|
|
|
|
bitmap_intersect(&ndb_schema_object->slock_bitmap, &schema_subscribers);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-04-03 19:11:20 +02:00
|
|
|
DBUG_DUMP("ndb_schema_object->slock_bitmap.bitmap",
|
2007-05-10 11:59:39 +02:00
|
|
|
(uchar*)ndb_schema_object->slock_bitmap.bitmap,
|
2006-04-03 19:11:20 +02:00
|
|
|
no_bytes_in_map(&ndb_schema_object->slock_bitmap));
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-04-03 19:11:20 +02:00
|
|
|
if (bitmap_is_clear_all(&ndb_schema_object->slock_bitmap))
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
|
2006-04-12 18:01:19 +02:00
|
|
|
if (ret)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-04-12 18:01:19 +02:00
|
|
|
max_timeout--;
|
|
|
|
if (max_timeout == 0)
|
|
|
|
{
|
2006-04-13 09:37:43 +02:00
|
|
|
sql_print_error("NDB %s: distributing %s timed out. Ignoring...",
|
2006-04-12 18:01:19 +02:00
|
|
|
type_str, ndb_schema_object->key);
|
|
|
|
break;
|
|
|
|
}
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-04-12 18:01:19 +02:00
|
|
|
ndb_report_waiting(type_str, max_timeout,
|
|
|
|
"distributing", ndb_schema_object->key);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_object->mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-02-06 11:47:12 +01:00
|
|
|
|
2006-04-03 19:11:20 +02:00
|
|
|
if (ndb_schema_object)
|
|
|
|
ndb_free_schema_object(&ndb_schema_object, FALSE);
|
2006-02-06 11:47:12 +01:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Handle _non_ data events from the storage nodes
|
|
|
|
*/
|
2006-02-17 09:44:12 +01:00
|
|
|
int
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
|
|
|
|
NDB_SHARE *share)
|
|
|
|
{
|
2006-02-13 11:23:13 +01:00
|
|
|
DBUG_ENTER("ndb_handle_schema_change");
|
2006-04-06 10:47:48 +02:00
|
|
|
TABLE* table= share->table;
|
2006-05-08 18:09:01 +02:00
|
|
|
TABLE_SHARE *table_share= share->table_share;
|
2006-04-06 10:47:48 +02:00
|
|
|
const char *dbname= table_share->db.str;
|
2006-04-18 08:57:37 +02:00
|
|
|
const char *tabname= table_share->table_name.str;
|
2006-03-09 01:04:13 +01:00
|
|
|
bool do_close_cached_tables= FALSE;
|
|
|
|
bool is_online_alter_table= FALSE;
|
|
|
|
bool is_rename_table= FALSE;
|
|
|
|
bool is_remote_change=
|
|
|
|
(uint) pOp->getReqNodeId() != g_ndb_cluster_connection->node_id();
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-03-09 01:04:13 +01:00
|
|
|
if (pOp->getEventType() == NDBEVENT::TE_ALTER)
|
|
|
|
{
|
|
|
|
if (pOp->tableFrmChanged())
|
|
|
|
{
|
2006-05-04 13:58:17 +02:00
|
|
|
DBUG_PRINT("info", ("NDBEVENT::TE_ALTER: table frm changed"));
|
2006-03-09 01:04:13 +01:00
|
|
|
is_online_alter_table= TRUE;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2006-05-04 13:58:17 +02:00
|
|
|
DBUG_PRINT("info", ("NDBEVENT::TE_ALTER: name changed"));
|
2006-03-09 01:04:13 +01:00
|
|
|
DBUG_ASSERT(pOp->tableNameChanged());
|
|
|
|
is_rename_table= TRUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-05-04 13:58:17 +02:00
|
|
|
{
|
|
|
|
ndb->setDatabaseName(dbname);
|
|
|
|
Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
|
|
|
|
const NDBTAB *ev_tab= pOp->getTable();
|
|
|
|
const NDBTAB *cache_tab= ndbtab_g.get_table();
|
|
|
|
if (cache_tab &&
|
|
|
|
cache_tab->getObjectId() == ev_tab->getObjectId() &&
|
|
|
|
cache_tab->getObjectVersion() <= ev_tab->getObjectVersion())
|
|
|
|
ndbtab_g.invalidate();
|
|
|
|
}
|
|
|
|
|
2006-04-06 10:47:48 +02:00
|
|
|
/*
|
|
|
|
Refresh local frm file and dictionary cache if
|
|
|
|
remote on-line alter table
|
|
|
|
*/
|
|
|
|
if (is_remote_change && is_online_alter_table)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-04-06 10:47:48 +02:00
|
|
|
const char *tabname= table_share->table_name.str;
|
2009-06-19 10:24:43 +02:00
|
|
|
char key[FN_REFLEN + 1];
|
2007-05-10 11:59:39 +02:00
|
|
|
uchar *data= 0, *pack_data= 0;
|
|
|
|
size_t length, pack_length;
|
2006-04-06 10:47:48 +02:00
|
|
|
int error;
|
|
|
|
NDBDICT *dict= ndb->getDictionary();
|
|
|
|
const NDBTAB *altered_table= pOp->getTable();
|
2006-02-17 09:44:12 +01:00
|
|
|
|
2006-04-06 10:47:48 +02:00
|
|
|
DBUG_PRINT("info", ("Detected frm change of table %s.%s",
|
|
|
|
dbname, tabname));
|
2009-06-19 10:24:43 +02:00
|
|
|
build_table_filename(key, sizeof(key) - 1, dbname, tabname, NullS, 0);
|
2006-04-06 10:47:48 +02:00
|
|
|
/*
|
2006-09-12 16:34:12 +02:00
|
|
|
If there is no local table shadowing the altered table and
|
|
|
|
it has an frm that is different from the one on disk, then
|
|
|
|
overwrite it with the new table definition
|
2006-02-17 09:44:12 +01:00
|
|
|
*/
|
2006-09-12 16:34:12 +02:00
|
|
|
if (!ndbcluster_check_if_local_table(dbname, tabname) &&
|
|
|
|
readfrm(key, &data, &length) == 0 &&
|
2006-04-06 10:47:48 +02:00
|
|
|
packfrm(data, length, &pack_data, &pack_length) == 0 &&
|
|
|
|
cmp_frm(altered_table, pack_data, pack_length))
|
2006-03-09 01:04:13 +01:00
|
|
|
{
|
2007-05-10 11:59:39 +02:00
|
|
|
DBUG_DUMP("frm", (uchar*) altered_table->getFrmData(),
|
2006-04-06 10:47:48 +02:00
|
|
|
altered_table->getFrmLength());
|
2006-05-04 13:58:17 +02:00
|
|
|
Ndb_table_guard ndbtab_g(dict, tabname);
|
|
|
|
const NDBTAB *old= ndbtab_g.get_table();
|
2006-04-06 10:47:48 +02:00
|
|
|
if (old &&
|
|
|
|
old->getObjectVersion() != altered_table->getObjectVersion())
|
|
|
|
dict->putTable(altered_table);
|
|
|
|
|
Bug#34043: Server loops excessively in _checkchunk() when safemalloc is enabled
Essentially, the problem is that safemalloc is excruciatingly
slow as it checks all allocated blocks for overrun at each
memory management primitive, yielding an almost exponential
slowdown for the memory management functions (malloc, realloc,
free). The overrun check basically consists of verifying some
bytes of a block for certain magic keys, which catches some
simple forms of overrun. Another minor problem is violation
of aliasing rules and that its own internal list of blocks
is prone to corruption.
Another issue with safemalloc is rather the maintenance cost
as the tool has a significant impact on the server code.
Given the magnitude of memory debuggers available nowadays,
especially those that are provided with the platform malloc
implementation, maintenance of an in-house and largely obsolete
memory debugger becomes a burden that is not worth the effort
due to its slowness and lack of support for detecting more
common forms of heap corruption.
Since there are third-party tools that can provide the same
functionality at a lower or comparable performance cost, the
solution is to simply remove safemalloc.
The removal of safemalloc also allows a simplification of the
malloc wrappers, removing quite a bit of kludge: redefinition
of my_malloc, my_free and the removal of the unused second
argument of my_free. Since free() always checks whether the
supplied pointer is null, redundant checks are also removed.
Also, this patch adds unit testing for my_malloc and moves
my_realloc implementation into the same file as the other
memory allocation primitives.
2010-07-08 23:20:08 +02:00
|
|
|
my_free(data);
|
2006-05-08 18:09:01 +02:00
|
|
|
data= NULL;
|
2007-05-10 11:59:39 +02:00
|
|
|
if ((error= unpackfrm(&data, &length,
|
|
|
|
(const uchar*) altered_table->getFrmData())) ||
|
2006-04-06 10:47:48 +02:00
|
|
|
(error= writefrm(key, data, length)))
|
2006-02-13 11:23:13 +01:00
|
|
|
{
|
2006-04-06 10:47:48 +02:00
|
|
|
sql_print_information("NDB: Failed write frm for %s.%s, error %d",
|
|
|
|
dbname, tabname, error);
|
2006-02-13 11:23:13 +01:00
|
|
|
}
|
2006-05-19 17:34:50 +02:00
|
|
|
|
|
|
|
// copy names as memory will be freed
|
2006-05-19 20:31:02 +02:00
|
|
|
NdbAutoPtr<char> a1((char *)(dbname= strdup(dbname)));
|
|
|
|
NdbAutoPtr<char> a2((char *)(tabname= strdup(tabname)));
|
2006-04-06 10:47:48 +02:00
|
|
|
ndbcluster_binlog_close_table(thd, share);
|
2006-05-04 13:58:17 +02:00
|
|
|
|
|
|
|
TABLE_LIST table_list;
|
|
|
|
bzero((char*) &table_list,sizeof(table_list));
|
|
|
|
table_list.db= (char *)dbname;
|
|
|
|
table_list.alias= table_list.table_name= (char *)tabname;
|
2010-08-12 15:50:23 +02:00
|
|
|
close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT);
|
2006-05-04 13:58:17 +02:00
|
|
|
|
2006-05-19 17:34:50 +02:00
|
|
|
if ((error= ndbcluster_binlog_open_table(thd, share,
|
|
|
|
table_share, table, 1)))
|
2006-04-06 10:47:48 +02:00
|
|
|
sql_print_information("NDB: Failed to re-open table %s.%s",
|
|
|
|
dbname, tabname);
|
2006-05-19 17:34:50 +02:00
|
|
|
|
|
|
|
table= share->table;
|
|
|
|
table_share= share->table_share;
|
|
|
|
dbname= table_share->db.str;
|
|
|
|
tabname= table_share->table_name.str;
|
2006-02-13 11:23:13 +01:00
|
|
|
}
|
2010-07-08 23:20:08 +02:00
|
|
|
my_free(data);
|
|
|
|
my_free(pack_data);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
2006-02-13 11:23:13 +01:00
|
|
|
// If only the frm was changed, continue replicating
|
2006-03-09 01:04:13 +01:00
|
|
|
if (is_online_alter_table)
|
2006-02-13 11:23:13 +01:00
|
|
|
{
|
|
|
|
/* Signal ha_ndbcluster::alter_table that drop is done */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-02-13 11:23:13 +01:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&share->mutex);
|
2006-03-09 01:04:13 +01:00
|
|
|
if (is_rename_table && !is_remote_change)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("Detected name change of table %s.%s",
|
|
|
|
share->db, share->table_name));
|
|
|
|
/* ToDo: remove printout */
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-03-09 01:04:13 +01:00
|
|
|
sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.",
|
|
|
|
share_prefix, share->table->s->db.str,
|
|
|
|
share->table->s->table_name.str,
|
|
|
|
share->key);
|
2006-05-04 13:58:17 +02:00
|
|
|
{
|
|
|
|
ndb->setDatabaseName(share->table->s->db.str);
|
|
|
|
Ndb_table_guard ndbtab_g(ndb->getDictionary(),
|
|
|
|
share->table->s->table_name.str);
|
|
|
|
const NDBTAB *ev_tab= pOp->getTable();
|
|
|
|
const NDBTAB *cache_tab= ndbtab_g.get_table();
|
|
|
|
if (cache_tab &&
|
|
|
|
cache_tab->getObjectId() == ev_tab->getObjectId() &&
|
|
|
|
cache_tab->getObjectVersion() <= ev_tab->getObjectVersion())
|
|
|
|
ndbtab_g.invalidate();
|
|
|
|
}
|
2006-03-09 01:04:13 +01:00
|
|
|
/* do the rename of the table in the share */
|
|
|
|
share->table->s->db.str= share->db;
|
|
|
|
share->table->s->db.length= strlen(share->db);
|
|
|
|
share->table->s->table_name.str= share->table_name;
|
|
|
|
share->table->s->table_name.length= strlen(share->table_name);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
|
|
|
|
if (share->op_old == pOp)
|
|
|
|
share->op_old= 0;
|
|
|
|
else
|
|
|
|
share->op= 0;
|
|
|
|
// either just us or drop table handling as well
|
|
|
|
|
|
|
|
/* Signal ha_ndbcluster::delete/rename_table that drop is done */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&share->mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndbcluster_mutex);
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-01-12 19:51:02 +01:00
|
|
|
free_share(&share, TRUE);
|
2006-03-09 01:04:13 +01:00
|
|
|
if (is_remote_change && share && share->state != NSS_DROPPED)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-03-09 01:04:13 +01:00
|
|
|
DBUG_PRINT("info", ("remote change"));
|
2006-05-04 13:58:17 +02:00
|
|
|
share->state= NSS_DROPPED;
|
2006-01-12 19:51:02 +01:00
|
|
|
if (share->use_count != 1)
|
2007-02-06 06:40:26 +01:00
|
|
|
{
|
|
|
|
/* open handler holding reference */
|
|
|
|
/* postpone freeing the 'create' ndb_share reference until below */
|
2006-03-09 01:04:13 +01:00
|
|
|
do_close_cached_tables= TRUE;
|
2007-02-06 06:40:26 +01:00
|
|
|
}
|
2006-05-04 13:58:17 +02:00
|
|
|
else
|
|
|
|
{
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference create free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-05-04 13:58:17 +02:00
|
|
|
free_share(&share, TRUE);
|
|
|
|
share= 0;
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-05-04 13:58:17 +02:00
|
|
|
else
|
|
|
|
share= 0;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndbcluster_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
pOp->setCustomData(0);
|
2006-04-13 09:37:43 +02:00
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-04-13 09:37:43 +02:00
|
|
|
ndb->dropEventOperation(pOp);
|
2006-01-12 19:51:02 +01:00
|
|
|
pOp= 0;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
if (do_close_cached_tables)
|
2006-05-04 13:58:17 +02:00
|
|
|
{
|
|
|
|
TABLE_LIST table_list;
|
|
|
|
bzero((char*) &table_list,sizeof(table_list));
|
|
|
|
table_list.db= (char *)dbname;
|
|
|
|
table_list.alias= table_list.table_name= (char *)tabname;
|
2010-08-12 15:50:23 +02:00
|
|
|
close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT);
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference create free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-05-04 13:58:17 +02:00
|
|
|
free_share(&share);
|
|
|
|
}
|
2006-03-10 10:41:52 +01:00
|
|
|
DBUG_RETURN(0);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
2007-04-18 16:02:20 +02:00
|
|
|
static void ndb_binlog_query(THD *thd, Cluster_schema *schema)
|
|
|
|
{
|
|
|
|
if (schema->any_value & NDB_ANYVALUE_RESERVED)
|
|
|
|
{
|
|
|
|
if (schema->any_value != NDB_ANYVALUE_FOR_NOLOGGING)
|
|
|
|
sql_print_warning("NDB: unknown value for binlog signalling 0x%X, "
|
|
|
|
"query not logged",
|
|
|
|
schema->any_value);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
uint32 thd_server_id_save= thd->server_id;
|
|
|
|
DBUG_ASSERT(sizeof(thd_server_id_save) == sizeof(thd->server_id));
|
|
|
|
char *thd_db_save= thd->db;
|
|
|
|
if (schema->any_value == 0)
|
|
|
|
thd->server_id= ::server_id;
|
|
|
|
else
|
|
|
|
thd->server_id= schema->any_value;
|
|
|
|
thd->db= schema->db;
|
2009-05-30 15:32:28 +02:00
|
|
|
int errcode = query_error_code(thd, thd->killed == THD::NOT_KILLED);
|
2007-04-18 16:02:20 +02:00
|
|
|
thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query,
|
2009-11-03 20:02:56 +01:00
|
|
|
schema->query_length, FALSE, TRUE,
|
2009-05-30 15:32:28 +02:00
|
|
|
schema->name[0] == 0 || thd->db[0] == 0,
|
|
|
|
errcode);
|
2007-04-18 16:02:20 +02:00
|
|
|
thd->server_id= thd_server_id_save;
|
|
|
|
thd->db= thd_db_save;
|
|
|
|
}
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
static int
|
|
|
|
ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
|
|
|
|
NdbEventOperation *pOp,
|
2006-03-21 16:54:56 +01:00
|
|
|
List<Cluster_schema>
|
2006-02-06 11:47:12 +01:00
|
|
|
*post_epoch_log_list,
|
2006-03-21 16:54:56 +01:00
|
|
|
List<Cluster_schema>
|
2006-02-06 11:47:12 +01:00
|
|
|
*post_epoch_unlock_list,
|
|
|
|
MEM_ROOT *mem_root)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ndb_binlog_thread_handle_schema_event");
|
2006-04-03 19:11:20 +02:00
|
|
|
NDB_SHARE *tmp_share= (NDB_SHARE *)pOp->getCustomData();
|
2006-12-01 15:49:07 +01:00
|
|
|
if (tmp_share && ndb_schema_share == tmp_share)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
NDBEVENT::TableEvent ev_type= pOp->getEventType();
|
|
|
|
DBUG_PRINT("enter", ("%s.%s ev_type: %d",
|
2006-04-03 19:11:20 +02:00
|
|
|
tmp_share->db, tmp_share->table_name, ev_type));
|
2006-03-21 16:54:56 +01:00
|
|
|
if (ev_type == NDBEVENT::TE_UPDATE ||
|
|
|
|
ev_type == NDBEVENT::TE_INSERT)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-03-21 16:54:56 +01:00
|
|
|
Cluster_schema *schema= (Cluster_schema *)
|
|
|
|
sql_alloc(sizeof(Cluster_schema));
|
2006-01-12 19:51:02 +01:00
|
|
|
MY_BITMAP slock;
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that were found necessary while testing the handler changes
Changes that require code changes in other storage engines:
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
do statement specific cleanups.
(The only case it's not called is if we force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only need to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only need to update these
columns.
The above bitmaps should now be up to date in all contexts
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. As the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one uses the normal bitmap functions in my_bitmap.c instead
of handler-dedicated bitmap functions)
- field->query_id is removed. One should instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIEVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, dbug_tmp_use_all_columns() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler.)
- If one needs to temporarily set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable name changes in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS) is true.
(stats.records is not supposed to be an exact value. It only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precautions
in remembering any hidden primary key to be able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is initialized at engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns were marked in read_set and only updated
columns were marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, it now has the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
for doing the sort and choosing the rows.
- The TABLE_SHARE object has an 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object also has two pointers to bitmaps, read_set and write_set,
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for a few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as used in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_update() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell the handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell the handler that we are going
to update columns that are part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that are part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to the handler to allow
it to quickly know that it only needs to read columns that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that are used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys the set of all keys that are used in the query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all indexes.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that return
a partial column set and where the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write, we can't know at the MySQL level whether the value changed or not.
The reason this worked before was that MySQL marked all to-be-written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() no longer sets up a temporary MEM_ROOT
object as a thread-specific variable for the handler. Instead we
send the to-be-used MEM_ROOT to get_new_handler().
(Simpler, faster code.)
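The usage pattern for the temporary all-columns maps mentioned in the
dbug_tmp_use_all_columns() item above (the same pattern is given in the
full changeset notes; table and field stand for any TABLE* and Field* in
scope):

my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val_int();                     /* would assert without the map */
dbug_tmp_restore_column_map(table->read_set, old_map);

and similarly with table->write_set around Field::store() calls.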
Bugs fixed:
- Column marking was not done correctly in a lot of cases
(ALTER TABLE, when using triggers, auto_increment fields etc.).
(Could potentially result in wrong values inserted into table handlers
relying on the old column maps or field->set_query_id being correct.)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE), which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back".
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get a duplicate row on insert, change the column map to read and
write all columns while retrying the operation (see the sketch after this
list). This is required by the definition of REPLACE and also ensures that
fields that are only part of UPDATE are properly handled. This fixed a bug
in NDB and REPLACE where REPLACE wrongly copied some column values from the
replaced row.
- For table handlers that don't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields had been
automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key would fail if the field was not
declared as NOT NULL.
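A sketch of the duplicate-row retry described above (assumed control flow,
not the actual patch; HA_ERR_FOUND_DUPP_KEY, bitmap_set_all() and
handler::column_bitmaps_signal() are real names):

if (error == HA_ERR_FOUND_DUPP_KEY)
{
  /* Widen both maps so the retried REPLACE sees every column */
  bitmap_set_all(table->read_set);
  bitmap_set_all(table->write_set);
  table->file->column_bitmaps_signal();     /* tell the engine maps changed */
  /* ... retry the row operation ... */
}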
Cleanups:
- Removed the unused condition argument to setup_tables().
- Removed the no longer needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (instead of used columns). This allowed me to remove some
references to sql_command in filesort and should also enable us to return
column results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up lines that were too long.
- Moved some variable declarations to the start of functions for better code
readability.
- Removed some unused arguments from functions
(setup_fields(), mysql_prepare_insert_check_table()).
- setup_fields() now takes an enum instead of an int for marking column
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of whether the timestamp field was set by the statement.
- Removed calls to free_io_cache() as this is now done automatically in
ha_reset().
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparisons with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row-based logging (as shown by testcase binlog_row_mix_innodb_myisam.result).
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly.)
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, FALSE);
|
2006-01-12 19:51:02 +01:00
|
|
|
uint node_id= g_ndb_cluster_connection->node_id();
|
2007-04-18 16:02:20 +02:00
|
|
|
{
|
|
|
|
ndbcluster_get_schema(tmp_share, schema);
|
|
|
|
schema->any_value= pOp->getAnyValue();
|
|
|
|
}
|
2006-05-31 23:24:25 +02:00
|
|
|
enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type;
|
2006-05-31 16:16:03 +02:00
|
|
|
DBUG_PRINT("info",
|
|
|
|
("%s.%s: log query_length: %d query: '%s' type: %d",
|
|
|
|
schema->db, schema->name,
|
|
|
|
schema->query_length, schema->query,
|
2006-05-31 23:24:25 +02:00
|
|
|
schema_type));
|
|
|
|
if (schema_type == SOT_CLEAR_SLOCK)
|
2006-05-31 16:16:03 +02:00
|
|
|
{
|
2006-05-31 23:24:25 +02:00
|
|
|
/*
|
|
|
|
handle slock after epoch is completed to ensure that
|
|
|
|
schema events get inserted in the binlog after any data
|
|
|
|
events
|
|
|
|
*/
|
|
|
|
post_epoch_log_list->push_back(schema, mem_root);
|
2006-05-31 16:16:03 +02:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
if (schema->node_id != node_id)
|
|
|
|
{
|
2006-03-21 16:54:56 +01:00
|
|
|
int log_query= 0, post_epoch_unlock= 0;
|
2006-05-31 23:24:25 +02:00
|
|
|
switch (schema_type)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
case SOT_DROP_TABLE:
|
2006-05-04 13:58:17 +02:00
|
|
|
// fall through
|
2006-01-12 19:51:02 +01:00
|
|
|
case SOT_RENAME_TABLE:
|
2006-05-04 13:58:17 +02:00
|
|
|
// fall through
|
|
|
|
case SOT_RENAME_TABLE_NEW:
|
|
|
|
// fall through
|
2006-01-12 19:51:02 +01:00
|
|
|
case SOT_ALTER_TABLE:
|
2006-05-04 13:58:17 +02:00
|
|
|
post_epoch_log_list->push_back(schema, mem_root);
|
|
|
|
/* acknowledge this query _after_ epoch completion */
|
|
|
|
post_epoch_unlock= 1;
|
|
|
|
break;
|
2006-06-12 14:23:21 +02:00
|
|
|
case SOT_TRUNCATE_TABLE:
|
2006-06-28 02:35:13 +02:00
|
|
|
{
|
2009-06-19 10:24:43 +02:00
|
|
|
char key[FN_REFLEN + 1];
|
|
|
|
build_table_filename(key, sizeof(key) - 1,
|
2006-08-02 17:57:06 +02:00
|
|
|
schema->db, schema->name, "", 0);
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference temporary, free below */
|
2006-06-28 02:35:13 +02:00
|
|
|
NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
|
2007-02-06 06:40:26 +01:00
|
|
|
if (share)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u",
|
|
|
|
share->key, share->use_count));
|
|
|
|
}
|
2006-06-28 02:35:13 +02:00
|
|
|
// invalidation already handled by binlog thread
|
|
|
|
if (!share || !share->op)
|
|
|
|
{
|
|
|
|
{
|
|
|
|
injector_ndb->setDatabaseName(schema->db);
|
|
|
|
Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(),
|
|
|
|
schema->name);
|
|
|
|
ndbtab_g.invalidate();
|
|
|
|
}
|
|
|
|
TABLE_LIST table_list;
|
|
|
|
bzero((char*) &table_list,sizeof(table_list));
|
|
|
|
table_list.db= schema->db;
|
|
|
|
table_list.alias= table_list.table_name= schema->name;
|
2010-08-12 15:50:23 +02:00
|
|
|
close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT);
|
2006-06-28 02:35:13 +02:00
|
|
|
}
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference temporary free */
|
2006-06-28 02:35:13 +02:00
|
|
|
if (share)
|
2007-02-06 06:40:26 +01:00
|
|
|
{
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-06-28 02:35:13 +02:00
|
|
|
free_share(&share);
|
2007-02-06 06:40:26 +01:00
|
|
|
}
|
2006-06-28 02:35:13 +02:00
|
|
|
}
|
|
|
|
// fall through
|
|
|
|
case SOT_CREATE_TABLE:
|
2007-02-27 10:27:04 +01:00
|
|
|
if (ndbcluster_check_if_local_table(schema->db, schema->name))
|
|
|
|
{
|
2007-05-31 16:45:22 +02:00
|
|
|
DBUG_PRINT("info", ("NDB Binlog: Skipping locally defined table '%s.%s'",
|
2007-02-27 10:27:04 +01:00
|
|
|
schema->db, schema->name));
|
2007-05-31 16:45:22 +02:00
|
|
|
sql_print_error("NDB Binlog: Skipping locally defined table '%s.%s' from "
|
2006-09-12 16:34:12 +02:00
|
|
|
"binlog schema event '%s' from node %d. ",
|
|
|
|
schema->db, schema->name, schema->query,
|
|
|
|
schema->node_id);
|
2007-02-27 10:27:04 +01:00
|
|
|
}
|
2006-09-12 16:34:12 +02:00
|
|
|
else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2009-09-10 11:18:29 +02:00
|
|
|
print_could_not_discover_error(thd, schema);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
log_query= 1;
|
|
|
|
break;
|
|
|
|
case SOT_DROP_DB:
|
2007-02-27 10:27:04 +01:00
|
|
|
/* Drop the database locally if it only contains ndb tables */
|
|
|
|
if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db))
|
|
|
|
{
|
2007-06-17 19:47:20 +02:00
|
|
|
const int no_print_error[1]= {0};
|
2007-02-27 10:27:04 +01:00
|
|
|
run_query(thd, schema->query,
|
|
|
|
schema->query + schema->query_length,
|
2007-06-17 19:47:20 +02:00
|
|
|
no_print_error, /* print error */
|
2007-02-27 10:27:04 +01:00
|
|
|
TRUE); /* don't binlog the query */
|
|
|
|
/* binlog dropping database after any table operations */
|
|
|
|
post_epoch_log_list->push_back(schema, mem_root);
|
|
|
|
/* acknowledge this query _after_ epoch completion */
|
|
|
|
post_epoch_unlock= 1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Database contained local tables, leave it */
|
2007-05-31 16:45:22 +02:00
|
|
|
sql_print_error("NDB Binlog: Skipping drop database '%s' since it contained local tables "
|
2006-11-15 11:38:22 +01:00
|
|
|
"binlog schema event '%s' from node %d. ",
|
|
|
|
schema->db, schema->query,
|
|
|
|
schema->node_id);
|
2007-02-27 10:27:04 +01:00
|
|
|
log_query= 1;
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
case SOT_CREATE_DB:
|
|
|
|
/* fall through */
|
|
|
|
case SOT_ALTER_DB:
|
2007-06-17 19:47:20 +02:00
|
|
|
{
|
|
|
|
const int no_print_error[1]= {0};
|
2006-01-12 19:51:02 +01:00
|
|
|
run_query(thd, schema->query,
|
|
|
|
schema->query + schema->query_length,
|
2007-06-17 19:47:20 +02:00
|
|
|
no_print_error, /* print error */
|
2007-04-18 16:02:20 +02:00
|
|
|
TRUE); /* don't binlog the query */
|
|
|
|
log_query= 1;
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
2007-06-17 19:47:20 +02:00
|
|
|
}
|
2006-02-01 01:12:11 +01:00
|
|
|
case SOT_TABLESPACE:
|
|
|
|
case SOT_LOGFILE_GROUP:
|
|
|
|
log_query= 1;
|
|
|
|
break;
|
2006-05-31 16:16:03 +02:00
|
|
|
case SOT_CLEAR_SLOCK:
|
|
|
|
abort();
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-02-16 00:30:56 +01:00
|
|
|
if (log_query && ndb_binlog_running)
|
2007-04-18 16:02:20 +02:00
|
|
|
ndb_binlog_query(thd, schema);
|
2006-03-21 16:54:56 +01:00
|
|
|
/* signal that schema operation has been handled */
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions were done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
The following function parameter changes were done:
- All string functions in mysys/strings were changed to use size_t
instead of uint for string lengths (see the sketch after these notes).
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint.
- Functions that used a pointer to a string length were changed to use size_t*.
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) the following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrm(), ha_discover() and handler::discover()
the type of the 'frmdata' argument to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly as this conflict was often hidden by casting the function to
hash_get_key.)
- Changed some buffer/memory-region types to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables that hold the result of my_read() / my_write() to
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not depend on uint size
(portability fix).
- Removed Windows-specific code to restore the cursor position, as this
causes a slowdown on Windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated the function comment to
reflect this. Changed the function that depended on the original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it does not depend on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
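A sketch of the size_t direction described in the notes above (assumed
call site; my_write(), MYF(MY_WME) and MY_FILE_ERROR are real names):

size_t written= my_write(fd, (uchar*) buf, len, MYF(MY_WME));
if (written == MY_FILE_ERROR)       /* MY_FILE_ERROR is now (size_t) -1 */
  return 1;                         /* error already reported via MY_WME */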
2007-05-10 11:59:39 +02:00
|
|
|
DBUG_DUMP("slock", (uchar*) schema->slock, schema->slock_length);
|
2006-03-21 16:54:56 +01:00
|
|
|
if (bitmap_is_set(&slock, node_id))
|
|
|
|
{
|
|
|
|
if (post_epoch_unlock)
|
|
|
|
post_epoch_unlock_list->push_back(schema, mem_root);
|
|
|
|
else
|
|
|
|
ndbcluster_update_slock(thd, schema->db, schema->name);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-03-21 16:54:56 +01:00
|
|
|
DBUG_RETURN(0);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-03-21 16:54:56 +01:00
|
|
|
/*
|
|
|
|
the normal case of UPDATE/INSERT has already been handled
|
|
|
|
*/
|
|
|
|
switch (ev_type)
|
|
|
|
{
|
2006-01-12 19:51:02 +01:00
|
|
|
case NDBEVENT::TE_DELETE:
|
|
|
|
// skip
|
|
|
|
break;
|
|
|
|
case NDBEVENT::TE_CLUSTER_FAILURE:
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-05-19 12:54:12 +02:00
|
|
|
sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.",
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_schema_share->key, (unsigned) pOp->getGCI());
|
2006-04-10 16:08:40 +02:00
|
|
|
// fall through
|
2006-01-12 19:51:02 +01:00
|
|
|
case NDBEVENT::TE_DROP:
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging &&
|
2006-04-10 16:08:40 +02:00
|
|
|
ndb_binlog_tables_inited && ndb_binlog_running)
|
|
|
|
sql_print_information("NDB Binlog: ndb tables initially "
|
|
|
|
"read only on reconnect.");
|
2007-02-05 06:04:36 +01:00
|
|
|
|
|
|
|
/* begin protect ndb_schema_share */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_share_mutex);
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog extra free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u",
|
|
|
|
ndb_schema_share->key,
|
|
|
|
ndb_schema_share->use_count));
|
2006-12-01 15:49:07 +01:00
|
|
|
free_share(&ndb_schema_share);
|
|
|
|
ndb_schema_share= 0;
|
2008-02-28 18:55:46 +01:00
|
|
|
ndb_binlog_tables_inited= 0;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
/* end protect ndb_schema_share */
|
|
|
|
|
2010-08-12 15:50:23 +02:00
|
|
|
close_cached_tables(NULL, NULL, FALSE, LONG_TIMEOUT);
|
2006-03-09 01:04:13 +01:00
|
|
|
// fall through
|
|
|
|
case NDBEVENT::TE_ALTER:
|
2006-04-03 19:11:20 +02:00
|
|
|
ndb_handle_schema_change(thd, ndb, pOp, tmp_share);
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
case NDBEVENT::TE_NODE_FAILURE:
|
|
|
|
{
|
|
|
|
uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()];
|
|
|
|
DBUG_ASSERT(node_id != 0xFF);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&tmp_share->mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
bitmap_clear_all(&tmp_share->subscriber_bitmap[node_id]);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_PRINT("info",("NODE_FAILURE UNSUBSCRIBE[%d]", node_id));
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-02-02 16:12:18 +01:00
|
|
|
{
|
|
|
|
sql_print_information("NDB Binlog: Node: %d, down,"
|
|
|
|
" Subscriber bitmask %x%x",
|
|
|
|
pOp->getNdbdNodeId(),
|
2006-04-03 19:11:20 +02:00
|
|
|
tmp_share->subscriber_bitmap[node_id].bitmap[1],
|
|
|
|
tmp_share->subscriber_bitmap[node_id].bitmap[0]);
|
2006-02-02 16:12:18 +01:00
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&tmp_share->mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case NDBEVENT::TE_SUBSCRIBE:
|
|
|
|
{
|
|
|
|
uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()];
|
|
|
|
uint8 req_id= pOp->getReqNodeId();
|
|
|
|
DBUG_ASSERT(req_id != 0 && node_id != 0xFF);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&tmp_share->mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
bitmap_set_bit(&tmp_share->subscriber_bitmap[node_id], req_id);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_PRINT("info",("SUBSCRIBE[%d] %d", node_id, req_id));
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-02-02 16:12:18 +01:00
|
|
|
{
|
|
|
|
sql_print_information("NDB Binlog: Node: %d, subscribe from node %d,"
|
|
|
|
" Subscriber bitmask %x%x",
|
|
|
|
pOp->getNdbdNodeId(),
|
|
|
|
req_id,
|
2006-04-03 19:11:20 +02:00
|
|
|
tmp_share->subscriber_bitmap[node_id].bitmap[1],
|
|
|
|
tmp_share->subscriber_bitmap[node_id].bitmap[0]);
|
2006-02-02 16:12:18 +01:00
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&tmp_share->mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case NDBEVENT::TE_UNSUBSCRIBE:
|
|
|
|
{
|
|
|
|
uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()];
|
|
|
|
uint8 req_id= pOp->getReqNodeId();
|
|
|
|
DBUG_ASSERT(req_id != 0 && node_id != 0xFF);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&tmp_share->mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
bitmap_clear_bit(&tmp_share->subscriber_bitmap[node_id], req_id);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_PRINT("info",("UNSUBSCRIBE[%d] %d", node_id, req_id));
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-02-02 16:12:18 +01:00
|
|
|
{
|
|
|
|
sql_print_information("NDB Binlog: Node: %d, unsubscribe from node %d,"
|
|
|
|
" Subscriber bitmask %x%x",
|
|
|
|
pOp->getNdbdNodeId(),
|
|
|
|
req_id,
|
2006-04-03 19:11:20 +02:00
|
|
|
tmp_share->subscriber_bitmap[node_id].bitmap[1],
|
|
|
|
tmp_share->subscriber_bitmap[node_id].bitmap[0]);
|
2006-02-02 16:12:18 +01:00
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&tmp_share->mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
sql_print_error("NDB Binlog: unknown non data event %d for %s. "
|
2006-04-03 19:11:20 +02:00
|
|
|
"Ignoring...", (unsigned) ev_type, tmp_share->key);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
2006-03-09 01:04:13 +01:00
|
|
|
/*
|
|
|
|
process any operations that should be done after
|
|
|
|
the epoch is complete
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
|
2006-03-21 16:54:56 +01:00
|
|
|
List<Cluster_schema>
|
2006-03-09 01:04:13 +01:00
|
|
|
*post_epoch_log_list,
|
2006-03-21 16:54:56 +01:00
|
|
|
List<Cluster_schema>
|
2006-03-09 01:04:13 +01:00
|
|
|
*post_epoch_unlock_list)
|
|
|
|
{
|
2006-05-04 13:58:17 +02:00
|
|
|
if (post_epoch_log_list->elements == 0)
|
|
|
|
return;
|
2006-03-09 01:04:13 +01:00
|
|
|
DBUG_ENTER("ndb_binlog_thread_handle_schema_event_post_epoch");
|
2006-03-21 16:54:56 +01:00
|
|
|
Cluster_schema *schema;
|
2006-03-09 01:04:13 +01:00
|
|
|
while ((schema= post_epoch_log_list->pop()))
|
|
|
|
{
|
2006-05-04 13:58:17 +02:00
|
|
|
DBUG_PRINT("info",
|
|
|
|
("%s.%s: log query_length: %d query: '%s' type: %d",
|
|
|
|
schema->db, schema->name,
|
|
|
|
schema->query_length, schema->query,
|
|
|
|
schema->type));
|
|
|
|
int log_query= 0;
|
2006-03-09 01:04:13 +01:00
|
|
|
{
|
2006-05-31 23:24:25 +02:00
|
|
|
enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type;
|
2009-06-19 10:24:43 +02:00
|
|
|
char key[FN_REFLEN + 1];
|
|
|
|
build_table_filename(key, sizeof(key) - 1, schema->db, schema->name, "", 0);
|
2006-05-31 23:24:25 +02:00
|
|
|
if (schema_type == SOT_CLEAR_SLOCK)
|
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndbcluster_mutex);
|
2006-05-31 23:24:25 +02:00
|
|
|
NDB_SCHEMA_OBJECT *ndb_schema_object=
|
2009-10-14 18:37:38 +02:00
|
|
|
(NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects,
|
|
|
|
(uchar*) key, strlen(key));
|
2006-05-31 23:24:25 +02:00
|
|
|
if (ndb_schema_object)
|
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_object->mutex);
|
2006-05-31 23:24:25 +02:00
|
|
|
memcpy(ndb_schema_object->slock, schema->slock,
|
|
|
|
sizeof(ndb_schema_object->slock));
|
|
|
|
DBUG_DUMP("ndb_schema_object->slock_bitmap.bitmap",
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
2007-05-10 11:59:39 +02:00
|
|
|
(uchar*)ndb_schema_object->slock_bitmap.bitmap,
|
2006-05-31 23:24:25 +02:00
|
|
|
no_bytes_in_map(&ndb_schema_object->slock_bitmap));
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_object->mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-05-31 23:24:25 +02:00
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndbcluster_mutex);
|
2006-05-31 23:24:25 +02:00
|
|
|
continue;
|
|
|
|
}
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference temporary, free below */
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that were found necessary while testing the handler changes
2006-06-04 17:52:22 +02:00
|
|
|
NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
|
2007-02-06 06:40:26 +01:00
|
|
|
if (share)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u",
|
|
|
|
share->key, share->use_count));
|
|
|
|
}
|
2006-05-04 13:58:17 +02:00
|
|
|
switch (schema_type)
|
2006-03-09 01:04:13 +01:00
|
|
|
{
|
|
|
|
case SOT_DROP_DB:
|
2006-05-04 13:58:17 +02:00
|
|
|
log_query= 1;
|
2006-03-09 01:04:13 +01:00
|
|
|
break;
|
2006-05-04 13:58:17 +02:00
|
|
|
case SOT_DROP_TABLE:
|
2007-04-18 21:39:45 +02:00
|
|
|
log_query= 1;
|
2006-05-04 13:58:17 +02:00
|
|
|
// invalidation already handled by binlog thread
|
|
|
|
if (share && share->op)
|
|
|
|
{
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// fall through
|
2006-03-09 01:04:13 +01:00
|
|
|
case SOT_RENAME_TABLE:
|
2006-05-04 13:58:17 +02:00
|
|
|
// fall through
|
2006-03-09 01:04:13 +01:00
|
|
|
case SOT_ALTER_TABLE:
|
2006-05-04 13:58:17 +02:00
|
|
|
// invalidation already handled by binlog thread
|
|
|
|
if (!share || !share->op)
|
2006-03-09 01:04:13 +01:00
|
|
|
{
|
2006-05-04 13:58:17 +02:00
|
|
|
{
|
|
|
|
injector_ndb->setDatabaseName(schema->db);
|
|
|
|
Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(),
|
|
|
|
schema->name);
|
|
|
|
ndbtab_g.invalidate();
|
|
|
|
}
|
|
|
|
TABLE_LIST table_list;
|
|
|
|
bzero((char*) &table_list,sizeof(table_list));
|
|
|
|
table_list.db= schema->db;
|
|
|
|
table_list.alias= table_list.table_name= schema->name;
|
2010-08-12 15:50:23 +02:00
|
|
|
close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT);
|
2006-03-09 01:04:13 +01:00
|
|
|
}
|
2006-05-04 13:58:17 +02:00
|
|
|
if (schema_type != SOT_ALTER_TABLE)
|
|
|
|
break;
|
|
|
|
// fall through
|
|
|
|
case SOT_RENAME_TABLE_NEW:
|
|
|
|
log_query= 1;
|
2006-05-10 16:54:29 +02:00
|
|
|
if (ndb_binlog_running && (!share || !share->op))
|
2006-03-09 01:04:13 +01:00
|
|
|
{
|
2006-05-04 13:58:17 +02:00
|
|
|
/*
|
|
|
|
we need to free any share here as command below
|
|
|
|
may need to call handle_trailing_share
|
|
|
|
*/
|
|
|
|
if (share)
|
|
|
|
{
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference temporary free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-05-04 13:58:17 +02:00
|
|
|
free_share(&share);
|
|
|
|
share= 0;
|
|
|
|
}
|
2007-02-27 10:27:04 +01:00
|
|
|
if (ndbcluster_check_if_local_table(schema->db, schema->name))
|
|
|
|
{
|
2007-05-31 16:45:22 +02:00
|
|
|
DBUG_PRINT("info", ("NDB Binlog: Skipping locally defined table '%s.%s'",
|
2007-02-27 10:27:04 +01:00
|
|
|
schema->db, schema->name));
|
2007-05-31 16:45:22 +02:00
|
|
|
sql_print_error("NDB Binlog: Skipping locally defined table '%s.%s' from "
|
2006-09-12 16:34:12 +02:00
|
|
|
"binlog schema event '%s' from node %d. ",
|
|
|
|
schema->db, schema->name, schema->query,
|
|
|
|
schema->node_id);
|
2007-02-27 10:27:04 +01:00
|
|
|
}
|
2006-09-12 16:34:12 +02:00
|
|
|
else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
|
2007-02-27 10:27:04 +01:00
|
|
|
{
|
2009-09-10 11:18:29 +02:00
|
|
|
print_could_not_discover_error(thd, schema);
|
2006-05-04 13:58:17 +02:00
|
|
|
}
|
2006-03-09 01:04:13 +01:00
|
|
|
}
|
2006-05-04 13:58:17 +02:00
|
|
|
break;
|
2006-03-09 01:04:13 +01:00
|
|
|
default:
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that were found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_update() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in the column usage maps if the handler so requires.
(The handler indicates what it needs in handler->table_flags().)
- table->prepare_for_position() to allow us to tell the handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function.)
- table->mark_auto_increment_column() to tell the handler that we are going
to update columns that are part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that are part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to the handler to allow
it to quickly know that it only needs to read columns that are part
of the key. (The handler can also use the column map for detecting this,
but a simpler/faster handler can just monitor the extra() call.)
- table->mark_columns_used_by_index_no_reset() to, in addition to other columns,
also mark all columns that are used by the given key.
- table->restore_column_maps_after_mark_index() to restore the default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_set. Used by filesort() to mark all used columns.
- Maintain in TABLE->merge_keys a set of all keys that are used in the query.
(Simplifies some optimization loops.)
- Maintain Field->part_of_key_not_clustered, which is like Field->part_of_key
but a field in the clustered key is not assumed to be part of all indexes.
(Used in opt_range.cc for faster loops.)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map(),
tmp_use_all_columns() and tmp_restore_column_map() functions to temporarily
mark all columns as usable. The 'dbug_' versions are primarily intended
for use inside a handler when it wants to just call Field::store() & Field::val()
functions, but doesn't need the column maps set for any other usage
(i.e. bitmap_is_set() is never called); see the usage sketch after this list.
- We can't use compare_records() to skip updates for handlers that return
a partial column set when the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write, we can't know at the MySQL level whether the value changed or not.
The reason this worked before was that MySQL marked all to-be-written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() no longer sets up a temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
pass the to-be-used MEM_ROOT to get_new_handler().
(Simpler, faster code.)
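A usage sketch for the temporary column-map helpers mentioned above,
following the same pattern shown earlier in this message (val_int() is
used here as a stand-in for whichever Field accessor the handler calls):

  /* Temporarily allow val()/store() on columns not in the maps. */
  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
  longlong val= field->val_int();  /* would hit a DBUG_ASSERT in debug
                                      builds without the temporary map */
  dbug_tmp_restore_column_map(table->read_set, old_map);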
Bugs fixed:
- Column marking was not done correctly in a lot of cases
(ALTER TABLE, when using triggers, auto_increment fields etc).
(Could potentially result in wrong values being inserted in table handlers
relying on the old column maps or field->set_query_id being correct.)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split the thd->options flag OPTION_STATUS_NO_TRANS_UPDATE into two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about
"Some non-transactional changed tables couldn't be rolled back".
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE), which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back".
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in the slow log.
- If we get a duplicate row on insert, change the column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handlers that don't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields have been
automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key would fail if the field was not
declared as NOT NULL.
Cleanups:
- Removed the unused condition argument to setup_tables().
- Removed the unneeded item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- Added a new argument to filesort() to indicate that it should return a set
of row pointers (not the used columns). This allowed me to remove some
references to sql_command in filesort and should also enable us to return
column results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up lines found to be too long.
- Moved some variable declarations to the start of functions for better code
readability.
- Removed some unused arguments from functions
(setup_fields(), mysql_prepare_insert_check_table()).
- setup_fields() now takes an enum instead of an int for marking column
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of whether the timestamp field was set by the statement.
- Removed calls to free_io_cache() as this is now done automatically in
ha_reset().
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames).
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparison with the 'convert-dbug-for-diff' tool (see the sketch below).
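The corrected pattern, sketched with an illustrative pointer argument
(any pointer-valued DBUG_PRINT argument is handled the same way):

  DBUG_PRINT("info", ("share: %lx", (long) share));    /* before: bare hex */
  DBUG_PRINT("info", ("share: 0x%lx", (long) share));  /* after: 0x prefix,
                                                          diff-tool friendly */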
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row-based logging (as shown by testcase binlog_row_mix_innodb_myisam.result).
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly.)
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
DBUG_ASSERT(FALSE);
|
2006-03-09 01:04:13 +01:00
|
|
|
}
|
|
|
|
if (share)
|
2006-03-09 15:50:26 +01:00
|
|
|
{
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference temporary free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-03-09 01:04:13 +01:00
|
|
|
free_share(&share);
|
2006-03-09 15:50:26 +01:00
|
|
|
share= 0;
|
|
|
|
}
|
2006-03-09 01:04:13 +01:00
|
|
|
}
|
2006-05-04 13:58:17 +02:00
|
|
|
if (ndb_binlog_running && log_query)
|
2007-04-18 16:02:20 +02:00
|
|
|
ndb_binlog_query(thd, schema);
|
2006-03-09 01:04:13 +01:00
|
|
|
}
|
|
|
|
while ((schema= post_epoch_unlock_list->pop()))
|
|
|
|
{
|
|
|
|
ndbcluster_update_slock(thd, schema->db, schema->name);
|
2006-03-09 09:30:32 +01:00
|
|
|
}
|
|
|
|
DBUG_VOID_RETURN;
|
2006-03-09 01:04:13 +01:00
|
|
|
}
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
|
|
|
Timer class for doing performance measurements
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
Internal helper functions for handling the cluster replication tables
|
2006-12-01 15:49:07 +01:00
|
|
|
- ndb_binlog_index
|
|
|
|
- ndb_apply_status
|
2006-01-12 19:51:02 +01:00
|
|
|
*********************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
struct to hold the data to be inserted into the
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index table
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
2006-12-01 15:49:07 +01:00
|
|
|
struct ndb_binlog_index_row {
|
2006-01-12 19:51:02 +01:00
|
|
|
ulonglong gci;
|
|
|
|
const char *master_log_file;
|
|
|
|
ulonglong master_log_pos;
|
|
|
|
ulonglong n_inserts;
|
|
|
|
ulonglong n_updates;
|
|
|
|
ulonglong n_deletes;
|
|
|
|
ulonglong n_schemaops;
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
2006-12-01 15:49:07 +01:00
|
|
|
Open the ndb_binlog_index table
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
2009-11-30 16:55:03 +01:00
|
|
|
static int open_ndb_binlog_index(THD *thd, TABLE **ndb_binlog_index)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
static char repdb[]= NDB_REP_DB;
|
|
|
|
static char reptable[]= NDB_REP_TABLE;
|
|
|
|
const char *save_proc_info= thd->proc_info;
|
2009-11-30 16:55:03 +01:00
|
|
|
TABLE_LIST *tables= &binlog_tables;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2009-12-08 10:57:07 +01:00
|
|
|
tables->init_one_table(repdb, strlen(repdb), reptable, strlen(reptable),
|
|
|
|
reptable, TL_WRITE);
|
2006-01-12 19:51:02 +01:00
|
|
|
thd->proc_info= "Opening " NDB_REP_DB "." NDB_REP_TABLE;
|
2009-12-08 10:57:07 +01:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
tables->required_type= FRMTYPE_TABLE;
|
|
|
|
thd->clear_error();
|
2010-02-24 18:04:00 +01:00
|
|
|
if (open_and_lock_tables(thd, tables, FALSE, 0))
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2007-12-15 20:24:01 +01:00
|
|
|
if (thd->killed)
|
|
|
|
sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed");
|
|
|
|
else
|
|
|
|
sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
|
2009-09-10 11:18:29 +02:00
|
|
|
thd->stmt_da->sql_errno(),
|
|
|
|
thd->stmt_da->message());
|
2006-01-12 19:51:02 +01:00
|
|
|
thd->proc_info= save_proc_info;
|
|
|
|
return -1;
|
|
|
|
}
|
2006-12-01 15:49:07 +01:00
|
|
|
*ndb_binlog_index= tables->table;
|
2006-01-12 19:51:02 +01:00
|
|
|
thd->proc_info= save_proc_info;
|
2006-12-01 15:49:07 +01:00
|
|
|
(*ndb_binlog_index)->use_all_columns();
|
2006-01-12 19:51:02 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-06-04 17:52:22 +02:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
2006-12-01 15:49:07 +01:00
|
|
|
Insert one row in the ndb_binlog_index
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
2006-06-04 17:52:22 +02:00
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
int ndb_add_ndb_binlog_index(THD *thd, void *_row)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index_row &row= *(ndb_binlog_index_row *) _row;
|
2006-01-12 19:51:02 +01:00
|
|
|
int error= 0;
|
2006-02-24 16:19:55 +01:00
|
|
|
/*
|
|
|
|
Turn off binlogging to prevent the table changes from being written to
|
|
|
|
the binary log.
|
|
|
|
*/
|
2009-12-22 10:35:56 +01:00
|
|
|
ulong saved_options= thd->variables.option_bits;
|
|
|
|
thd->variables.option_bits&= ~OPTION_BIN_LOG;
|
2006-02-24 16:19:55 +01:00
|
|
|
|
Backport of revno ## 2617.31.1, 2617.31.3, 2617.31.4, 2617.31.5,
2617.31.12, 2617.31.15, 2617.31.15, 2617.31.16, 2617.43.1
- initial changeset that introduced the fix for
Bug#989 and follow up fixes for all test suite failures
introduced in the initial changeset.
------------------------------------------------------------
revno: 2617.31.1
committer: Davi Arnaut <Davi.Arnaut@Sun.COM>
branch nick: 4284-6.0
timestamp: Fri 2009-03-06 19:17:00 -0300
message:
Bug#989: If DROP TABLE while there's an active transaction, wrong binlog order
WL#4284: Transactional DDL locking
Currently the MySQL server does not keep metadata locks on
schema objects for the duration of a transaction, thus failing
to guarantee the integrity of the schema objects being used
during the transaction and to protect them from concurrent
DDL operations. This also poses a problem for replication, as
a DDL operation might be replicated even though there are
active transactions using the object being modified.
The solution is to defer the release of metadata locks until
an active transaction is either committed or rolled back. This
prevents other statements from modifying the table for the
entire duration of the transaction. This provides commitment
ordering for guaranteeing serializability across multiple
transactions.
- Incompatible change:
If MySQL's metadata locking system encounters a lock conflict,
the usual scheme is to use the try-and-back-off technique to
avoid deadlocks -- this scheme consists in releasing all locks
and trying to acquire them all in one go.
But in a transactional context this algorithm can't be utilized,
as it's not possible to release locks acquired during the course
of the transaction without breaking the transaction's commitments.
To avoid deadlocks in this case, the ER_LOCK_DEADLOCK will be
returned if a lock conflict is encountered during a transaction.
Let's consider an example:
A transaction has two statements that modify table t1, then table
t2, and then commits. The first statement of the transaction will
acquire a shared metadata lock on table t1, and it will be kept
until COMMIT to ensure serializability.
At the moment when the second statement attempts to acquire a
shared metadata lock on t2, a concurrent ALTER or DROP statement
might have locked t2 exclusively. The prescription of the current
locking protocol is that the acquirer of the shared lock backs off
-- gives up all its current locks and retries. This implies that
the entire multi-statement transaction has to be rolled back.
- Incompatible change:
FLUSH commands such as FLUSH PRIVILEGES and FLUSH TABLES WITH READ
LOCK won't cause locked tables to be implicitly unlocked anymore.
2009-12-05 00:02:48 +01:00
|
|
|
if (!ndb_binlog_index && open_ndb_binlog_index(thd, &ndb_binlog_index))
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2009-12-05 00:02:48 +01:00
|
|
|
sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
|
|
|
|
error= -1;
|
|
|
|
goto add_ndb_binlog_index_err;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
2007-04-05 15:59:42 +02:00
|
|
|
/*
|
|
|
|
Initialize ndb_binlog_index->record[0]
|
|
|
|
*/
|
|
|
|
empty_record(ndb_binlog_index);
|
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index->field[0]->store(row.master_log_pos);
|
|
|
|
ndb_binlog_index->field[1]->store(row.master_log_file,
|
2006-01-12 19:51:02 +01:00
|
|
|
strlen(row.master_log_file),
|
|
|
|
&my_charset_bin);
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index->field[2]->store(row.gci);
|
|
|
|
ndb_binlog_index->field[3]->store(row.n_inserts);
|
|
|
|
ndb_binlog_index->field[4]->store(row.n_updates);
|
|
|
|
ndb_binlog_index->field[5]->store(row.n_deletes);
|
|
|
|
ndb_binlog_index->field[6]->store(row.n_schemaops);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
if ((error= ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0])))
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
sql_print_error("NDB Binlog: Writing row to ndb_binlog_index: %d", error);
|
2006-01-12 19:51:02 +01:00
|
|
|
error= -1;
|
2006-12-01 15:49:07 +01:00
|
|
|
goto add_ndb_binlog_index_err;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
add_ndb_binlog_index_err:
|
2010-07-27 12:25:53 +02:00
|
|
|
thd->stmt_da->can_overwrite_status= TRUE;
|
|
|
|
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
|
|
|
|
thd->stmt_da->can_overwrite_status= FALSE;
|
2006-01-12 19:51:02 +01:00
|
|
|
close_thread_tables(thd);
|
2010-07-27 12:25:53 +02:00
|
|
|
thd->mdl_context.release_transactional_locks();
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index= 0;
|
2009-12-22 10:35:56 +01:00
|
|
|
thd->variables.option_bits= saved_options;
|
2006-01-12 19:51:02 +01:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
Functions for start, stop, wait for ndbcluster binlog thread
|
|
|
|
*********************************************************************/
|
|
|
|
|
2006-05-16 20:56:45 +02:00
|
|
|
enum Binlog_thread_state
|
|
|
|
{
|
|
|
|
BCCC_running= 0,
|
|
|
|
BCCC_exit= 1,
|
|
|
|
BCCC_restart= 2
|
|
|
|
};
|
|
|
|
|
|
|
|
static enum Binlog_thread_state do_ndbcluster_binlog_close_connection= BCCC_restart;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
int ndbcluster_binlog_start()
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_binlog_start");
|
|
|
|
|
2007-04-18 16:02:20 +02:00
|
|
|
if (::server_id == 0)
|
|
|
|
{
|
|
|
|
sql_print_warning("NDB: server id set to zero will cause any other mysqld "
|
|
|
|
"with bin log to log with wrong server id");
|
|
|
|
}
|
|
|
|
else if (::server_id & 0x1 << 31)
|
|
|
|
{
|
|
|
|
sql_print_error("NDB: server id's with high bit set is reserved for internal "
|
|
|
|
"purposes");
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
}
|
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_init(key_injector_mutex, &injector_mutex, MY_MUTEX_INIT_FAST);
|
|
|
|
mysql_cond_init(key_injector_cond, &injector_cond, NULL);
|
|
|
|
mysql_mutex_init(key_ndb_schema_share_mutex,
|
|
|
|
&ndb_schema_share_mutex, MY_MUTEX_INIT_FAST);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
/* Create injector thread */
|
2010-01-07 06:42:07 +01:00
|
|
|
if (mysql_thread_create(key_thread_ndb_binlog,
|
|
|
|
&ndb_binlog_thread, &connection_attrib,
|
|
|
|
ndb_binlog_thread_func, 0))
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
DBUG_PRINT("error", ("Could not create ndb injector thread"));
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_destroy(&injector_cond);
|
|
|
|
mysql_mutex_destroy(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
}
|
|
|
|
|
2006-12-20 22:57:23 +01:00
|
|
|
ndbcluster_binlog_inited= 1;
|
|
|
|
|
|
|
|
/* Wait for the injector thread to start */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
while (!ndb_binlog_thread_running)
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_wait(&injector_cond, &injector_mutex);
|
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-12-20 22:57:23 +01:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
if (ndb_binlog_thread_running < 0)
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**************************************************************
|
|
|
|
Internal helper functions for creating/dropping ndb events
|
|
|
|
used by the client sql threads
|
|
|
|
**************************************************************/
|
|
|
|
void
|
|
|
|
ndb_rep_event_name(String *event_name,const char *db, const char *tbl)
|
|
|
|
{
|
|
|
|
event_name->set_ascii("REPL$", 5);
|
|
|
|
event_name->append(db);
|
|
|
|
if (tbl)
|
|
|
|
{
|
|
|
|
event_name->append('/');
|
|
|
|
event_name->append(tbl);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-09-12 16:34:12 +02:00
|
|
|
bool
|
|
|
|
ndbcluster_check_if_local_table(const char *dbname, const char *tabname)
|
|
|
|
{
|
2009-06-19 10:24:43 +02:00
|
|
|
char key[FN_REFLEN + 1];
|
|
|
|
char ndb_file[FN_REFLEN + 1];
|
2006-09-12 16:34:12 +02:00
|
|
|
|
|
|
|
DBUG_ENTER("ndbcluster_check_if_local_table");
|
|
|
|
build_table_filename(key, FN_LEN-1, dbname, tabname, reg_ext, 0);
|
|
|
|
build_table_filename(ndb_file, FN_LEN-1, dbname, tabname, ha_ndb_ext, 0);
|
|
|
|
/* Check that any defined table is an ndb table */
|
|
|
|
DBUG_PRINT("info", ("Looking for file %s and %s", key, ndb_file));
|
|
|
|
if ((! my_access(key, F_OK)) && my_access(ndb_file, F_OK))
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("table file %s not on disk, local table", ndb_file));
|
|
|
|
|
|
|
|
|
|
|
|
DBUG_RETURN(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
DBUG_RETURN(false);
|
|
|
|
}
|
|
|
|
|
2006-11-15 11:13:49 +01:00
|
|
|
bool
|
|
|
|
ndbcluster_check_if_local_tables_in_db(THD *thd, const char *dbname)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_check_if_local_tables_in_db");
|
|
|
|
DBUG_PRINT("info", ("Looking for files in directory %s", dbname));
|
2007-08-03 00:14:05 +02:00
|
|
|
LEX_STRING *tabname;
|
|
|
|
List<LEX_STRING> files;
|
2009-06-19 10:24:43 +02:00
|
|
|
char path[FN_REFLEN + 1];
|
2006-11-15 11:13:49 +01:00
|
|
|
|
2009-06-19 10:24:43 +02:00
|
|
|
build_table_filename(path, sizeof(path) - 1, dbname, "", "", 0);
|
2006-11-15 11:13:49 +01:00
|
|
|
if (find_files(thd, &files, dbname, path, NullS, 0) != FIND_FILES_OK)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("Failed to find files"));
|
|
|
|
DBUG_RETURN(true);
|
|
|
|
}
|
|
|
|
DBUG_PRINT("info",("found: %d files", files.elements));
|
|
|
|
while ((tabname= files.pop()))
|
|
|
|
{
|
2007-08-03 00:14:05 +02:00
|
|
|
DBUG_PRINT("info", ("Found table %s", tabname->str));
|
|
|
|
if (ndbcluster_check_if_local_table(dbname, tabname->str))
|
2006-11-15 11:13:49 +01:00
|
|
|
DBUG_RETURN(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
DBUG_RETURN(false);
|
|
|
|
}

/*
  Common function for setting up everything for logging a table at
  create/discover.
*/
int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
                                   uint key_len,
                                   const char *db,
                                   const char *table_name,
                                   my_bool share_may_exist)
{
  int do_event_op= ndb_binlog_running;
  DBUG_ENTER("ndbcluster_create_binlog_setup");
  DBUG_PRINT("enter", ("key: %s key_len: %d %s.%s share_may_exist: %d",
                       key, key_len, db, table_name, share_may_exist));
  DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(table_name));
  DBUG_ASSERT(strlen(key) == key_len);

  mysql_mutex_lock(&ndbcluster_mutex);

  /* Handle any trailing share */
  NDB_SHARE *share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
                                                (uchar*) key, key_len);

  if (share && share_may_exist)
  {
    if (share->flags & NSF_NO_BINLOG ||
        share->op != 0 ||
        share->op_old != 0)
    {
      mysql_mutex_unlock(&ndbcluster_mutex);
      DBUG_RETURN(0); // replication already setup, or should not be
    }
  }

  if (share)
  {
    if (share->op || share->op_old)
    {
      my_errno= HA_ERR_TABLE_EXIST;
      mysql_mutex_unlock(&ndbcluster_mutex);
      DBUG_RETURN(1);
    }
    if (!share_may_exist || share->connect_count !=
        g_ndb_cluster_connection->get_connect_count())
    {
      handle_trailing_share(share);
      share= NULL;
    }
  }

  /* Create the share needed to hold replication information */
  if (share)
  {
    /* ndb_share reference create */
    ++share->use_count;
    DBUG_PRINT("NDB_SHARE", ("%s create use_count: %u",
                             share->key, share->use_count));
  }
  /* ndb_share reference create */
  else if (!(share= get_share(key, 0, TRUE, TRUE)))
  {
    sql_print_error("NDB Binlog: "
                    "allocating table share for %s failed", key);
  }
  else
  {
    DBUG_PRINT("NDB_SHARE", ("%s create use_count: %u",
                             share->key, share->use_count));
  }

  if (!ndb_schema_share &&
      strcmp(share->db, NDB_REP_DB) == 0 &&
      strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
    do_event_op= 1;
  else if (!ndb_apply_status_share &&
           strcmp(share->db, NDB_REP_DB) == 0 &&
           strcmp(share->table_name, NDB_APPLY_TABLE) == 0)
    do_event_op= 1;

  if (!do_event_op)
  {
    share->flags|= NSF_NO_BINLOG;
    mysql_mutex_unlock(&ndbcluster_mutex);
    DBUG_RETURN(0);
  }
  mysql_mutex_unlock(&ndbcluster_mutex);
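
  /*
    Editor's note: the while loop below executes at most once; it serves
    as a breakable scope, so error paths can "break" out to the final
    DBUG_RETURN(-1) while the success path returns 0 from inside.
  */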
  while (share && !IS_TMP_PREFIX(table_name))
  {
    /*
      ToDo: sanity-check the share so that the table is actually the
      same, i.e. we need to open the table from the frm in this case.
      Currently awaiting this to be fixed in the 4.1 tree in the
      general case.
    */

    /* Create the event in NDB */
    ndb->setDatabaseName(db);

    NDBDICT *dict= ndb->getDictionary();
    Ndb_table_guard ndbtab_g(dict, table_name);
    const NDBTAB *ndbtab= ndbtab_g.get_table();
    if (ndbtab == 0)
    {
      if (opt_ndb_extra_logging)
        sql_print_information("NDB Binlog: Failed to get table %s from ndb: "
                              "%s, %d", key, dict->getNdbError().message,
                              dict->getNdbError().code);
      break; // error
    }
    String event_name(INJECTOR_EVENT_LEN);
    ndb_rep_event_name(&event_name, db, table_name);
    /*
      The event should have been created by someone else, but let's
      make sure, and create it if it doesn't exist.
    */
    const NDBEVENT *ev= dict->getEvent(event_name.c_ptr());
    if (!ev)
    {
      if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share))
      {
        sql_print_error("NDB Binlog: "
                        "FAILED CREATE (DISCOVER) TABLE Event: %s",
                        event_name.c_ptr());
        break; // error
      }
      if (opt_ndb_extra_logging)
        sql_print_information("NDB Binlog: "
                              "CREATE (DISCOVER) TABLE Event: %s",
                              event_name.c_ptr());
    }
    else
    {
      delete ev;
      if (opt_ndb_extra_logging)
        sql_print_information("NDB Binlog: DISCOVER TABLE Event: %s",
                              event_name.c_ptr());
    }

    /*
      Create the event operations for receiving logging events.
    */
    if (ndbcluster_create_event_ops(share, ndbtab, event_name.c_ptr()))
    {
      sql_print_error("NDB Binlog: "
                      "FAILED CREATE (DISCOVER) EVENT OPERATIONS Event: %s",
                      event_name.c_ptr());
      /* a warning has been issued to the client */
      DBUG_RETURN(0);
    }
    DBUG_RETURN(0);
  }
  DBUG_RETURN(-1);
}
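
/*
  Editor's summary of ndbcluster_create_event() below (derived from its
  code): returns 0 on success or when there is nothing to do, -1 on
  failure; push_warning > 0 pushes a warning for the unsupported
  blob-without-primary-key case, and push_warning > 1 additionally
  pushes warnings for NDB dictionary errors.
*/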
int
ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
                        const char *event_name, NDB_SHARE *share,
                        int push_warning)
{
  THD *thd= current_thd;
  DBUG_ENTER("ndbcluster_create_event");
  DBUG_PRINT("info", ("table=%s version=%d event=%s share=%s",
                      ndbtab->getName(), ndbtab->getObjectVersion(),
                      event_name, share ? share->key : "(nil)"));
  DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(ndbtab->getName()));
  if (!share)
  {
    DBUG_PRINT("info", ("share == NULL"));
    DBUG_RETURN(0);
  }
  if (share->flags & NSF_NO_BINLOG)
  {
    DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x %d",
                        share->flags, share->flags & NSF_NO_BINLOG));
    DBUG_RETURN(0);
  }

  NDBDICT *dict= ndb->getDictionary();
  NDBEVENT my_event(event_name);
  my_event.setTable(*ndbtab);
  my_event.addTableEvent(NDBEVENT::TE_ALL);
  if (share->flags & NSF_HIDDEN_PK)
  {
    if (share->flags & NSF_BLOB_FLAG)
    {
      sql_print_error("NDB Binlog: logging of table %s "
                      "with BLOB attribute and no PK is not supported",
                      share->key);
      if (push_warning)
        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                            ER_ILLEGAL_HA_CREATE_OPTION,
                            ER(ER_ILLEGAL_HA_CREATE_OPTION),
                            ndbcluster_hton_name,
                            "Binlog of table with BLOB attribute and no PK");

      share->flags|= NSF_NO_BINLOG;
      DBUG_RETURN(-1);
    }
    /* No primary key, subscribe for all attributes */
    my_event.setReport(NDBEVENT::ER_ALL);
    DBUG_PRINT("info", ("subscription all"));
  }
  else
  {
    if (ndb_schema_share || strcmp(share->db, NDB_REP_DB) ||
        strcmp(share->table_name, NDB_SCHEMA_TABLE))
    {
      my_event.setReport(NDBEVENT::ER_UPDATED);
      DBUG_PRINT("info", ("subscription only updated"));
    }
    else
    {
      my_event.setReport((NDBEVENT::EventReport)
                         (NDBEVENT::ER_ALL | NDBEVENT::ER_SUBSCRIBE));
      DBUG_PRINT("info", ("subscription all and subscribe"));
    }
  }
  if (share->flags & NSF_BLOB_FLAG)
    my_event.mergeEvents(TRUE);

  /* add all columns to the event */
  int n_cols= ndbtab->getNoOfColumns();
  for (int a= 0; a < n_cols; a++)
    my_event.addEventColumn(a);

  if (dict->createEvent(my_event)) // Add event to database
  {
    if (dict->getNdbError().classification != NdbError::SchemaObjectExists)
    {
      /*
        failed, print a warning
      */
      if (push_warning > 1)
        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                            ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
                            dict->getNdbError().code,
                            dict->getNdbError().message, "NDB");
      sql_print_error("NDB Binlog: Unable to create event in database. "
                      "Event: %s Error Code: %d Message: %s", event_name,
                      dict->getNdbError().code, dict->getNdbError().message);
      DBUG_RETURN(-1);
    }

    /*
      Try retrieving the event; if table version/id matches, we will get
      a valid event. Otherwise we have a trailing event from before.
    */
    const NDBEVENT *ev;
    if ((ev= dict->getEvent(event_name)))
    {
      delete ev;
      DBUG_RETURN(0);
    }

    /*
      Trailing event from before; an error, but try to correct it.
    */
    if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT &&
        dict->dropEvent(my_event.getName()))
    {
      if (push_warning > 1)
        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                            ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
                            dict->getNdbError().code,
                            dict->getNdbError().message, "NDB");
      sql_print_error("NDB Binlog: Unable to create event in database. "
                      "Attempt to correct with drop failed. "
                      "Event: %s Error Code: %d Message: %s",
                      event_name,
                      dict->getNdbError().code,
                      dict->getNdbError().message);
      DBUG_RETURN(-1);
    }

    /*
      Try to add the event again.
    */
    if (dict->createEvent(my_event))
    {
      if (push_warning > 1)
        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                            ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
                            dict->getNdbError().code,
                            dict->getNdbError().message, "NDB");
      sql_print_error("NDB Binlog: Unable to create event in database. "
                      "Attempt to correct with drop ok, but create failed. "
                      "Event: %s Error Code: %d Message: %s",
                      event_name,
                      dict->getNdbError().code,
                      dict->getNdbError().message);
      DBUG_RETURN(-1);
    }
#ifdef NDB_BINLOG_EXTRA_WARNINGS
    push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                        ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
                        0, "NDB Binlog: Removed trailing event",
                        "NDB");
#endif
  }

  DBUG_RETURN(0);
}
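
/*
  Editor's note (inferred from the test below): a field is "ndb
  compatible" here when its value can be received directly into the
  MySQL record buffer, i.e. it is not a blob, not MYSQL_TYPE_BIT, and
  has a nonzero pack length.
*/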
inline int is_ndb_compatible_type(Field *field)
{
  return
    !(field->flags & BLOB_FLAG) &&
    field->type() != MYSQL_TYPE_BIT &&
    field->pack_length() != 0;
}

/*
  - create eventOperations for receiving log events
  - setup ndb recattrs for reception of log event data
  - "start" the event operation

  used at create/discover of tables
*/
int
ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
                            const char *event_name)
{
  THD *thd= current_thd;
  /*
    We are in either create table or rename table, so the table should
    be locked; hence we can work with the share without locks.
  */

  DBUG_ENTER("ndbcluster_create_event_ops");
  DBUG_PRINT("enter", ("table: %s event: %s", ndbtab->getName(), event_name));
  DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(ndbtab->getName()));

  DBUG_ASSERT(share != 0);

  if (share->flags & NSF_NO_BINLOG)
  {
    DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x",
                        share->flags));
    DBUG_RETURN(0);
  }
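
  /*
    Editor's summary (hedged): decide whether this share is one of the
    two replication system tables in NDB_REP_DB (the schema distribution
    table or the apply status table); any other table is skipped unless
    binlogging is running and its database passes the binlog filter.
  */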
  int do_ndb_schema_share= 0, do_ndb_apply_status_share= 0;
  if (!ndb_schema_share && strcmp(share->db, NDB_REP_DB) == 0 &&
      strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
    do_ndb_schema_share= 1;
  else if (!ndb_apply_status_share && strcmp(share->db, NDB_REP_DB) == 0 &&
           strcmp(share->table_name, NDB_APPLY_TABLE) == 0)
    do_ndb_apply_status_share= 1;
  else if (!binlog_filter->db_ok(share->db) || !ndb_binlog_running)
  {
    share->flags|= NSF_NO_BINLOG;
    DBUG_RETURN(0);
  }

  if (share->op)
  {
    assert(share->op->getCustomData() == (void *) share);

    DBUG_ASSERT(share->use_count > 1);
    sql_print_error("NDB Binlog: discover reusing old ev op");
    /* ndb_share reference ToDo free */
    DBUG_PRINT("NDB_SHARE", ("%s ToDo free use_count: %u",
                             share->key, share->use_count));
    free_share(&share); // old event op already has reference
    DBUG_RETURN(0);
  }

  TABLE *table= share->table;

  int retries= 100;
  /*
    100 milliseconds; a temporary error on a schema operation can
    take some time to be resolved.
  */
  int retry_sleep= 100;
  while (1)
  {
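    /*
      Editor's note (assumption based on surrounding code): injector_ndb
      and schema_ndb are owned by the binlog injector thread, so
      injector_mutex must be held while creating event operations on
      them; schema distribution subscriptions use the dedicated
      schema_ndb object.
    */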
    mysql_mutex_lock(&injector_mutex);
    Ndb *ndb= injector_ndb;
    if (do_ndb_schema_share)
      ndb= schema_ndb;

    if (ndb == 0)
    {
      mysql_mutex_unlock(&injector_mutex);
      DBUG_RETURN(-1);
    }

    NdbEventOperation* op;
    if (do_ndb_schema_share)
      op= ndb->createEventOperation(event_name);
    else
    {
      // set injector_ndb database/schema from table internal name
      int ret= ndb->setDatabaseAndSchemaName(ndbtab);
      assert(ret == 0);
      op= ndb->createEventOperation(event_name);
      // reset to catch errors
      ndb->setDatabaseName("");
    }
    if (!op)
    {
      sql_print_error("NDB Binlog: Creating NdbEventOperation failed for"
                      " %s", event_name);
      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                          ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
                          ndb->getNdbError().code,
                          ndb->getNdbError().message,
                          "NDB");
      mysql_mutex_unlock(&injector_mutex);
      DBUG_RETURN(-1);
    }

    if (share->flags & NSF_BLOB_FLAG)
      op->mergeEvents(TRUE); // currently not inherited from event

    DBUG_PRINT("info", ("share->ndb_value[0]: 0x%lx share->ndb_value[1]: 0x%lx",
                        (long) share->ndb_value[0],
                        (long) share->ndb_value[1]));
    int n_columns= ndbtab->getNoOfColumns();
    int n_fields= table ? table->s->fields : 0; // XXX ???
    for (int j= 0; j < n_columns; j++)
    {
      const char *col_name= ndbtab->getColumn(j)->getName();
      NdbValue attr0, attr1;
      if (j < n_fields)
      {
        Field *f= share->table->field[j];
        if (is_ndb_compatible_type(f))
        {
          DBUG_PRINT("info", ("%s compatible", col_name));
          attr0.rec= op->getValue(col_name, (char*) f->ptr);
          attr1.rec= op->getPreValue(col_name,
                                     (f->ptr - share->table->record[0]) +
                                     (char*) share->table->record[1]);
        }
        else if (! (f->flags & BLOB_FLAG))
        {
          DBUG_PRINT("info", ("%s non compatible", col_name));
          attr0.rec= op->getValue(col_name);
          attr1.rec= op->getPreValue(col_name);
        }
        else
        {
          DBUG_PRINT("info", ("%s blob", col_name));
          DBUG_ASSERT(share->flags & NSF_BLOB_FLAG);
          attr0.blob= op->getBlobHandle(col_name);
          attr1.blob= op->getPreBlobHandle(col_name);
          if (attr0.blob == NULL || attr1.blob == NULL)
          {
            sql_print_error("NDB Binlog: Creating NdbEventOperation"
                            " blob field %u handles failed (code=%d) for %s",
                            j, op->getNdbError().code, event_name);
            push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                                ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
                                op->getNdbError().code,
                                op->getNdbError().message,
                                "NDB");
            ndb->dropEventOperation(op);
            mysql_mutex_unlock(&injector_mutex);
            DBUG_RETURN(-1);
          }
        }
      }
      else
      {
        DBUG_PRINT("info", ("%s hidden key", col_name));
        attr0.rec= op->getValue(col_name);
        attr1.rec= op->getPreValue(col_name);
      }
|
2006-01-25 22:22:50 +01:00
|
|
|
share->ndb_value[0][j].ptr= attr0.ptr;
|
|
|
|
share->ndb_value[1][j].ptr= attr1.ptr;
|
2006-11-27 00:47:38 +01:00
|
|
|
DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%lx "
|
|
|
|
"share->ndb_value[0][%d]: 0x%lx",
|
|
|
|
j, (long) &share->ndb_value[0][j],
|
|
|
|
j, (long) attr0.ptr));
|
|
|
|
DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%lx "
|
|
|
|
"share->ndb_value[1][%d]: 0x%lx",
|
|
|
|
j, (long) &share->ndb_value[1][j],
|
|
|
|
j, (long) attr1.ptr));
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
op->setCustomData((void *) share); // set before execute
|
|
|
|
share->op= op; // assign op in NDB_SHARE
|
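    /*
      Execute the event operation. On failure, retry only if the NDB
      error is temporary (or error code 1407); each retry sleeps
      retry_sleep before the operation is recreated.
    */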
|
|
|
if (op->execute())
|
|
|
|
{
|
|
|
|
share->op= NULL;
|
|
|
|
retries--;
|
|
|
|
if (op->getNdbError().status != NdbError::TemporaryError &&
|
|
|
|
op->getNdbError().code != 1407)
|
|
|
|
retries= 0;
|
|
|
|
if (retries == 0)
|
|
|
|
{
|
2009-09-10 11:18:29 +02:00
|
|
|
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
2006-01-12 19:51:02 +01:00
|
|
|
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
|
|
|
|
op->getNdbError().code, op->getNdbError().message,
|
|
|
|
"NDB");
|
|
|
|
sql_print_error("NDB Binlog: ndbevent->execute failed for %s; %d %s",
|
|
|
|
event_name,
|
|
|
|
op->getNdbError().code, op->getNdbError().message);
|
|
|
|
}
|
|
|
|
ndb->dropEventOperation(op);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
if (retries)
|
2007-04-25 15:25:23 +02:00
|
|
|
{
|
|
|
|
my_sleep(retry_sleep);
|
2006-01-12 19:51:02 +01:00
|
|
|
continue;
|
2007-04-25 15:25:23 +02:00
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog */
|
2006-01-12 19:51:02 +01:00
|
|
|
get_share(share);
|
2007-02-06 06:40:26 +01:00
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog use_count: %u",
|
|
|
|
share->key, share->use_count));
|
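  /*
    For the apply status and schema distribution tables, take an
    extra share reference and signal the injector condition so that
    waiters notice the share is now available.
  */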
2006-12-01 15:49:07 +01:00
|
|
|
if (do_ndb_apply_status_share)
|
2006-04-10 16:08:40 +02:00
|
|
|
{
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog extra */
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_apply_status_share= get_share(share);
|
2007-02-06 06:40:26 +01:00
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog extra use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-04-10 16:08:40 +02:00
|
|
|
}
|
2006-12-01 15:49:07 +01:00
|
|
|
else if (do_ndb_schema_share)
|
2006-04-10 16:08:40 +02:00
|
|
|
{
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog extra */
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_schema_share= get_share(share);
|
2007-02-06 06:40:26 +01:00
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog extra use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-04-10 16:08:40 +02:00
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-11-27 00:47:38 +01:00
|
|
|
DBUG_PRINT("info",("%s share->op: 0x%lx share->use_count: %u",
|
|
|
|
share->key, (long) share->op, share->use_count));
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-01-12 19:51:02 +01:00
|
|
|
sql_print_information("NDB Binlog: logging %s", share->key);
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
when entering, the calling thread should have a share lock if share != 0
|
|
|
|
then the injector thread will have one as well, i.e. share->use_count != 0
|
|
|
|
(unless it has already been dropped... then share->op == 0)
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
|
2006-04-10 16:08:40 +02:00
|
|
|
NDB_SHARE *share, const char *type_str)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ndbcluster_handle_drop_table");
|
2006-05-31 01:52:14 +02:00
|
|
|
THD *thd= current_thd;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
NDBDICT *dict= ndb->getDictionary();
|
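  /*
    Drop the corresponding event in NDB. Error 4710 (the event does
    not exist) is silently ignored; any other failure is pushed as a
    warning to the client and logged.
  */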
|
|
|
if (event_name && dict->dropEvent(event_name))
|
|
|
|
{
|
|
|
|
if (dict->getNdbError().code != 4710)
|
|
|
|
{
|
|
|
|
/* drop event failed for some reason, issue a warning */
|
2009-09-10 11:18:29 +02:00
|
|
|
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
2006-01-12 19:51:02 +01:00
|
|
|
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
|
|
|
|
dict->getNdbError().code,
|
|
|
|
dict->getNdbError().message, "NDB");
|
|
|
|
/* error is not that the event did not exist */
|
|
|
|
sql_print_error("NDB Binlog: Unable to drop event in database. "
|
|
|
|
"Event: %s Error Code: %d Message: %s",
|
|
|
|
event_name,
|
|
|
|
dict->getNdbError().code,
|
|
|
|
dict->getNdbError().message);
|
|
|
|
/* ToDo; handle error? */
|
|
|
|
if (share && share->op &&
|
|
|
|
share->op->getState() == NdbEventOperation::EO_EXECUTING &&
|
2007-09-07 11:15:07 +02:00
|
|
|
dict->getNdbError().mysql_code != HA_ERR_NO_CONNECTION)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-06-04 17:52:22 +02:00
|
|
|
DBUG_ASSERT(FALSE);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (share == 0 || share->op == 0)
|
|
|
|
{
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Synchronized drop between the client thread and injector thread is
|
|
|
|
necessary in order to maintain ordering in the binlog,
|
|
|
|
such that the drop occurs _after_ any inserts/updates/deletes.
|
|
|
|
|
|
|
|
The penalty for this is that the drop table becomes slow.
|
|
|
|
|
|
|
|
This wait is, however, not strictly necessary to produce a binlog
|
|
|
|
that is usable. However, the slave does not currently handle
|
|
|
|
these out of order, thus we are keeping the SYNC_DROP_ defined
|
|
|
|
for now.
|
|
|
|
*/
|
2006-05-31 01:52:14 +02:00
|
|
|
const char *save_proc_info= thd->proc_info;
|
2006-01-12 19:51:02 +01:00
|
|
|
#define SYNC_DROP_
|
|
|
|
#ifdef SYNC_DROP_
|
2006-05-31 01:52:14 +02:00
|
|
|
thd->proc_info= "Syncing ndb table schema operation and binlog";
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&share->mutex);
|
2009-12-22 10:35:56 +01:00
|
|
|
int max_timeout= DEFAULT_SYNC_TIMEOUT;
|
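  /*
    Wait in one-second intervals for the injector thread to release
    share->op; give up and log an error after DEFAULT_SYNC_TIMEOUT
    seconds, or stop early if the thread is killed.
  */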
2006-01-12 19:51:02 +01:00
|
|
|
while (share->op)
|
|
|
|
{
|
|
|
|
struct timespec abstime;
|
|
|
|
set_timespec(abstime, 1);
|
2010-01-07 06:42:07 +01:00
|
|
|
int ret= mysql_cond_timedwait(&injector_cond,
|
|
|
|
&share->mutex,
|
|
|
|
&abstime);
|
2006-05-31 16:16:03 +02:00
|
|
|
if (thd->killed ||
|
|
|
|
share->op == 0)
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
2006-04-12 18:01:19 +02:00
|
|
|
if (ret)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-04-12 18:01:19 +02:00
|
|
|
max_timeout--;
|
|
|
|
if (max_timeout == 0)
|
|
|
|
{
|
2006-04-13 09:37:43 +02:00
|
|
|
sql_print_error("NDB %s: %s timed out. Ignoring...",
|
|
|
|
type_str, share->key);
|
2006-04-12 18:01:19 +02:00
|
|
|
break;
|
|
|
|
}
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-04-12 18:01:19 +02:00
|
|
|
ndb_report_waiting(type_str, max_timeout,
|
|
|
|
type_str, share->key);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&share->mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
#else
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&share->mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
share->op_old= share->op;
|
|
|
|
share->op= 0;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&share->mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
#endif
|
2006-05-31 01:52:14 +02:00
|
|
|
thd->proc_info= save_proc_info;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/********************************************************************
|
|
|
|
Internal helper functions for different events from the storage nodes
|
|
|
|
used by the ndb injector thread
|
|
|
|
********************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
Handle error states on events from the storage nodes
|
|
|
|
*/
|
|
|
|
static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp,
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index_row &row)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
|
|
|
|
DBUG_ENTER("ndb_binlog_thread_handle_error");
|
|
|
|
|
|
|
|
int overrun= pOp->isOverrun();
|
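  /*
    An event buffer overrun or an inconsistent event is fatal for
    binlogging this table: log the error, clear it on the operation
    and return -1. Any other error state is logged and ignored.
  */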
|
|
|
if (overrun)
|
|
|
|
{
|
|
|
|
/*
|
2006-12-01 15:49:07 +01:00
|
|
|
ToDo: this error should rather clear the ndb_binlog_index...
|
2006-01-12 19:51:02 +01:00
|
|
|
and continue
|
|
|
|
*/
|
|
|
|
sql_print_error("NDB Binlog: Overrun in event buffer, "
|
|
|
|
"this means we have dropped events. Cannot "
|
|
|
|
"continue binlog for %s", share->key);
|
|
|
|
pOp->clearError();
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!pOp->isConsistent())
|
|
|
|
{
|
|
|
|
/*
|
2006-12-01 15:49:07 +01:00
|
|
|
ToDo: this error should rather clear the ndb_binlog_index...
|
2006-01-12 19:51:02 +01:00
|
|
|
and continue
|
|
|
|
*/
|
|
|
|
sql_print_error("NDB Binlog: Not Consistent. Cannot "
|
|
|
|
"continue binlog for %s. Error code: %d"
|
|
|
|
" Message: %s", share->key,
|
|
|
|
pOp->getNdbError().code,
|
|
|
|
pOp->getNdbError().message);
|
|
|
|
pOp->clearError();
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
}
|
|
|
|
sql_print_error("NDB Binlog: unhandled error %d for table %s",
|
|
|
|
pOp->hasError(), share->key);
|
|
|
|
pOp->clearError();
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2006-04-12 18:01:19 +02:00
|
|
|
ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
|
|
|
|
NdbEventOperation *pOp,
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index_row &row)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
|
|
|
|
NDBEVENT::TableEvent type= pOp->getEventType();
|
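  /*
    Cluster failure, drop and alter events fall through to
    ndb_handle_schema_change() below; node failure and subscription
    events are silently ignored.
  */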
|
|
|
|
|
|
|
switch (type)
|
|
|
|
{
|
|
|
|
case NDBEVENT::TE_CLUSTER_FAILURE:
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-05-19 12:54:12 +02:00
|
|
|
sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.",
|
|
|
|
share->key, (unsigned) pOp->getGCI());
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndb_apply_status_share == share)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging &&
|
2006-04-10 16:08:40 +02:00
|
|
|
ndb_binlog_tables_inited && ndb_binlog_running)
|
|
|
|
sql_print_information("NDB Binlog: ndb tables initially "
|
|
|
|
"read only on reconnect.");
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog extra free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-12-01 15:49:07 +01:00
|
|
|
free_share(&ndb_apply_status_share);
|
|
|
|
ndb_apply_status_share= 0;
|
2008-02-28 18:55:46 +01:00
|
|
|
ndb_binlog_tables_inited= 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-11-27 00:47:38 +01:00
|
|
|
DBUG_PRINT("error", ("CLUSTER FAILURE EVENT: "
|
|
|
|
"%s received share: 0x%lx op: 0x%lx share op: 0x%lx "
|
|
|
|
"op_old: 0x%lx",
|
|
|
|
share->key, (long) share, (long) pOp,
|
|
|
|
(long) share->op, (long) share->op_old));
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
case NDBEVENT::TE_DROP:
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndb_apply_status_share == share)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging &&
|
2006-04-10 16:08:40 +02:00
|
|
|
ndb_binlog_tables_inited && ndb_binlog_running)
|
|
|
|
sql_print_information("NDB Binlog: ndb tables initially "
|
|
|
|
"read only on reconnect.");
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog extra free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-12-01 15:49:07 +01:00
|
|
|
free_share(&ndb_apply_status_share);
|
|
|
|
ndb_apply_status_share= 0;
|
2008-02-28 18:55:46 +01:00
|
|
|
ndb_binlog_tables_inited= 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
/* ToDo: remove printout */
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-01-12 19:51:02 +01:00
|
|
|
sql_print_information("NDB Binlog: drop table %s.", share->key);
|
2006-03-09 01:04:13 +01:00
|
|
|
// fall through
|
|
|
|
case NDBEVENT::TE_ALTER:
|
2006-01-12 19:51:02 +01:00
|
|
|
row.n_schemaops++;
|
2006-11-27 00:47:38 +01:00
|
|
|
DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: 0x%lx "
|
|
|
|
"share op: 0x%lx op_old: 0x%lx",
|
|
|
|
type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
|
|
|
|
share->key, (long) share, (long) pOp,
|
|
|
|
(long) share->op, (long) share->op_old));
|
2006-01-12 19:51:02 +01:00
|
|
|
break;
|
|
|
|
case NDBEVENT::TE_NODE_FAILURE:
|
|
|
|
/* fall through */
|
|
|
|
case NDBEVENT::TE_SUBSCRIBE:
|
|
|
|
/* fall through */
|
|
|
|
case NDBEVENT::TE_UNSUBSCRIBE:
|
|
|
|
/* ignore */
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
sql_print_error("NDB Binlog: unknown non data event %d for %s. "
|
|
|
|
"Ignoring...", (unsigned) type, share->key);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-04-12 18:01:19 +02:00
|
|
|
ndb_handle_schema_change(thd, ndb, pOp, share);
|
2006-01-12 19:51:02 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Handle data events from the storage nodes
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index_row &row,
|
2006-01-12 19:51:02 +01:00
|
|
|
injector::transaction &trans)
|
|
|
|
{
|
|
|
|
NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
|
2006-12-01 15:49:07 +01:00
|
|
|
if (share == ndb_apply_status_share)
|
2006-01-12 19:51:02 +01:00
|
|
|
return 0;
|
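  /*
    The event's AnyValue carries the originating server id: zero
    means the event originated locally, values with the
    NDB_ANYVALUE_RESERVED bits set request special handling (such as
    no logging), and events from a slave applier are skipped unless
    log-slave-updates is enabled.
  */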
|
|
|
|
2007-04-18 16:02:20 +02:00
|
|
|
uint32 originating_server_id= pOp->getAnyValue();
|
2007-04-03 12:57:18 +02:00
|
|
|
if (originating_server_id == 0)
|
|
|
|
originating_server_id= ::server_id;
|
2007-04-18 16:02:20 +02:00
|
|
|
else if (originating_server_id & NDB_ANYVALUE_RESERVED)
|
|
|
|
{
|
|
|
|
if (originating_server_id != NDB_ANYVALUE_FOR_NOLOGGING)
|
|
|
|
sql_print_warning("NDB: unknown value for binlog signalling 0x%X, "
|
|
|
|
"event not logged",
|
|
|
|
originating_server_id);
|
|
|
|
return 0;
|
|
|
|
}
|
2007-07-25 07:24:25 +02:00
|
|
|
else if (!g_ndb_log_slave_updates)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
This event comes from a slave applier since it has an originating
|
|
|
|
server id set. Since the option to log slave updates is not set, skip it.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
2007-04-03 12:57:18 +02:00
|
|
|
|
|
|
|
TABLE *table= share->table;
|
2006-03-11 06:58:48 +01:00
|
|
|
DBUG_ASSERT(trans.good());
|
|
|
|
DBUG_ASSERT(table != 0);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
dbug_print_table("table", table);
|
|
|
|
|
|
|
|
TABLE_SHARE *table_s= table->s;
|
|
|
|
uint n_fields= table_s->fields;
|
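  /*
    Build an all-columns bitmap for unpacking the row; the stack
    buffer is used for tables with at most 128 columns, otherwise
    bitmap_init() allocates the bits.
  */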
|
|
|
MY_BITMAP b;
|
|
|
|
/* Potential buffer for the bitmap */
|
|
|
|
uint32 bitbuf[128 / (sizeof(uint32) * 8)];
|
|
|
|
bitmap_init(&b, n_fields <= sizeof(bitbuf) * 8 ? bitbuf : NULL,
|
2006-06-04 17:52:22 +02:00
|
|
|
n_fields, FALSE);
|
2006-01-12 19:51:02 +01:00
|
|
|
bitmap_set_all(&b);
|
|
|
|
|
|
|
|
/*
|
|
|
|
row data is already in table->record[0]
|
|
|
|
as we told the NdbEventOperation to put it there
|
|
|
|
(saves moving data around many times)
|
|
|
|
*/
|
|
|
|
|
2006-01-25 22:22:50 +01:00
|
|
|
/*
|
|
|
|
for now we malloc/free the blobs buffer each time
|
|
|
|
TODO: if possible, share a single permanent buffer with the handlers
|
|
|
|
*/
|
2007-05-10 11:59:39 +02:00
|
|
|
uchar* blobs_buffer[2] = { 0, 0 };
|
2006-01-25 22:22:50 +01:00
|
|
|
uint blobs_buffer_size[2] = { 0, 0 };
|
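  /*
    Dispatch on the event type: INSERT writes the after image,
    DELETE uses the primary key (or the before image when the table
    has a hidden key), and UPDATE writes the after image alone or,
    for hidden-key tables, both images.
  */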
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
switch(pOp->getEventType())
|
|
|
|
{
|
|
|
|
case NDBEVENT::TE_INSERT:
|
|
|
|
row.n_inserts++;
|
2006-03-21 16:54:56 +01:00
|
|
|
DBUG_PRINT("info", ("INSERT INTO %s.%s",
|
|
|
|
table_s->db.str, table_s->table_name.str));
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-01-25 22:22:50 +01:00
|
|
|
if (share->flags & NSF_BLOB_FLAG)
|
|
|
|
{
|
|
|
|
my_ptrdiff_t ptrdiff= 0;
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[0],
|
2007-02-27 10:27:04 +01:00
|
|
|
blobs_buffer[0],
|
|
|
|
blobs_buffer_size[0],
|
|
|
|
ptrdiff);
|
2006-01-25 22:22:50 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= trans.write_row(originating_server_id,
|
2007-02-27 10:27:04 +01:00
|
|
|
injector::transaction::table(table,
|
|
|
|
TRUE),
|
|
|
|
&b, n_fields, table->record[0]);
|
2006-03-11 15:52:38 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NDBEVENT::TE_DELETE:
|
|
|
|
row.n_deletes++;
|
2006-03-21 16:54:56 +01:00
|
|
|
DBUG_PRINT("info",("DELETE FROM %s.%s",
|
|
|
|
table_s->db.str, table_s->table_name.str));
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
table->record[0] contains only the primary key in this case
|
|
|
|
since we do not have an after image
|
|
|
|
*/
|
|
|
|
int n;
|
|
|
|
if (table->s->primary_key != MAX_KEY)
|
|
|
|
n= 0; /*
|
|
|
|
use the primary key only as it saves time and space and
|
|
|
|
it is the only thing needed to log the delete
|
2007-02-27 10:27:04 +01:00
|
|
|
*/
|
2006-01-12 19:51:02 +01:00
|
|
|
else
|
|
|
|
n= 1; /*
|
|
|
|
we use the before values since we don't have a primary key;
|
|
|
|
the mysql server does not handle the hidden primary
|
|
|
|
key
|
2007-02-27 10:27:04 +01:00
|
|
|
*/
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-01-25 22:22:50 +01:00
|
|
|
if (share->flags & NSF_BLOB_FLAG)
|
|
|
|
{
|
|
|
|
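/*
  get_ndb_blobs_value() places blob data relative to record[0];
  ptrdiff below is the offset to record[n] so the values end up in
  the record buffer we are actually unpacking into.
*/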
my_ptrdiff_t ptrdiff= table->record[n] - table->record[0];
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[n],
|
2007-02-27 10:27:04 +01:00
|
|
|
blobs_buffer[n],
|
|
|
|
blobs_buffer_size[n],
|
|
|
|
ptrdiff);
|
2006-01-25 22:22:50 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
|
2006-02-14 22:36:11 +01:00
|
|
|
DBUG_EXECUTE("info", print_records(table, table->record[n]););
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= trans.delete_row(originating_server_id,
|
2007-02-27 10:27:04 +01:00
|
|
|
injector::transaction::table(table,
|
|
|
|
TRUE),
|
|
|
|
&b, n_fields, table->record[n]);
|
2006-03-11 15:52:38 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NDBEVENT::TE_UPDATE:
|
|
|
|
row.n_updates++;
|
2006-03-21 16:54:56 +01:00
|
|
|
DBUG_PRINT("info", ("UPDATE %s.%s",
|
|
|
|
table_s->db.str, table_s->table_name.str));
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-01-25 22:22:50 +01:00
|
|
|
if (share->flags & NSF_BLOB_FLAG)
|
|
|
|
{
|
|
|
|
my_ptrdiff_t ptrdiff= 0;
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[0],
|
2007-02-27 10:27:04 +01:00
|
|
|
blobs_buffer[0],
|
|
|
|
blobs_buffer_size[0],
|
|
|
|
ptrdiff);
|
2006-01-25 22:22:50 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_unpack_record(table, share->ndb_value[0],
|
|
|
|
&b, table->record[0]);
|
2006-02-14 22:36:11 +01:00
|
|
|
DBUG_EXECUTE("info", print_records(table, table->record[0]););
|
2006-01-12 19:51:02 +01:00
|
|
|
if (table->s->primary_key != MAX_KEY)
|
|
|
|
{
|
|
|
|
/*
|
2006-01-25 22:22:50 +01:00
|
|
|
since table has a primary key, we can do a write
|
2006-01-12 19:51:02 +01:00
|
|
|
using only after values
|
2007-02-27 10:27:04 +01:00
|
|
|
*/
|
2007-04-03 12:57:18 +02:00
|
|
|
trans.write_row(originating_server_id,
|
|
|
|
injector::transaction::table(table, TRUE),
|
2006-01-12 19:51:02 +01:00
|
|
|
&b, n_fields, table->record[0]);// after values
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
mysql server cannot handle the ndb hidden key and
|
|
|
|
therefore needs the before image as well
|
2007-02-27 10:27:04 +01:00
|
|
|
*/
|
2006-01-25 22:22:50 +01:00
|
|
|
if (share->flags & NSF_BLOB_FLAG)
|
|
|
|
{
|
|
|
|
my_ptrdiff_t ptrdiff= table->record[1] - table->record[0];
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[1],
|
2007-02-27 10:27:04 +01:00
|
|
|
blobs_buffer[1],
|
|
|
|
blobs_buffer_size[1],
|
|
|
|
ptrdiff);
|
2006-01-25 22:22:50 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
|
2006-02-14 22:36:11 +01:00
|
|
|
DBUG_EXECUTE("info", print_records(table, table->record[1]););
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= trans.update_row(originating_server_id,
|
2007-02-27 10:27:04 +01:00
|
|
|
injector::transaction::table(table,
|
|
|
|
TRUE),
|
|
|
|
&b, n_fields,
|
|
|
|
table->record[1], // before values
|
|
|
|
table->record[0]);// after values
|
2006-03-11 15:52:38 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* We should REALLY never get here. */
|
|
|
|
DBUG_PRINT("info", ("default - uh oh, a brain exploded."));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2006-01-25 22:22:50 +01:00
|
|
|
if (share->flags & NSF_BLOB_FLAG)
|
|
|
|
{
|
Bug#34043: Server loops excessively in _checkchunk() when safemalloc is enabled
Essentially, the problem is that safemalloc is excruciatingly
slow as it checks all allocated blocks for overrun at each
memory management primitive, yielding an almost exponential
slowdown for the memory management functions (malloc, realloc,
free). The overrun check basically consists of verifying some
bytes of a block for certain magic keys, which catches some
simple forms of overrun. Other minor problems are a violation
of aliasing rules and the fact that its own internal list of
blocks is prone to corruption.
Another issue with safemalloc is the maintenance cost, as the
tool has a significant impact on the server code.
Given the number of memory debuggers available nowadays,
especially those that are provided with the platform malloc
implementation, maintenance of an in-house and largely obsolete
memory debugger becomes a burden that is not worth the effort
due to its slowness and lack of support for detecting more
common forms of heap corruption.
Since third-party tools can provide the same functionality at
a lower or comparable performance cost, the solution is to
simply remove safemalloc.
The removal of safemalloc also allows a simplification of the
malloc wrappers, removing quite a bit of kludge: the redefinition
of my_malloc and my_free, and the removal of the unused second
argument of my_free. Since free() always checks whether the
supplied pointer is null, redundant checks are also removed.
Also, this patch adds unit testing for my_malloc and moves
the my_realloc implementation into the same file as the other
memory allocation primitives.
2010-07-08 23:20:08 +02:00
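/*
  A minimal before/after sketch of the wrapper simplification described
  above (hypothetical call site; the two-argument form is the pre-patch
  mysys API):

    before:  if (buf) my_free(buf, MYF(0));
    after:   my_free(buf);   // NULL-safe, single argument
*/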
|
|
|
my_free(blobs_buffer[0]);
|
|
|
|
my_free(blobs_buffer[1]);
|
2006-01-25 22:22:50 +01:00
|
|
|
}
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
//#define RUN_NDB_BINLOG_TIMER
|
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
class Timer
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
Timer() { start(); }
|
|
|
|
void start() { gettimeofday(&m_start, 0); }
|
|
|
|
void stop() { gettimeofday(&m_stop, 0); }
|
|
|
|
ulong elapsed_ms()
|
|
|
|
{
|
|
|
|
return (ulong)
|
|
|
|
(((longlong) m_stop.tv_sec - (longlong) m_start.tv_sec) * 1000 +
|
|
|
|
((longlong) m_stop.tv_usec -
|
|
|
|
(longlong) m_start.tv_usec + 999) / 1000);
|
|
|
|
}
|
|
|
|
private:
|
|
|
|
struct timeval m_start,m_stop;
|
|
|
|
};
|
|
|
|
#endif
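/*
  A minimal usage sketch of the Timer class above (only compiled when
  RUN_NDB_BINLOG_TIMER is defined; see main_timer in the injector loop):

    Timer t;              // the constructor calls start()
    do_work();
    t.stop();
    sql_print_information("work took %lu ms", t.elapsed_ms());

  Note that elapsed_ms() rounds the microsecond remainder up
  (the "+ 999) / 1000" term).
*/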
|
|
|
|
|
|
|
|
/****************************************************************
|
|
|
|
Injector thread main loop
|
|
|
|
****************************************************************/
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions were done:
- Changed byte to uchar
- Changed gptr to uchar*
- Changed my_string to char *
- Changed my_size_t to size_t
- Changed size_s to size_t
Removed the declarations of byte, gptr, my_string, my_size_t and size_s.
The following function parameter changes were made:
- All string functions in mysys/strings were changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length were changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added an extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) the following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes the function easier to use).
- Changed the type of the 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrm(), ha_discover() and handler::discover()
the type of the 'frmdata' argument to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of unneeded casts
- Added a few new casts required by other changes
- Added some casts to my_multi_malloc() arguments for safety (as string lengths
need to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffers and memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Included zlib.h in some files as we needed the declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables that hold the result of my_read() / my_write() to
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not depend on uint size
(portability fix)
- Removed Windows-specific code to restore the cursor position, as this
causes a slowdown on Windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated the function comment to
reflect this. Changed the one function that depended on the original
behavior of my_pwrite() to restore the cursor position itself.
- Added some missing checks of the return value of malloc().
- Changed the definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed the type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it does not depend on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiling without partitions.
- Removed setting of LEX_STRING() arguments in declarations (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy with
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
static uchar *
|
|
|
|
ndb_schema_objects_get_key(NDB_SCHEMA_OBJECT *schema_object,
|
|
|
|
size_t *length,
|
|
|
|
my_bool not_used __attribute__((unused)))
|
2006-04-03 19:11:20 +02:00
|
|
|
{
|
|
|
|
*length= schema_object->key_length;
|
2007-05-10 11:59:39 +02:00
|
|
|
return (uchar*) schema_object->key;
|
2006-04-03 19:11:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key,
|
|
|
|
my_bool create_if_not_exists,
|
|
|
|
my_bool have_lock)
|
|
|
|
{
|
|
|
|
NDB_SCHEMA_OBJECT *ndb_schema_object;
|
|
|
|
uint length= (uint) strlen(key);
|
|
|
|
DBUG_ENTER("ndb_get_schema_object");
|
|
|
|
DBUG_PRINT("enter", ("key: '%s'", key));
|
|
|
|
|
|
|
|
if (!have_lock)
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndbcluster_mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
while (!(ndb_schema_object=
|
2009-10-14 18:37:38 +02:00
|
|
|
(NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects,
|
|
|
|
(uchar*) key,
|
|
|
|
length)))
|
2006-04-03 19:11:20 +02:00
|
|
|
{
|
|
|
|
if (!create_if_not_exists)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("does not exist"));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!(ndb_schema_object=
|
|
|
|
(NDB_SCHEMA_OBJECT*) my_malloc(sizeof(*ndb_schema_object) + length + 1,
|
|
|
|
MYF(MY_WME | MY_ZEROFILL))))
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("malloc error"));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
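/*
  The key string is stored in the same allocation, directly after the
  NDB_SCHEMA_OBJECT struct itself (hence the "+ length + 1" in the
  malloc size above); one my_free() releases both.
*/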
ndb_schema_object->key= (char *)(ndb_schema_object+1);
|
|
|
|
memcpy(ndb_schema_object->key, key, length + 1);
|
|
|
|
ndb_schema_object->key_length= length;
|
2007-05-10 11:59:39 +02:00
|
|
|
if (my_hash_insert(&ndb_schema_objects, (uchar*) ndb_schema_object))
|
2006-04-03 19:11:20 +02:00
|
|
|
{
|
2010-07-08 23:20:08 +02:00
|
|
|
my_free(ndb_schema_object);
|
2006-04-03 19:11:20 +02:00
|
|
|
break;
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_init(key_ndb_schema_object_mutex, &ndb_schema_object->mutex, MY_MUTEX_INIT_FAST);
|
2006-04-03 19:11:20 +02:00
|
|
|
bitmap_init(&ndb_schema_object->slock_bitmap, ndb_schema_object->slock,
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that were found necessary while testing the handler changes
Changes that require code changes in other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
do statement-specific cleanups.
(The only case where it's not called is if we force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only need to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only need to update these
columns.
The above bitmaps should now be up to date in all contexts
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. As the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIEVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, dbug_tmp_use_all_columns() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler).
- If one needs to temporarily set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This required some trivial variable name changes in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS) is true.
(stats.records is not supposed to be an exact value. It only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precautions
in remembering any hidden primary key, to be able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns were marked in read_set and only updated
columns were marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
for doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object also has two pointers to bitmaps, read_set and write_set,
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added an extra argument to Item::walk() to indicate if we should also
traverse subqueries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as used in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_update() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if the handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell the handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell the handler that we are going
to update columns that are part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that are part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to the handler to allow
it to quickly know that it only needs to read the columns that are part
of the key. (The handler can also use the column map for detecting this,
but a simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to, in addition to other columns,
also mark all columns that are used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys a set of all keys that are used in the query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all indexes.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map(),
tmp_use_all_columns() and tmp_restore_column_map() functions to temporarily
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field::store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that return
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() no longer sets up a temporary MEM_ROOT
object as a thread-specific variable for the handler. Instead we
send the to-be-used MEM_ROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on the old column maps or field->set_query_id being correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split the thd->options flag OPTION_STATUS_NO_TRANS_UPDATE into two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
a crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get a duplicate row on insert, change the column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handlers that don't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields had been
automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed the unused condition argument to setup_tables
- Removed the no longer needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declarations to the start of functions for better code
readability.
- Removed some unused arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of whether the timestamp field was set by the statement.
- Removed calls to free_io_cache() as this is now done automatically in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparisons with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly).
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
sizeof(ndb_schema_object->slock)*8, FALSE);
|
2006-04-03 19:11:20 +02:00
|
|
|
bitmap_clear_all(&ndb_schema_object->slock_bitmap);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (ndb_schema_object)
|
|
|
|
{
|
|
|
|
ndb_schema_object->use_count++;
|
|
|
|
DBUG_PRINT("info", ("use_count: %d", ndb_schema_object->use_count));
|
|
|
|
}
|
|
|
|
if (!have_lock)
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndbcluster_mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
DBUG_RETURN(ndb_schema_object);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object,
|
|
|
|
bool have_lock)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ndb_free_schema_object");
|
|
|
|
DBUG_PRINT("enter", ("key: '%s'", (*ndb_schema_object)->key));
|
|
|
|
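/*
  Drop one reference; the object and its mutex are destroyed when the
  last user releases it (use_count reaches zero).
*/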
if (!have_lock)
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndbcluster_mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
if (!--(*ndb_schema_object)->use_count)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count));
|
2009-10-14 18:37:38 +02:00
|
|
|
my_hash_delete(&ndb_schema_objects, (uchar*) *ndb_schema_object);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_destroy(&(*ndb_schema_object)->mutex);
|
2010-07-08 23:20:08 +02:00
|
|
|
my_free(*ndb_schema_object);
|
2006-04-03 19:11:20 +02:00
|
|
|
*ndb_schema_object= 0;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count));
|
|
|
|
}
|
|
|
|
if (!have_lock)
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndbcluster_mutex);
|
2006-04-03 19:11:20 +02:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2009-12-22 10:35:56 +01:00
|
|
|
extern ulong opt_ndb_report_thresh_binlog_epoch_slip;
|
|
|
|
extern ulong opt_ndb_report_thresh_binlog_mem_usage;
|
2006-08-30 11:41:21 +02:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
pthread_handler_t ndb_binlog_thread_func(void *arg)
|
|
|
|
{
|
|
|
|
THD *thd; /* needs to be first for thread_stack */
|
2006-04-12 18:01:19 +02:00
|
|
|
Ndb *i_ndb= 0;
|
|
|
|
Ndb *s_ndb= 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
Thd_ndb *thd_ndb=0;
|
2006-12-01 15:49:07 +01:00
|
|
|
int ndb_update_ndb_binlog_index= 1;
|
2006-01-12 19:51:02 +01:00
|
|
|
injector *inj= injector::instance();
|
2007-11-01 15:08:00 +01:00
|
|
|
uint incident_id= 0;
|
2006-08-30 11:41:21 +02:00
|
|
|
|
2006-04-21 18:28:00 +02:00
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
Timer main_timer;
|
|
|
|
#endif
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
|
|
|
Set up the Thread
|
|
|
|
*/
|
|
|
|
my_thread_init();
|
|
|
|
DBUG_ENTER("ndb_binlog_thread");
|
|
|
|
|
|
|
|
thd= new THD; /* note that constructor of THD uses DBUG_ */
|
|
|
|
THD_CHECK_SENTRY(thd);
|
2009-09-30 18:00:22 +02:00
|
|
|
thd->set_current_stmt_binlog_format_row();
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-06-22 16:42:50 +02:00
|
|
|
/* We need to set thd->thread_id before thd->store_globals, or it will
|
|
|
|
set an invalid value for thd->variables.pseudo_thread_id.
|
|
|
|
*/
|
2010-01-12 02:47:27 +01:00
|
|
|
mysql_mutex_lock(&LOCK_thread_count);
|
2006-06-22 16:42:50 +02:00
|
|
|
thd->thread_id= thread_id++;
|
2010-01-12 02:47:27 +01:00
|
|
|
mysql_mutex_unlock(&LOCK_thread_count);
|
2006-06-22 16:42:50 +02:00
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_thread_set_psi_id(thd->thread_id);
|
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
thd->thread_stack= (char*) &thd; /* remember where our stack is */
|
|
|
|
if (thd->store_globals())
|
|
|
|
{
|
|
|
|
thd->cleanup();
|
|
|
|
delete thd;
|
|
|
|
ndb_binlog_thread_running= -1;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2009-09-23 15:10:23 +02:00
|
|
|
|
|
|
|
DBUG_LEAVE; // Must match DBUG_ENTER()
|
2006-01-12 19:51:02 +01:00
|
|
|
my_thread_end();
|
|
|
|
pthread_exit(0);
|
2009-09-23 15:10:23 +02:00
|
|
|
return NULL; // Avoid compiler warnings
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
thd->init_for_queries();
|
|
|
|
thd->command= COM_DAEMON;
|
|
|
|
thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG;
|
|
|
|
thd->main_security_ctx.host_or_ip= "";
|
|
|
|
thd->client_capabilities= 0;
|
|
|
|
my_net_init(&thd->net, 0);
|
|
|
|
thd->main_security_ctx.master_access= ~0;
|
2010-08-09 10:32:50 +02:00
|
|
|
thd->main_security_ctx.priv_user[0]= 0;
|
2010-02-24 18:04:00 +01:00
|
|
|
/* Do not use user-supplied timeout value for system threads. */
|
|
|
|
thd->variables.lock_wait_timeout= LONG_TIMEOUT;
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
Set up ndb binlog
|
|
|
|
*/
|
|
|
|
sql_print_information("Starting MySQL Cluster Binlog Thread");
|
|
|
|
|
|
|
|
pthread_detach_this_thread();
|
|
|
|
thd->real_id= pthread_self();
|
2010-01-12 02:47:27 +01:00
|
|
|
mysql_mutex_lock(&LOCK_thread_count);
|
2006-01-12 19:51:02 +01:00
|
|
|
threads.append(thd);
|
2010-01-12 02:47:27 +01:00
|
|
|
mysql_mutex_unlock(&LOCK_thread_count);
|
2006-01-12 19:51:02 +01:00
|
|
|
thd->lex->start_transaction_opt= 0;
|
|
|
|
|
2006-04-12 18:01:19 +02:00
|
|
|
if (!(s_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
|
|
|
|
s_ndb->init())
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
sql_print_error("NDB Binlog: Getting Schema Ndb object failed");
|
2007-02-06 22:06:13 +01:00
|
|
|
ndb_binlog_thread_running= -1;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2006-02-22 15:19:22 +01:00
|
|
|
// empty database
|
2006-04-12 18:01:19 +02:00
|
|
|
if (!(i_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
|
|
|
|
i_ndb->init())
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
sql_print_error("NDB Binlog: Getting Ndb object failed");
|
|
|
|
ndb_binlog_thread_running= -1;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2006-04-03 19:11:20 +02:00
|
|
|
/* init hash for schema object distribution */
|
2009-10-14 18:37:38 +02:00
|
|
|
(void) my_hash_init(&ndb_schema_objects, system_charset_info, 32, 0, 0,
|
|
|
|
(my_hash_get_key)ndb_schema_objects_get_key, 0, 0);
|
2006-04-03 19:11:20 +02:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
|
|
|
Expose global reference to our ndb object.
|
|
|
|
|
|
|
|
Used by both sql client thread and binlog thread to interact
|
|
|
|
with the storage
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
*/
|
|
|
|
injector_thd= thd;
|
2006-04-12 18:01:19 +02:00
|
|
|
injector_ndb= i_ndb;
|
2006-08-30 11:41:21 +02:00
|
|
|
p_latest_trans_gci=
|
|
|
|
injector_ndb->get_ndb_cluster_connection().get_latest_trans_gci();
|
2006-04-12 18:01:19 +02:00
|
|
|
schema_ndb= s_ndb;
|
2007-02-06 22:06:13 +01:00
|
|
|
|
2006-02-01 01:12:11 +01:00
|
|
|
if (opt_bin_log)
|
|
|
|
{
|
2007-06-27 22:28:18 +02:00
|
|
|
ndb_binlog_running= TRUE;
|
2006-02-01 01:12:11 +01:00
|
|
|
}
|
2007-02-06 22:06:13 +01:00
|
|
|
|
|
|
|
/* Thread start up completed */
|
|
|
|
ndb_binlog_thread_running= 1;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2007-11-01 15:08:00 +01:00
|
|
|
/*
|
|
|
|
wait for mysql server to start (so that the binlog is started
|
|
|
|
and thus can receive the first GAP event)
|
|
|
|
*/
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&LOCK_server_started);
|
2007-11-01 15:08:00 +01:00
|
|
|
while (!mysqld_server_started)
|
|
|
|
{
|
|
|
|
struct timespec abstime;
|
|
|
|
set_timespec(abstime, 1);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_timedwait(&COND_server_started, &LOCK_server_started,
|
|
|
|
&abstime);
|
2007-11-01 15:08:00 +01:00
|
|
|
if (ndbcluster_terminating)
|
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&LOCK_server_started);
|
2007-11-01 15:08:00 +01:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&LOCK_server_started);
|
2006-05-19 18:11:47 +02:00
|
|
|
restart:
|
2006-01-12 19:51:02 +01:00
|
|
|
/*
|
|
|
|
Main NDB Injector loop
|
|
|
|
*/
|
2007-11-06 15:12:27 +01:00
|
|
|
while (ndb_binlog_running)
|
2007-04-03 14:31:46 +02:00
|
|
|
{
|
|
|
|
/*
|
2007-11-06 15:12:27 +01:00
|
|
|
check if this is the first log; if so, we do not insert a GAP event
|
|
|
|
as there is really no log to have a GAP in
|
2007-04-03 14:31:46 +02:00
|
|
|
*/
|
2007-11-06 22:28:44 +01:00
|
|
|
if (incident_id == 0)
|
2007-11-06 15:12:27 +01:00
|
|
|
{
|
|
|
|
LOG_INFO log_info;
|
|
|
|
mysql_bin_log.get_current_log(&log_info);
|
|
|
|
int len= strlen(log_info.log_file_name);
|
|
|
|
uint no= 0;
|
|
|
|
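/*
  Binlog file names end in a 6-digit sequence number (e.g. ".000001");
  parse it to detect whether this is the very first binlog file.
*/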
if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
|
|
|
|
no == 1)
|
|
|
|
{
|
|
|
|
/* this is the first log, so skip the GAP event */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-04-03 14:31:46 +02:00
|
|
|
/*
|
2007-11-01 15:08:00 +01:00
|
|
|
Always insert a GAP event as we cannot know what has happened
|
|
|
|
in the cluster while not being connected.
|
2007-04-03 14:31:46 +02:00
|
|
|
*/
|
2007-11-01 15:08:00 +01:00
|
|
|
LEX_STRING const msg[2]=
|
|
|
|
{
|
|
|
|
{ C_STRING_WITH_LEN("mysqld startup") },
|
|
|
|
{ C_STRING_WITH_LEN("cluster disconnect")}
|
|
|
|
};
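/*
  incident_id selects the message: 0 on the first pass ("mysqld startup"),
  1 once the outer loop restarts after a cluster disconnect.
*/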
|
2009-10-30 19:13:58 +01:00
|
|
|
int error __attribute__((unused))=
|
2007-11-01 15:08:00 +01:00
|
|
|
inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]);
|
|
|
|
DBUG_ASSERT(!error);
|
2007-11-06 15:12:27 +01:00
|
|
|
break;
|
2007-04-03 14:31:46 +02:00
|
|
|
}
|
2007-11-06 15:12:27 +01:00
|
|
|
incident_id= 1;
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-05-19 12:54:12 +02:00
|
|
|
thd->proc_info= "Waiting for ndbcluster to start";
|
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-12-01 15:49:07 +01:00
|
|
|
while (!ndb_schema_share ||
|
|
|
|
(ndb_binlog_running && !ndb_apply_status_share))
|
2006-05-19 12:54:12 +02:00
|
|
|
{
|
|
|
|
/* ndb not connected yet */
|
|
|
|
struct timespec abstime;
|
|
|
|
set_timespec(abstime, 1);
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
|
2007-02-06 22:06:13 +01:00
|
|
|
if (ndbcluster_binlog_terminating)
|
2006-05-19 12:54:12 +02:00
|
|
|
{
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-05-19 12:54:12 +02:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-05-19 18:11:47 +02:00
|
|
|
|
|
|
|
if (thd_ndb == NULL)
|
|
|
|
{
|
2006-09-15 19:28:00 +02:00
|
|
|
DBUG_ASSERT(ndbcluster_hton->slot != ~(uint)0);
|
2006-05-19 18:11:47 +02:00
|
|
|
if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
|
|
|
|
{
|
|
|
|
sql_print_error("Could not allocate Thd_ndb object");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
set_thd_ndb(thd, thd_ndb);
|
|
|
|
thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
|
|
|
|
thd->query_id= 0; // to keep valgrind quiet
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
2006-05-16 20:56:45 +02:00
|
|
|
// wait for the first event
|
|
|
|
thd->proc_info= "Waiting for first event from ndbcluster";
|
2006-07-08 03:26:13 +02:00
|
|
|
int schema_res, res;
|
|
|
|
Uint64 schema_gci;
|
|
|
|
do
|
2006-05-16 20:56:45 +02:00
|
|
|
{
|
2007-02-06 22:06:13 +01:00
|
|
|
DBUG_PRINT("info", ("Waiting for the first event"));
|
|
|
|
|
|
|
|
if (ndbcluster_binlog_terminating)
|
2006-07-08 03:26:13 +02:00
|
|
|
goto err;
|
2007-02-06 22:06:13 +01:00
|
|
|
|
2006-05-16 20:56:45 +02:00
|
|
|
schema_res= s_ndb->pollEvents(100, &schema_gci);
|
2006-12-20 15:34:45 +01:00
|
|
|
} while (schema_gci == 0 || ndb_latest_received_binlog_epoch == schema_gci);
|
2006-07-08 03:26:13 +02:00
|
|
|
if (ndb_binlog_running)
|
2006-05-16 20:56:45 +02:00
|
|
|
{
|
2006-07-08 03:26:13 +02:00
|
|
|
Uint64 gci= i_ndb->getLatestGCI();
|
|
|
|
while (gci < schema_gci || gci == ndb_latest_received_binlog_epoch)
|
2006-07-05 17:36:18 +02:00
|
|
|
{
|
2007-02-06 22:06:13 +01:00
|
|
|
if (ndbcluster_binlog_terminating)
|
2006-07-08 03:26:13 +02:00
|
|
|
goto err;
|
|
|
|
res= i_ndb->pollEvents(10, &gci);
|
2006-07-05 17:36:18 +02:00
|
|
|
}
|
|
|
|
if (gci > schema_gci)
|
|
|
|
{
|
|
|
|
schema_gci= gci;
|
|
|
|
}
|
2006-07-08 03:26:13 +02:00
|
|
|
}
|
|
|
|
// now check that we have epochs consistent with what we had before the restart
|
2006-11-27 00:47:38 +01:00
|
|
|
DBUG_PRINT("info", ("schema_res: %d schema_gci: %lu", schema_res,
|
|
|
|
(long) schema_gci));
|
2006-07-08 03:26:13 +02:00
|
|
|
{
|
2006-05-18 23:38:07 +02:00
|
|
|
i_ndb->flushIncompleteEvents(schema_gci);
|
|
|
|
s_ndb->flushIncompleteEvents(schema_gci);
|
2006-05-16 20:56:45 +02:00
|
|
|
if (schema_gci < ndb_latest_handled_binlog_epoch)
|
|
|
|
{
|
|
|
|
sql_print_error("NDB Binlog: cluster has been restarted --initial or with older filesystem. "
|
|
|
|
"ndb_latest_handled_binlog_epoch: %u, while current epoch: %u. "
|
|
|
|
"RESET MASTER should be issued. Resetting ndb_latest_handled_binlog_epoch.",
|
|
|
|
(unsigned) ndb_latest_handled_binlog_epoch, (unsigned) schema_gci);
|
2006-08-30 11:41:21 +02:00
|
|
|
*p_latest_trans_gci= 0;
|
2006-05-16 20:56:45 +02:00
|
|
|
ndb_latest_handled_binlog_epoch= 0;
|
|
|
|
ndb_latest_applied_binlog_epoch= 0;
|
|
|
|
ndb_latest_received_binlog_epoch= 0;
|
|
|
|
}
|
2006-07-05 18:36:18 +02:00
|
|
|
else if (ndb_latest_applied_binlog_epoch > 0)
|
|
|
|
{
|
|
|
|
sql_print_warning("NDB Binlog: cluster has reconnected. "
|
|
|
|
"Changes to the database that occured while "
|
|
|
|
"disconnected will not be in the binlog");
|
|
|
|
}
|
2009-12-22 10:35:56 +01:00
|
|
|
if (opt_ndb_extra_logging)
|
2006-07-05 14:20:14 +02:00
|
|
|
{
|
|
|
|
sql_print_information("NDB Binlog: starting log at epoch %u",
|
|
|
|
(unsigned)schema_gci);
|
|
|
|
}
|
2006-05-16 20:56:45 +02:00
|
|
|
}
|
|
|
|
}
|
2006-05-19 12:54:12 +02:00
|
|
|
{
|
|
|
|
static char db[]= "";
|
|
|
|
thd->db= db;
|
|
|
|
}
|
2006-05-16 20:56:45 +02:00
|
|
|
do_ndbcluster_binlog_close_connection= BCCC_running;
|
2007-02-06 22:06:13 +01:00
|
|
|
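/*
  Keep handling epochs until we are asked to stop (terminate or close
  connection) and everything received is in the binlog, or until a
  cluster reconnect asks for a restart (BCCC_restart).
*/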
for ( ; !((ndbcluster_binlog_terminating ||
|
|
|
|
do_ndbcluster_binlog_close_connection) &&
|
2006-08-30 11:41:21 +02:00
|
|
|
ndb_latest_handled_binlog_epoch >= *p_latest_trans_gci) &&
|
2006-05-16 20:56:45 +02:00
|
|
|
do_ndbcluster_binlog_close_connection != BCCC_restart; )
|
|
|
|
{
|
|
|
|
#ifndef DBUG_OFF
|
|
|
|
if (do_ndbcluster_binlog_close_connection)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection: %d, "
|
2006-11-27 17:16:08 +01:00
|
|
|
"ndb_latest_handled_binlog_epoch: %lu, "
|
|
|
|
"*p_latest_trans_gci: %lu",
|
|
|
|
do_ndbcluster_binlog_close_connection,
|
|
|
|
(ulong) ndb_latest_handled_binlog_epoch,
|
|
|
|
(ulong) *p_latest_trans_gci));
|
2006-05-16 20:56:45 +02:00
|
|
|
}
|
|
|
|
#endif
|
2006-01-12 19:51:02 +01:00
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
main_timer.stop();
|
|
|
|
sql_print_information("main_timer %ld ms", main_timer.elapsed_ms());
|
|
|
|
main_timer.start();
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
now we don't want any events before the next gci is complete
|
|
|
|
*/
|
|
|
|
thd->proc_info= "Waiting for event from ndbcluster";
|
|
|
|
thd->set_time();
|
|
|
|
|
|
|
|
/* wait for event or 1000 ms */
|
2006-02-01 01:12:11 +01:00
|
|
|
Uint64 gci= 0, schema_gci;
|
|
|
|
int res= 0, tot_poll_wait= 1000;
|
|
|
|
if (ndb_binlog_running)
|
|
|
|
{
|
2006-04-12 18:01:19 +02:00
|
|
|
res= i_ndb->pollEvents(tot_poll_wait, &gci);
|
2006-02-01 01:12:11 +01:00
|
|
|
tot_poll_wait= 0;
|
|
|
|
}
|
2007-05-09 12:51:37 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
Just consume any events, not used if no binlogging
|
|
|
|
e.g. node failure events
|
|
|
|
*/
|
|
|
|
Uint64 tmp_gci;
|
|
|
|
if (i_ndb->pollEvents(0, &tmp_gci))
|
|
|
|
while (i_ndb->nextEvent())
|
|
|
|
;
|
|
|
|
}
|
2006-04-12 18:01:19 +02:00
|
|
|
int schema_res= s_ndb->pollEvents(tot_poll_wait, &schema_gci);
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_latest_received_binlog_epoch= gci;
|
|
|
|
|
|
|
|
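/*
  Never run ahead of the schema event stream: block here until the
  schema Ndb has delivered an epoch at least as new as the data epoch.
*/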
while (gci > schema_gci && schema_res >= 0)
|
2006-05-16 20:56:45 +02:00
|
|
|
{
|
|
|
|
static char buf[64];
|
|
|
|
thd->proc_info= "Waiting for schema epoch";
|
|
|
|
my_snprintf(buf, sizeof(buf), "%s %u(%u)", thd->proc_info, (unsigned) schema_gci, (unsigned) gci);
|
|
|
|
thd->proc_info= buf;
|
2006-04-12 18:01:19 +02:00
|
|
|
schema_res= s_ndb->pollEvents(10, &schema_gci);
|
2006-05-16 20:56:45 +02:00
|
|
|
}
|
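/*
  A minimal sketch of the NDB event API pattern used above, assuming an
  Ndb object "ndb" whose event operations have already been created
  ("handle_event" is a hypothetical consumer):

    Uint64 latest_gci;
    if (ndb->pollEvents(1000, &latest_gci) > 0)   // wait up to 1000 ms
    {
      NdbEventOperation *op;
      while ((op= ndb->nextEvent()) != NULL)
        handle_event(op);   // op->getGCI(), op->getEventType(), ...
    }

  pollEvents() returns > 0 when event data is available and reports the
  latest complete epoch; nextEvent() then iterates the buffered events.
*/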
2006-01-12 19:51:02 +01:00
|
|
|
|
2007-02-06 22:06:13 +01:00
|
|
|
if ((ndbcluster_binlog_terminating ||
|
|
|
|
do_ndbcluster_binlog_close_connection) &&
|
2006-08-30 11:41:21 +02:00
|
|
|
(ndb_latest_handled_binlog_epoch >= *p_latest_trans_gci ||
|
2006-02-01 01:12:11 +01:00
|
|
|
!ndb_binlog_running))
|
2006-01-12 19:51:02 +01:00
|
|
|
break; /* Shutting down server */
|
|
|
|
|
2010-08-12 15:50:23 +02:00
|
|
|
if (ndb_binlog_index && ndb_binlog_index->s->has_old_version())
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2010-08-12 15:50:23 +02:00
|
|
|
if (ndb_binlog_index->s->has_old_version())
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2010-07-27 12:25:53 +02:00
|
|
|
trans_commit_stmt(thd);
|
2006-01-12 19:51:02 +01:00
|
|
|
close_thread_tables(thd);
|
2010-07-27 12:25:53 +02:00
|
|
|
thd->mdl_context.release_transactional_locks();
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index= 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
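/*
  Do all epoch-local allocations from a scratch MEM_ROOT; the thread's
  current root is saved in old_root and is expected to be restored once
  the epoch has been processed.
*/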
MEM_ROOT **root_ptr=
|
|
|
|
my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
|
|
|
|
MEM_ROOT *old_root= *root_ptr;
|
|
|
|
MEM_ROOT mem_root;
|
|
|
|
init_sql_alloc(&mem_root, 4096, 0);
|
2006-03-21 16:54:56 +01:00
|
|
|
List<Cluster_schema> post_epoch_log_list;
|
|
|
|
List<Cluster_schema> post_epoch_unlock_list;
|
2006-01-12 19:51:02 +01:00
|
|
|
*root_ptr= &mem_root;
|
|
|
|
|
|
|
|
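/* Handle schema distribution events first, so DDL is processed before the data events of the same epoch */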
if (unlikely(schema_res > 0))
|
|
|
|
{
|
2006-04-10 16:08:40 +02:00
|
|
|
thd->proc_info= "Processing events from schema table";
|
2006-04-12 18:01:19 +02:00
|
|
|
s_ndb->
|
2009-12-22 10:35:56 +01:00
|
|
|
setReportThreshEventGCISlip(opt_ndb_report_thresh_binlog_epoch_slip);
|
2006-04-12 18:01:19 +02:00
|
|
|
s_ndb->
|
2009-12-22 10:35:56 +01:00
|
|
|
setReportThreshEventFreeMem(opt_ndb_report_thresh_binlog_mem_usage);
|
2006-04-12 18:01:19 +02:00
|
|
|
NdbEventOperation *pOp= s_ndb->nextEvent();
|
2006-01-12 19:51:02 +01:00
|
|
|
while (pOp != NULL)
|
|
|
|
{
|
|
|
|
if (!pOp->hasError())
|
2006-05-16 20:56:45 +02:00
|
|
|
{
|
2006-04-13 09:37:43 +02:00
|
|
|
ndb_binlog_thread_handle_schema_event(thd, s_ndb, pOp,
|
2006-02-06 11:47:12 +01:00
|
|
|
&post_epoch_log_list,
|
|
|
|
&post_epoch_unlock_list,
|
|
|
|
&mem_root);
|
2006-05-16 20:56:45 +02:00
|
|
|
DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ?
|
|
|
|
s_ndb->getEventOperation()->getEvent()->getTable()->getName() :
|
|
|
|
"<empty>"));
|
|
|
|
DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ?
|
|
|
|
i_ndb->getEventOperation()->getEvent()->getTable()->getName() :
|
|
|
|
"<empty>"));
|
|
|
|
if (i_ndb->getEventOperation() == NULL &&
|
|
|
|
s_ndb->getEventOperation() == NULL &&
|
|
|
|
do_ndbcluster_binlog_close_connection == BCCC_running)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection= BCCC_restart"));
|
|
|
|
do_ndbcluster_binlog_close_connection= BCCC_restart;
|
2006-08-30 11:41:21 +02:00
|
|
|
if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
|
2006-05-16 20:56:45 +02:00
|
|
|
{
|
2006-11-27 17:16:08 +01:00
|
|
|
sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
|
|
|
|
"as latest received epoch is %lu",
|
|
|
|
(ulong) *p_latest_trans_gci,
|
|
|
|
(ulong) ndb_latest_received_binlog_epoch);
|
2006-05-16 20:56:45 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
else
|
|
|
|
sql_print_error("NDB: error %lu (%s) on handling "
|
|
|
|
"binlog schema event",
|
|
|
|
(ulong) pOp->getNdbError().code,
|
|
|
|
pOp->getNdbError().message);
|
2006-04-12 18:01:19 +02:00
|
|
|
pOp= s_ndb->nextEvent();
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (res > 0)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("pollEvents res: %d", res));
|
|
|
|
thd->proc_info= "Processing events";
|
2006-04-12 18:01:19 +02:00
|
|
|
NdbEventOperation *pOp= i_ndb->nextEvent();
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index_row row;
|
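/* row accumulates per-epoch counts and the binlog position later written to mysql.ndb_binlog_index */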
2006-01-12 19:51:02 +01:00
|
|
|
while (pOp != NULL)
|
|
|
|
{
|
2006-04-21 18:28:00 +02:00
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
Timer gci_timer, write_timer;
|
|
|
|
int event_count= 0;
|
|
|
|
gci_timer.start();
|
|
|
|
#endif
|
2006-03-11 06:58:48 +01:00
|
|
|
gci= pOp->getGCI();
|
|
|
|
DBUG_PRINT("info", ("Handling gci: %d", (unsigned)gci));
|
2006-01-25 22:22:50 +01:00
|
|
|
// Sometimes we get TE_ALTER with an invalid table
|
|
|
|
DBUG_ASSERT(pOp->getEventType() == NdbDictionary::Event::TE_ALTER ||
|
2006-02-01 11:55:26 +01:00
|
|
|
! IS_NDB_BLOB_PREFIX(pOp->getEvent()->getTable()->getName()));
|
2006-03-11 06:58:48 +01:00
|
|
|
DBUG_ASSERT(gci <= ndb_latest_received_binlog_epoch);
|
|
|
|
|
2007-07-25 07:24:25 +02:00
|
|
|
/* initialize some variables for this epoch */
|
|
|
|
g_ndb_log_slave_updates= opt_log_slave_updates;
|
2006-04-12 18:01:19 +02:00
|
|
|
i_ndb->
|
2009-12-22 10:35:56 +01:00
|
|
|
setReportThreshEventGCISlip(opt_ndb_report_thresh_binlog_epoch_slip);
|
|
|
|
i_ndb->setReportThreshEventFreeMem(opt_ndb_report_thresh_binlog_mem_usage);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
bzero((char*) &row, sizeof(row));
|
2007-11-28 12:35:25 +01:00
|
|
|
thd->variables.character_set_client= &my_charset_latin1;
|
2006-03-11 06:58:48 +01:00
|
|
|
injector::transaction trans;
|
|
|
|
// Pass the table map for all tables changed in this epoch before the row events
|
|
|
|
{
|
|
|
|
Uint32 iter= 0;
|
|
|
|
const NdbEventOperation *gci_op;
|
2006-02-16 14:54:30 +01:00
|
|
|
Uint32 event_types;
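// Bitmask of NdbDictionary::Event::TableEvent values seen for this table in the current epoch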
|
2006-04-12 18:01:19 +02:00
|
|
|
while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
|
2006-03-11 06:58:48 +01:00
|
|
|
!= NULL)
|
2006-02-16 14:54:30 +01:00
|
|
|
{
|
2006-03-11 06:58:48 +01:00
|
|
|
NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
|
2006-11-27 00:47:38 +01:00
|
|
|
DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x",
|
|
|
|
(long) gci_op, (long) share, event_types));
|
2006-03-11 06:58:48 +01:00
|
|
|
// workaround for interface returning TE_STOP events
|
|
|
|
// which are normally filtered out below in the nextEvent loop
|
|
|
|
if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("Skipped TE_STOP on table %s",
|
|
|
|
gci_op->getEvent()->getTable()->getName()));
|
|
|
|
continue;
|
|
|
|
}
|
2006-02-16 14:54:30 +01:00
|
|
|
// this should not happen
|
|
|
|
if (share == NULL || share->table == NULL)
|
|
|
|
{
|
2006-03-11 06:58:48 +01:00
|
|
|
DBUG_PRINT("info", ("no share or table %s!",
|
|
|
|
gci_op->getEvent()->getTable()->getName()));
|
2006-02-16 14:54:30 +01:00
|
|
|
continue;
|
|
|
|
}
|
2006-12-01 15:49:07 +01:00
|
|
|
if (share == ndb_apply_status_share)
|
2006-03-08 14:12:26 +01:00
|
|
|
{
|
2006-03-11 06:58:48 +01:00
|
|
|
// skip this table, it is handled specially
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
TABLE *table= share->table;
|
2007-02-27 18:31:49 +01:00
|
|
|
#ifndef DBUG_OFF
|
2006-03-11 06:58:48 +01:00
|
|
|
const LEX_STRING &name= table->s->table_name;
|
2007-02-27 18:31:49 +01:00
|
|
|
#endif
|
2006-03-11 06:58:48 +01:00
|
|
|
if ((event_types & (NdbDictionary::Event::TE_INSERT |
|
|
|
|
NdbDictionary::Event::TE_UPDATE |
|
|
|
|
NdbDictionary::Event::TE_DELETE)) == 0)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("skipping non data event table: %.*s",
|
2007-05-10 11:59:39 +02:00
|
|
|
(int) name.length, name.str));
|
2006-03-11 06:58:48 +01:00
|
|
|
continue;
|
2006-03-08 14:12:26 +01:00
|
|
|
}
|
2006-03-11 06:58:48 +01:00
|
|
|
if (!trans.good())
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info",
|
|
|
|
("Found new data event, initializing transaction"));
|
|
|
|
inj->new_trans(thd, &trans);
|
|
|
|
}
|
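/*
  A rough sketch of the injector API usage assumed in this loop; the
  names follow the code above, and the commit() at epoch end is an
  assumption not shown in this excerpt:

    injector::transaction trans;
    inj->new_trans(thd, &trans);                   // begin on first data event
    injector::transaction::table tbl(table, TRUE);
    trans.use_table(::server_id, tbl);             // table map before row events
    // ... write_row()/update_row()/delete_row() per received event ...
    trans.commit();                                // one binlog transaction per epoch
*/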
2007-05-10 11:59:39 +02:00
|
|
|
DBUG_PRINT("info", ("use_table: %.*s",
|
|
|
|
(int) name.length, name.str));
|
2006-06-04 17:52:22 +02:00
|
|
|
injector::transaction::table tbl(table, TRUE);
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= trans.use_table(::server_id, tbl);
|
2006-03-11 15:52:38 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
2006-02-16 14:54:30 +01:00
|
|
|
}
|
|
|
|
}
|
2006-03-11 06:58:48 +01:00
|
|
|
if (trans.good())
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndb_apply_status_share)
|
2006-03-11 06:58:48 +01:00
|
|
|
{
|
2006-12-01 15:49:07 +01:00
|
|
|
TABLE *table= ndb_apply_status_share->table;
|
2006-03-11 06:58:48 +01:00
|
|
|
|
2007-02-27 18:31:49 +01:00
|
|
|
#ifndef DBUG_OFF
|
|
|
|
const LEX_STRING& name= table->s->table_name;
|
2007-05-10 11:59:39 +02:00
|
|
|
DBUG_PRINT("info", ("use_table: %.*s",
|
|
|
|
(int) name.length, name.str));
|
2007-02-27 18:31:49 +01:00
|
|
|
#endif
|
2006-06-04 17:52:22 +02:00
|
|
|
injector::transaction::table tbl(table, TRUE);
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))= trans.use_table(::server_id, tbl);
|
2006-03-11 15:52:38 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
2006-06-22 16:42:50 +02:00
|
|
|
|
2007-04-05 15:59:42 +02:00
|
|
|
/*
|
|
|
|
Initialize table->record[0]
|
|
|
|
*/
|
|
|
|
empty_record(table);
|
|
|
|
|
2006-03-11 06:58:48 +01:00
|
|
|
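// ndb_apply_status columns: 0=server_id, 1=epoch, 2=log_name, 3=start_pos, 4=end_pos;
// only server_id and the epoch are filled in at this point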
table->field[0]->store((longlong)::server_id);
|
|
|
|
table->field[1]->store((longlong)gci);
|
2007-03-07 19:39:45 +01:00
|
|
|
table->field[2]->store("", 0, &my_charset_bin);
|
|
|
|
table->field[3]->store((longlong)0);
|
|
|
|
table->field[4]->store((longlong)0);
|
2006-03-11 06:58:48 +01:00
|
|
|
trans.write_row(::server_id,
|
2006-06-04 17:52:22 +02:00
|
|
|
injector::transaction::table(table, TRUE),
|
|
|
|
&table->s->all_set, table->s->fields,
|
2006-03-11 06:58:48 +01:00
|
|
|
table->record[0]);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
sql_print_error("NDB: Could not get apply status share");
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
write_timer.start();
|
|
|
|
#endif
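/*
  Consume all events belonging to the current epoch; the loop runs
  until nextEvent() returns 0 or hands back an event with a
  different GCI (see the while condition at the end of the loop).
*/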
|
|
|
|
do
|
|
|
|
{
|
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
event_count++;
|
|
|
|
#endif
|
|
|
|
if (pOp->hasError() &&
|
2006-04-12 18:01:19 +02:00
|
|
|
ndb_binlog_thread_handle_error(i_ndb, pOp, row) < 0)
|
2006-01-12 19:51:02 +01:00
|
|
|
goto err;
|
|
|
|
|
|
|
|
#ifndef DBUG_OFF
|
|
|
|
{
|
|
|
|
NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
|
|
|
|
DBUG_PRINT("info",
|
2006-11-27 00:47:38 +01:00
|
|
|
("EVENT TYPE: %d GCI: %ld last applied: %ld "
|
|
|
|
"share: 0x%lx (%s.%s)", pOp->getEventType(),
|
|
|
|
(long) gci,
|
|
|
|
(long) ndb_latest_applied_binlog_epoch,
|
|
|
|
(long) share,
|
|
|
|
share ? share->db : "'NULL'",
|
|
|
|
share ? share->table_name : "'NULL'"));
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_ASSERT(share != 0);
|
|
|
|
}
|
2006-03-11 06:58:48 +01:00
|
|
|
// assert that there is consistency between gci op list
|
|
|
|
// and event list
|
|
|
|
{
|
|
|
|
Uint32 iter= 0;
|
|
|
|
const NdbEventOperation *gci_op;
|
|
|
|
Uint32 event_types;
|
2006-04-12 18:01:19 +02:00
|
|
|
while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
|
2006-03-11 06:58:48 +01:00
|
|
|
!= NULL)
|
|
|
|
{
|
|
|
|
if (gci_op == pOp)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
DBUG_ASSERT(gci_op == pOp);
|
|
|
|
DBUG_ASSERT((event_types & pOp->getEventType()) != 0);
|
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
#endif
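/*
  Dispatch on event type: everything below TE_FIRST_NON_DATA_EVENT
  is row data and is fed to the injector transaction, anything else
  (schema and cluster events) is handled via the non-data path.
*/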
|
|
|
|
if ((unsigned) pOp->getEventType() <
|
|
|
|
(unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT)
|
2006-04-12 18:01:19 +02:00
|
|
|
ndb_binlog_thread_handle_data_event(i_ndb, pOp, row, trans);
|
2006-01-12 19:51:02 +01:00
|
|
|
else
|
2006-02-20 16:36:30 +01:00
|
|
|
{
|
|
|
|
// set injector_ndb database/schema from table internal name
|
2009-10-30 19:13:58 +01:00
|
|
|
int ret __attribute__((unused))=
|
2006-04-12 18:01:19 +02:00
|
|
|
i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
|
2006-03-11 06:58:48 +01:00
|
|
|
DBUG_ASSERT(ret == 0);
|
2006-04-12 18:01:19 +02:00
|
|
|
ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row);
|
2006-02-20 16:36:30 +01:00
|
|
|
// reset to catch errors
|
2006-04-12 18:01:19 +02:00
|
|
|
i_ndb->setDatabaseName("");
|
2006-05-16 20:56:45 +02:00
|
|
|
DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ?
|
|
|
|
s_ndb->getEventOperation()->getEvent()->getTable()->getName() :
|
|
|
|
"<empty>"));
|
|
|
|
DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ?
|
|
|
|
i_ndb->getEventOperation()->getEvent()->getTable()->getName() :
|
|
|
|
"<empty>"));
|
|
|
|
if (i_ndb->getEventOperation() == NULL &&
|
|
|
|
s_ndb->getEventOperation() == NULL &&
|
|
|
|
do_ndbcluster_binlog_close_connection == BCCC_running)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection= BCCC_restart"));
|
|
|
|
do_ndbcluster_binlog_close_connection= BCCC_restart;
|
2006-08-30 11:41:21 +02:00
|
|
|
if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
|
2006-05-16 20:56:45 +02:00
|
|
|
{
|
2006-11-27 17:16:08 +01:00
|
|
|
sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
|
|
|
|
"as latest received epoch is %lu",
|
|
|
|
(ulong) *p_latest_trans_gci,
|
|
|
|
(ulong) ndb_latest_received_binlog_epoch);
|
2006-05-16 20:56:45 +02:00
|
|
|
}
|
|
|
|
}
|
2006-02-20 16:36:30 +01:00
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
|
2006-04-12 18:01:19 +02:00
|
|
|
pOp= i_ndb->nextEvent();
|
2006-01-12 19:51:02 +01:00
|
|
|
} while (pOp && pOp->getGCI() == gci);
|
|
|
|
|
|
|
|
/*
|
|
|
|
note! pOp is now referring to an event in the next epoch
|
|
|
|
or is == 0
|
2007-02-27 10:27:04 +01:00
|
|
|
*/
|
2006-01-12 19:51:02 +01:00
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
write_timer.stop();
|
|
|
|
#endif
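/*
  Commit the epoch to the binlog if the injector transaction was
  started, i.e. at least one event was written to it.
*/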
|
|
|
|
|
2006-03-11 06:58:48 +01:00
|
|
|
if (trans.good())
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-03-13 09:55:41 +01:00
|
|
|
//DBUG_ASSERT(row.n_inserts || row.n_updates || row.n_deletes);
|
2006-04-10 16:08:40 +02:00
|
|
|
thd->proc_info= "Committing events to binlog";
|
2006-01-12 19:51:02 +01:00
|
|
|
injector::transaction::binlog_pos start= trans.start_pos();
|
|
|
|
if (int r= trans.commit())
|
|
|
|
{
|
2007-05-31 16:45:22 +02:00
|
|
|
sql_print_error("NDB Binlog: "
|
2006-01-12 19:51:02 +01:00
|
|
|
"Error during COMMIT of GCI. Error: %d",
|
|
|
|
r);
|
|
|
|
/* TODO: Further handling? */
|
|
|
|
}
|
|
|
|
row.gci= gci;
|
|
|
|
row.master_log_file= start.file_name();
|
|
|
|
row.master_log_pos= start.file_pos();
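/*
  Remember the epoch together with the binlog position where the
  transaction started; ndb_add_ndb_binlog_index() below persists
  this epoch-to-binlog-position mapping when logging of the
  binlog index is enabled.
*/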
|
|
|
|
|
2006-11-27 17:16:08 +01:00
|
|
|
DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci));
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndb_update_ndb_binlog_index)
|
|
|
|
ndb_add_ndb_binlog_index(thd, &row);
|
2006-04-21 18:28:00 +02:00
|
|
|
ndb_latest_applied_binlog_epoch= gci;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
ndb_latest_handled_binlog_epoch= gci;
|
|
|
|
#ifdef RUN_NDB_BINLOG_TIMER
|
|
|
|
gci_timer.stop();
|
|
|
|
sql_print_information("gci %ld event_count %d write time "
|
|
|
|
"%ld(%d e/s), total time %ld(%d e/s)",
|
|
|
|
(ulong)gci, event_count,
|
|
|
|
write_timer.elapsed_ms(),
|
2007-02-01 07:28:41 +01:00
|
|
|
(1000*event_count) / write_timer.elapsed_ms(),
|
2006-01-12 19:51:02 +01:00
|
|
|
gci_timer.elapsed_ms(),
|
2007-02-01 07:28:41 +01:00
|
|
|
(1000*event_count) / gci_timer.elapsed_ms());
|
2006-01-12 19:51:02 +01:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-03-09 01:04:13 +01:00
|
|
|
ndb_binlog_thread_handle_schema_event_post_epoch(thd,
|
|
|
|
&post_epoch_log_list,
|
|
|
|
&post_epoch_unlock_list);
|
2006-01-12 19:51:02 +01:00
|
|
|
free_root(&mem_root, MYF(0));
|
|
|
|
*root_ptr= old_root;
|
|
|
|
ndb_latest_handled_binlog_epoch= ndb_latest_received_binlog_epoch;
|
|
|
|
}
|
2006-05-17 08:34:48 +02:00
|
|
|
if (do_ndbcluster_binlog_close_connection == BCCC_restart)
|
2006-05-19 12:54:12 +02:00
|
|
|
{
|
|
|
|
ndb_binlog_tables_inited= FALSE;
|
2010-07-27 12:25:53 +02:00
|
|
|
trans_commit_stmt(thd);
|
2006-05-19 12:54:12 +02:00
|
|
|
close_thread_tables(thd);
|
2010-07-27 12:25:53 +02:00
|
|
|
thd->mdl_context.release_transactional_locks();
|
2006-12-01 15:49:07 +01:00
|
|
|
ndb_binlog_index= 0;
|
2006-05-16 20:56:45 +02:00
|
|
|
goto restart;
|
2006-05-19 12:54:12 +02:00
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
err:
|
2006-12-20 22:57:23 +01:00
|
|
|
sql_print_information("Stopping Cluster Binlog");
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_PRINT("info",("Shutting down cluster binlog thread"));
|
2006-04-10 16:08:40 +02:00
|
|
|
thd->proc_info= "Shutting down";
|
2010-07-27 12:25:53 +02:00
|
|
|
thd->stmt_da->can_overwrite_status= TRUE;
|
|
|
|
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
|
|
|
|
thd->stmt_da->can_overwrite_status= FALSE;
|
2006-01-12 19:51:02 +01:00
|
|
|
close_thread_tables(thd);
|
2010-07-27 12:25:53 +02:00
|
|
|
thd->mdl_context.release_transactional_locks();
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
/* don't mess with the injector_ndb anymore from other threads */
|
2006-04-12 18:01:19 +02:00
|
|
|
injector_thd= 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
injector_ndb= 0;
|
2006-08-30 11:41:21 +02:00
|
|
|
p_latest_trans_gci= 0;
|
2006-04-12 18:01:19 +02:00
|
|
|
schema_ndb= 0;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
thd->db= 0; // so as not to try to free the memory
|
|
|
|
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndb_apply_status_share)
|
2006-03-09 15:50:26 +01:00
|
|
|
{
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog extra free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u",
|
|
|
|
ndb_apply_status_share->key,
|
|
|
|
ndb_apply_status_share->use_count));
|
2006-12-01 15:49:07 +01:00
|
|
|
free_share(&ndb_apply_status_share);
|
|
|
|
ndb_apply_status_share= 0;
|
2006-03-09 15:50:26 +01:00
|
|
|
}
|
2006-12-01 15:49:07 +01:00
|
|
|
if (ndb_schema_share)
|
2006-03-09 15:50:26 +01:00
|
|
|
{
|
2007-02-05 06:04:36 +01:00
|
|
|
/* begin protect ndb_schema_share */
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&ndb_schema_share_mutex);
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog extra free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u",
|
|
|
|
ndb_schema_share->key,
|
|
|
|
ndb_schema_share->use_count));
|
2006-12-01 15:49:07 +01:00
|
|
|
free_share(&ndb_schema_share);
|
|
|
|
ndb_schema_share= 0;
|
2008-02-28 18:55:46 +01:00
|
|
|
ndb_binlog_tables_inited= 0;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&ndb_schema_share_mutex);
|
2007-02-05 06:04:36 +01:00
|
|
|
/* end protect ndb_schema_share */
|
2006-03-09 15:50:26 +01:00
|
|
|
}
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
/* remove all event operations */
|
2006-04-12 18:01:19 +02:00
|
|
|
if (s_ndb)
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
|
|
|
NdbEventOperation *op;
|
|
|
|
DBUG_PRINT("info",("removing all event operations"));
|
2006-04-12 18:01:19 +02:00
|
|
|
while ((op= s_ndb->getEventOperation()))
|
2006-01-12 19:51:02 +01:00
|
|
|
{
|
2006-02-01 11:55:26 +01:00
|
|
|
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_PRINT("info",("removing event operation on %s",
|
|
|
|
op->getEvent()->getName()));
|
|
|
|
NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
|
2006-04-13 09:37:43 +02:00
|
|
|
DBUG_ASSERT(share != 0);
|
2006-04-12 18:01:19 +02:00
|
|
|
DBUG_ASSERT(share->op == op ||
|
|
|
|
share->op_old == op);
|
|
|
|
share->op= share->op_old= 0;
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-01-12 19:51:02 +01:00
|
|
|
free_share(&share);
|
2006-04-12 18:01:19 +02:00
|
|
|
s_ndb->dropEventOperation(op);
|
|
|
|
}
|
|
|
|
delete s_ndb;
|
|
|
|
s_ndb= 0;
|
|
|
|
}
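/* Tear down the injector Ndb's event operations in the same way */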
|
|
|
|
if (i_ndb)
|
|
|
|
{
|
|
|
|
NdbEventOperation *op;
|
|
|
|
DBUG_PRINT("info",("removing all event operations"));
|
|
|
|
while ((op= i_ndb->getEventOperation()))
|
|
|
|
{
|
|
|
|
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
|
|
|
|
DBUG_PRINT("info",("removing event operation on %s",
|
|
|
|
op->getEvent()->getName()));
|
|
|
|
NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
|
2006-04-13 09:37:43 +02:00
|
|
|
DBUG_ASSERT(share != 0);
|
2006-04-12 18:01:19 +02:00
|
|
|
DBUG_ASSERT(share->op == op ||
|
|
|
|
share->op_old == op);
|
|
|
|
share->op= share->op_old= 0;
|
2007-02-06 06:40:26 +01:00
|
|
|
/* ndb_share reference binlog free */
|
|
|
|
DBUG_PRINT("NDB_SHARE", ("%s binlog free use_count: %u",
|
|
|
|
share->key, share->use_count));
|
2006-04-12 18:01:19 +02:00
|
|
|
free_share(&share);
|
|
|
|
i_ndb->dropEventOperation(op);
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
2006-04-12 18:01:19 +02:00
|
|
|
delete i_ndb;
|
|
|
|
i_ndb= 0;
|
2006-01-12 19:51:02 +01:00
|
|
|
}
|
|
|
|
|
2009-10-14 18:37:38 +02:00
|
|
|
my_hash_free(&ndb_schema_objects);
|
2006-04-03 19:11:20 +02:00
|
|
|
|
2006-01-12 19:51:02 +01:00
|
|
|
net_end(&thd->net);
|
2006-12-21 16:43:11 +01:00
|
|
|
thd->cleanup();
|
2006-01-12 19:51:02 +01:00
|
|
|
delete thd;
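/*
  Mark the binlog thread as no longer running and wake up any
  thread waiting on injector_cond for a state change.
*/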
|
|
|
|
|
|
|
|
ndb_binlog_thread_running= -1;
|
2006-02-01 01:12:11 +01:00
|
|
|
ndb_binlog_running= FALSE;
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_cond_signal(&injector_cond);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
DBUG_PRINT("exit", ("ndb_binlog_thread"));
|
|
|
|
|
2009-09-23 15:10:23 +02:00
|
|
|
DBUG_LEAVE; // Must match DBUG_ENTER()
|
|
|
|
my_thread_end();
|
2006-01-12 19:51:02 +01:00
|
|
|
pthread_exit(0);
|
2009-09-23 15:10:23 +02:00
|
|
|
return NULL; // Avoid compiler warnings
|
2006-01-12 19:51:02 +01:00
|
|
|
}
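/*
  Status hook reporting the binlog-related epoch counters (latest
  seen, received, handled and applied epochs) through stat_print,
  as shown in the SHOW ENGINE status output.
*/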
|
|
|
|
|
|
|
|
bool
|
|
|
|
ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
|
|
|
|
enum ha_stat_type stat_type)
|
|
|
|
{
|
|
|
|
char buf[IO_SIZE];
|
|
|
|
uint buflen;
|
|
|
|
ulonglong ndb_latest_epoch= 0;
|
|
|
|
DBUG_ENTER("ndbcluster_show_status_binlog");
|
|
|
|
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_lock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
if (injector_ndb)
|
|
|
|
{
|
2006-06-23 01:49:19 +02:00
|
|
|
char buff1[22],buff2[22],buff3[22],buff4[22],buff5[22];
|
2006-01-12 19:51:02 +01:00
|
|
|
ndb_latest_epoch= injector_ndb->getLatestGCI();
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
|
|
|
|
buflen=
|
|
|
|
snprintf(buf, sizeof(buf),
|
2006-06-23 01:49:19 +02:00
|
|
|
"latest_epoch=%s, "
|
|
|
|
"latest_trans_epoch=%s, "
|
|
|
|
"latest_received_binlog_epoch=%s, "
|
|
|
|
"latest_handled_binlog_epoch=%s, "
|
|
|
|
"latest_applied_binlog_epoch=%s",
|
|
|
|
llstr(ndb_latest_epoch, buff1),
|
2006-08-30 11:41:21 +02:00
|
|
|
llstr(*p_latest_trans_gci, buff2),
|
2006-06-23 01:49:19 +02:00
|
|
|
llstr(ndb_latest_received_binlog_epoch, buff3),
|
|
|
|
llstr(ndb_latest_handled_binlog_epoch, buff4),
|
|
|
|
llstr(ndb_latest_applied_binlog_epoch, buff5));
|
2006-05-28 14:51:01 +02:00
|
|
|
if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
|
2006-01-12 19:51:02 +01:00
|
|
|
"binlog", strlen("binlog"),
|
|
|
|
buf, buflen))
|
|
|
|
DBUG_RETURN(TRUE);
|
|
|
|
}
|
|
|
|
else
|
2010-01-07 06:42:07 +01:00
|
|
|
mysql_mutex_unlock(&injector_mutex);
|
2006-01-12 19:51:02 +01:00
|
|
|
DBUG_RETURN(FALSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* HAVE_NDB_BINLOG */
|
2006-04-13 22:49:29 +02:00
|
|
|
#endif /* WITH_NDBCLUSTER_STORAGE_ENGINE */
|