mariadb/sql/sql_repl.cc

/* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include "sql_priv.h"
#include "unireg.h"
#include "sql_parse.h" // check_access
#ifdef HAVE_REPLICATION
#include "rpl_mi.h"
#include "sql_repl.h"
#include "sql_acl.h" // SUPER_ACL
#include "log_event.h"
#include "rpl_filter.h"
#include <my_dir.h>
#include "rpl_handler.h"
#include "debug_sync.h"
int max_binlog_dump_events = 0; // unlimited
my_bool opt_sporadic_binlog_dump_fail = 0;
#ifndef DBUG_OFF
static int binlog_dump_count = 0;
#endif
/**
a copy of active_mi->rli->slave_skip_counter, for showing in SHOW VARIABLES,
INFORMATION_SCHEMA.GLOBAL_VARIABLES and @@sql_slave_skip_counter without
taking all the mutexes needed to access active_mi->rli->slave_skip_counter
properly.
*/
uint sql_slave_skip_counter;
/*
fake_rotate_event() builds a fake (=which does not exist physically in any
binlog) Rotate event, which contains the name of the binlog we are going to
send to the slave (because the slave may not know it if it just asked for
MASTER_LOG_FILE='', MASTER_LOG_POS=4).
< 4.0.14, fake_rotate_event() was called only if the requested pos was 4.
After this version we always call it, so that a 3.23.58 slave can rely on
it to detect if the master is 4.0 (and stop) (the _fake_ Rotate event has
  zeros in the right positions which, by chance, make it possible for the 3.23
slave to detect that this event is unexpected) (this is luck which happens
because the master and slave disagree on the size of the header of
Log_event).
Relying on the event length of the Rotate event instead of these
well-placed zeros was not possible as Rotate events have a variable-length
part.
*/
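
/*
  Note (added): a rough sketch of the bytes appended to 'packet' below; the
  field sizes follow the int4store/int2store/int8store calls and the usual
  binlog event header layout implied by the offset constants:

    timestamp   (4 bytes)   0, which marks the event as fake
    event type  (1 byte)    ROTATE_EVENT
    server id   (4 bytes)   server_id
    event len   (4 bytes)   LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN + ident_len
    log_pos     (4 bytes)   0 (see the TODO below)
    flags       (2 bytes)   LOG_EVENT_ARTIFICIAL_F
    position    (8 bytes)   the 'position' argument, in the rotate body
    file name   (ident_len bytes, basename of the binlog, no trailing '\0')
*/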
static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
ulonglong position, const char** errmsg)
{
DBUG_ENTER("fake_rotate_event");
char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN+100];
/*
    'when' (the timestamp) is set to 0 so that the slave can distinguish between
real and fake Rotate events (if necessary)
*/
memset(header, 0, 4);
header[EVENT_TYPE_OFFSET] = ROTATE_EVENT;
char* p = log_file_name+dirname_length(log_file_name);
uint ident_len = (uint) strlen(p);
ulong event_len = ident_len + LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN;
int4store(header + SERVER_ID_OFFSET, server_id);
int4store(header + EVENT_LEN_OFFSET, event_len);
int2store(header + FLAGS_OFFSET, LOG_EVENT_ARTIFICIAL_F);
// TODO: check what problems this may cause and fix them
int4store(header + LOG_POS_OFFSET, 0);
packet->append(header, sizeof(header));
int8store(buf+R_POS_OFFSET,position);
packet->append(buf, ROTATE_HEADER_LEN);
packet->append(p,ident_len);
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
{
*errmsg = "failed on my_net_write()";
DBUG_RETURN(-1);
}
DBUG_RETURN(0);
}
/*
Reset thread transmit packet buffer for event sending
This function allocates header bytes for event transmission, and
  should be called before storing the event data to the packet buffer.
*/
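/*
  Note (added): an illustrative call pattern for a sender loop; the snippet
  is only a sketch, not code taken from elsewhere in this file:

    ulong ev_offset;
    const char *errmsg= NULL;
    if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
      goto err;                       // errmsg and my_errno are already set
    // append one event's bytes after ev_offset, then send thd->packet
*/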
static int reset_transmit_packet(THD *thd, ushort flags,
ulong *ev_offset, const char **errmsg)
{
int ret= 0;
String *packet= &thd->packet;
/* reserve and set default header */
packet->length(0);
packet->set("\0", 1, &my_charset_bin);
if (RUN_HOOK(binlog_transmit, reserve_header, (thd, flags, packet)))
{
*errmsg= "Failed to run hook 'reserve_header'";
my_errno= ER_UNKNOWN_ERROR;
ret= 1;
}
*ev_offset= packet->length();
return ret;
}
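
/*
  Note (added): send_file() implements the old file-transfer handshake used
  when replicating LOAD DATA INFILE: the master flushes the net buffer, the
  slave replies with the name of the file it wants, the master streams that
  file back in IO_SIZE chunks, and an empty packet marks the end of the
  transfer.
*/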
static int send_file(THD *thd)
{
NET* net = &thd->net;
int fd = -1, error = 1;
size_t bytes;
char fname[FN_REFLEN+1];
const char *errmsg = 0;
int old_timeout;
unsigned long packet_len;
uchar buf[IO_SIZE]; // It's safe to alloc this
DBUG_ENTER("send_file");
/*
    The client might be slow loading the data; give it wait_timeout to do
    the job.
*/
old_timeout= net->read_timeout;
my_net_set_read_timeout(net, thd->variables.net_wait_timeout);
/*
We need net_flush here because the client will not know it needs to send
us the file name until it has processed the load event entry
*/
if (net_flush(net) || (packet_len = my_net_read(net)) == packet_error)
{
errmsg = "while reading file name";
goto err;
}
// terminate with \0 for fn_format
*((char*)net->read_pos + packet_len) = 0;
fn_format(fname, (char*) net->read_pos + 1, "", "", 4);
  // this is needed to make replicate-ignore-db work: the slave asks for
  // /dev/null when it wants the file to be skipped
if (!strcmp(fname,"/dev/null"))
goto end;
if ((fd= mysql_file_open(key_file_send_file,
fname, O_RDONLY, MYF(0))) < 0)
{
errmsg = "on open of file";
goto err;
}
while ((long) (bytes= mysql_file_read(fd, buf, IO_SIZE, MYF(0))) > 0)
{
if (my_net_write(net, buf, bytes))
{
errmsg = "while writing data to client";
goto err;
}
}
end:
if (my_net_write(net, (uchar*) "", 0) || net_flush(net) ||
(my_net_read(net) == packet_error))
{
errmsg = "while negotiating file transfer close";
goto err;
}
error = 0;
err:
my_net_set_read_timeout(net, old_timeout);
if (fd >= 0)
mysql_file_close(fd, MYF(0));
if (errmsg)
{
sql_print_error("Failed in send_file() %s", errmsg);
DBUG_PRINT("error", ("%s", errmsg));
}
DBUG_RETURN(error);
}
/*
Adjust the position pointer in the binary log file for all running slaves
SYNOPSIS
adjust_linfo_offsets()
purge_offset Number of bytes removed from start of log index file
NOTES
- This is called when doing a PURGE when we delete lines from the
index log file
REQUIREMENTS
- Before calling this function, we have to ensure that no threads are
      using any binary log file before purge_offset.
TODO
- Inform the slave threads that they should sync the position
in the binary log file with flush_relay_log_info.
      For now, the sync is done at the next read.
*/
void adjust_linfo_offsets(my_off_t purge_offset)
{
THD *tmp;
mysql_mutex_lock(&LOCK_thread_count);
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
LOG_INFO* linfo;
if ((linfo = tmp->current_linfo))
{
mysql_mutex_lock(&linfo->lock);
/*
        Index file offset can be less than purge offset only if
we just started reading the index file. In that case
we have nothing to adjust
*/
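      /*
        Note (added): a non-zero offset that is still below purge_offset
        means the reader's saved position now points into the purged part of
        the index file and cannot be fixed up, so it is flagged as fatal.
      */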
if (linfo->index_file_offset < purge_offset)
linfo->fatal = (linfo->index_file_offset != 0);
else
linfo->index_file_offset -= purge_offset;
mysql_mutex_unlock(&linfo->lock);
}
}
mysql_mutex_unlock(&LOCK_thread_count);
}
bool log_in_use(const char* log_name)
{
size_t log_name_len = strlen(log_name) + 1;
THD *tmp;
bool result = 0;
mysql_mutex_lock(&LOCK_thread_count);
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
LOG_INFO* linfo;
if ((linfo = tmp->current_linfo))
{
mysql_mutex_lock(&linfo->lock);
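      /*
        Note (added): log_name_len includes the terminating '\0', so the
        comparison only matches a thread reading exactly this log file, not
        one whose log name merely starts with log_name.
      */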
result = !memcmp(log_name, linfo->log_file_name, log_name_len);
mysql_mutex_unlock(&linfo->lock);
if (result)
break;
}
}
mysql_mutex_unlock(&LOCK_thread_count);
return result;
}
bool purge_error_message(THD* thd, int res)
{
uint errcode;
if ((errcode= purge_log_get_error_code(res)) != 0)
{
my_message(errcode, ER(errcode), MYF(0));
return TRUE;
}
my_ok(thd);
return FALSE;
}
/**
Execute a PURGE BINARY LOGS TO <log> command.
@param thd Pointer to THD object for the client thread executing the
statement.
@param to_log Name of the last log to purge.
@retval FALSE success
@retval TRUE failure
*/
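/*
  Note (added): an example of the statement handled here (the log name is
  illustrative):

    PURGE BINARY LOGS TO 'mysql-bin.000010';
*/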
bool purge_master_logs(THD* thd, const char* to_log)
{
char search_file_name[FN_REFLEN];
if (!mysql_bin_log.is_open())
{
my_ok(thd);
return FALSE;
}
mysql_bin_log.make_log_name(search_file_name, to_log);
return purge_error_message(thd,
mysql_bin_log.purge_logs(search_file_name, 0, 1,
1, NULL));
}
/**
Execute a PURGE BINARY LOGS BEFORE <date> command.
@param thd Pointer to THD object for the client thread executing the
statement.
@param purge_time Date before which logs should be purged.
@retval FALSE success
@retval TRUE failure
*/
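/*
  Note (added): an example of the statement handled here (the date is
  illustrative):

    PURGE BINARY LOGS BEFORE '2012-01-01 00:00:00';
*/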
bool purge_master_logs_before_date(THD* thd, time_t purge_time)
{
if (!mysql_bin_log.is_open())
{
my_ok(thd);
return 0;
}
return purge_error_message(thd,
mysql_bin_log.purge_logs_before_date(purge_time));
}
int test_for_non_eof_log_read_errors(int error, const char **errmsg)
{
if (error == LOG_READ_EOF)
return 0;
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
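  /*
    Note (added): every non-EOF read error is mapped to
    ER_MASTER_FATAL_ERROR_READING_BINLOG above; the switch below only picks
    the human-readable detail text returned in *errmsg.
  */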
switch (error) {
case LOG_READ_BOGUS:
*errmsg = "bogus data in log event";
break;
case LOG_READ_TOO_LARGE:
*errmsg = "log event entry exceeded max_allowed_packet; \
Increase max_allowed_packet on master";
break;
case LOG_READ_IO:
*errmsg = "I/O error reading log event";
break;
case LOG_READ_MEM:
*errmsg = "memory allocation failed reading log event";
break;
case LOG_READ_TRUNC:
*errmsg = "binlog truncated in the middle of event; consider out of disk space on master";
break;
default:
*errmsg = "unknown error reading log event on the master";
break;
}
return error;
}
/**
  An auxiliary function called from mysql_binlog_send to initialize
  the heartbeat timeout used while waiting for a binlogged event.
  @param[in] thd THD to access a user variable
  @return the heartbeat period as an ulonglong of nanoseconds,
          or zero if the slave did not request heartbeats
*/
static ulonglong get_heartbeat_period(THD * thd)
{
my_bool null_value;
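  /*
    The slave registers the period it wants on this connection before
    issuing COM_BINLOG_DUMP (roughly "SET @master_heartbeat_period= <nsec>"),
    so it can be read straight from the session's user-variable hash below.
  */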
LEX_STRING name= { C_STRING_WITH_LEN("master_heartbeat_period")};
user_var_entry *entry=
(user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name.str,
name.length);
return entry? entry->val_int(&null_value) : 0;
}
/*
  Function prepares and sends a replication heartbeat event.
@param net net object of THD
@param packet buffer to store the heartbeat instance
@param event_coordinates binlog file name and position of the last
real event master sent from binlog
@note
    Among the three essential pieces of heartbeat data, Log_event::when
    is computed locally.
    A failure to send is serious and should force termination of
    the dump thread.
*/
static int send_heartbeat_event(NET* net, String* packet,
const struct event_coordinates *coord)
{
DBUG_ENTER("send_heartbeat_event");
char header[LOG_EVENT_HEADER_LEN];
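  /*
    A heartbeat event is just the common LOG_EVENT_HEADER_LEN-byte header
    plus, as its body, the basename of the binlog file last read; the
    position of the last real event travels in the header's log_pos field.
  */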
/*
    'when' (the timestamp) is set to 0 so that the slave can distinguish
    this artificial event from real ones (the same convention as for fake
    Rotate events)
*/
memset(header, 0, 4); // when
header[EVENT_TYPE_OFFSET] = HEARTBEAT_LOG_EVENT;
char* p= coord->file_name + dirname_length(coord->file_name);
uint ident_len = strlen(p);
ulong event_len = ident_len + LOG_EVENT_HEADER_LEN;
int4store(header + SERVER_ID_OFFSET, server_id);
int4store(header + EVENT_LEN_OFFSET, event_len);
int2store(header + FLAGS_OFFSET, 0);
int4store(header + LOG_POS_OFFSET, coord->pos); // log_pos
packet->append(header, sizeof(header));
packet->append(p, ident_len); // log_file_name
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()) ||
net_flush(net))
{
DBUG_RETURN(-1);
}
DBUG_RETURN(0);
}
/*
TODO: Clean up loop to only have one call to send_file()
*/
void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
ushort flags)
{
LOG_INFO linfo;
char *log_file_name = linfo.log_file_name;
char search_file_name[FN_REFLEN], *name;
ulong ev_offset;
IO_CACHE log;
File file = -1;
String* packet = &thd->packet;
int error;
const char *errmsg = "Unknown error";
  char error_text[MAX_SLAVE_ERRMSG]; // to be sent to slave via my_message()
NET* net = &thd->net;
mysql_mutex_t *log_lock;
mysql_cond_t *log_cond;
bool binlog_can_be_corrupted= FALSE;
#ifndef DBUG_OFF
int left_events = max_binlog_dump_events;
#endif
int old_max_allowed_packet= thd->variables.max_allowed_packet;
DBUG_ENTER("mysql_binlog_send");
DBUG_PRINT("enter",("log_ident: '%s' pos: %ld", log_ident, (long) pos));
bzero((char*) &log,sizeof(log));
/*
heartbeat_period from @master_heartbeat_period user variable
*/
ulonglong heartbeat_period= get_heartbeat_period(thd);
struct timespec heartbeat_buf;
struct timespec *heartbeat_ts= NULL;
const LOG_POS_COORD start_coord= { log_ident, pos },
*p_start_coord= &start_coord;
LOG_POS_COORD coord_buf= { log_file_name, BIN_LOG_HEADER_SIZE },
*p_coord= &coord_buf;
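  /*
    A zero heartbeat_period means the slave did not ask for heartbeats;
    otherwise heartbeat_ts bounds how long we wait for new binlog events
    before emitting a heartbeat (used in the event-wait code further down).
  */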
if (heartbeat_period != LL(0))
{
heartbeat_ts= &heartbeat_buf;
set_timespec_nsec(*heartbeat_ts, 0);
}
if (global_system_variables.log_warnings > 1)
sql_print_information("Start binlog_dump to slave_server(%d), pos(%s, %lu)",
thd->server_id, log_ident, (ulong)pos);
if (RUN_HOOK(binlog_transmit, transmit_start, (thd, flags, log_ident, pos)))
{
errmsg= "Failed to run hook 'transmit_start'";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
#ifndef DBUG_OFF
if (opt_sporadic_binlog_dump_fail && (binlog_dump_count++ % 2))
{
errmsg = "Master failed COM_BINLOG_DUMP to test if slave can recover";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
#endif
if (!mysql_bin_log.is_open())
{
errmsg = "Binary log is not open";
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
if (!server_id_supplied)
{
errmsg = "Misconfigured master - server id was not set";
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
name=search_file_name;
if (log_ident[0])
mysql_bin_log.make_log_name(search_file_name, log_ident);
else
name=0; // Find first log
linfo.index_file_offset = 0;
if (mysql_bin_log.find_log_pos(&linfo, name, 1))
{
errmsg = "Could not find first log file name in binary log index file";
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
mysql_mutex_lock(&LOCK_thread_count);
thd->current_linfo = &linfo;
mysql_mutex_unlock(&LOCK_thread_count);
if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0)
{
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
if (pos < BIN_LOG_HEADER_SIZE || pos > my_b_filelength(&log))
{
errmsg= "Client requested master to start replication from \
impossible position";
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
/* reset transmit packet for the fake rotate event below */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
/*
Tell the client about the log name with a fake Rotate event;
this is needed even if we also send a Format_description_log_event
just after, because that event does not contain the binlog's name.
Note that as this Rotate event is sent before
Format_description_log_event, the slave cannot have any info to
understand this event's format, so the header len of
Rotate_log_event is FROZEN (so in 5.0 it will have a header shorter
than other events except FORMAT_DESCRIPTION_EVENT).
Before 4.0.14 we called fake_rotate_event below only if (pos ==
BIN_LOG_HEADER_SIZE), because if this is false then the slave
already knows the binlog's name.
    Since then, we always call fake_rotate_event; if the slave already knew
the log's name (ex: CHANGE MASTER TO MASTER_LOG_FILE=...) this is
useless but does not harm much. It is nice for 3.23 (>=.58) slaves
which test Rotate events to see if the master is 4.0 (then they
choose to stop because they can't replicate 4.0); by always calling
fake_rotate_event we are sure that 3.23.58 and newer will detect the
problem as soon as replication starts (BUG#198).
Always calling fake_rotate_event makes sending of normal
(=from-binlog) Rotate events a priori unneeded, but it is not so
simple: the 2 Rotate events are not equivalent, the normal one is
before the Stop event, the fake one is after. If we don't send the
normal one, then the Stop event will be interpreted (by existing 4.0
slaves) as "the master stopped", which is wrong. So for safety,
given that we want minimum modification of 4.0, we send the normal
and fake Rotates.
*/
if (fake_rotate_event(net, packet, log_file_name, pos, &errmsg))
{
/*
This error code is not perfect, as fake_rotate_event() does not
read anything from the binlog; if it fails it's because of an
error in my_net_write(), fortunately it will say so in errmsg.
*/
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
/*
    Adding MAX_LOG_EVENT_HEADER_LEN, since a binlog event can be this
    much larger than the corresponding packet (query) sent
    from client to master.
*/
thd->variables.max_allowed_packet= MAX_MAX_ALLOWED_PACKET;
/*
We can set log_lock now, it does not move (it's a member of
mysql_bin_log, and it's already inited, and it will be destroyed
only at shutdown).
*/
p_coord->pos= pos; // the first hb matches the slave's last seen value
log_lock= mysql_bin_log.get_log_lock();
log_cond= mysql_bin_log.get_log_cond();
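  /*
    log_lock and log_cond are used further down, once the active binlog is
    exhausted, to wait until new events are written (the wait is bounded by
    heartbeat_ts when the slave requested heartbeats).
  */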
if (pos > BIN_LOG_HEADER_SIZE)
{
/* reset transmit packet for the event read from binary log
file */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
/*
Try to find a Format_description_log_event at the beginning of
the binlog
*/
if (!(error = Log_event::read_log_event(&log, packet, log_lock)))
{
/*
The packet has offsets equal to the normal offsets in a
binlog event + ev_offset (the first ev_offset characters are
the header (default \0)).
*/
DBUG_PRINT("info",
("Looked for a Format_description_log_event, found event type %d",
(*packet)[EVENT_TYPE_OFFSET+ev_offset]));
if ((*packet)[EVENT_TYPE_OFFSET+ev_offset] == FORMAT_DESCRIPTION_EVENT)
{
binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+ev_offset] &
LOG_EVENT_BINLOG_IN_USE_F);
(*packet)[FLAGS_OFFSET+ev_offset] &= ~LOG_EVENT_BINLOG_IN_USE_F;
/*
          mark this event with "log_pos=0", so that the slave
should not increment master's binlog position
(rli->group_master_log_pos)
*/
int4store((char*) packet->ptr()+LOG_POS_OFFSET+ev_offset, 0);
/*
          when the slave reconnects, the master sends the FD event with
          `created' set to 0, to avoid destroying the slave's temp tables.
*/
int4store((char*) packet->ptr()+LOG_EVENT_MINIMAL_HEADER_LEN+
ST_CREATED_OFFSET+ev_offset, (ulong) 0);
/* send it */
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
{
errmsg = "Failed on my_net_write()";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
/*
No need to save this event. We are only doing simple reads
(no real parsing of the events) so we don't need it. And so
we don't need the artificial Format_description_log_event of
3.23&4.x.
*/
}
}
else
{
if (test_for_non_eof_log_read_errors(error, &errmsg))
goto err;
/*
It's EOF, nothing to do, go on reading next events, the
Format_description_log_event will be found naturally if it is written.
*/
}
} /* end of if (pos > BIN_LOG_HEADER_SIZE); */
else
{
/* The Format_description_log_event event will be found naturally. */
}
/* seek to the requested position, to start the requested dump */
  my_b_seek(&log, pos);                         // Seek will be done on next read
while (!net->error && net->vio != 0 && !thd->killed)
{
Log_event_type event_type= UNKNOWN_EVENT;
/* reset the transmit packet for the event read from binary log
file */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
while (!(error= Log_event::read_log_event(&log, packet, log_lock)))
{
#ifndef DBUG_OFF
if (max_binlog_dump_events && !left_events--)
{
net_flush(net);
errmsg = "Debugging binlog dump abort";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
#endif
/*
log's filename does not change while it's active
*/
p_coord->pos= uint4korr(packet->ptr() + ev_offset + LOG_POS_OFFSET);
event_type= (Log_event_type)((*packet)[LOG_EVENT_OFFSET+ev_offset]);
DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid",
{
if (event_type == XID_EVENT)
{
net_flush(net);
const char act[]=
"now "
"wait_for signal.continue";
DBUG_ASSERT(opt_debug_sync_timeout > 0);
DBUG_ASSERT(!debug_sync_set_action(current_thd,
STRING_WITH_LEN(act)));
}
});
if (event_type == FORMAT_DESCRIPTION_EVENT)
{
binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+ev_offset] &
LOG_EVENT_BINLOG_IN_USE_F);
(*packet)[FLAGS_OFFSET+ev_offset] &= ~LOG_EVENT_BINLOG_IN_USE_F;
}
else if (event_type == STOP_EVENT)
binlog_can_be_corrupted= FALSE;
/*
Introduced this code to make the gcc 4.6.1 compiler happy. When
warnings are converted to errors, the compiler complains about
the fact that binlog_can_be_corrupted is defined but never used.
        We need to check if this is dead code or if someone removed some
code by mistake.
/Alfranio
*/
if (binlog_can_be_corrupted)
{
/*
Don't try to print out warning messages because this generates
erroneous messages in the error log and causes performance
problems.
/Alfranio
*/
}
pos = my_b_tell(&log);
if (RUN_HOOK(binlog_transmit, before_send_event,
(thd, flags, packet, log_file_name, pos)))
{
my_errno= ER_UNKNOWN_ERROR;
errmsg= "run 'before_send_event' hook failed";
goto err;
}
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
{
errmsg = "Failed on my_net_write()";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid",
{
if (event_type == XID_EVENT)
{
net_flush(net);
}
});
DBUG_PRINT("info", ("log event code %d", event_type));
if (event_type == LOAD_EVENT)
{
if (send_file(thd))
{
errmsg = "failed in send_file()";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
}
if (RUN_HOOK(binlog_transmit, after_send_event, (thd, flags, packet)))
{
errmsg= "Failed to run hook 'after_send_event'";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
/* reset transmit packet for next loop */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
}
/*
TODO: now that we are logging the offset, check to make sure
      the recorded offset and the actual offset match.
Guilhem 2003-06: this is not true if this master is a slave
<4.0.15 running with --log-slave-updates, because then log_pos may
be the offset in the-master-of-this-master's binlog.
*/
if (test_for_non_eof_log_read_errors(error, &errmsg))
goto err;
if (!(flags & BINLOG_DUMP_NON_BLOCK) &&
mysql_bin_log.is_active(log_file_name))
{
/*
Block until there is more data in the log
*/
if (net_flush(net))
{
errmsg = "failed on net_flush()";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
/*
We may have missed the update broadcast from the log
that has just happened, let's try to catch it if it did.
If we did not miss anything, we just wait for other threads
to signal us.
*/
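      /*
        A minimal sketch (not part of the server) of the pattern used in the
        block below: take the log lock, re-read once in case we missed the
        update broadcast, and only wait on the condition if there really was
        nothing to read.  read_one_event() and wait_for_binlog_update() are
        hypothetical helpers standing in for the real calls.

          mysql_mutex_lock(log_lock);
          if (read_one_event(&log) == LOG_READ_EOF)   // hypothetical helper
            wait_for_binlog_update();                 // waits on log_cond, releases the lock
          else
            mysql_mutex_unlock(log_lock);             // got an event; go send it
      */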
{
log.error=0;
bool read_packet = 0;
#ifndef DBUG_OFF
if (max_binlog_dump_events && !left_events--)
{
errmsg = "Debugging binlog dump abort";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
#endif
/* reset the transmit packet for the event read from binary log
file */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
/*
No one will update the log while we are reading
now, but we'll be quick and just read one record
TODO:
            Add a counter that is incremented each time we update the
binary log. We can avoid the following read if the counter
has not been updated since last read.
*/
mysql_mutex_lock(log_lock);
switch (error= Log_event::read_log_event(&log, packet, (mysql_mutex_t*) 0)) {
case 0:
/* we read successfully, so we'll need to send it to the slave */
mysql_mutex_unlock(log_lock);
read_packet = 1;
p_coord->pos= uint4korr(packet->ptr() + ev_offset + LOG_POS_OFFSET);
event_type= (Log_event_type)((*packet)[LOG_EVENT_OFFSET+ev_offset]);
break;
case LOG_READ_EOF:
{
int ret;
ulong signal_cnt;
DBUG_PRINT("wait",("waiting for data in binary log"));
if (thd->server_id==0) // for mysqlbinlog (mysqlbinlog.server_id==0)
{
mysql_mutex_unlock(log_lock);
goto end;
}
#ifndef DBUG_OFF
ulong hb_info_counter= 0;
#endif
const char* old_msg= thd->proc_info;
signal_cnt= mysql_bin_log.signal_cnt;
do
{
if (heartbeat_period != 0)
{
DBUG_ASSERT(heartbeat_ts);
set_timespec_nsec(*heartbeat_ts, heartbeat_period);
}
thd->enter_cond(log_cond, log_lock,
"Master has sent all binlog to slave; "
"waiting for binlog to be updated");
ret= mysql_bin_log.wait_for_update_bin_log(thd, heartbeat_ts);
DBUG_ASSERT(ret == 0 || (heartbeat_period != 0));
if (ret == ETIMEDOUT || ret == ETIME)
{
#ifndef DBUG_OFF
if (hb_info_counter < 3)
{
sql_print_information("master sends heartbeat message");
hb_info_counter++;
if (hb_info_counter == 3)
sql_print_information("the rest of heartbeat info skipped ...");
}
#endif
/* reset transmit packet for the heartbeat event */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
{
thd->exit_cond(old_msg);
goto err;
}
if (send_heartbeat_event(net, packet, p_coord))
{
errmsg = "Failed on my_net_write()";
my_errno= ER_UNKNOWN_ERROR;
thd->exit_cond(old_msg);
goto err;
}
}
else
{
DBUG_PRINT("wait",("binary log received update or a broadcast signal caught"));
}
} while (signal_cnt == mysql_bin_log.signal_cnt && !thd->killed);
thd->exit_cond(old_msg);
}
break;
default:
mysql_mutex_unlock(log_lock);
test_for_non_eof_log_read_errors(error, &errmsg);
goto err;
}
if (read_packet)
{
thd_proc_info(thd, "Sending binlog event to slave");
pos = my_b_tell(&log);
if (RUN_HOOK(binlog_transmit, before_send_event,
(thd, flags, packet, log_file_name, pos)))
{
my_errno= ER_UNKNOWN_ERROR;
errmsg= "run 'before_send_event' hook failed";
goto err;
}
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()) )
{
errmsg = "Failed on my_net_write()";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
if (event_type == LOAD_EVENT)
{
if (send_file(thd))
{
errmsg = "failed in send_file()";
my_errno= ER_UNKNOWN_ERROR;
goto err;
}
}
if (RUN_HOOK(binlog_transmit, after_send_event, (thd, flags, packet)))
{
my_errno= ER_UNKNOWN_ERROR;
errmsg= "Failed to run hook 'after_send_event'";
goto err;
}
}
log.error=0;
}
}
else
{
      bool loop_breaker = 0;
      /* need this to break out of the outer loop from the switch */
thd_proc_info(thd, "Finished reading one binlog; switching to next binlog");
switch (mysql_bin_log.find_next_log(&linfo, 1)) {
case 0:
break;
case LOG_INFO_EOF:
if (mysql_bin_log.is_active(log_file_name))
{
loop_breaker = (flags & BINLOG_DUMP_NON_BLOCK);
break;
}
default:
errmsg = "could not find next log";
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
if (loop_breaker)
break;
end_io_cache(&log);
mysql_file_close(file, MYF(MY_WME));
/* reset transmit packet for the possible fake rotate event */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
/*
Call fake_rotate_event() in case the previous log (the one which
we have just finished reading) did not contain a Rotate event
        (for example, the previous log was the last one before the master
        was shut down and restarted; I don't know any other example).
This way we tell the slave about the new log's name and
position. If the binlog is 5.0, the next event we are going to
read and send is Format_description_log_event.
*/
if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0 ||
fake_rotate_event(net, packet, log_file_name, BIN_LOG_HEADER_SIZE,
&errmsg))
{
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
p_coord->file_name= log_file_name; // reset to the next
}
}
end:
end_io_cache(&log);
mysql_file_close(file, MYF(MY_WME));
RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
my_eof(thd);
thd_proc_info(thd, "Waiting to finalize termination");
mysql_mutex_lock(&LOCK_thread_count);
thd->current_linfo = 0;
mysql_mutex_unlock(&LOCK_thread_count);
thd->variables.max_allowed_packet= old_max_allowed_packet;
DBUG_VOID_RETURN;
err:
thd_proc_info(thd, "Waiting to finalize termination");
if (my_errno == ER_MASTER_FATAL_ERROR_READING_BINLOG && my_b_inited(&log))
{
/*
detailing the fatal error message with coordinates
of the last position read.
*/
my_snprintf(error_text, sizeof(error_text),
"%s; the first event '%s' at %lld, "
"the last event read from '%s' at %lld, "
"the last byte read from '%s' at %lld.",
errmsg,
p_start_coord->file_name, p_start_coord->pos,
p_coord->file_name, p_coord->pos,
log_file_name, my_b_tell(&log));
}
else
strcpy(error_text, errmsg);
end_io_cache(&log);
RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
/*
    Exclude iteration through the thread list: this is needed for
    purge_logs() - it will iterate through the thread list and update
    thd->current_linfo->index_file_offset. This mutex makes sure that it
    never tries to update our linfo after we return from this stack frame.
*/
mysql_mutex_lock(&LOCK_thread_count);
thd->current_linfo = 0;
mysql_mutex_unlock(&LOCK_thread_count);
if (file >= 0)
mysql_file_close(file, MYF(MY_WME));
thd->variables.max_allowed_packet= old_max_allowed_packet;
my_message(my_errno, error_text, MYF(0));
DBUG_VOID_RETURN;
}
/**
Execute a START SLAVE statement.
@param thd Pointer to THD object for the client thread executing the
statement.
@param mi Pointer to Master_info object for the slave's IO thread.
@param net_report If true, saves the exit status into thd->stmt_da.
@retval 0 success
@retval 1 error
*/
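/*
  Illustrative usage (not from the original source; the file names and
  positions are made-up examples).  These are the statements that end up in
  start_slave(); the UNTIL clauses map to thd->lex->mi.pos and
  thd->lex->mi.relay_log_pos handled further down.

    START SLAVE;
    START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=4;
    START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=4;
*/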
int start_slave(THD* thd , Master_info* mi, bool net_report)
{
int slave_errno= 0;
int thread_mask;
DBUG_ENTER("start_slave");
if (check_access(thd, SUPER_ACL, any_db, NULL, NULL, 0, 0))
DBUG_RETURN(1);
lock_slave_threads(mi); // this allows us to cleanly read slave_running
// Get a mask of _stopped_ threads
init_thread_mask(&thread_mask,mi,1 /* inverse */);
/*
Below we will start all stopped threads. But if the user wants to
    start only one thread, act as if the other thread were running (as we
    don't want to touch the other thread), so set the bit to 0 for the
    other thread.
*/
if (thd->lex->slave_thd_opt)
thread_mask&= thd->lex->slave_thd_opt;
if (thread_mask) //some threads are stopped, start them
{
if (init_master_info(mi,master_info_file,relay_log_info_file, 0,
thread_mask))
slave_errno=ER_MASTER_INFO;
else if (server_id_supplied && *mi->host)
{
/*
        If we are starting the SQL thread, we will handle the UNTIL options.
        If not, and they are specified, we will ignore them and warn the user
        about this fact.
*/
if (thread_mask & SLAVE_SQL)
{
mysql_mutex_lock(&mi->rli.data_lock);
if (thd->lex->mi.pos)
{
if (thd->lex->mi.relay_log_pos)
slave_errno=ER_BAD_SLAVE_UNTIL_COND;
mi->rli.until_condition= Relay_log_info::UNTIL_MASTER_POS;
mi->rli.until_log_pos= thd->lex->mi.pos;
/*
We don't check thd->lex->mi.log_file_name for NULL here
since it is checked in sql_yacc.yy
*/
strmake(mi->rli.until_log_name, thd->lex->mi.log_file_name,
sizeof(mi->rli.until_log_name)-1);
}
else if (thd->lex->mi.relay_log_pos)
{
if (thd->lex->mi.pos)
slave_errno=ER_BAD_SLAVE_UNTIL_COND;
mi->rli.until_condition= Relay_log_info::UNTIL_RELAY_POS;
mi->rli.until_log_pos= thd->lex->mi.relay_log_pos;
strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name,
sizeof(mi->rli.until_log_name)-1);
}
else
mi->rli.clear_until_condition();
if (mi->rli.until_condition != Relay_log_info::UNTIL_NONE)
{
/* Preparing members for effective until condition checking */
const char *p= fn_ext(mi->rli.until_log_name);
char *p_end;
if (*p)
{
//p points to '.'
mi->rli.until_log_name_extension= strtoul(++p,&p_end, 10);
/*
            p_end points to the first invalid character. If it equals p,
            no digits were found: error. If it points to '\0', the
            conversion went ok.
*/
if (p_end==p || *p_end)
slave_errno=ER_BAD_SLAVE_UNTIL_COND;
}
else
slave_errno=ER_BAD_SLAVE_UNTIL_COND;
/* mark the cached result of the UNTIL comparison as "undefined" */
mi->rli.until_log_names_cmp_result=
Relay_log_info::UNTIL_LOG_NAMES_CMP_UNKNOWN;
        /* Issue a warning if the server was started without --skip-slave-start */
if (!opt_skip_slave_start)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_MISSING_SKIP_SLAVE,
ER(ER_MISSING_SKIP_SLAVE));
}
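      /*
        A minimal standalone sketch (not part of the server) of the
        extension check above, assuming a made-up log name
        "master-bin.000042":

          const char *name= "master-bin.000042";
          const char *p= strrchr(name, '.');            // what fn_ext() locates
          char *p_end;
          ulong ext= strtoul(p + 1, &p_end, 10);        // ext == 42
          bool ok= (p_end != p + 1 && *p_end == '\0');  // digits found, fully consumed
      */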
mysql_mutex_unlock(&mi->rli.data_lock);
}
else if (thd->lex->mi.pos || thd->lex->mi.relay_log_pos)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNTIL_COND_IGNORED,
ER(ER_UNTIL_COND_IGNORED));
if (!slave_errno)
slave_errno = start_slave_threads(0 /*no mutex */,
1 /* wait for start */,
mi,
master_info_file,relay_log_info_file,
thread_mask);
}
else
slave_errno = ER_BAD_SLAVE;
}
else
{
/* no error if all threads are already started, only a warning */
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING,
ER(ER_SLAVE_WAS_RUNNING));
}
unlock_slave_threads(mi);
if (slave_errno)
{
if (net_report)
my_message(slave_errno, ER(slave_errno), MYF(0));
DBUG_RETURN(1);
}
else if (net_report)
my_ok(thd);
DBUG_RETURN(0);
}
/**
Execute a STOP SLAVE statement.
@param thd Pointer to THD object for the client thread executing the
statement.
@param mi Pointer to Master_info object for the slave's IO thread.
@param net_report If true, saves the exit status into thd->stmt_da.
@retval 0 success
@retval 1 error
*/
int stop_slave(THD* thd, Master_info* mi, bool net_report )
{
DBUG_ENTER("stop_slave");
int slave_errno;
if (!thd)
thd = current_thd;
if (check_access(thd, SUPER_ACL, any_db, NULL, NULL, 0, 0))
DBUG_RETURN(1);
thd_proc_info(thd, "Killing slave");
int thread_mask;
lock_slave_threads(mi);
// Get a mask of _running_ threads
init_thread_mask(&thread_mask,mi,0 /* not inverse*/);
/*
Below we will stop all running threads.
    But if the user wants to stop only one thread, act as if the other thread
    were stopped (as we don't want to touch the other thread), so set the
    bit to 0 for the other thread.
*/
if (thd->lex->slave_thd_opt)
thread_mask &= thd->lex->slave_thd_opt;
if (thread_mask)
{
slave_errno= terminate_slave_threads(mi,thread_mask,
1 /*skip lock */);
}
else
{
//no error if both threads are already stopped, only a warning
slave_errno= 0;
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_NOT_RUNNING,
ER(ER_SLAVE_WAS_NOT_RUNNING));
}
unlock_slave_threads(mi);
thd_proc_info(thd, 0);
if (slave_errno)
{
if (net_report)
my_message(slave_errno, ER(slave_errno), MYF(0));
DBUG_RETURN(1);
}
else if (net_report)
my_ok(thd);
DBUG_RETURN(0);
}
/**
Execute a RESET SLAVE statement.
@param thd Pointer to THD object of the client thread executing the
statement.
@param mi Pointer to Master_info object for the slave.
@retval 0 success
@retval 1 error
*/
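/*
  Illustrative usage (not from the original source).  RESET SLAVE is refused
  below while any slave thread is running, so a typical sequence is:

    STOP SLAVE;
    RESET SLAVE;
*/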
int reset_slave(THD *thd, Master_info* mi)
{
MY_STAT stat_area;
char fname[FN_REFLEN];
int thread_mask= 0, error= 0;
uint sql_errno=ER_UNKNOWN_ERROR;
  const char* errmsg= "Unknown error occurred while resetting slave";
DBUG_ENTER("reset_slave");
lock_slave_threads(mi);
init_thread_mask(&thread_mask,mi,0 /* not inverse */);
if (thread_mask) // We refuse if any slave thread is running
{
sql_errno= ER_SLAVE_MUST_STOP;
error=1;
goto err;
}
ha_reset_slave(thd);
// delete relay logs, clear relay log coordinates
if ((error= purge_relay_logs(&mi->rli, thd,
1 /* just reset */,
&errmsg)))
{
sql_errno= ER_RELAY_LOG_FAIL;
goto err;
}
/* Clear master's log coordinates and associated information */
mi->clear_in_memory_info(thd->lex->reset_slave_info.all);
/*
Reset errors (the idea is that we forget about the
old master).
*/
mi->clear_error();
mi->rli.clear_error();
mi->rli.clear_until_condition();
// close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0
end_master_info(mi);
// and delete these two files
fn_format(fname, master_info_file, mysql_data_home, "", 4+32);
if (mysql_file_stat(key_file_master_info, fname, &stat_area, MYF(0)) &&
mysql_file_delete(key_file_master_info, fname, MYF(MY_WME)))
{
error=1;
goto err;
}
// delete relay_log_info_file
fn_format(fname, relay_log_info_file, mysql_data_home, "", 4+32);
if (mysql_file_stat(key_file_relay_log_info, fname, &stat_area, MYF(0)) &&
mysql_file_delete(key_file_relay_log_info, fname, MYF(MY_WME)))
{
error=1;
goto err;
}
RUN_HOOK(binlog_relay_io, after_reset_slave, (thd, mi));
err:
unlock_slave_threads(mi);
if (error)
my_error(sql_errno, MYF(0), errmsg);
DBUG_RETURN(error);
}
/*
Kill all Binlog_dump threads which previously talked to the same slave
("same" means with the same server id). Indeed, if the slave stops, if the
Binlog_dump thread is waiting (mysql_cond_wait) for binlog update, then it
will keep existing until a query is written to the binlog. If the master is
idle, then this could last long, and if the slave reconnects, we could have 2
Binlog_dump threads in SHOW PROCESSLIST, until a query is written to the
binlog. To avoid this, when the slave reconnects and sends COM_BINLOG_DUMP,
the master kills any existing thread with the slave's server id (if this id is
not zero; it will be true for real slaves, but false for mysqlbinlog when it
sends COM_BINLOG_DUMP to get a remote binlog dump).
SYNOPSIS
kill_zombie_dump_threads()
slave_server_id the slave's server id
*/
void kill_zombie_dump_threads(uint32 slave_server_id)
{
mysql_mutex_lock(&LOCK_thread_count);
I_List_iterator<THD> it(threads);
THD *tmp;
while ((tmp=it++))
{
if (tmp->command == COM_BINLOG_DUMP &&
tmp->server_id == slave_server_id)
{
mysql_mutex_lock(&tmp->LOCK_thd_data); // Lock from delete
break;
}
}
mysql_mutex_unlock(&LOCK_thread_count);
if (tmp)
{
/*
Here we do not call kill_one_thread() as
it will be slow because it will iterate through the list
      again. We just kill the thread ourselves.
*/
tmp->awake(THD::KILL_QUERY);
mysql_mutex_unlock(&tmp->LOCK_thd_data);
}
}
/**
Execute a CHANGE MASTER statement.
@param thd Pointer to THD object for the client thread executing the
statement.
@param mi Pointer to Master_info object belonging to the slave's IO
thread.
@retval FALSE success
@retval TRUE error
*/
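/*
  Illustrative usage (not from the original source; host, file name and
  position are made-up examples).  Options that are not mentioned keep their
  previous values; note the special case handled below where giving a host or
  port without a log file/position resets the coordinates to the start of the
  first binlog.

    CHANGE MASTER TO
      MASTER_HOST='master.example.com',
      MASTER_PORT=3306,
      MASTER_LOG_FILE='master-bin.000010',
      MASTER_LOG_POS=4;
*/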
bool change_master(THD* thd, Master_info* mi)
{
int thread_mask;
const char* errmsg= 0;
bool need_relay_log_purge= 1;
bool ret= FALSE;
char saved_host[HOSTNAME_LENGTH + 1];
uint saved_port;
char saved_log_name[FN_REFLEN];
my_off_t saved_log_pos;
DBUG_ENTER("change_master");
lock_slave_threads(mi);
init_thread_mask(&thread_mask,mi,0 /*not inverse*/);
LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
if (thread_mask) // We refuse if any slave thread is running
{
my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0));
ret= TRUE;
goto err;
}
thd_proc_info(thd, "Changing master");
/*
    We need to check for an empty master_host. Otherwise CHANGE MASTER
    succeeds, a master.info file is created containing an empty master_host
    string, and when START SLAVE is issued an error is thrown stating that
    the server is not configured as a slave.
    (See BUG#28796).
*/
if(lex_mi->host && !*lex_mi->host)
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "MASTER_HOST");
unlock_slave_threads(mi);
DBUG_RETURN(TRUE);
}
// TODO: see if needs re-write
if (init_master_info(mi, master_info_file, relay_log_info_file, 0,
thread_mask))
{
my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0));
ret= TRUE;
goto err;
}
/*
Data lock not needed since we have already stopped the running threads,
    and we hold the run locks, which will keep all threads that
could possibly modify the data structures from running
*/
/*
Before processing the command, save the previous state.
*/
strmake(saved_host, mi->host, HOSTNAME_LENGTH);
saved_port= mi->port;
strmake(saved_log_name, mi->master_log_name, FN_REFLEN - 1);
saved_log_pos= mi->master_log_pos;
/*
If the user specified host or port without binlog or position,
reset binlog's name to FIRST and position to 4.
*/
if ((lex_mi->host || lex_mi->port) && !lex_mi->log_file_name && !lex_mi->pos)
{
mi->master_log_name[0] = 0;
mi->master_log_pos= BIN_LOG_HEADER_SIZE;
}
if (lex_mi->log_file_name)
strmake(mi->master_log_name, lex_mi->log_file_name,
sizeof(mi->master_log_name)-1);
if (lex_mi->pos)
{
mi->master_log_pos= lex_mi->pos;
}
DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
if (lex_mi->host)
strmake(mi->host, lex_mi->host, sizeof(mi->host)-1);
if (lex_mi->user)
strmake(mi->user, lex_mi->user, sizeof(mi->user)-1);
if (lex_mi->password)
strmake(mi->password, lex_mi->password, sizeof(mi->password)-1);
if (lex_mi->port)
mi->port = lex_mi->port;
if (lex_mi->connect_retry)
mi->connect_retry = lex_mi->connect_retry;
if (lex_mi->heartbeat_opt != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
mi->heartbeat_period = lex_mi->heartbeat_period;
else
mi->heartbeat_period= (float) min(SLAVE_MAX_HEARTBEAT_PERIOD,
(slave_net_timeout/2.0));
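  /*
    A worked example of the default above (the timeout value is an assumed
    example): with slave_net_timeout= 3600, the period becomes 1800 seconds,
    unless that exceeds SLAVE_MAX_HEARTBEAT_PERIOD, in which case it is
    capped there.
  */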
mi->received_heartbeats= LL(0); // counter lives until master is CHANGEd
/*
    reset the previously configured server_id list if the current
    CHANGE MASTER mentions IGNORE_SERVER_IDS= (...)
*/
if (lex_mi->repl_ignore_server_ids_opt == LEX_MASTER_INFO::LEX_MI_ENABLE)
reset_dynamic(&mi->ignore_server_ids);
for (uint i= 0; i < lex_mi->repl_ignore_server_ids.elements; i++)
{
ulong s_id;
get_dynamic(&lex_mi->repl_ignore_server_ids, (uchar*) &s_id, i);
if (s_id == ::server_id && replicate_same_server_id)
{
my_error(ER_SLAVE_IGNORE_SERVER_IDS, MYF(0), static_cast<int>(s_id));
ret= TRUE;
goto err;
}
else
{
if (bsearch((const ulong *) &s_id,
mi->ignore_server_ids.buffer,
mi->ignore_server_ids.elements, sizeof(ulong),
(int (*) (const void*, const void*))
change_master_server_id_cmp) == NULL)
insert_dynamic(&mi->ignore_server_ids, (uchar*) &s_id);
}
}
sort_dynamic(&mi->ignore_server_ids, (qsort_cmp) change_master_server_id_cmp);
if (lex_mi->ssl != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::LEX_MI_ENABLE);
if (lex_mi->ssl_verify_server_cert != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
mi->ssl_verify_server_cert=
(lex_mi->ssl_verify_server_cert == LEX_MASTER_INFO::LEX_MI_ENABLE);
if (lex_mi->ssl_ca)
strmake(mi->ssl_ca, lex_mi->ssl_ca, sizeof(mi->ssl_ca)-1);
if (lex_mi->ssl_capath)
strmake(mi->ssl_capath, lex_mi->ssl_capath, sizeof(mi->ssl_capath)-1);
if (lex_mi->ssl_cert)
strmake(mi->ssl_cert, lex_mi->ssl_cert, sizeof(mi->ssl_cert)-1);
if (lex_mi->ssl_cipher)
strmake(mi->ssl_cipher, lex_mi->ssl_cipher, sizeof(mi->ssl_cipher)-1);
if (lex_mi->ssl_key)
strmake(mi->ssl_key, lex_mi->ssl_key, sizeof(mi->ssl_key)-1);
#ifndef HAVE_OPENSSL
if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath ||
lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key ||
lex_mi->ssl_verify_server_cert )
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS));
#endif
if (lex_mi->relay_log_name)
{
need_relay_log_purge= 0;
char relay_log_name[FN_REFLEN];
mi->rli.relay_log.make_log_name(relay_log_name, lex_mi->relay_log_name);
strmake(mi->rli.group_relay_log_name, relay_log_name,
sizeof(mi->rli.group_relay_log_name)-1);
strmake(mi->rli.event_relay_log_name, relay_log_name,
sizeof(mi->rli.event_relay_log_name)-1);
}
if (lex_mi->relay_log_pos)
{
need_relay_log_purge= 0;
mi->rli.group_relay_log_pos= mi->rli.event_relay_log_pos= lex_mi->relay_log_pos;
}
/*
    If the user specified neither host nor port nor any log name nor any log
    pos, i.e. only user/password/master_connect_retry, they probably want
    replication to resume from where it left off, i.e. from the
coordinates of the **SQL** thread (imagine the case where the I/O is ahead
of the SQL; restarting from the coordinates of the I/O would lose some
events which is probably unwanted when you are just doing minor changes
like changing master_connect_retry).
A side-effect is that if only the I/O thread was started, this thread may
restart from ''/4 after the CHANGE MASTER. That's a minor problem (it is a
much more unlikely situation than the one we are fixing here).
Note: coordinates of the SQL thread must be read here, before the
'if (need_relay_log_purge)' block which resets them.
*/
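  /*
    Illustrative example (not from the original source) of the case the
    comment above describes: a statement that touches none of the
    coordinates, e.g.

      CHANGE MASTER TO MASTER_CONNECT_RETRY=20;

    resumes from the SQL thread's coordinates instead of restarting
    from ''/4.
  */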
if (!lex_mi->host && !lex_mi->port &&
!lex_mi->log_file_name && !lex_mi->pos &&
need_relay_log_purge)
{
/*
Sometimes mi->rli.master_log_pos == 0 (it happens when the SQL thread is
not initialized), so we use a max().
What happens to mi->rli.master_log_pos during the initialization stages
of replication is not 100% clear, so we guard against problems using
max().
*/
mi->master_log_pos = max(BIN_LOG_HEADER_SIZE,
mi->rli.group_master_log_pos);
strmake(mi->master_log_name, mi->rli.group_master_log_name,
sizeof(mi->master_log_name)-1);
}
/*
Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never
a slave before).
*/
if (flush_master_info(mi, FALSE, FALSE))
{
my_error(ER_RELAY_LOG_INIT, MYF(0), "Failed to flush master info file");
ret= TRUE;
goto err;
}
if (need_relay_log_purge)
{
relay_log_purge= 1;
thd_proc_info(thd, "Purging old relay logs");
if (purge_relay_logs(&mi->rli, thd,
0 /* not only reset, but also reinit */,
&errmsg))
{
my_error(ER_RELAY_LOG_FAIL, MYF(0), errmsg);
ret= TRUE;
goto err;
}
}
else
{
const char* msg;
relay_log_purge= 0;
/* Relay log is already initialized */
if (init_relay_log_pos(&mi->rli,
mi->rli.group_relay_log_name,
mi->rli.group_relay_log_pos,
0 /*no data lock*/,
&msg, 0))
{
my_error(ER_RELAY_LOG_INIT, MYF(0), msg);
ret= TRUE;
goto err;
}
}
/*
Coordinates in rli were spoilt by the 'if (need_relay_log_purge)' block,
so restore them to good values. If we left them at ''/0, that would mostly
work; but it would fail in the case of two successive CHANGE MASTER
statements (without a START SLAVE in between): the first one would set the
coords in mi to the good values of those in rli, then set those in rli to ''/0; the
second CHANGE MASTER would set the coords in mi to those of rli, i.e. to
''/0: we have lost all copies of the original good coordinates.
That's why we always save good coords in rli.
*/
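/*
  A sketch of the scenario this guards against (the statements below are
  illustrative, with made-up host and file names):

    CHANGE MASTER TO MASTER_HOST='master-a',
                     MASTER_LOG_FILE='a-bin.000001', MASTER_LOG_POS=4;
    CHANGE MASTER TO MASTER_HOST='master-b';

  If the first statement left the rli coordinates at ''/0, the second one
  would copy those empty values into mi and the original position would be
  lost; keeping good coordinates in rli avoids that.
*/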
mi->rli.group_master_log_pos= mi->master_log_pos;
DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
strmake(mi->rli.group_master_log_name,mi->master_log_name,
sizeof(mi->rli.group_master_log_name)-1);
if (!mi->rli.group_master_log_name[0]) // uninitialized case
mi->rli.group_master_log_pos=0;
mysql_mutex_lock(&mi->rli.data_lock);
mi->rli.abort_pos_wait++; /* for MASTER_POS_WAIT() to abort */
/* Clear the errors, for a clean start */
mi->rli.clear_error();
mi->rli.clear_until_condition();
sql_print_information("'CHANGE MASTER TO executed'. "
"Previous state master_host='%s', master_port='%u', master_log_file='%s', "
"master_log_pos='%ld'. "
"New state master_host='%s', master_port='%u', master_log_file='%s', "
"master_log_pos='%ld'.", saved_host, saved_port, saved_log_name,
(ulong) saved_log_pos, mi->host, mi->port, mi->master_log_name,
(ulong) mi->master_log_pos);
/*
If we don't write new coordinates to disk now, then old will remain in
relay-log.info until START SLAVE is issued; but if mysqld is shutdown
before START SLAVE, then old will remain in relay-log.info, and will be the
in-memory value at restart (thus causing errors, as the old relay log does
not exist anymore).
*/
flush_relay_log_info(&mi->rli);
mysql_cond_broadcast(&mi->data_cond);
mysql_mutex_unlock(&mi->rli.data_lock);
err:
unlock_slave_threads(mi);
thd_proc_info(thd, 0);
if (ret == FALSE)
my_ok(thd);
DBUG_RETURN(ret);
}
/**
Execute a RESET MASTER statement.
@param thd Pointer to THD object of the client thread executing the
statement.
@retval 0 success
@retval 1 error
*/
int reset_master(THD* thd)
{
if (!mysql_bin_log.is_open())
{
my_message(ER_FLUSH_MASTER_BINLOG_CLOSED,
ER(ER_FLUSH_MASTER_BINLOG_CLOSED), MYF(ME_BELL+ME_WAITTANG));
return 1;
}
if (mysql_bin_log.reset_logs(thd))
return 1;
RUN_HOOK(binlog_transmit, after_reset_master, (thd, 0 /* flags */));
return 0;
}
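/*
  Illustrative usage (a sketch of the client-side statement, not code in this
  translation unit):

    RESET MASTER;

  This deletes all binary log files listed in the index file, truncates the
  index file, and, via the after_reset_master hook above, gives registered
  binlog transmit observers a chance to reset their own state.
*/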
/**
Execute a SHOW BINLOG EVENTS statement.
@param thd Pointer to THD object for the client thread executing the
statement.
@retval FALSE success
@retval TRUE failure
*/
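/*
  Example statements served by this function (the log file name is
  illustrative):

    SHOW BINLOG EVENTS IN 'master-bin.000002' FROM 4 LIMIT 10;
    SHOW RELAYLOG EVENTS;

  The relay log variant reads from active_mi->rli.relay_log instead of
  mysql_bin_log, as selected below.
*/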
bool mysql_show_binlog_events(THD* thd)
{
Protocol *protocol= thd->protocol;
List<Item> field_list;
const char *errmsg = 0;
bool ret = TRUE;
IO_CACHE log;
File file = -1;
MYSQL_BIN_LOG *binary_log= NULL;
int old_max_allowed_packet= thd->variables.max_allowed_packet;
LOG_INFO linfo;
DBUG_ENTER("mysql_show_binlog_events");
Log_event::init_show_field_list(&field_list);
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
Format_description_log_event *description_event= new
Format_description_log_event(3); /* MySQL 4.0 by default */
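/*
  Binlog version 3 corresponds to the MySQL 4.0 event layout. If the log
  starts with its own Format_description_log_event, that event replaces this
  default a few lines below, so 5.0+ logs are decoded with the per-log
  description they carry.
*/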
DBUG_ASSERT(thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS ||
thd->lex->sql_command == SQLCOM_SHOW_RELAYLOG_EVENTS);
/* Select which binary log to use: the binary log or the relay log */
if ( thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS )
{
/*
Wait for handlers to insert any pending information into the binlog.
This is needed e.g. for NDB, which updates the binlog asynchronously,
so that the user sees all of its own commands in the binlog.
*/
ha_binlog_wait(thd);
binary_log= &mysql_bin_log;
}
else /* showing relay log contents */
{
if (!active_mi)
DBUG_RETURN(TRUE);
binary_log= &(active_mi->rli.relay_log);
}
if (binary_log->is_open())
{
LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
SELECT_LEX_UNIT *unit= &thd->lex->unit;
ha_rows event_count, limit_start, limit_end;
my_off_t pos = max(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly
char search_file_name[FN_REFLEN], *name;
const char *log_file_name = lex_mi->log_file_name;
mysql_mutex_t *log_lock = binary_log->get_log_lock();
Log_event* ev;
unit->set_limit(thd->lex->current_select);
limit_start= unit->offset_limit_cnt;
limit_end= unit->select_limit_cnt;
name= search_file_name;
if (log_file_name)
binary_log->make_log_name(search_file_name, log_file_name);
else
name=0; // Find first log
linfo.index_file_offset = 0;
if (binary_log->find_log_pos(&linfo, name, 1))
{
errmsg = "Could not find target log";
goto err;
}
mysql_mutex_lock(&LOCK_thread_count);
thd->current_linfo = &linfo;
mysql_mutex_unlock(&LOCK_thread_count);
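/*
  Registering linfo in thd->current_linfo under LOCK_thread_count lets other
  threads (for example a concurrent PURGE BINARY LOGS) see that this log file
  is in use and avoid purging it while we read from it.
*/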
if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0)
goto err;
/*
to account for the binlog event header size
*/
thd->variables.max_allowed_packet += MAX_LOG_EVENT_HEADER;
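/*
  The bump above is session-local; the previous value was saved in
  old_max_allowed_packet earlier and is intended to be restored before this
  function returns, so the larger limit only applies while events are being
  read for display.
*/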
mysql_mutex_lock(log_lock);
/*
open_binlog() has already seeked to position 4 (just past the binlog magic header).
Read the first event in case it's a Format_description_log_event, to
know the format. If there's no such event, we are 3.23 or 4.x. This
code, like before, can't read 3.23 binlogs.
This code will fail on a mixed relay log (one which has Format_desc then
Rotate then Format_desc).
*/
ev= Log_event::read_log_event(&log, (mysql_mutex_t*)0, description_event);
if (ev)
{
if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT)
{
delete description_event;
description_event= (Format_description_log_event*) ev;
}
else
delete ev;
}
my_b_seek(&log, pos);
if (!description_event->is_valid())
{
errmsg="Invalid Format_description event; could be out of memory";
goto err;
}
for (event_count = 0;
(ev = Log_event::read_log_event(&log, (mysql_mutex_t*) 0,
description_event)); )
{
if (event_count >= limit_start &&
ev->net_send(protocol, linfo.log_file_name, pos))
{
errmsg = "Net error";
delete ev;
mysql_mutex_unlock(log_lock);
goto err;
}
pos = my_b_tell(&log);
delete ev;
if (++event_count >= limit_end)
break;
}
if (event_count < limit_end && log.error)
{
errmsg = "Wrong offset or I/O error";
mysql_mutex_unlock(log_lock);
goto err;
}
mysql_mutex_unlock(log_lock);
}
// Check that linfo is still in function scope.
DEBUG_SYNC(thd, "after_show_binlog_events");
ret= FALSE;
err:
delete description_event;
if (file >= 0)
{
end_io_cache(&log);
mysql_file_close(file, MYF(MY_WME));
}
if (errmsg)
my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0),
"SHOW BINLOG EVENTS", errmsg);
else
my_eof(thd);
mysql_mutex_lock(&LOCK_thread_count);
thd->current_linfo = 0;
mysql_mutex_unlock(&LOCK_thread_count);
thd->variables.max_allowed_packet= old_max_allowed_packet;
DBUG_RETURN(ret);
}
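/*
  Illustrative note, not part of the original code: the limit_start/limit_end
  bounds used in the loop above come from the LIMIT clause of the statement,
  so a request such as

    SHOW BINLOG EVENTS IN 'master-bin.000001' FROM 4 LIMIT 2, 3;

  starts reading at offset 4, skips the first two events and sends at most
  three, assuming 'master-bin.000001' names an existing binary log.
*/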
/**
Execute a SHOW MASTER STATUS statement.
@param thd Pointer to THD object for the client thread executing the
statement.
@retval FALSE success
@retval TRUE failure
*/
bool show_binlog_info(THD* thd)
{
Protocol *protocol= thd->protocol;
DBUG_ENTER("show_binlog_info");
List<Item> field_list;
field_list.push_back(new Item_empty_string("File", FN_REFLEN));
field_list.push_back(new Item_return_int("Position",20,
MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Binlog_Do_DB",255));
field_list.push_back(new Item_empty_string("Binlog_Ignore_DB",255));
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
protocol->prepare_for_resend();
if (mysql_bin_log.is_open())
{
LOG_INFO li;
mysql_bin_log.get_current_log(&li);
int dir_len = dirname_length(li.log_file_name);
protocol->store(li.log_file_name + dir_len, &my_charset_bin);
protocol->store((ulonglong) li.pos);
protocol->store(binlog_filter->get_do_db());
protocol->store(binlog_filter->get_ignore_db());
if (protocol->write())
DBUG_RETURN(TRUE);
}
my_eof(thd);
DBUG_RETURN(FALSE);
}
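/*
  Illustrative example, with a hypothetical log name and position: when the
  binary log is open, the function above returns a single row of the form

    +-------------------+----------+--------------+------------------+
    | File              | Position | Binlog_Do_DB | Binlog_Ignore_DB |
    +-------------------+----------+--------------+------------------+
    | master-bin.000003 |      245 | ...          | ...              |
    +-------------------+----------+--------------+------------------+

  where the last two columns reflect the binlog_filter do/ignore lists and
  File is reported without its directory part (see dirname_length() above).
*/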
/**
Execute a SHOW BINARY LOGS statement.
@param thd Pointer to THD object for the client thread executing the
statement.
@retval FALSE success
@retval TRUE failure
*/
bool show_binlogs(THD* thd)
{
IO_CACHE *index_file;
LOG_INFO cur;
File file;
char fname[FN_REFLEN];
List<Item> field_list;
uint length;
int cur_dir_len;
Protocol *protocol= thd->protocol;
DBUG_ENTER("show_binlogs");
if (!mysql_bin_log.is_open())
{
my_error(ER_NO_BINARY_LOGGING, MYF(0));
DBUG_RETURN(TRUE);
}
field_list.push_back(new Item_empty_string("Log_name", 255));
field_list.push_back(new Item_return_int("File_size", 20,
MYSQL_TYPE_LONGLONG));
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
mysql_mutex_lock(mysql_bin_log.get_log_lock());
mysql_bin_log.lock_index();
index_file=mysql_bin_log.get_index_file();
mysql_bin_log.raw_get_current_log(&cur); // don't take mutex
mysql_mutex_unlock(mysql_bin_log.get_log_lock()); // lockdep, OK
cur_dir_len= dirname_length(cur.log_file_name);
reinit_io_cache(index_file, READ_CACHE, (my_off_t) 0, 0, 0);
/* The file ends with EOF or an empty line */
while ((length=my_b_gets(index_file, fname, sizeof(fname))) > 1)
{
int dir_len;
ulonglong file_length= 0; // Length if open fails
fname[--length] = '\0'; // remove the newline
protocol->prepare_for_resend();
dir_len= dirname_length(fname);
length-= dir_len;
protocol->store(fname + dir_len, length, &my_charset_bin);
if (!(strncmp(fname+dir_len, cur.log_file_name+cur_dir_len, length)))
file_length= cur.pos; /* The active log, use the active position */
else
{
/* this is an old log, open it and find the size */
if ((file= mysql_file_open(key_file_binlog,
fname, O_RDONLY | O_SHARE | O_BINARY,
MYF(0))) >= 0)
{
file_length= (ulonglong) mysql_file_seek(file, 0L, MY_SEEK_END, MYF(0));
mysql_file_close(file, MYF(0));
}
}
protocol->store(file_length);
if (protocol->write())
goto err;
}
mysql_bin_log.unlock_index();
my_eof(thd);
DBUG_RETURN(FALSE);
err:
mysql_bin_log.unlock_index();
DBUG_RETURN(TRUE);
}
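/*
  Illustrative example, with hypothetical names and sizes: the loop above
  produces one row per entry in the binlog index file, e.g.

    +-------------------+-----------+
    | Log_name          | File_size |
    +-------------------+-----------+
    | master-bin.000001 |       475 |
    | master-bin.000002 |       245 |
    +-------------------+-----------+

  The size of the active log comes from the cached position (cur.pos); older
  logs are opened briefly to seek to their end, and 0 is reported for any
  file that cannot be opened.
*/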
/**
Load data's io cache specific hook, executed
before a chunk of data is read into the cache's buffer.
The function instantiates replication events and writes
them into the binlog as LOAD DATA processing proceeds.
@param file pointer to io-cache
@retval 0 success
@retval 1 failure
*/
int log_loaded_block(IO_CACHE* file)
{
DBUG_ENTER("log_loaded_block");
LOAD_FILE_INFO *lf_info;
uint block_len;
/* buffer contains position where we started last read */
uchar* buffer= (uchar*) my_b_get_buffer_start(file);
uint max_event_size= current_thd->variables.max_allowed_packet;
lf_info= (LOAD_FILE_INFO*) file->arg;
if (lf_info->thd->is_current_stmt_binlog_format_row())
DBUG_RETURN(0);
if (lf_info->last_pos_in_file != HA_POS_ERROR &&
lf_info->last_pos_in_file >= my_b_get_pos_in_file(file))
DBUG_RETURN(0);
for (block_len= (uint) (my_b_get_bytes_in_buffer(file)); block_len > 0;
buffer += min(block_len, max_event_size),
block_len -= min(block_len, max_event_size))
{
lf_info->last_pos_in_file= my_b_get_pos_in_file(file);
if (lf_info->wrote_create_file)
{
Append_block_log_event a(lf_info->thd, lf_info->thd->db, buffer,
min(block_len, max_event_size),
lf_info->log_delayed);
if (mysql_bin_log.write(&a))
DBUG_RETURN(1);
}
else
{
Begin_load_query_log_event b(lf_info->thd, lf_info->thd->db,
buffer,
min(block_len, max_event_size),
lf_info->log_delayed);
if (mysql_bin_log.write(&b))
DBUG_RETURN(1);
lf_info->wrote_create_file= 1;
}
}
DBUG_RETURN(0);
}
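/*
  Minimal sketch, not copied from sql_load.cc, of how a LOAD DATA code path
  could attach this hook; 'cache' (the IO_CACHE reading the input file) and
  'lf_info' (a caller-owned LOAD_FILE_INFO) are illustrative names:

    lf_info.thd= thd;
    lf_info.wrote_create_file= 0;
    lf_info.last_pos_in_file= HA_POS_ERROR;
    lf_info.log_delayed= transactional_table;

    cache.pre_read= cache.pre_close= (IO_CACHE_CALLBACK) log_loaded_block;
    cache.arg= (void*) &lf_info;

  Every buffer refill of 'cache' then passes through log_loaded_block(),
  which appends Begin_load_query/Append_block events to the binary log in
  chunks of at most max_allowed_packet bytes.
*/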
#endif /* HAVE_REPLICATION */