Fixed a LOT of compiler warnings

Added missing DBUG_RETURN statements (in mysqldump.c)
Added missing enums
Fixed a lot of wrong DBUG_PRINT() statements, some of which could cause crashes
Removed usage of %lld and %p in printf strings, as these are not portable and produce different results on different systems.
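
A format/argument mismatch in a trace call can read garbage off the stack or crash, which is why the wrong DBUG_PRINT() statements mattered. The portable pattern used throughout the diffs below is to cast the value to long/ulong and print it with 0x%lx / %lu. A minimal standalone sketch of the problem and the fix, using plain printf() instead of the DBUG_PRINT() macro (names are illustrative):

  #include <stdio.h>

  int main(void)
  {
    void      *ptr= &ptr;
    long long  big= 1LL << 40;

    /*
      Non-portable: %p is formatted differently across platforms
      ("0x1234" vs "1234"), and %lld is not understood by every
      pre-C99 libc, so the same statement prints different results
      (or garbage) on different systems.
    */
    printf("ptr: %p big: %lld\n", ptr, big);

    /*
      Portable pattern used in this commit: cast to long/ulong and
      print with 0x%lx / %ld; debug output may truncate high bits
      on 32-bit platforms, which is acceptable for trace messages.
    */
    printf("ptr: 0x%lx big: %ld\n", (unsigned long) ptr, (long) big);
    return 0;
  }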


client/mysqldump.c:
  Fixed some compiler warnings
  Added some missing DBUG_RETURN
  Removed copying of the 'cluster' database
client/mysqlslap.c:
  Fixed compiler warnings
client/mysqltest.c:
  After merge fix
extra/yassl/taocrypt/include/algebra.hpp:
  Removed compiler warning
mysql-test/include/im_check_env.inc:
  Fixed race condition (mysqld1 could report 'starting' or 'online')
mysql-test/mysql-test-run.pl:
  After merge fixes
  Added missing directory to LD_LIBRARY_PATH
mysql-test/r/ctype_cp1250_ch.result:
  After merge fix
mysql-test/r/im_cmd_line.result:
  Fixed race condition
mysql-test/r/im_daemon_life_cycle.result:
  Fixed race condition
mysql-test/r/im_instance_conf.result:
  Fixed race condition
mysql-test/r/im_life_cycle.result:
  Fixed race condition
mysql-test/r/im_utils.result:
  Fixed race condition
mysql-test/r/log_tables.result:
  Fixed wrong result
mysql-test/t/disabled.def:
  Disabled ndb_restore_partition, as ndb_restore_compat caused it to fail because of table 'cluster/def/schema' which is stored in ndb_backup50
mysys/my_compress.c:
  Removed compiler warnings
mysys/my_getopt.c:
  Ensure we always have at least one space between option name and value
plugin/fulltext/plugin_example.c:
  Removed compiler warnings
server-tools/instance-manager/mysql_connection.cc:
  After merge fix
sql/event_data_objects.cc:
  Fixed compiler warnings
  Fixed platform compatibility issues (%lld is not portable)
sql/event_data_objects.h:
  Fixed compiler warnings
sql/event_db_repository.cc:
  Fixed compiler warnings
sql/event_queue.cc:
  Fixed compiler warnings
sql/event_scheduler.cc:
  Fixed compiler warnings
sql/events.cc:
  Fixed compiler warnings
sql/field.cc:
  Fixed compiler warnings
sql/ha_ndbcluster.cc:
  Fixed compiler warnings
sql/ha_ndbcluster_binlog.cc:
  Fixed compiler warnings
sql/ha_partition.cc:
  Fixed compiler warnings
sql/handler.cc:
  Fixed compiler warnings
sql/item_cmpfunc.cc:
  Fixed DBUG_PRINT style
sql/item_func.cc:
  Fixed compiler warnings
sql/log.cc:
  Fixed compiler warnings
sql/log_event.cc:
  Fixed compiler warnings
sql/mysqld.cc:
  Fixed compiler warnings
sql/opt_range.cc:
  Fixed compiler warnings
sql/repl_failsafe.cc:
  Indentation fixes
sql/rpl_rli.cc:
  Fixed compiler warnings
sql/rpl_tblmap.cc:
  Fixed compiler warnings
sql/set_var.cc:
  Fixed compiler warnings
sql/slave.cc:
  Fixed compiler warnings
sql/sp_head.cc:
  Fixed compiler warnings
sql/sql_base.cc:
  Fixed compiler warnings
  Fixed indentation
sql/sql_binlog.cc:
  Fixed compiler warnings
sql/sql_cache.cc:
  Fixed compiler warnings
sql/sql_class.cc:
  Fixed compiler warnings
sql/sql_handler.cc:
  Fixed compiler warnings
sql/sql_lex.cc:
  Fixed compiler warnings
sql/sql_parse.cc:
  Fixed compiler warnings
sql/sql_partition.cc:
  Fixed compiler warnings
sql/sql_prepare.cc:
  Fixed compiler warnings
sql/sql_table.cc:
  Fixed compiler warnings
sql/sql_test.cc:
  Fixed DBUG_PRINT style
sql/sql_trigger.cc:
  Fixed DBUG_PRINT style
sql/table.cc:
  Fixed compiler warnings
storage/federated/ha_federated.cc:
  Fixed compiler warnings
storage/myisam/mi_rsamepos.c:
  Fixed compiler warnings
storage/ndb/include/ndb_global.h.in:
  After merge fix
storage/ndb/include/util/NdbOut.hpp:
  Inform gcc that ndbout_c takes a printf()-style format string as argument (see the format-attribute sketch after this file list)
storage/ndb/include/util/SimpleProperties.hpp:
  After merge fixes
storage/ndb/src/kernel/blocks/backup/Backup.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
  Fixed compiler warnings
  Fixed usage of uninitialized value (Got help from Jonas with patch)
storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/lgman.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/pgman.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/restore.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/blocks/suma/Suma.cpp:
  Fixed compiler warnings
  Added missing enums to switch (see the switch sketch after this file list)
storage/ndb/src/kernel/vm/Configuration.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/vm/DLHashTable.hpp:
  Fixed compiler warnings
storage/ndb/src/kernel/vm/RWPool.hpp:
  Fixed compiler warnings
storage/ndb/src/kernel/vm/SimulatedBlock.cpp:
  Fixed compiler warnings
storage/ndb/src/kernel/vm/WOPool.hpp:
  Fixed compiler warnings
storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp:
  Fixed compiler warnings
storage/ndb/src/mgmclient/CommandInterpreter.cpp:
  Fixed compiler warnings
storage/ndb/src/mgmsrv/MgmtSrvr.cpp:
  Fixed compiler warnings
storage/ndb/src/ndbapi/DictCache.cpp:
  Fixed compiler warnings
storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp:
  Fixed compiler warnings
storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp:
  Fixed compiler warnings
storage/ndb/src/ndbapi/NdbIndexOperation.cpp:
  Fixed compiler warnings
storage/ndb/src/ndbapi/NdbIndexStat.cpp:
  Initialize a possibly uninitialized variable
storage/ndb/src/ndbapi/NdbOperationInt.cpp:
  Fixed compiler warnings
storage/ndb/src/ndbapi/NdbRecAttr.cpp:
  Added missing enums (to avoid compiler warnings)
storage/ndb/src/ndbapi/NdbScanOperation.cpp:
  Fixed compiler warnings
storage/ndb/src/ndbapi/ObjectMap.hpp:
  Fixed compiler warnings
storage/ndb/tools/desc.cpp:
  Fixed compiler warnings
storage/ndb/tools/restore/Restore.cpp:
  Fixed compiler warnings
storage/ndb/tools/restore/consumer_restore.cpp:
  Fixed compiler warnings
unittest/mytap/t/basic-t.c:
  Fixed compiler warnings
unittest/mytap/tap.c:
  Fixed compiler warnings
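
The NdbOut.hpp fix above relies on gcc's format function attribute, which lets -Wformat check ndbout_c() calls the same way it checks printf() calls. A minimal sketch of the mechanism, with a simplified stdout-backed ndbout_c() (the real declaration and output sink in NdbOut.hpp differ):

  #include <stdarg.h>
  #include <stdio.h>

  /*
    format(printf, 1, 2) tells gcc that argument 1 is a printf-style
    format string and that the checked variadic arguments start at
    position 2.
  */
  int ndbout_c(const char *fmt, ...) __attribute__((format(printf, 1, 2)));

  int ndbout_c(const char *fmt, ...)
  {
    va_list ap;
    int res;
    va_start(ap, fmt);
    res= vprintf(fmt, ap);      /* stand-in for the real NdbOut stream */
    va_end(ap);
    return res;
  }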
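
The "missing enums" fixes (e.g. Suma.cpp, NdbRecAttr.cpp) address gcc's -Wswitch warning, which fires when a switch on an enum does not handle every enumerator. A minimal sketch with illustrative names, not the actual NDB enums:

  enum event_type { TE_INSERT, TE_DELETE, TE_UPDATE, TE_STOP };

  static const char *event_name(enum event_type t)
  {
    /*
      With -Wswitch, gcc warns if an enumerator is missing from the
      switch. Listing every value, instead of hiding behind a
      "default:" label, silences the warning and catches future
      additions to the enum at compile time.
    */
    switch (t) {
    case TE_INSERT: return "insert";
    case TE_DELETE: return "delete";
    case TE_UPDATE: return "update";
    case TE_STOP:   return "stop";
    }
    return "unknown";
  }
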
unknown 2006-11-27 01:47:38 +02:00
parent b8fe9fb47f
commit 788ad30f08
97 changed files with 504 additions and 417 deletions

client/mysqldump.c

@ -30,14 +30,14 @@
** master/autocommit code by Brian Aker <brian@tangent.org>
** SSL by
** Andrei Errapart <andreie@no.spam.ee>
** TÃµnu Samuel <tonu@please.do.not.remove.this.spam.ee>
** Tõnu Samuel <tonu@please.do.not.remove.this.spam.ee>
** XML by Gary Huntress <ghuntress@mediaone.net> 10/10/01, cleaned up
** and adapted to mysqldump 05/11/01 by Jani Tolonen
** Added --single-transaction option 06/06/2002 by Peter Zaitsev
** 10 Jun 2003: SET NAMES and --no-set-names by Alexander Barkov
*/
#define DUMP_VERSION "10.11"
#define DUMP_VERSION "10.12"
#include <my_global.h>
#include <my_sys.h>
@ -540,8 +540,10 @@ static void write_header(FILE *sql_file, char *db_name)
if (opt_xml)
{
fputs("<?xml version=\"1.0\"?>\n", sql_file);
/* Schema reference. Allows use of xsi:nil for NULL values and
xsi:type to define an element's data type. */
/*
Schema reference. Allows use of xsi:nil for NULL values and
xsi:type to define an element's data type.
*/
fputs("<mysqldump ", sql_file);
fputs("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"",
sql_file);
@ -2349,7 +2351,7 @@ static void dump_table(char *table, char *db)
The "table" could be a view. If so, we don't do anything here.
*/
if (strcmp (table_type, "VIEW") == 0)
return;
DBUG_VOID_RETURN;
/* Check --no-data flag */
if (opt_no_data)
@ -2657,16 +2659,16 @@ static void dump_table(char *table, char *db)
{
if (opt_hex_blob && is_blob && length)
{
/* Define xsi:type="xs:hexBinary" for hex encoded data */
print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
field->name, "xsi:type=", "xs:hexBinary", NullS);
print_blob_as_hex(md_result_file, row[i], length);
/* Define xsi:type="xs:hexBinary" for hex encoded data */
print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
field->name, "xsi:type=", "xs:hexBinary", NullS);
print_blob_as_hex(md_result_file, row[i], length);
}
else
{
print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
field->name, NullS);
print_quoted_xml(md_result_file, row[i], length);
print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
field->name, NullS);
print_quoted_xml(md_result_file, row[i], length);
}
fputs("</field>\n", md_result_file);
}
@ -3155,10 +3157,8 @@ static int dump_all_tables_in_db(char *database)
afterdot= strmov(hash_key, database);
*afterdot++= '.';
if (!strcmp(database, NDB_REP_DB)) /* Skip cluster internal database */
return 0;
if (init_dumping(database, init_dumping_tables))
return 1;
DBUG_RETURN(1);
if (opt_xml)
print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS);
if (lock_tables)
@ -3218,7 +3218,7 @@ static int dump_all_tables_in_db(char *database)
fprintf(md_result_file,"\n--\n-- Flush Grant Tables \n--\n");
fprintf(md_result_file,"\n/*! FLUSH PRIVILEGES */;\n");
}
return 0;
DBUG_RETURN(0);
} /* dump_all_tables_in_db */

client/mysqlslap.c

@ -1031,7 +1031,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
for (x= 0; x < concur; x++)
{
int pid;
DBUG_PRINT("info", ("x %d concurrency %d", x, concurrency));
DBUG_PRINT("info", ("x: %d concurrency: %u", x, *concurrency));
pid= fork();
switch(pid)
{

client/mysqltest.c

@ -80,7 +80,7 @@ enum {
OPT_SSL_CA, OPT_SSL_CAPATH, OPT_SSL_CIPHER, OPT_PS_PROTOCOL,
OPT_SP_PROTOCOL, OPT_CURSOR_PROTOCOL, OPT_VIEW_PROTOCOL,
OPT_SSL_VERIFY_SERVER_CERT, OPT_MAX_CONNECT_RETRIES,
OPT_MARK_PROGRESS, OPT_CHARSETS_DIR, OPT_LOG_DIR, OPT_DEBUG_INFO};
OPT_MARK_PROGRESS, OPT_CHARSETS_DIR, OPT_LOG_DIR, OPT_DEBUG_INFO
};
static int record= 0, opt_sleep= -1;

extra/yassl/taocrypt/include/algebra.hpp

@ -75,7 +75,7 @@ public:
typedef Integer Element;
AbstractRing() : AbstractGroup() {m_mg.m_pRing = this;}
AbstractRing(const AbstractRing &source) {m_mg.m_pRing = this;}
AbstractRing(const AbstractRing &source) :AbstractGroup() {m_mg.m_pRing = this;}
AbstractRing& operator=(const AbstractRing &source) {return *this;}
virtual bool IsUnit(const Element &a) const =0;

mysql-test/include/im_check_env.inc

@ -22,4 +22,5 @@ SHOW VARIABLES LIKE 'server_id';
# Check that IM understands that mysqld1 is online, while mysqld2 is
# offline.
--replace_result starting XXXXX online XXXXX
SHOW INSTANCES;

mysql-test/mysql-test-run.pl

@ -1576,7 +1576,8 @@ sub environment_setup () {
if ( $opt_source_dist )
{
push(@ld_library_paths, "$glob_basedir/libmysql/.libs/",
"$glob_basedir/libmysql_r/.libs/");
"$glob_basedir/libmysql_r/.libs/",
"$glob_basedir/zlib.libs/");
}
else
{
@ -2992,10 +2993,6 @@ sub do_after_run_mysqltest($)
# Save info from this testcase run to mysqltest.log
mtr_appendfile_to_file($path_timefile, $path_mysqltest_log)
if -f $path_timefile;
# Remove the file that mysqltest writes info to
unlink($path_timefile);
}
@ -3183,6 +3180,9 @@ sub run_testcase ($) {
}
}
# Remove the file that mysqltest writes info to
unlink($path_timefile);
# ----------------------------------------------------------------------
# Stop Instance Manager if we are processing an IM-test case.
# ----------------------------------------------------------------------
@ -4094,7 +4094,6 @@ sub run_testcase_start_servers($) {
}
if ( $clusters->[0]->{'pid'} and ! $master->[1]->{'pid'} )
{
{
# Test needs cluster, start an extra mysqld connected to cluster
@ -4848,4 +4847,3 @@ HERE
mtr_exit(1);
}

mysql-test/r/ctype_cp1250_ch.result

@ -1,3 +1,4 @@
drop table if exists t1;
DROP TABLE IF EXISTS t1;
SHOW COLLATION LIKE 'cp1250_czech_cs';
Collation Charset Id Default Compiled Sortlen

mysql-test/r/im_cmd_line.result

@ -3,7 +3,7 @@ Variable_name Value
server_id 1
SHOW INSTANCES;
instance_name state
mysqld1 starting
mysqld1 XXXXX
mysqld2 offline
--> Listing users...
im_admin

mysql-test/r/im_daemon_life_cycle.result

@ -3,7 +3,7 @@ Variable_name Value
server_id 1
SHOW INSTANCES;
instance_name state
mysqld1 online
mysqld1 XXXXX
mysqld2 offline
Killing the process...
Sleeping...

mysql-test/r/im_instance_conf.result

@ -3,7 +3,7 @@ Variable_name Value
server_id 1
SHOW INSTANCES;
instance_name state
mysqld1 online
mysqld1 XXXXX
mysqld2 offline
--------------------------------------------------------------------
server_id = 1

mysql-test/r/im_life_cycle.result

@ -3,7 +3,7 @@ Variable_name Value
server_id 1
SHOW INSTANCES;
instance_name state
mysqld1 online
mysqld1 XXXXX
mysqld2 offline
--------------------------------------------------------------------

mysql-test/r/im_utils.result

@ -3,7 +3,7 @@ Variable_name Value
server_id 1
SHOW INSTANCES;
instance_name state
mysqld1 online
mysqld1 XXXXX
mysqld2 offline
SHOW INSTANCE OPTIONS mysqld1;
option_name value

mysql-test/r/log_tables.result

@ -280,6 +280,7 @@ create table general_log_new like general_log;
create table slow_log_new like slow_log;
show tables like "%log%";
Tables_in_mysql (%log%)
binlog_index
general_log
general_log_new
slow_log

mysql-test/t/disabled.def

@ -16,6 +16,9 @@ concurrent_innodb : BUG#21579 2006-08-11 mleich innodb_concurrent random
ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed
ndb_restore_partition : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone
rpl_ndb_sync : Problem with cluster/def/schema table that is in std_data/ndb_backup51; Pekka will schdule this to someone
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open
rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated

mysys/my_compress.c

@ -138,14 +138,14 @@ int packfrm(const void *data, uint len,
uint blob_len;
struct frm_blob_struct *blob;
DBUG_ENTER("packfrm");
DBUG_PRINT("enter", ("data: %x, len: %d", data, len));
DBUG_PRINT("enter", ("data: 0x%lx, len: %d", (long) data, len));
error= 1;
org_len= len;
if (my_compress((byte*)data, &org_len, &comp_len))
goto err;
DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len));
DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len));
DBUG_DUMP("compressed", (char*)data, org_len);
error= 2;
@ -165,7 +165,8 @@ int packfrm(const void *data, uint len,
*pack_len= blob_len;
error= 0;
DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len));
DBUG_PRINT("exit", ("pack_data: 0x%lx pack_len: %d",
(long) *pack_data, *pack_len));
err:
DBUG_RETURN(error);
@ -194,13 +195,13 @@ int unpackfrm(const void **unpack_data, uint *unpack_len,
byte *data;
ulong complen, orglen, ver;
DBUG_ENTER("unpackfrm");
DBUG_PRINT("enter", ("pack_data: %x", pack_data));
DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data));
complen= uint4korr((char*)&blob->head.complen);
orglen= uint4korr((char*)&blob->head.orglen);
ver= uint4korr((char*)&blob->head.ver);
DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu",
ver,complen,orglen));
DBUG_DUMP("blob->data", (char*) blob->data, complen);
@ -220,7 +221,7 @@ int unpackfrm(const void **unpack_data, uint *unpack_len,
*unpack_data= data;
*unpack_len= complen;
DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len));
DBUG_PRINT("exit", ("frmdata: 0x%lx len: %d", (long) *unpack_data, *unpack_len));
DBUG_RETURN(0);
}
#endif /* HAVE_COMPRESS */

mysys/my_getopt.c

@ -936,8 +936,8 @@ void my_print_variables(const struct my_option *options)
(*getopt_get_addr)("", 0, optp) : optp->value);
if (value)
{
printf("%s", optp->name);
length= (uint) strlen(optp->name);
printf("%s ", optp->name);
length= (uint) strlen(optp->name)+1;
for (; length < name_space; length++)
putchar(' ');
switch ((optp->var_type & GET_TYPE_MASK)) {

plugin/fulltext/plugin_example.c

@ -62,7 +62,7 @@ static long number_of_calls= 0; /* for SHOW STATUS, see below */
1 failure (cannot happen)
*/
static int simple_parser_plugin_init(void)
static int simple_parser_plugin_init(void *arg __attribute__((unused)))
{
return(0);
}
@ -81,7 +81,7 @@ static int simple_parser_plugin_init(void)
*/
static int simple_parser_plugin_deinit(void)
static int simple_parser_plugin_deinit(void *arg __attribute__((unused)))
{
return(0);
}

server-tools/instance-manager/mysql_connection.cc

@ -334,7 +334,6 @@ int Mysql_connection_thread::dispatch_command(enum enum_server_command command,
case COM_QUERY:
{
log_info("query for connection %lu : ----\n%s\n-------------------------",
log_info("query for connection %d : ----\n%s\n-------------------------",
connection_id,packet);
if (Command *command= parse_command(&instance_map, packet))
{

sql/event_data_objects.cc

@ -124,8 +124,8 @@ void
Event_parse_data::init_body(THD *thd)
{
DBUG_ENTER("Event_parse_data::init_body");
DBUG_PRINT("info", ("body=[%s] body_begin=0x%lx end=0x%lx", body_begin,
body_begin, thd->lex->ptr));
DBUG_PRINT("info", ("body: '%s' body_begin: 0x%lx end: 0x%lx", body_begin,
(long) body_begin, (long) thd->lex->ptr));
body.length= thd->lex->ptr - body_begin;
const uchar *body_end= body_begin + body.length - 1;
@ -399,8 +399,9 @@ Event_parse_data::init_starts(THD *thd)
thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp,
(my_time_t) thd->query_start());
DBUG_PRINT("info",("now =%lld", TIME_to_ulonglong_datetime(&time_tmp)));
DBUG_PRINT("info",("starts=%lld", TIME_to_ulonglong_datetime(&ltime)));
DBUG_PRINT("info",("now: %ld starts: %ld",
(long) TIME_to_ulonglong_datetime(&time_tmp),
(long) TIME_to_ulonglong_datetime(&ltime)));
if (TIME_to_ulonglong_datetime(&ltime) <
TIME_to_ulonglong_datetime(&time_tmp))
goto wrong_value;
@ -536,8 +537,9 @@ Event_parse_data::check_parse_data(THD *thd)
{
bool ret;
DBUG_ENTER("Event_parse_data::check_parse_data");
DBUG_PRINT("info", ("execute_at=0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
item_execute_at, item_expression, item_starts, item_ends));
DBUG_PRINT("info", ("execute_at: 0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
(long) item_execute_at, (long) item_expression,
(long) item_starts, (long) item_ends));
init_name(thd, identifier);
@ -564,9 +566,9 @@ Event_parse_data::init_definer(THD *thd)
int definer_host_len;
DBUG_ENTER("Event_parse_data::init_definer");
DBUG_PRINT("info",("init definer_user thd->mem_root=0x%lx "
"thd->sec_ctx->priv_user=0x%lx", thd->mem_root,
thd->security_ctx->priv_user));
DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx "
"thd->sec_ctx->priv_user: 0x%lx", (long) thd->mem_root,
(long) thd->security_ctx->priv_user));
definer_user_len= strlen(thd->security_ctx->priv_user);
definer_host_len= strlen(thd->security_ctx->priv_host);
@ -1032,8 +1034,9 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
TIME tmp;
longlong months=0, seconds=0;
DBUG_ENTER("get_next_time");
DBUG_PRINT("enter", ("start=%llu now=%llu", TIME_to_ulonglong_datetime(start),
TIME_to_ulonglong_datetime(time_now)));
DBUG_PRINT("enter", ("start: %lu now: %lu",
(long) TIME_to_ulonglong_datetime(start),
(long) TIME_to_ulonglong_datetime(time_now)));
bzero(&interval, sizeof(interval));
@ -1081,7 +1084,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
case INTERVAL_LAST:
DBUG_ASSERT(0);
}
DBUG_PRINT("info", ("seconds=%ld months=%ld", seconds, months));
DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months));
if (seconds)
{
longlong seconds_diff;
@ -1099,14 +1102,14 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
event two times for the same time
get the next exec if the modulus is not
*/
DBUG_PRINT("info", ("multiplier=%d", multiplier));
DBUG_PRINT("info", ("multiplier: %d", multiplier));
if (seconds_diff % seconds || (!seconds_diff && last_exec->year) ||
TIME_to_ulonglong_datetime(time_now) ==
TIME_to_ulonglong_datetime(last_exec))
++multiplier;
interval.second= seconds * multiplier;
DBUG_PRINT("info", ("multiplier=%u interval.second=%u", multiplier,
interval.second));
DBUG_PRINT("info", ("multiplier: %lu interval.second: %lu", (ulong) multiplier,
(ulong) interval.second));
tmp= *start;
if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval)))
*next= tmp;
@ -1158,7 +1161,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
}
done:
DBUG_PRINT("info", ("next=%llu", TIME_to_ulonglong_datetime(next)));
DBUG_PRINT("info", ("next: %lu", (long) TIME_to_ulonglong_datetime(next)));
DBUG_RETURN(ret);
}
@ -1183,17 +1186,17 @@ Event_queue_element::compute_next_execution_time()
{
TIME time_now;
int tmp;
DBUG_ENTER("Event_queue_element::compute_next_execution_time");
DBUG_PRINT("enter", ("starts=%llu ends=%llu last_executed=%llu this=0x%lx",
TIME_to_ulonglong_datetime(&starts),
TIME_to_ulonglong_datetime(&ends),
TIME_to_ulonglong_datetime(&last_executed), this));
DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx",
(long) TIME_to_ulonglong_datetime(&starts),
(long) TIME_to_ulonglong_datetime(&ends),
(long) TIME_to_ulonglong_datetime(&last_executed),
(long) this));
if (status == Event_queue_element::DISABLED)
{
DBUG_PRINT("compute_next_execution_time",
("Event %s is DISABLED", name.str));
("Event %s is DISABLED", name.str));
goto ret;
}
/* If one-time, no need to do computation */
@ -1203,9 +1206,9 @@ Event_queue_element::compute_next_execution_time()
if (last_executed.year)
{
DBUG_PRINT("info",("One-time event %s.%s of was already executed",
dbname.str, name.str, definer.str));
dbname.str, name.str));
dropped= (on_completion == Event_queue_element::ON_COMPLETION_DROP);
DBUG_PRINT("info",("One-time event will be dropped=%d.", dropped));
DBUG_PRINT("info",("One-time event will be dropped: %d.", dropped));
status= Event_queue_element::DISABLED;
status_changed= TRUE;
@ -1226,7 +1229,7 @@ Event_queue_element::compute_next_execution_time()
execute_at_null= TRUE;
if (on_completion == Event_queue_element::ON_COMPLETION_DROP)
dropped= TRUE;
DBUG_PRINT("info", ("Dropped=%d", dropped));
DBUG_PRINT("info", ("Dropped: %d", dropped));
status= Event_queue_element::DISABLED;
status_changed= TRUE;
@ -1400,8 +1403,8 @@ Event_queue_element::compute_next_execution_time()
goto ret;
}
ret:
DBUG_PRINT("info", ("ret=0 execute_at=%llu",
TIME_to_ulonglong_datetime(&execute_at)));
DBUG_PRINT("info", ("ret: 0 execute_at: %lu",
(long) TIME_to_ulonglong_datetime(&execute_at)));
DBUG_RETURN(FALSE);
err:
DBUG_PRINT("info", ("ret=1"));
@ -1688,7 +1691,7 @@ done:
thd->end_statement();
thd->cleanup_after_query();
DBUG_PRINT("info", ("EXECUTED %s.%s ret=%d", dbname.str, name.str, ret));
DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret));
DBUG_RETURN(ret);
}
@ -1752,7 +1755,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
thd->update_charset();
DBUG_PRINT("info",("old_sql_mode=%d new_sql_mode=%d",old_sql_mode, sql_mode));
DBUG_PRINT("info",("old_sql_mode: %lu new_sql_mode: %lu",old_sql_mode, sql_mode));
thd->variables.sql_mode= this->sql_mode;
/* Change the memory root for the execution time */
if (mem_root)
@ -1769,7 +1772,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
thd->query= show_create.c_ptr_safe();
thd->query_length= show_create.length();
DBUG_PRINT("info", ("query:%s",thd->query));
DBUG_PRINT("info", ("query: %s",thd->query));
event_change_security_context(thd, definer_user, definer_host, dbname,
&save_ctx);
@ -1777,14 +1780,14 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
mysql_init_query(thd, (uchar*) thd->query, thd->query_length);
if (MYSQLparse((void *)thd) || thd->is_fatal_error)
{
DBUG_PRINT("error", ("error during compile or thd->is_fatal_error=%d",
DBUG_PRINT("error", ("error during compile or thd->is_fatal_error: %d",
thd->is_fatal_error));
/*
Free lex associated resources
QQ: Do we really need all this stuff here?
*/
sql_print_error("SCHEDULER: Error during compilation of %s.%s or "
"thd->is_fatal_error=%d",
"thd->is_fatal_error: %d",
dbname.str, name.str, thd->is_fatal_error);
lex.unit.cleanup();

View file

@ -111,14 +111,14 @@ public:
void *p;
DBUG_ENTER("Event_queue_element::new(size)");
p= my_malloc(size, MYF(0));
DBUG_PRINT("info", ("alloc_ptr=0x%lx", p));
DBUG_PRINT("info", ("alloc_ptr: 0x%lx", (long) p));
DBUG_RETURN(p);
}
static void operator delete(void *ptr, size_t size)
{
DBUG_ENTER("Event_queue_element::delete(ptr,size)");
DBUG_PRINT("enter", ("free_ptr=0x%lx", ptr));
DBUG_PRINT("enter", ("free_ptr: 0x%lx", (long) ptr));
TRASH(ptr, size);
my_free((gptr) ptr, MYF(0));
DBUG_VOID_RETURN;

sql/event_db_repository.cc

@ -958,7 +958,7 @@ Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname,
Open_tables_state backup;
DBUG_ENTER("Event_db_repository::load_named_event");
DBUG_PRINT("enter",("thd=0x%lx name:%*s",thd, name.length, name.str));
DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd, name.length, name.str));
thd->reset_n_backup_open_tables_state(&backup);

sql/event_queue.cc

@ -143,7 +143,7 @@ Event_queue::init_queue(THD *thd, Event_db_repository *db_repo)
struct event_queue_param *event_queue_param_value= NULL;
DBUG_ENTER("Event_queue::init_queue");
DBUG_PRINT("enter", ("this=0x%lx", this));
DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
LOCK_QUEUE_DATA();
db_repository= db_repo;
@ -218,7 +218,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
int res;
Event_queue_element *new_element;
DBUG_ENTER("Event_queue::create_event");
DBUG_PRINT("enter", ("thd=0x%lx et=%s.%s",thd, dbname.str, name.str));
DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, dbname.str, name.str));
new_element= new Event_queue_element();
res= db_repository->load_named_event(thd, dbname, name, new_element);
@ -229,7 +229,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
new_element->compute_next_execution_time();
LOCK_QUEUE_DATA();
DBUG_PRINT("info", ("new event in the queue 0x%lx", new_element));
DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
queue_insert_safe(&queue, (byte *) new_element);
dbug_dump_queue(thd->query_start());
pthread_cond_broadcast(&COND_queue_state);
@ -264,7 +264,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
Event_queue_element *new_element;
DBUG_ENTER("Event_queue::update_event");
DBUG_PRINT("enter", ("thd=0x%lx et=[%s.%s]", thd, dbname.str, name.str));
DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str));
new_element= new Event_queue_element();
@ -294,7 +294,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
/* If not disabled event */
if (new_element)
{
DBUG_PRINT("info", ("new event in the Q 0x%lx", new_element));
DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
queue_insert_safe(&queue, (byte *) new_element);
pthread_cond_broadcast(&COND_queue_state);
}
@ -322,7 +322,8 @@ void
Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
{
DBUG_ENTER("Event_queue::drop_event");
DBUG_PRINT("enter", ("thd=0x%lx db=%s name=%s", thd, dbname.str, name.str));
DBUG_PRINT("enter", ("thd: 0x%lx db :%s name: %s", (long) thd,
dbname.str, name.str));
LOCK_QUEUE_DATA();
find_n_remove_event(dbname, name);
@ -484,7 +485,7 @@ Event_queue::load_events_from_db(THD *thd)
bool clean_the_queue= TRUE;
DBUG_ENTER("Event_queue::load_events_from_db");
DBUG_PRINT("enter", ("thd=0x%lx", thd));
DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
if ((ret= db_repository->open_event_table(thd, TL_READ, &table)))
{
@ -555,7 +556,6 @@ Event_queue::load_events_from_db(THD *thd)
goto end;
}
DBUG_PRINT("load_events_from_db", ("Adding 0x%lx to the exec list."));
queue_insert_safe(&queue, (byte *) et);
count++;
}
@ -663,16 +663,20 @@ Event_queue::dbug_dump_queue(time_t now)
for (i = 0; i < queue.elements; i++)
{
et= ((Event_queue_element*)queue_element(&queue, i));
DBUG_PRINT("info",("et=0x%lx db=%s name=%s",et, et->dbname.str, et->name.str));
DBUG_PRINT("info", ("exec_at=%llu starts=%llu ends=%llu execs_so_far=%u"
" expr=%lld et.exec_at=%d now=%d (et.exec_at - now)=%d if=%d",
TIME_to_ulonglong_datetime(&et->execute_at),
TIME_to_ulonglong_datetime(&et->starts),
TIME_to_ulonglong_datetime(&et->ends),
et->execution_count,
et->expression, sec_since_epoch_TIME(&et->execute_at), now,
(int)(sec_since_epoch_TIME(&et->execute_at) - now),
sec_since_epoch_TIME(&et->execute_at) <= now));
DBUG_PRINT("info", ("et: 0x%lx name: %s.%s", (long) et,
et->dbname.str, et->name.str));
DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u "
"expr: %ld et.exec_at: %ld now: %ld "
"(et.exec_at - now): %d if: %d",
(long) TIME_to_ulonglong_datetime(&et->execute_at),
(long) TIME_to_ulonglong_datetime(&et->starts),
(long) TIME_to_ulonglong_datetime(&et->ends),
et->execution_count,
(long) et->expression,
(long) (sec_since_epoch_TIME(&et->execute_at)),
(long) now,
(int) (sec_since_epoch_TIME(&et->execute_at) - now),
sec_since_epoch_TIME(&et->execute_at) <= now));
}
DBUG_VOID_RETURN;
#endif
@ -812,11 +816,11 @@ end:
if (to_free)
delete top;
DBUG_PRINT("info", ("returning %d. et_new=0x%lx abstime.tv_sec=%d ",
ret, *job_data, abstime? abstime->tv_sec:0));
DBUG_PRINT("info", ("returning %d et_new: 0x%lx abstime.tv_sec: %ld ",
ret, (long) *job_data, abstime ? abstime->tv_sec : 0));
if (*job_data)
DBUG_PRINT("info", ("db=%s name=%s definer=%s", (*job_data)->dbname.str,
DBUG_PRINT("info", ("db: %s name: %s definer=%s", (*job_data)->dbname.str,
(*job_data)->name.str, (*job_data)->definer.str));
DBUG_RETURN(ret);

sql/event_scheduler.cc

@ -264,8 +264,9 @@ event_worker_thread(void *arg)
if (!post_init_event_thread(thd))
{
DBUG_PRINT("info", ("Baikonur, time is %d, BURAN reporting and operational."
"THD=0x%lx", time(NULL), thd));
DBUG_PRINT("info", ("Baikonur, time is %ld, BURAN reporting and operational."
"THD: 0x%lx",
(long) time(NULL), (long) thd));
sql_print_information("SCHEDULER: [%s.%s of %s] executing in thread %lu. "
"Execution %u",
@ -378,7 +379,7 @@ Event_scheduler::start()
DBUG_ENTER("Event_scheduler::start");
LOCK_DATA();
DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state]));
DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
if (state > INITIALIZED)
goto end;
@ -400,7 +401,7 @@ Event_scheduler::start()
scheduler_thd= new_thd;
DBUG_PRINT("info", ("Setting state go RUNNING"));
state= RUNNING;
DBUG_PRINT("info", ("Forking new thread for scheduduler. THD=0x%lx", new_thd));
DBUG_PRINT("info", ("Forking new thread for scheduduler. THD: 0x%lx", (long) new_thd));
if (pthread_create(&th, &connection_attrib, event_scheduler_thread,
(void*)scheduler_param_value))
{
@ -463,7 +464,7 @@ Event_scheduler::run(THD *thd)
break;
}
DBUG_PRINT("info", ("get_top returned job_data=0x%lx", job_data));
DBUG_PRINT("info", ("get_top returned job_data: 0x%lx", (long) job_data));
if (job_data)
{
if ((res= execute_top(thd, job_data)))
@ -522,11 +523,11 @@ Event_scheduler::execute_top(THD *thd, Event_job_data *job_data)
++started_events;
DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD=0x%lx", new_thd));
DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD: 0x%lx", (long) new_thd));
DBUG_RETURN(FALSE);
error:
DBUG_PRINT("error", ("Baikonur, we have a problem! res=%d", res));
DBUG_PRINT("error", ("Baikonur, we have a problem! res: %d", res));
if (new_thd)
{
new_thd->proc_info= "Clearing";
@ -581,10 +582,10 @@ Event_scheduler::stop()
{
THD *thd= current_thd;
DBUG_ENTER("Event_scheduler::stop");
DBUG_PRINT("enter", ("thd=0x%lx", current_thd));
DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
LOCK_DATA();
DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state]));
DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
if (state != RUNNING)
goto end;
@ -605,7 +606,7 @@ Event_scheduler::stop()
*/
state= STOPPING;
DBUG_PRINT("info", ("Manager thread has id %d", scheduler_thd->thread_id));
DBUG_PRINT("info", ("Manager thread has id %lu", scheduler_thd->thread_id));
/* Lock from delete */
pthread_mutex_lock(&scheduler_thd->LOCK_delete);
/* This will wake up the thread if it waits on Queue's conditional */

sql/events.cc

@ -858,7 +858,7 @@ Events::check_system_tables(THD *thd)
bool ret= FALSE;
DBUG_ENTER("Events::check_system_tables");
DBUG_PRINT("enter", ("thd=0x%lx", thd));
DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
thd->reset_n_backup_open_tables_state(&backup);

sql/field.cc

@ -8180,8 +8180,8 @@ Field_bit::do_last_null_byte() const
bits. On systems with CHAR_BIT > 8 (not very common), the storage
will lose the extra bits.
*/
DBUG_PRINT("debug", ("bit_ofs=%d, bit_len=%d, bit_ptr=%p",
bit_ofs, bit_len, bit_ptr));
DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: 0x%lx",
bit_ofs, bit_len, (long) bit_ptr));
uchar *result;
if (bit_len == 0)
result= null_ptr;

sql/ha_ndbcluster.cc

@ -413,7 +413,8 @@ Thd_ndb::get_open_table(THD *thd, const void *key)
thd_ndb_share->stat.no_uncommitted_rows_count= 0;
thd_ndb_share->stat.records= ~(ha_rows)0;
}
DBUG_PRINT("exit", ("thd_ndb_share: 0x%x key: 0x%x", thd_ndb_share, key));
DBUG_PRINT("exit", ("thd_ndb_share: 0x%lx key: 0x%lx",
(long) thd_ndb_share, (long) key));
DBUG_RETURN(thd_ndb_share);
}
@ -761,8 +762,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
blob_ptr= (char*)"";
}
DBUG_PRINT("value", ("set blob ptr=%p len=%u",
blob_ptr, blob_len));
DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u",
(long) blob_ptr, blob_len));
DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
if (set_blob_value)
@ -847,8 +848,8 @@ int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
uint32 len= 0xffffffff; // Max uint32
if (ndb_blob->readData(buf, len) != 0)
ERR_RETURN(ndb_blob->getNdbError());
DBUG_PRINT("info", ("[%u] offset=%u buf=%p len=%u [ptrdiff=%d]",
i, offset, buf, len, (int)ptrdiff));
DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]",
i, offset, (long) buf, len, (int)ptrdiff));
DBUG_ASSERT(len == len64);
// Ugly hack assumes only ptr needs to be changed
field_blob->ptr+= ptrdiff;
@ -1171,8 +1172,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
index= dict->getIndexGlobal(index_name, *m_table);
if (!index)
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
index,
DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
(long) index,
index->getObjectId(),
index->getObjectVersion() & 0xFFFFFF,
index->getObjectVersion() >> 24,
@ -1215,8 +1216,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
index= dict->getIndexGlobal(unique_index_name, *m_table);
if (!index)
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
index,
DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
(long) index,
index->getObjectId(),
index->getObjectVersion() & 0xFFFFFF,
index->getObjectVersion() >> 24,
@ -2305,7 +2306,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
// Set bound if not done with this key
if (p.key != NULL)
{
DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d",
DBUG_PRINT("info", ("key %d:%d offset: %d length: %d last: %d bound: %d",
j, i, tot_len, part_len, p.part_last, p.bound_type));
DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len);
@ -2462,7 +2463,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
part_spec.start_part= 0;
part_spec.end_part= m_part_info->get_tot_partitions() - 1;
prune_partition_set(table, &part_spec);
DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@ -2658,7 +2659,7 @@ int ha_ndbcluster::write_row(byte *record)
{
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
"rows_inserted:%d, bulk_insert_rows: %d",
"rows_inserted: %d bulk_insert_rows: %d",
(int)m_rows_inserted, (int)m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
@ -3108,7 +3109,8 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
char* ptr;
field_blob->get_ptr(&ptr, row_offset);
uint32 len= field_blob->get_length(row_offset);
DBUG_PRINT("info",("[%u] SET ptr=%p len=%u", col_no, ptr, len));
DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u",
col_no, (long) ptr, len));
#endif
}
}
@ -3350,7 +3352,7 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
if (m_use_partition_function)
{
get_partition_set(table, buf, active_index, start_key, &part_spec);
DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@ -3876,7 +3878,7 @@ int ha_ndbcluster::end_bulk_insert()
NdbTransaction *trans= m_active_trans;
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
"rows_inserted:%d, bulk_insert_rows: %d",
"rows_inserted: %d bulk_insert_rows: %d",
(int) m_rows_inserted, (int) m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
@ -5101,13 +5103,12 @@ void ha_ndbcluster::prepare_for_alter()
int ha_ndbcluster::add_index(TABLE *table_arg,
KEY *key_info, uint num_of_keys)
{
DBUG_ENTER("ha_ndbcluster::add_index");
DBUG_PRINT("info", ("ha_ndbcluster::add_index to table %s",
table_arg->s->table_name));
int error= 0;
uint idx;
DBUG_ENTER("ha_ndbcluster::add_index");
DBUG_PRINT("enter", ("table %s", table_arg->s->table_name.str));
DBUG_ASSERT(m_share->state == NSS_ALTERED);
for (idx= 0; idx < num_of_keys; idx++)
{
KEY *key= key_info + idx;
@ -6662,7 +6663,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type)
void ha_ndbcluster::print_error(int error, myf errflag)
{
DBUG_ENTER("ha_ndbcluster::print_error");
DBUG_PRINT("enter", ("error = %d", error));
DBUG_PRINT("enter", ("error: %d", error));
if (error == HA_ERR_NO_PARTITION_FOUND)
m_part_info->print_no_partition_found(table);
@ -7168,16 +7169,16 @@ static void dbug_print_open_tables()
for (uint i= 0; i < ndbcluster_open_tables.records; i++)
{
NDB_SHARE *share= (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i);
DBUG_PRINT("share",
("[%d] 0x%lx key: %s key_length: %d",
i, share, share->key, share->key_length));
DBUG_PRINT("share",
("db.tablename: %s.%s use_count: %d commit_count: %d",
DBUG_PRINT("loop",
("[%d] 0x%lx key: %s key_length: %d",
i, (long) share, share->key, share->key_length));
DBUG_PRINT("loop",
("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
share->use_count, share->commit_count));
share->use_count, (ulong) share->commit_count));
#ifdef HAVE_NDB_BINLOG
if (share->table)
DBUG_PRINT("share",
DBUG_PRINT("loop",
("table->s->db.table_name: %s.%s",
share->table->s->db.str, share->table->s->table_name.str));
#endif
@ -7330,13 +7331,13 @@ static int rename_share(NDB_SHARE *share, const char *new_key)
share->table_name= share->db + strlen(share->db) + 1;
ha_ndbcluster::set_tabname(new_key, share->table_name);
DBUG_PRINT("rename_share",
("0x%lx key: %s key_length: %d",
share, share->key, share->key_length));
DBUG_PRINT("rename_share",
("db.tablename: %s.%s use_count: %d commit_count: %d",
DBUG_PRINT("info",
("share: 0x%lx key: %s key_length: %d",
(long) share, share->key, share->key_length));
DBUG_PRINT("info",
("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
share->use_count, share->commit_count));
share->use_count, (ulong) share->commit_count));
if (share->table)
{
DBUG_PRINT("rename_share",
@ -7371,13 +7372,13 @@ NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share)
dbug_print_open_tables();
DBUG_PRINT("get_share",
("0x%lx key: %s key_length: %d",
share, share->key, share->key_length));
DBUG_PRINT("get_share",
("db.tablename: %s.%s use_count: %d commit_count: %d",
DBUG_PRINT("info",
("share: 0x%lx key: %s key_length: %d",
(long) share, share->key, share->key_length));
DBUG_PRINT("info",
("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
share->use_count, share->commit_count));
share->use_count, (ulong) share->commit_count));
pthread_mutex_unlock(&ndbcluster_mutex);
return share;
}
@ -7485,13 +7486,12 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
void ndbcluster_real_free_share(NDB_SHARE **share)
{
DBUG_ENTER("ndbcluster_real_free_share");
DBUG_PRINT("real_free_share",
("0x%lx key: %s key_length: %d",
(*share), (*share)->key, (*share)->key_length));
DBUG_PRINT("real_free_share",
("db.tablename: %s.%s use_count: %d commit_count: %d",
DBUG_PRINT("enter",
("share: 0x%lx key: %s key_length: %d "
"db.tablename: %s.%s use_count: %d commit_count: %lu",
(long) (*share), (*share)->key, (*share)->key_length,
(*share)->db, (*share)->table_name,
(*share)->use_count, (*share)->commit_count));
(*share)->use_count, (ulong) (*share)->commit_count));
hash_delete(&ndbcluster_open_tables, (byte*) *share);
thr_lock_delete(&(*share)->lock);
@ -7539,13 +7539,13 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
else
{
dbug_print_open_tables();
DBUG_PRINT("free_share",
("0x%lx key: %s key_length: %d",
*share, (*share)->key, (*share)->key_length));
DBUG_PRINT("free_share",
("db.tablename: %s.%s use_count: %d commit_count: %d",
DBUG_PRINT("info",
("share: 0x%lx key: %s key_length: %d",
(long) *share, (*share)->key, (*share)->key_length));
DBUG_PRINT("info",
("db.tablename: %s.%s use_count: %d commit_count: %lu",
(*share)->db, (*share)->table_name,
(*share)->use_count, (*share)->commit_count));
(*share)->use_count, (ulong) (*share)->commit_count));
}
if (!have_lock)
pthread_mutex_unlock(&ndbcluster_mutex);
@ -7815,7 +7815,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
get_partition_set(table, curr, active_index,
&multi_range_curr->start_key,
&part_spec);
DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@ -8347,8 +8347,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat) == 0)
{
char buff[22], buff2[22];
DBUG_PRINT("ndb_util_thread",
("Table: %s, commit_count: %llu, rows: %llu",
DBUG_PRINT("info",
("Table: %s commit_count: %s rows: %s",
share->key,
llstr(stat.commit_count, buff),
llstr(stat.row_count, buff2)));

sql/ha_ndbcluster_binlog.cc

@ -161,16 +161,16 @@ static void dbug_print_table(const char *info, TABLE *table)
}
DBUG_PRINT("info",
("%s: %s.%s s->fields: %d "
"reclength: %d rec_buff_length: %d record[0]: %lx "
"record[1]: %lx",
"reclength: %lu rec_buff_length: %u record[0]: 0x%lx "
"record[1]: 0x%lx",
info,
table->s->db.str,
table->s->table_name.str,
table->s->fields,
table->s->reclength,
table->s->rec_buff_length,
table->record[0],
table->record[1]));
(long) table->record[0],
(long) table->record[1]));
for (unsigned int i= 0; i < table->s->fields; i++)
{
@ -180,7 +180,7 @@ static void dbug_print_table(const char *info, TABLE *table)
"ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]",
i,
f->field_name,
f->flags,
(long) f->flags,
(f->flags & PRI_KEY_FLAG) ? "pri" : "attr",
(f->flags & NOT_NULL_FLAG) ? "" : ",nullable",
(f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed",
@ -189,16 +189,18 @@ static void dbug_print_table(const char *info, TABLE *table)
(f->flags & BINARY_FLAG) ? ",binary" : "",
f->real_type(),
f->pack_length(),
f->ptr, f->ptr - table->record[0],
(long) f->ptr, (int) (f->ptr - table->record[0]),
f->null_bit,
f->null_ptr, (byte*) f->null_ptr - table->record[0]));
(long) f->null_ptr,
(int) ((byte*) f->null_ptr - table->record[0])));
if (f->type() == MYSQL_TYPE_BIT)
{
Field_bit *g= (Field_bit*) f;
DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] "
"bit_ofs: %u bit_len: %u",
g->field_length, g->bit_ptr,
(byte*) g->bit_ptr-table->record[0],
"bit_ofs: %d bit_len: %u",
g->field_length, (long) g->bit_ptr,
(int) ((byte*) g->bit_ptr -
table->record[0]),
g->bit_ofs, g->bit_len));
}
}
@ -605,11 +607,11 @@ static int ndbcluster_binlog_end(THD *thd)
{
DBUG_PRINT("share",
("[%d] 0x%lx key: %s key_length: %d",
i, share, share->key, share->key_length));
i, (long) share, share->key, share->key_length));
DBUG_PRINT("share",
("db.tablename: %s.%s use_count: %d commit_count: %d",
("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
share->use_count, share->commit_count));
share->use_count, (long) share->commit_count));
}
}
pthread_mutex_unlock(&ndbcluster_mutex);
@ -685,8 +687,8 @@ static NDB_SHARE *ndbcluster_check_apply_status_share()
void *share= hash_search(&ndbcluster_open_tables,
NDB_APPLY_TABLE_FILE,
sizeof(NDB_APPLY_TABLE_FILE) - 1);
DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s %p",
NDB_APPLY_TABLE_FILE, share));
DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s 0x%lx",
NDB_APPLY_TABLE_FILE, (long) share));
pthread_mutex_unlock(&ndbcluster_mutex);
return (NDB_SHARE*) share;
}
@ -703,8 +705,8 @@ static NDB_SHARE *ndbcluster_check_schema_share()
void *share= hash_search(&ndbcluster_open_tables,
NDB_SCHEMA_TABLE_FILE,
sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
DBUG_PRINT("info",("ndbcluster_check_schema_share %s %p",
NDB_SCHEMA_TABLE_FILE, share));
DBUG_PRINT("info",("ndbcluster_check_schema_share %s 0x%lx",
NDB_SCHEMA_TABLE_FILE, (long) share));
pthread_mutex_unlock(&ndbcluster_mutex);
return (NDB_SHARE*) share;
}
@ -2721,10 +2723,9 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
if (share->flags & NSF_BLOB_FLAG)
op->mergeEvents(TRUE); // currently not inherited from event
DBUG_PRINT("info", ("share->ndb_value[0]: 0x%x",
share->ndb_value[0]));
DBUG_PRINT("info", ("share->ndb_value[1]: 0x%x",
share->ndb_value[1]));
DBUG_PRINT("info", ("share->ndb_value[0]: 0x%lx share->ndb_value[1]: 0x%lx",
(long) share->ndb_value[0],
(long) share->ndb_value[1]));
int n_columns= ndbtab->getNoOfColumns();
int n_fields= table ? table->s->fields : 0; // XXX ???
for (int j= 0; j < n_columns; j++)
@ -2778,12 +2779,14 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
}
share->ndb_value[0][j].ptr= attr0.ptr;
share->ndb_value[1][j].ptr= attr1.ptr;
DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%x "
"share->ndb_value[0][%d]: 0x%x",
j, &share->ndb_value[0][j], j, attr0.ptr));
DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%x "
"share->ndb_value[1][%d]: 0x%x",
j, &share->ndb_value[0][j], j, attr1.ptr));
DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%lx "
"share->ndb_value[0][%d]: 0x%lx",
j, (long) &share->ndb_value[0][j],
j, (long) attr0.ptr));
DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%lx "
"share->ndb_value[1][%d]: 0x%lx",
j, (long) &share->ndb_value[0][j],
j, (long) attr1.ptr));
}
op->setCustomData((void *) share); // set before execute
share->op= op; // assign op in NDB_SHARE
@ -2826,8 +2829,8 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
(void) pthread_cond_signal(&injector_cond);
}
DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u",
share->key, share->op, share->use_count));
DBUG_PRINT("info",("%s share->op: 0x%lx share->use_count: %u",
share->key, (long) share->op, share->use_count));
if (ndb_extra_logging)
sql_print_information("NDB Binlog: logging %s", share->key);
@ -3012,10 +3015,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
free_share(&apply_status_share);
apply_status_share= 0;
}
DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: "
"%s received share: 0x%lx op: %lx share op: %lx "
"op_old: %lx",
share->key, share, pOp, share->op, share->op_old));
DBUG_PRINT("error", ("CLUSTER FAILURE EVENT: "
"%s received share: 0x%lx op: 0x%lx share op: 0x%lx "
"op_old: 0x%lx",
share->key, (long) share, (long) pOp,
(long) share->op, (long) share->op_old));
break;
case NDBEVENT::TE_DROP:
if (apply_status_share == share)
@ -3033,10 +3037,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
// fall through
case NDBEVENT::TE_ALTER:
row.n_schemaops++;
DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: %lx "
"share op: %lx op_old: %lx",
type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
share->key, share, pOp, share->op, share->op_old));
DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: 0x%lx "
"share op: 0x%lx op_old: 0x%lx",
type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
share->key, (long) share, (long) pOp,
(long) share->op, (long) share->op_old));
break;
case NDBEVENT::TE_NODE_FAILURE:
/* fall through */
@ -3513,7 +3518,8 @@ restart:
}
}
// now check that we have epochs consistant with what we had before the restart
DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci));
DBUG_PRINT("info", ("schema_res: %d schema_gci: %lu", schema_res,
(long) schema_gci));
{
i_ndb->flushIncompleteEvents(schema_gci);
s_ndb->flushIncompleteEvents(schema_gci);
@ -3697,8 +3703,8 @@ restart:
!= NULL)
{
NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
DBUG_PRINT("info", ("per gci_op: %p share: %p event_types: 0x%x",
gci_op, share, event_types));
DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x",
(long) gci_op, (long) share, event_types));
// workaround for interface returning TE_STOP events
// which are normally filtered out below in the nextEvent loop
if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0)
@ -3784,11 +3790,13 @@ restart:
{
NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
DBUG_PRINT("info",
("EVENT TYPE: %d GCI: %lld last applied: %lld "
"share: 0x%lx (%s.%s)", pOp->getEventType(), gci,
ndb_latest_applied_binlog_epoch, share,
share ? share->db : "share == NULL",
share ? share->table_name : ""));
("EVENT TYPE: %d GCI: %ld last applied: %ld "
"share: 0x%lx (%s.%s)", pOp->getEventType(),
(long) gci,
(long) ndb_latest_applied_binlog_epoch,
(long) share,
share ? share->db : "'NULL'",
share ? share->table_name : "'NULL'"));
DBUG_ASSERT(share != 0);
}
// assert that there is consistancy between gci op list

sql/ha_partition.cc

@ -2027,7 +2027,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root)
if (!(m_file[i]= get_new_handler(table_share, mem_root,
m_engine_array[i])))
DBUG_RETURN(TRUE);
DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]));
DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]->db_type));
}
/* For the moment we only support partition over the same table engine */
if (m_engine_array[0] == myisam_hton)
@ -2939,8 +2939,8 @@ int ha_partition::rnd_init(bool scan)
include_partition_fields_in_used_fields();
/* Now we see what the index of our first important partition is */
DBUG_PRINT("info", ("m_part_info->used_partitions 0x%x",
m_part_info->used_partitions.bitmap));
DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx",
(long) m_part_info->used_partitions.bitmap));
part_id= bitmap_get_first_set(&(m_part_info->used_partitions));
DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));

sql/handler.cc

@ -1513,7 +1513,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
DBUG_ENTER("handler::ha_open");
DBUG_PRINT("enter",
("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
name, table_share->db_type, table_arg->db_stat, mode,
name, ht->db_type, table_arg->db_stat, mode,
test_if_locked));
table= table_arg;
@ -1927,8 +1927,8 @@ int handler::update_auto_increment()
void handler::column_bitmaps_signal()
{
DBUG_ENTER("column_bitmaps_signal");
DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", table->read_set,
table->write_set));
DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", (long) table->read_set,
(long) table->write_set));
DBUG_VOID_RETURN;
}
@ -3507,8 +3507,10 @@ namespace
int write_locked_table_maps(THD *thd)
{
DBUG_ENTER("write_locked_table_maps");
DBUG_PRINT("enter", ("thd=%p, thd->lock=%p, thd->locked_tables=%p, thd->extra_lock",
thd, thd->lock, thd->locked_tables, thd->extra_lock));
DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx thd->locked_tables: 0x%lx "
"thd->extra_lock: 0x%lx",
(long) thd, (long) thd->lock,
(long) thd->locked_tables, (long) thd->extra_lock));
if (thd->get_binlog_table_maps() == 0)
{
@ -3528,7 +3530,7 @@ namespace
++table_ptr)
{
TABLE *const table= *table_ptr;
DBUG_PRINT("info", ("Checking table %s", table->s->table_name));
DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str));
if (table->current_lock == F_WRLCK &&
check_table_binlog_row_based(thd, table))
{

sql/item_cmpfunc.cc

@ -3061,7 +3061,7 @@ longlong Item_is_not_null_test::val_int()
if (!used_tables_cache)
{
owner->was_null|= (!cached_value);
DBUG_PRINT("info", ("cached :%ld", (long) cached_value));
DBUG_PRINT("info", ("cached: %ld", (long) cached_value));
DBUG_RETURN(cached_value);
}
if (args[0]->is_null())

sql/item_func.cc

@ -5044,7 +5044,7 @@ Item_func_sp::result_type() const
{
Field *field;
DBUG_ENTER("Item_func_sp::result_type");
DBUG_PRINT("info", ("m_sp = %p", m_sp));
DBUG_PRINT("info", ("m_sp: 0x%lx", (long) m_sp));
if (result_field)
DBUG_RETURN(result_field->result_type());

sql/log.cc

@ -1344,7 +1344,7 @@ binlog_trans_log_savepos(THD *thd, my_off_t *pos)
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
DBUG_ASSERT(mysql_bin_log.is_open());
*pos= trx_data->position();
DBUG_PRINT("return", ("*pos=%u", *pos));
DBUG_PRINT("return", ("*pos: %lu", (ulong) *pos));
DBUG_VOID_RETURN;
}
@ -1368,7 +1368,7 @@ static void
binlog_trans_log_truncate(THD *thd, my_off_t pos)
{
DBUG_ENTER("binlog_trans_log_truncate");
DBUG_PRINT("enter", ("pos=%u", pos));
DBUG_PRINT("enter", ("pos: %lu", (ulong) pos));
DBUG_ASSERT(thd->ha_data[binlog_hton->slot] != NULL);
/* Only true if binlog_trans_log_savepos() wasn't called before */
@ -1444,8 +1444,8 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data,
DBUG_ENTER("binlog_end_trans");
int error=0;
IO_CACHE *trans_log= &trx_data->trans_log;
DBUG_PRINT("enter", ("transaction: %s, end_ev=%p",
all ? "all" : "stmt", end_ev));
DBUG_PRINT("enter", ("transaction: %s end_ev: 0x%lx",
all ? "all" : "stmt", (long) end_ev));
DBUG_PRINT("info", ("thd->options={ %s%s}",
FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT),
FLAGSTR(thd->options, OPTION_BEGIN)));
@ -3417,12 +3417,13 @@ int THD::binlog_setup_trx_data()
void
THD::binlog_start_trans_and_stmt()
{
DBUG_ENTER("binlog_start_trans_and_stmt");
binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data));
if (trx_data)
DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u",
trx_data->before_stmt_pos));
DBUG_ENTER("binlog_start_trans_and_stmt");
DBUG_PRINT("enter", ("trx_data: 0x%lx trx_data->before_stmt_pos: %lu",
(long) trx_data,
(trx_data ? (ulong) trx_data->before_stmt_pos :
(ulong) 0)));
if (trx_data == NULL ||
trx_data->before_stmt_pos == MY_OFF_T_UNDEF)
{
@ -3453,8 +3454,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
{
int error;
DBUG_ENTER("THD::binlog_write_table_map");
DBUG_PRINT("enter", ("table: %0xlx (%s: #%u)",
(long) table, table->s->table_name,
DBUG_PRINT("enter", ("table: 0x%lx (%s: #%lu)",
(long) table, table->s->table_name.str,
table->s->table_map_id));
/* Pre-conditions */
@ -3517,7 +3518,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
{
DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)");
DBUG_ASSERT(mysql_bin_log.is_open());
DBUG_PRINT("enter", ("event=%p", event));
DBUG_PRINT("enter", ("event: 0x%lx", (long) event));
int error= 0;
@ -3526,7 +3527,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
DBUG_ASSERT(trx_data);
DBUG_PRINT("info", ("trx_data->pending()=%p", trx_data->pending()));
DBUG_PRINT("info", ("trx_data->pending(): 0x%lx", (long) trx_data->pending()));
if (Rows_log_event* pending= trx_data->pending())
{
@ -3681,9 +3682,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
my_off_t trans_log_pos= my_b_tell(trans_log);
if (event_info->get_cache_stmt() || trans_log_pos != 0)
{
DBUG_PRINT("info", ("Using trans_log: cache=%d, trans_log_pos=%u",
DBUG_PRINT("info", ("Using trans_log: cache: %d, trans_log_pos: %lu",
event_info->get_cache_stmt(),
trans_log_pos));
(ulong) trans_log_pos));
if (trans_log_pos == 0)
thd->binlog_start_trans_and_stmt();
file= trans_log;
@ -3725,15 +3726,17 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
}
if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
{
DBUG_PRINT("info",("number of auto_inc intervals: %lu",
thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements()));
DBUG_PRINT("info",("number of auto_inc intervals: %u",
thd->auto_inc_intervals_in_cur_stmt_for_binlog.
nb_elements()));
/*
If the auto_increment was second in a table's index (possible with
MyISAM or BDB) (table->next_number_key_offset != 0), such event is
in fact not necessary. We could avoid logging it.
*/
Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,
thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum());
Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT,
thd->auto_inc_intervals_in_cur_stmt_for_binlog.
minimum());
if (e.write(file))
goto err;
}
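
The hunks above all apply a single portability rule: %p and %lld are avoided in trace format strings because %lld is missing from some printf implementations and %p output differs between platforms, so pointers are cast to long and printed as 0x%lx, and offsets such as my_off_t are cast to ulong and printed as %lu. A minimal standalone sketch of that convention, with plain printf standing in for DBUG_PRINT and made-up variable names:

#include <cstdio>

int main()
{
  const char *event= "some event";        /* any pointer value */
  unsigned long long pos= 4294967296ULL;  /* e.g. a my_off_t */

  /* Non-portable: printf("event=%p pos=%lld\n", event, pos); */

  /* The form used throughout these hunks: cast and print with %lx/%lu.
     The value can be truncated where the source type is wider than
     long, which is acceptable for a debug trace. */
  printf("event: 0x%lx pos: %lu\n", (long) event, (unsigned long) pos);
  return 0;
}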


@ -5345,8 +5345,8 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
uint8 const common_header_len= description_event->common_header_len;
uint8 const post_header_len= description_event->post_header_len[event_type-1];
DBUG_PRINT("enter",("event_len=%ld, common_header_len=%d, "
"post_header_len=%d",
DBUG_PRINT("enter",("event_len: %u common_header_len: %d "
"post_header_len: %d",
event_len, common_header_len,
post_header_len));
@ -5376,7 +5376,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
const byte* const ptr_rows_data= var_start + byte_count + 1;
my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf);
DBUG_PRINT("info",("m_table_id=%lu, m_flags=%d, m_width=%u, data_size=%lu",
DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %u",
m_table_id, m_flags, m_width, data_size));
m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME));
@ -5416,7 +5416,7 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
would save binlog space. TODO
*/
DBUG_ENTER("Rows_log_event::do_add_row_data");
DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
DBUG_PRINT("enter", ("row_data: 0x%lx length: %u", (ulong) row_data,
length));
/*
Don't print debug messages when running valgrind since they can
@ -5513,7 +5513,7 @@ unpack_row(RELAY_LOG_INFO *rli,
{
DBUG_ENTER("unpack_row");
DBUG_ASSERT(record && row);
DBUG_PRINT("enter", ("row=0x%lx; record=0x%lx", row, record));
DBUG_PRINT("enter", ("row: 0x%lx record: 0x%lx", (long) row, (long) record));
my_ptrdiff_t const offset= record - (byte*) table->record[0];
my_size_t master_null_bytes= table->s->null_bytes;
@ -5555,10 +5555,12 @@ unpack_row(RELAY_LOG_INFO *rli,
if (bitmap_is_set(cols, field_ptr - begin_ptr))
{
DBUG_ASSERT(table->record[0] <= f->ptr);
DBUG_ASSERT(f->ptr < table->record[0] + table->s->reclength + (f->pack_length_in_rec() == 0));
DBUG_ASSERT(f->ptr < (table->record[0] + table->s->reclength +
(f->pack_length_in_rec() == 0)));
f->move_field_offset(offset);
DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, f->ptr));
DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name,
(long) f->ptr));
ptr= f->unpack(f->ptr, ptr);
f->move_field_offset(-offset);
/* Field...::unpack() cannot return 0 */
@ -6068,7 +6070,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
uint8 common_header_len= description_event->common_header_len;
uint8 post_header_len= description_event->post_header_len[TABLE_MAP_EVENT-1];
DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d",
DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d",
event_len, common_header_len, post_header_len));
/*
@ -6116,10 +6118,10 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
uchar *ptr_after_colcnt= (uchar*) ptr_colcnt;
m_colcnt= net_field_length(&ptr_after_colcnt);
DBUG_PRINT("info",("m_dblen=%d off=%d m_tbllen=%d off=%d m_colcnt=%d off=%d",
m_dblen, ptr_dblen-(const byte*)vpart,
m_tbllen, ptr_tbllen-(const byte*)vpart,
m_colcnt, ptr_colcnt-(const byte*)vpart));
DBUG_PRINT("info",("m_dblen: %d off: %ld m_tbllen: %d off: %ld m_colcnt: %lu off: %ld",
m_dblen, (long) (ptr_dblen-(const byte*)vpart),
m_tbllen, (long) (ptr_tbllen-(const byte*)vpart),
m_colcnt, (long) (ptr_colcnt-(const byte*)vpart)));
/* Allocate mem for all fields in one go. If fails, catched in is_valid() */
m_memory= my_multi_malloc(MYF(MY_WME),
@ -6523,10 +6525,10 @@ copy_extra_record_fields(TABLE *table,
my_size_t master_reclength,
my_ptrdiff_t master_fields)
{
DBUG_PRINT("info", ("Copying to %p "
DBUG_PRINT("info", ("Copying to 0x%lx "
"from field %ld at offset %u "
"to field %d at offset %u",
table->record[0],
"to field %d at offset %lu",
(long) table->record[0],
master_fields, master_reclength,
table->s->fields, table->s->reclength));
/*


@ -2121,7 +2121,7 @@ the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n",
#ifdef HAVE_STACKTRACE
if (!(test_flags & TEST_NO_STACKTRACE))
{
fprintf(stderr,"thd=%p\n",thd);
fprintf(stderr,"thd: 0x%lx\n",(long) thd);
print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0,
thread_stack);
}


@ -10814,7 +10814,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
if (!tmp.length())
tmp.append(STRING_WITH_LEN("(empty)"));
DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr()));
DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg, tmp.ptr()));
DBUG_VOID_RETURN;
}


@ -564,8 +564,8 @@ err:
mysql_free_result(res);
if (error)
{
sql_print_error("While trying to obtain the list of slaves from the master \
'%s:%d', user '%s' got the following error: '%s'",
sql_print_error("While trying to obtain the list of slaves from the master "
"'%s:%d', user '%s' got the following error: '%s'",
mi->host, mi->port, mi->user, error);
DBUG_RETURN(1);
}
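
The sql_print_error() rewrite above replaces a backslash-newline continuation inside a string literal with adjacent-literal concatenation: the backslash form silently pulls every leading space of the next source line into the message, while adjacent literals concatenate to exactly what is written. A tiny illustration with hypothetical message text:

#include <cstdio>

int main()
{
  /* Continuation keeps the string going, but the next line's leading
     spaces become part of it: */
  const char *bad= "error on master \
      '%s:%d'";                 /* == "error on master       '%s:%d'" */

  /* Adjacent string literals are concatenated by the compiler: */
  const char *good= "error on master "
                    "'%s:%d'";  /* == "error on master '%s:%d'" */

  printf("%s\n%s\n", bad, good);
  return 0;
}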


@ -402,7 +402,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
bool look_for_description_event)
{
DBUG_ENTER("init_relay_log_pos");
DBUG_PRINT("info", ("pos=%lu", pos));
DBUG_PRINT("info", ("pos: %lu", (ulong) pos));
*errmsg=0;
pthread_mutex_t *log_lock=rli->relay_log.get_log_lock();
@ -855,7 +855,7 @@ void st_relay_log_info::close_temporary_tables()
Don't ask for disk deletion. For now, anyway they will be deleted when
slave restarts, but it is a better intention to not delete them.
*/
DBUG_PRINT("info", ("table: %p", table));
DBUG_PRINT("info", ("table: 0x%lx", (long) table));
close_temporary(table, 1, 0);
}
save_temporary_tables= 0;


@ -50,17 +50,17 @@ table_mapping::~table_mapping()
st_table* table_mapping::get_table(ulong table_id)
{
DBUG_ENTER("table_mapping::get_table(ulong)");
DBUG_PRINT("enter", ("table_id=%d", table_id));
DBUG_PRINT("enter", ("table_id: %lu", table_id));
entry *e= find_entry(table_id);
if (e)
{
DBUG_PRINT("info", ("tid %d -> table %p (%s)",
table_id, e->table,
DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
table_id, (long) e->table,
MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(e->table);
}
DBUG_PRINT("info", ("tid %d is not mapped!", table_id));
DBUG_PRINT("info", ("tid %lu is not mapped!", table_id));
DBUG_RETURN(NULL);
}
@ -93,9 +93,9 @@ int table_mapping::expand()
int table_mapping::set_table(ulong table_id, TABLE* table)
{
DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)");
DBUG_PRINT("enter", ("table_id=%d, table=%p (%s)",
DBUG_PRINT("enter", ("table_id: %lu table: 0x%lx (%s)",
table_id,
table, MAYBE_TABLE_NAME(table)));
(long) table, MAYBE_TABLE_NAME(table)));
entry *e= find_entry(table_id);
if (e == 0)
{
@ -111,8 +111,8 @@ int table_mapping::set_table(ulong table_id, TABLE* table)
e->table= table;
my_hash_insert(&m_table_ids,(byte *)e);
DBUG_PRINT("info", ("tid %d -> table %p (%s)",
table_id, e->table,
DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
table_id, (long) e->table,
MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(0); // All OK
}


@ -3943,7 +3943,7 @@ sys_var_event_scheduler::update(THD *thd, set_var *var)
DBUG_RETURN(TRUE);
}
DBUG_PRINT("new_value", ("%lu", (bool)var->save_result.ulong_value));
DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value));
Item_result var_type= var->value->result_type();


@ -1609,7 +1609,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
DBUG_RETURN(packet_error);
}
DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n",
DBUG_PRINT("exit", ("len: %lu net->read_pos[4]: %d",
len, mysql->net.read_pos[4]));
DBUG_RETURN(len - 1);
}
@ -1800,7 +1800,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
ev->when = time(NULL);
ev->thd = thd; // because up to this point, ev->thd == 0
exec_res = ev->exec_event(rli);
DBUG_PRINT("info", ("exec_event result = %d", exec_res));
DBUG_PRINT("info", ("exec_event result: %d", exec_res));
DBUG_ASSERT(rli->sql_thd==thd);
/*
Format_description_log_event should not be deleted because it will be
@ -1951,9 +1951,9 @@ pthread_handler_t handle_slave_io(void *arg)
// we can get killed during safe_connect
if (!safe_connect(thd, mysql, mi))
{
sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\
replication started in log '%s' at position %s", mi->user,
mi->host, mi->port,
sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',"
"replication started in log '%s' at position %s",
mi->user, mi->host, mi->port,
IO_RPL_LOG_NAME,
llstr(mi->master_log_pos,llbuff));
/*
@ -3107,8 +3107,8 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
{
last_errno=mysql_errno(mysql);
suppress_warnings= 0;
sql_print_error("Slave I/O thread: error %s to master \
'%s@%s:%d': \
sql_print_error("Slave I/O thread: error %s to master "
"'%s@%s:%d': \
Error: '%s' errno: %d retry-time: %d retries: %lu",
(reconnect ? "reconnecting" : "connecting"),
mi->user, mi->host, mi->port,


@ -899,7 +899,7 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
break;
val= (*splocal)->this_item();
DBUG_PRINT("info", ("print %p", val));
DBUG_PRINT("info", ("print 0x%lx", (long) val));
str_value= sp_get_item_value(val, &str_value_holder);
if (str_value)
res|= qbuf.append(*str_value);


@ -1087,7 +1087,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
if (!lock_in_use)
VOID(pthread_mutex_lock(&LOCK_open));
DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables));
DBUG_PRINT("info", ("thd->open_tables: 0x%lx", (long) thd->open_tables));
found_old_table= 0;
while (thd->open_tables)
@ -1177,6 +1177,16 @@ static inline uint tmpkeyval(THD *thd, TABLE *table)
void close_temporary_tables(THD *thd)
{
TABLE *table;
TABLE *next;
/*
TODO: 5.1 maintains prev link in temporary_tables
double-linked list so we could fix it. But it is not necessary
at this time when the list is being destroyed
*/
TABLE *prev_table;
/* Assume thd->options has OPTION_QUOTE_SHOW_CREATE */
bool was_quote_show= TRUE;
if (!thd->temporary_tables)
return;
@ -1192,12 +1202,7 @@ void close_temporary_tables(THD *thd)
return;
}
TABLE *next,
*prev_table /* TODO: 5.1 maintaines prev link in temporary_tables
double-linked list so we could fix it. But it is not necessary
at this time when the list is being destroyed */;
bool was_quote_show= true; /* to assume thd->options has OPTION_QUOTE_SHOW_CREATE */
// Better add "if exists", in case a RESET MASTER has been done
/* Better add "if exists", in case a RESET MASTER has been done */
const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
uint stub_len= sizeof(stub) - 1;
char buf[256];
@ -1303,7 +1308,7 @@ void close_temporary_tables(THD *thd)
}
}
if (!was_quote_show)
thd->options &= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
thd->options&= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
thd->temporary_tables=0;
}
@ -2069,7 +2074,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(0); // VIEW
}
DBUG_PRINT("info", ("inserting table %p into the cache", table));
DBUG_PRINT("info", ("inserting table 0x%lx into the cache", (long) table));
VOID(my_hash_insert(&open_cache,(byte*) table));
}
@ -2399,7 +2404,7 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock)
{
DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d "
"locked_by_flush: %d locked_by_name: %d "
"db_stat: %u version: %u",
"db_stat: %u version: %lu",
(ulong) search->s, search->locked_by_logger,
search->locked_by_flush, search->locked_by_name,
search->db_stat,


@ -80,8 +80,9 @@ void mysql_client_binlog_statement(THD* thd)
int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr);
DBUG_PRINT("info",
("bytes_decoded=%d; strptr=0x%lu; endptr=0x%lu ('%c':%d)",
bytes_decoded, strptr, endptr, *endptr, *endptr));
("bytes_decoded: %d strptr: 0x%lx endptr: 0x%lx ('%c':%d)",
bytes_decoded, (long) strptr, (long) endptr, *endptr,
*endptr));
if (bytes_decoded < 0)
{
@ -145,14 +146,15 @@ void mysql_client_binlog_statement(THD* thd)
bufptr += event_len;
DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code()));
DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET=0x%lx",
bufptr+EVENT_TYPE_OFFSET));
DBUG_PRINT("info", ("bytes_decoded=%d; bufptr=0x%lx; buf[EVENT_LEN_OFFSET]=%u",
bytes_decoded, bufptr, uint4korr(bufptr+EVENT_LEN_OFFSET)));
DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET: 0x%lx",
(long) (bufptr+EVENT_TYPE_OFFSET)));
DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu",
bytes_decoded, (long) bufptr,
uint4korr(bufptr+EVENT_LEN_OFFSET)));
ev->thd= thd;
if (int err= ev->exec_event(thd->rli_fake))
{
DBUG_PRINT("info", ("exec_event() - error=%d", error));
DBUG_PRINT("error", ("exec_event() returned: %d", err));
/*
TODO: Maybe a better error message since the BINLOG statement
now contains several events.


@ -2981,7 +2981,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
DBUG_PRINT("qcache", ("table: %s db: %s type: %u",
tables_used->table->s->table_name.str,
tables_used->table->s->db.str,
tables_used->table->s->db_type));
tables_used->table->s->db_type->db_type));
if (tables_used->derived)
{
table_count--;
@ -3037,7 +3037,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
lex->safe_to_cache_query)
{
DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
OPTION_TO_QUERY_CACHE,
(long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
@ -3057,7 +3057,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
DBUG_PRINT("qcache",
("not interesting query: %d or not cacheable, options %lx %lx type: %u",
(int) lex->sql_command,
OPTION_TO_QUERY_CACHE,
(long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
DBUG_RETURN(0);


@ -551,7 +551,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
void THD::awake(THD::killed_state state_to_set)
{
DBUG_ENTER("THD::awake");
DBUG_PRINT("enter", ("this=0x%lx", this));
DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
THD_CHECK_SENTRY(this);
safe_mutex_assert_owner(&LOCK_delete);
@ -2623,9 +2623,9 @@ namespace {
return m_memory != 0;
}
byte *slot(int const s)
byte *slot(uint s)
{
DBUG_ASSERT(0 <= s && s < sizeof(m_ptr)/sizeof(*m_ptr));
DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr));
DBUG_ASSERT(m_ptr[s] != 0);
DBUG_ASSERT(m_alloc_checked == true);
return m_ptr[s];
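
Taking uint instead of int const in slot() removes two warnings at once: the old 0 <= s half of the assertion becomes tautological once s is unsigned (gcc notes that such a comparison is always true), and the remaining bound check now compares unsigned against the unsigned result of sizeof. Reduced to its essentials, with an invented array:

#include <cassert>
#include <cstddef>

static int m_ptr[4];

int *slot(unsigned s)
{
  /* With a signed parameter this was: assert(0 <= s && s < ...);
     the lower bound is meaningless for an unsigned s, so only the
     upper bound remains, and both sides of < are unsigned. */
  assert(s < sizeof(m_ptr) / sizeof(*m_ptr));
  return &m_ptr[s];
}

int main()
{
  *slot(2)= 7;
  assert(m_ptr[2] == 7);
  return 0;
}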


@ -367,9 +367,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
strlen(tables->alias) + 1)))
{
table= hash_tables->table;
DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' tab %p",
DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: 0x%lx",
hash_tables->db, hash_tables->table_name,
hash_tables->alias, table));
hash_tables->alias, (long) table));
if (!table)
{
/*
@ -633,7 +633,8 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
TABLE **table_ptr;
bool did_lock= FALSE;
DBUG_ENTER("mysql_ha_flush");
DBUG_PRINT("enter", ("tables: %p mode_flags: 0x%02x", tables, mode_flags));
DBUG_PRINT("enter", ("tables: 0x%lx mode_flags: 0x%02x",
(long) tables, mode_flags));
if (tables)
{


@ -1443,7 +1443,7 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc)
bool st_select_lex::add_item_to_list(THD *thd, Item *item)
{
DBUG_ENTER("st_select_lex::add_item_to_list");
DBUG_PRINT("info", ("Item: %p", item));
DBUG_PRINT("info", ("Item: 0x%lx", (long) item));
DBUG_RETURN(item_list.push_back(item));
}


@ -1604,7 +1604,7 @@ bool do_command(THD *thd)
command= COM_END; // Wrong command
DBUG_PRINT("info",("Command on %s = %d (%s)",
vio_description(net->vio), command,
command_name[command]));
command_name[command].str));
}
net->read_timeout=old_timeout; // restore it
/*
@ -1828,7 +1828,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char *packet_end= thd->query + thd->query_length;
/* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */
const char *format= "%.*b";
general_log.write(thd, command, format, thd->query_length, thd->query);
general_log_print(thd, command, format, thd->query_length, thd->query);
DBUG_PRINT("query",("%-.4096s",thd->query));
if (!(specialflag & SPECIAL_NO_PRIOR))


@ -4480,7 +4480,7 @@ that are reorganised.
{
if (!alt_part_info->use_default_partitions)
{
DBUG_PRINT("info", ("part_info= %x", tab_part_info));
DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
tab_part_info->use_default_partitions= FALSE;
}
tab_part_info->use_default_no_partitions= FALSE;


@ -1918,7 +1918,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
else
{
const char *format= "[%lu] %.*b";
general_log.write(thd, COM_STMT_PREPARE, format, stmt->id,
general_log_print(thd, COM_STMT_PREPARE, format, stmt->id,
stmt->query_length, stmt->query);
}
@ -2265,7 +2265,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
DBUG_VOID_RETURN;
DBUG_PRINT("exec_query", ("%s", stmt->query));
DBUG_PRINT("info",("stmt: %p", stmt));
DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
sp_cache_flush_obsolete(&thd->sp_proc_cache);
sp_cache_flush_obsolete(&thd->sp_func_cache);
@ -2305,9 +2305,9 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
if (error == 0)
{
const char *format= "[%lu] %.*b";
general_log.write(thd, COM_STMT_EXECUTE, format, stmt->id,
general_log_print(thd, COM_STMT_EXECUTE, format, stmt->id,
thd->query_length, thd->query);
}
DBUG_VOID_RETURN;
set_params_data_err:
@ -2360,7 +2360,7 @@ void mysql_sql_stmt_execute(THD *thd)
DBUG_VOID_RETURN;
}
DBUG_PRINT("info",("stmt: %p", stmt));
DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
/*
If the free_list is not empty, we'll wrongly free some externally
@ -2724,7 +2724,8 @@ void Prepared_statement::setup_set_params()
Prepared_statement::~Prepared_statement()
{
DBUG_ENTER("Prepared_statement::~Prepared_statement");
DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor));
DBUG_PRINT("enter",("stmt: 0x%lx cursor: 0x%lx",
(long) this, (long) cursor));
delete cursor;
/*
We have to call free on the items even if cleanup is called as some items,
@ -2745,7 +2746,7 @@ Query_arena::Type Prepared_statement::type() const
void Prepared_statement::cleanup_stmt()
{
DBUG_ENTER("Prepared_statement::cleanup_stmt");
DBUG_PRINT("enter",("stmt: %p", this));
DBUG_PRINT("enter",("stmt: 0x%lx", (long) this));
/* The order is important */
lex->unit.cleanup();


@ -3743,7 +3743,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table,
enum ha_extra_function function)
{
DBUG_ENTER("wait_while_table_is_used");
DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %u",
DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu",
table->s->table_name.str, (ulong) table->s,
table->db_stat, table->s->version));


@ -248,14 +248,15 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
if (join->best_read == DBL_MAX)
{
fprintf(DBUG_FILE,
"%s; idx:%u, best: DBL_MAX, atime: %g, itime: %g, count: %g\n",
info, idx, current_read_time, read_time, record_count);
"%s; idx: %u best: DBL_MAX atime: %g itime: %g count: %g\n",
info, idx, current_read_time, read_time, record_count);
}
else
{
fprintf(DBUG_FILE,
"%s; idx:%u, best: %g, accumulated: %g, increment: %g, count: %g\n",
info, idx, join->best_read, current_read_time, read_time, record_count);
"%s; idx :%u best: %g accumulated: %g increment: %g count: %g\n",
info, idx, join->best_read, current_read_time, read_time,
record_count);
}
/* Print the tables in JOIN->positions */


@ -1612,7 +1612,7 @@ Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string");
DBUG_PRINT("info", ("unknown key:%60s", unknown_key));
DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end &&
unknown_key[INVALID_SQL_MODES_LENGTH] == '=' &&
@ -1654,7 +1654,7 @@ process_unknown_string(char *&unknown_key, gptr base, MEM_ROOT *mem_root,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string");
DBUG_PRINT("info", ("unknown key:%60s", unknown_key));
DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end &&
unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' &&


@ -1339,7 +1339,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
Field **field_ptr;
DBUG_ENTER("open_table_from_share");
DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
share->table_name.str, outparam));
share->table_name.str, (long) outparam));
error= 1;
bzero((char*) outparam, sizeof(*outparam));
@ -2401,8 +2401,8 @@ table_check_intact(TABLE *table, const uint table_f_count,
my_bool error= FALSE;
my_bool fields_diff_count;
DBUG_ENTER("table_check_intact");
DBUG_PRINT("info",("table=%s expected_count=%d",table->alias, table_f_count));
DBUG_PRINT("info",("last_create_time=%d", *last_create_time));
DBUG_PRINT("info",("table: %s expected_count: %d last_create_time: %ld",
table->alias, table_f_count, *last_create_time));
if ((fields_diff_count= (table->s->fields != table_f_count)) ||
(*last_create_time != table->file->stats.create_time))


@ -2157,7 +2157,7 @@ error:
int ha_federated::index_init(uint keynr, bool sorted)
{
DBUG_ENTER("ha_federated::index_init");
DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr));
DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name.str, keynr));
active_index= keynr;
DBUG_RETURN(0);
}


@ -33,7 +33,8 @@ int mi_rsame_with_pos(MI_INFO *info, byte *record, int inx, my_off_t filepos)
DBUG_ENTER("mi_rsame_with_pos");
DBUG_PRINT("enter",("index: %d filepos: %ld", inx, (long) filepos));
if (inx < -1 || inx >= 0 && ! mi_is_key_active(info->s->state.key_map, inx))
if (inx < -1 ||
(inx >= 0 && ! mi_is_key_active(info->s->state.key_map, inx)))
{
DBUG_RETURN(my_errno=HA_ERR_WRONG_INDEX);
}
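
The added parentheses do not change the result: && already binds tighter than ||, so a || b && c parses as a || (b && c). gcc warns on the unparenthesized mix precisely because readers tend to guess the other grouping; spelling it out silences -Wparentheses. A compact demonstration:

#include <cassert>

int main()
{
  bool a= true, b= false, c= false;

  bool implicit_form= a || b && c;        /* draws -Wparentheses */
  bool explicit_form= a || (b && c);      /* identical meaning */
  bool wrong_guess=  (a || b) && c;       /* what a reader may assume */

  assert(implicit_form == explicit_form); /* both true */
  assert(wrong_guess == false);           /* different result */
  return 0;
}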


@ -137,6 +137,7 @@ extern "C" {
#define LINT_SET_PTR = {0,0}
#else
#define LINT_SET_PTR
#endif
#ifndef MIN
#define MIN(x,y) (((x)<(y))?(x):(y))


@ -106,7 +106,7 @@ inline NdbOut& dec(NdbOut& _NdbOut) {
return _NdbOut.setHexFormat(0);
}
extern "C"
void ndbout_c(const char * fmt, ...);
void ndbout_c(const char * fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
class FilteredNdbOut : public NdbOut {
public:
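
On gcc, ATTRIBUTE_FORMAT(printf, 1, 2) expands to __attribute__((format(printf, 1, 2))): argument 1 is the format string and type checking starts at argument 2, so a mismatched specifier in any ndbout_c() call becomes a compile-time -Wformat warning instead of silent vararg corruption. A standalone sketch of the mechanism; the wrapper name here is invented:

#include <cstdarg>
#include <cstdio>

#ifdef __GNUC__
#define MY_ATTRIBUTE_FORMAT(style, fmt_idx, arg_idx) \
  __attribute__((format(style, fmt_idx, arg_idx)))
#else
#define MY_ATTRIBUTE_FORMAT(style, fmt_idx, arg_idx)
#endif

void my_trace(const char *fmt, ...) MY_ATTRIBUTE_FORMAT(printf, 1, 2);

void my_trace(const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
}

int main()
{
  my_trace("table_id: %lu\n", 42UL);  /* checked, ok */
  /* my_trace("table_id: %d\n", 42UL);   would now warn at compile time */
  return 0;
}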


@ -153,7 +153,6 @@ public:
ValueType m_type;
protected:
Reader();
virtual ~Reader() {}
virtual void reset() = 0;
virtual bool step(Uint32 len) = 0;
@ -168,7 +167,6 @@ public:
class Writer {
public:
Writer() {}
virtual ~Writer() {}
bool first();
bool add(Uint16 key, Uint32 value);
@ -192,7 +190,6 @@ public:
SimplePropertiesLinearReader(const Uint32 * src, Uint32 len);
virtual ~SimplePropertiesLinearReader() {}
virtual ~SimplePropertiesLinearReader() {}
virtual void reset();
virtual bool step(Uint32 len);
virtual bool getWord(Uint32 * dst);
@ -230,7 +227,6 @@ public:
UtilBufferWriter(class UtilBuffer & buf);
virtual ~UtilBufferWriter() {}
virtual ~UtilBufferWriter() {}
virtual bool reset();
virtual bool putWord(Uint32 val);
virtual bool putWords(const Uint32 * src, Uint32 len);
@ -284,7 +280,6 @@ public:
SimplePropertiesSectionWriter(class SectionSegmentPool &);
virtual ~SimplePropertiesSectionWriter() {}
virtual ~SimplePropertiesSectionWriter() {}
virtual bool reset();
virtual bool putWord(Uint32 val);
virtual bool putWords(const Uint32 * src, Uint32 len);


@ -2118,7 +2118,7 @@ Backup::execDROP_TRIG_REF(Signal* signal)
BackupRecordPtr ptr LINT_SET_PTR;
c_backupPool.getPtr(ptr, ptrI);
if(ref->getConf()->getTriggerId() != -1)
if(ref->getConf()->getTriggerId() != ~(Uint32) 0)
{
ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId();
ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl;
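
getTriggerId() returns an unsigned value, so comparing it against the literal -1 drags a signed operand into the comparison and gcc emits a sign-compare warning; ~(Uint32)0 names the same all-ones bit pattern with no conversion involved. The idiom recurs in several hunks below (maxLcpIndex, localkey). A self-contained check, with Uint32 assumed to be a 32-bit unsigned typedef as in NDB:

#include <cassert>

typedef unsigned int Uint32;   /* stand-in for NDB's Uint32 */

int main()
{
  Uint32 trigger_id= ~(Uint32) 0;   /* "no trigger" marker */

  /* ~(Uint32)0 is 0xFFFFFFFF, the value -1 would convert to anyway,
     but written this way no signed operand enters the comparison. */
  assert(trigger_id == 0xFFFFFFFFu);
  assert(trigger_id == ~(Uint32) 0);
  return 0;
}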


@ -1287,7 +1287,7 @@ Cmvmi::execTESTSIG(Signal* signal){
fprintf(stdout, "\n");
for(i = 0; i<signal->header.m_noOfSections; i++){
SegmentedSectionPtr ptr = {0,0,0};
SegmentedSectionPtr ptr;
ndbout_c("-- Section %d --", i);
signal->getSection(ptr, i);
ndbrequire(ptr.p != 0);
@ -1345,7 +1345,7 @@ Cmvmi::execTESTSIG(Signal* signal){
LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections();
for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr = {0,0,0};
SegmentedSectionPtr sptr;
signal->getSection(sptr, i);
ptr[i].sz = sptr.sz;
ptr[i].p = new Uint32[sptr.sz];
@ -1394,7 +1394,7 @@ Cmvmi::execTESTSIG(Signal* signal){
LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections();
for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr = {0,0,0};
SegmentedSectionPtr sptr;
signal->getSection(sptr, i);
ptr[i].sz = sptr.sz;
ptr[i].p = new Uint32[sptr.sz];
@ -1460,7 +1460,7 @@ Cmvmi::execTESTSIG(Signal* signal){
const Uint32 secs = signal->getNoOfSections();
memset(g_test, 0, sizeof(g_test));
for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr = {0,0,0};
SegmentedSectionPtr sptr;
signal->getSection(sptr, i);
g_test[i].sz = sptr.sz;
g_test[i].p = new Uint32[sptr.sz];


@ -971,10 +971,10 @@ void Dbacc::initOpRec(Signal* signal)
Uint32 opbits = 0;
opbits |= Treqinfo & 0x7;
opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_LOCK_MODE : 0;
opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_ACC_LOCK_MODE : 0;
opbits |= (dirtyReadFlag) ? Operationrec::OP_DIRTY_READ : 0;
opbits |= ((Treqinfo >> 31) & 0x1) ? Operationrec::OP_LOCK_REQ : 0;
opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0;
opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0;
opbits |= (dirtyReadFlag) ? (Uint32) Operationrec::OP_DIRTY_READ : 0;
opbits |= ((Treqinfo >> 31) & 0x1) ? (Uint32) Operationrec::OP_LOCK_REQ : 0;
//operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3;
operationRecPtr.p->fid = fragrecptr.p->myfid;
@ -6947,10 +6947,10 @@ void Dbacc::initScanOpRec(Signal* signal)
Uint32 opbits = 0;
opbits |= ZSCAN_OP;
opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_LOCK_MODE : 0;
opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_ACC_LOCK_MODE : 0;
opbits |= scanPtr.p->scanReadCommittedFlag ?
Operationrec::OP_EXECUTED_DIRTY_READ : 0;
opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_LOCK_MODE : 0;
opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0;
opbits |= (scanPtr.p->scanReadCommittedFlag ?
(Uint32) Operationrec::OP_EXECUTED_DIRTY_READ : 0);
opbits |= Operationrec::OP_COMMIT_DELETE_CHECK;
operationRecPtr.p->userptr = RNIL;
operationRecPtr.p->scanRecPtr = scanPtr.i;
@ -7700,6 +7700,7 @@ void Dbacc::putOverflowRecInFrag(Signal* signal)
OverflowRecordPtr tpifPrevOverrecPtr;
tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec;
LINT_INIT(tpifPrevOverrecPtr.p);
tpifPrevOverrecPtr.i = RNIL;
while (tpifNextOverrecPtr.i != RNIL) {
ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord);
@ -7749,6 +7750,7 @@ void Dbacc::putRecInFreeOverdir(Signal* signal)
OverflowRecordPtr tpfoPrevOverrecPtr;
tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec;
LINT_INIT(tpfoPrevOverrecPtr.p);
tpfoPrevOverrecPtr.i = RNIL;
while (tpfoNextOverrecPtr.i != RNIL) {
ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord);


@ -189,7 +189,7 @@ struct {
&Dbdict::drop_undofile_prepare_start, 0,
0,
0, 0,
0, 0
0, 0, 0
}
};


@ -2909,7 +2909,7 @@ Dbdih::nr_start_fragment(Signal* signal,
}
}
if (maxLcpIndex == ~0)
if (maxLcpIndex == ~ (Uint32) 0)
{
ndbout_c("Didnt find any LCP for node: %d tab: %d frag: %d",
takeOverPtr.p->toStartingNode,
@ -5968,6 +5968,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
break;
default:
ndbrequire(false);
lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
}//switch
Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId;
@ -6892,6 +6893,8 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
Uint32 align;
};
SegmentedSectionPtr fragDataPtr;
LINT_INIT(fragDataPtr.i);
LINT_INIT(fragDataPtr.sz);
signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
copy((Uint32*)fragments, fragDataPtr);
releaseSections(signal);
@ -6981,7 +6984,9 @@ Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr,
TabRecordPtr tabPtr, Uint32 fragId){
jam();
const Uint32 fragCount = tabPtr.p->totalfragments;
ReplicaRecordPtr replicaPtr; replicaPtr.i = RNIL;
ReplicaRecordPtr replicaPtr;
LINT_INIT(replicaPtr.p);
replicaPtr.i = RNIL;
FragmentstorePtr fragPtr;
for(; fragId<fragCount; fragId++){
jam();
@ -7541,7 +7546,11 @@ void Dbdih::execDI_FCOUNTREQ(Signal* signal)
if(connectPtr.i == RNIL)
ref->m_connectionData = RNIL;
else
{
jam();
ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
ref->m_connectionData = connectPtr.p->userpointer;
}
ref->m_tableRef = tabPtr.i;
ref->m_senderData = senderData;
ref->m_error = DihFragCountRef::ErroneousTableState;
@ -11443,6 +11452,7 @@ Dbdih::findBestLogNode(CreateReplicaRecord* createReplica,
{
ConstPtr<ReplicaRecord> fblFoundReplicaPtr;
ConstPtr<ReplicaRecord> fblReplicaPtr;
LINT_INIT(fblFoundReplicaPtr.p);
/* --------------------------------------------------------------------- */
/* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */
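
LINT_INIT() gives a variable a dummy initial value only in analyzer builds (guarded by the same #if defined(_lint) || defined(FORCE_INIT_OF_VARS) test that appears in the Ndbd_mem_manager hunk below), so "may be used uninitialized" reports go away for variables that are in fact always written before use, at zero cost in normal builds. A hedged sketch with the macro spelled out locally; NDB's actual definition may differ in detail:

#include <cstdio>

#if defined(_lint) || defined(FORCE_INIT_OF_VARS)
#define LINT_INIT(var) var= 0   /* dummy value for the analyzer */
#else
#define LINT_INIT(var)          /* no-op in normal builds */
#endif

int lookup(int key)
{
  int result;          /* always set in the loop below before use ... */
  LINT_INIT(result);   /* ... but the compiler cannot always prove it */

  for (int i= 0; i < 4; i++)
    if (i == (key & 3))
      result= i * 10;

  return result;
}

int main()
{
  printf("%d\n", lookup(6));   /* prints 20 */
  return 0;
}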


@ -3417,9 +3417,9 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
}
else
{
regTcPtr->operation = op == ZREAD_EX ? ZREAD : op;
regTcPtr->operation = (Operation_t) op == ZREAD_EX ? ZREAD : (Operation_t) op;
regTcPtr->lockType =
op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op;
op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op;
}
CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
@ -18520,7 +18520,7 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
do
{
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage",
ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage %d",
logFilePtr.p->fileNo,
logFilePtr.i,
logFilePtr.p->fileChangeState,


@ -3194,7 +3194,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
if (unlikely(version < NDBD_ROWID_VERSION))
{
Uint32 op = regTcPtr->operation;
Uint32 lock = op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op;
Uint32 lock = (Operation_t) op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op;
LqhKeyReq::setLockType(Tdata10, lock);
}
/* ---------------------------------------------------------------------- */
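
In both the Dblqh and Dbtc hunks the branches of ?: mixed an enum value (ZUPDATE, ZINSERT, ...) with a plain integer, which g++ reports as an enumeral/non-enumeral conditional expression; casting one branch gives the whole expression a single type. A reduced illustration with invented enumerators:

#include <cstdio>

enum Operation_t { OP_READ= 0, OP_READ_EX= 1, OP_UPDATE= 2 };

int main()
{
  unsigned op= OP_READ_EX;      /* arrives as a plain integer */

  /* Mixing enum and unsigned branches draws the warning:
     unsigned lock= op == OP_READ_EX ? OP_UPDATE : op;            */

  /* Casting makes both branches the same type: */
  unsigned lock= op == OP_READ_EX ? (unsigned) OP_UPDATE : op;

  printf("lock: %u\n", lock);   /* prints 2 */
  return 0;
}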


@ -43,7 +43,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
getFragmentrec(regFragPtr, frag_id, regTabPtr.p);
ndbassert(regFragPtr.p != NULL);
if (! (((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~0))
if (! (((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~ (Uint32) 0))
{
Local_key tmp;
tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id);


@ -82,7 +82,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc)
{
ndbout << ptr << " ";
}
ndbout_c("");
ndbout_c(" ");
}
ndbout_c("page requests");
for(Uint32 i = 0; i<MAX_FREE_LIST; i++)
@ -95,7 +95,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc)
{
ndbout << ptr << " ";
}
ndbout_c("");
ndbout_c(" ");
}
ndbout_c("Extent matrix");
@ -108,7 +108,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc)
{
ndbout << ptr << " ";
}
ndbout_c("");
ndbout_c(" ");
}
if (alloc.m_curr_extent_info_ptr_i != RNIL)
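
Replacing ndbout_c("") with ndbout_c(" ") sidesteps gcc's zero-length-format-string warning, presumably enabled here by the printf format attribute that the NdbOut.hpp hunk above adds to ndbout_c(); the same one-space workaround appears again in the TAP test change near the end of this changeset. Minimal form, with an invented trace function:

#include <cstdarg>
#include <cstdio>

#ifdef __GNUC__
__attribute__((format(printf, 1, 2)))
#endif
void trace(const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  vprintf(fmt, ap);
  va_end(ap);
  putchar('\n');   /* ndbout_c ends the line as well */
}

int main()
{
  /* trace("");   gcc -Wformat: zero-length printf format string */
  trace(" ");     /* one space: same visible effect, no warning */
  return 0;
}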


@ -684,7 +684,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal)
copyAttrinfo(regOperPtr, &cinBuffer[0]);
Uint32 localkey = (pageid << MAX_TUPLES_BITS) + pageidx;
if(Roptype == ZINSERT && localkey == ~0)
if (Roptype == ZINSERT && localkey == ~ (Uint32) 0)
{
// No tuple allocatated yet
goto do_insert;


@ -284,4 +284,5 @@ Dbtup::alloc_fix_rowid(Fragrecord* regFragPtr,
case ZEMPTY_MM:
ndbrequire(false);
}
return 0; /* purify: deadcode */
}
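
ndbrequire(false) aborts at runtime, but the compiler cannot know that, so a non-void function ending in it still triggers "control reaches end of non-void function"; the unreachable return 0, flagged as dead code for Purify, exists only to satisfy that check. The same pattern is applied to RWPool::getPtr and WOPool::getPtr below. A minimal reproduction with a stand-in for ndbrequire:

#include <cstdio>
#include <cstdlib>

static void require_failed() { abort(); }   /* ndbrequire(false) stand-in */

int find_slot(int key)
{
  for (int i= 0; i < 8; i++)
    if (i == key)
      return i;

  require_failed();
  return 0; /* unreachable; silences "control reaches end of non-void" */
}

int main()
{
  printf("%d\n", find_slot(3));   /* prints 3 */
  return 0;
}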


@ -1809,11 +1809,11 @@ Lgman::execLCP_FRAG_ORD(Signal* signal)
if(0)
ndbout_c
("execLCP_FRAG_ORD (%d %d) (%d %d) (%d %d) free pages: %d",
("execLCP_FRAG_ORD (%d %d) (%d %d) (%d %d) free pages: %ld",
ptr.p->m_tail_pos[0].m_ptr_i, ptr.p->m_tail_pos[0].m_idx,
ptr.p->m_tail_pos[1].m_ptr_i, ptr.p->m_tail_pos[1].m_idx,
ptr.p->m_tail_pos[2].m_ptr_i, ptr.p->m_tail_pos[2].m_idx,
(ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS));
(long) (ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS));
}
m_logfile_group_list.next(ptr);
}


@ -655,7 +655,7 @@ Ndbfs::createAsyncFile(){
// Print info about all open files
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
ndbout_c("%2d (0x%x): %s", i, file, file->isOpen()?"OPEN":"CLOSED");
ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED");
}
ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile");
}
@ -1130,7 +1130,7 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal)
ndbout << "All files: " << endl;
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
ndbout_c("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED");
ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED");
}
}
}//Ndbfs::execDUMP_STATE_ORD()


@ -1188,7 +1188,7 @@ Pgman::process_lcp(Signal* signal)
pl_hash.next(m_lcp_curr_bucket, iter);
Uint32 loop = 0;
while (iter.curr.i != RNIL &&
m_lcp_outstanding < max_count &&
m_lcp_outstanding < (Uint32) max_count &&
(loop ++ < 32 || iter.bucket == m_lcp_curr_bucket))
{
Ptr<Page_entry>& ptr = iter.curr;
@ -2324,7 +2324,7 @@ Pgman::execDUMP_STATE_ORD(Signal* signal)
if (signal->theData[0] == 11004)
{
ndbout << "Dump LCP bucket m_lcp_outstanding: %d", m_lcp_outstanding;
ndbout << "Dump LCP bucket m_lcp_outstanding: " << m_lcp_outstanding;
if (m_lcp_curr_bucket != ~(Uint32)0)
{
Page_hashlist::Iterator iter;
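
The Dump LCP line above is a genuine bug, not just a warning: ndbout is a C++ stream, so the printf-style "%d" is printed literally and the comma operator evaluates and discards m_lcp_outstanding. The fix streams the value instead. The same mistake in miniature:

#include <iostream>

int main()
{
  int outstanding= 7;

  /* Old form: compiles, prints the literal "%d", and the comma
     operator throws the value away (gcc: -Wunused-value). */
  std::cout << "m_lcp_outstanding: %d", outstanding;
  std::cout << '\n';

  /* Fixed form: stream the value. */
  std::cout << "m_lcp_outstanding: " << outstanding << '\n';
  return 0;
}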


@ -1137,7 +1137,7 @@ Restore::reorder_key(const KeyDescriptor* desc,
}
dst += sz;
}
ndbassert((dst - Tmp) == len);
ndbassert((Uint32) (dst - Tmp) == len);
memcpy(data, Tmp, 4*len);
}


@ -1590,6 +1590,9 @@ Suma::execGET_TABINFOREF(Signal* signal){
break;
case GetTabInfoRef::TableNameTooLong:
ndbrequire(false);
break;
case GetTabInfoRef::NoFetchByName:
break;
}
if (do_resend_request)
{
@ -4306,7 +4309,7 @@ Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr,
// restarting suma will not respond to this until startphase 5
// since it is not until then data copying has been completed
DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u]",
DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u] %u",
subbPtr.i,
subPtr.p->m_subscriptionId,
subPtr.p->m_subscriptionKey,


@ -191,7 +191,7 @@ Configuration::init(int argc, char** argv)
}
if (! (val > 0 && val < MAX_NDB_NODES))
{
ndbout_c("Invalid nodeid specified in nowait-nodes: %d : %s",
ndbout_c("Invalid nodeid specified in nowait-nodes: %ld : %s",
val, _nowait_nodes);
exit(-1);
}


@ -287,6 +287,7 @@ DLHashTableImpl<P, T, U>::remove(Ptr<T> & ptr, const T & key)
Uint32 i;
T * p;
Ptr<T> prev;
LINT_INIT(prev.p);
prev.i = RNIL;
i = hashValues[hv];


@ -70,6 +70,7 @@ RWPool::getPtr(Uint32 i)
return record;
}
handle_invalid_get_ptr(i);
return 0; /* purify: deadcode */
}
#endif


@ -1930,6 +1930,7 @@ SimulatedBlock::xfrm_attr(Uint32 attrDesc, CHARSET_INFO* cs,
{
jam();
Uint32 len;
LINT_INIT(len);
switch(array){
case NDB_ARRAYTYPE_SHORT_VAR:
len = 1 + srcPtr[0];


@ -115,6 +115,7 @@ WOPool::getPtr(Uint32 i)
return record;
}
handle_invalid_get_ptr(i);
return 0; /* purify: deadcode */
}
#endif


@ -223,6 +223,10 @@ Ndbd_mem_manager::init(bool alloc_less_memory)
InitChunk chunk;
Uint32 remaining = pages - allocated;
#if defined(_lint) || defined(FORCE_INIT_OF_VARS)
memset((char*) &chunk, 0 , sizeof(chunk));
#endif
if (do_malloc(pages - allocated, &chunk))
{
Uint32 i = 0;


@ -1558,6 +1558,8 @@ CommandInterpreter::executeShow(char* parameters)
case NDB_MGM_NODE_TYPE_UNKNOWN:
ndbout << "Error: Unknown Node Type" << endl;
return -1;
case NDB_MGM_NODE_TYPE_MAX:
break; /* purify: deadcode */
}
}
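
This added case, like the GetTabInfoRef::NoFetchByName case in Suma and the MasterLCPConf::LCP_STATUS_IDLE default assignment in Dbdih above, exists so that gcc's -Wswitch sees every enumerator of the switched-on type handled, even values that cannot occur at that point. The shape of the warning and the fix, with invented enumerators:

#include <cstdio>

enum NodeType { NODE_API, NODE_NDB, NODE_MGM, NODE_TYPE_MAX };

const char *describe(NodeType t)
{
  switch (t) {
  case NODE_API: return "api";
  case NODE_NDB: return "ndb";
  case NODE_MGM: return "mgm";
  /* Without this label gcc warns: "enumeration value 'NODE_TYPE_MAX'
     not handled in switch", even though it never occurs at runtime. */
  case NODE_TYPE_MAX:
    break; /* deadcode */
  }
  return "unknown";
}

int main()
{
  printf("%s\n", describe(NODE_NDB));
  return 0;
}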


@ -2495,7 +2495,7 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
const BackupCompleteRep * const rep =
CAST_CONSTPTR(BackupCompleteRep, signal->getDataPtr());
#ifdef VM_TRACE
ndbout_c("Backup(%d) completed %d", rep->backupId);
ndbout_c("Backup(%d) completed", rep->backupId);
#endif
event.Event = BackupEvent::BackupCompleted;
event.Completed.BackupId = rep->backupId;
@ -2751,7 +2751,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
break;
case 1:
res = i2.set(param, val_64);
ndbout_c("Updating node %d param: %d to %Ld", node, param, val_32);
ndbout_c("Updating node %d param: %d to %u", node, param, val_32);
break;
case 2:
res = i2.set(param, val_char);


@ -417,7 +417,7 @@ GlobalDictCache::alter_table_rep(const char * name,
{
TableVersion & ver = (* vers)[i];
if(ver.m_version == tableVersion && ver.m_impl &&
ver.m_impl->m_id == tableId)
(Uint32) ver.m_impl->m_id == tableId)
{
ver.m_status = DROPPED;
ver.m_impl->m_status = altered ?


@ -3583,7 +3583,7 @@ NdbDictInterface::createEvent(class Ndb & ndb,
evnt.mi_type = evntConf->getEventType();
evnt.setTable(dataPtr);
} else {
if (evnt.m_tableImpl->m_id != evntConf->getTableId() ||
if ((Uint32) evnt.m_tableImpl->m_id != evntConf->getTableId() ||
evnt.m_tableImpl->m_version != evntConf->getTableVersion() ||
//evnt.m_attrListBitmask != evntConf->getAttrListBitmask() ||
evnt.mi_type != evntConf->getEventType()) {
@ -3701,7 +3701,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
DBUG_RETURN(NULL);
}
if ((tab->m_status != NdbDictionary::Object::Retrieved) ||
(tab->m_id != ev->m_table_id) ||
((Uint32) tab->m_id != ev->m_table_id) ||
(table_version_major(tab->m_version) !=
table_version_major(ev->m_table_version)))
{
@ -3731,7 +3731,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
DBUG_PRINT("info",("Table: id: %d version: %d",
table.m_id, table.m_version));
if (table.m_id != ev->m_table_id ||
if ((Uint32) table.m_id != ev->m_table_id ||
table_version_major(table.m_version) !=
table_version_major(ev->m_table_version))
{
@ -3747,7 +3747,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
#endif
if ( attributeList_sz > table.getNoOfColumns() )
if ( attributeList_sz > (uint) table.getNoOfColumns() )
{
m_error.code = 241;
DBUG_PRINT("error",("Invalid version, too many columns"));
@ -3757,7 +3757,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab)
assert( (int)attributeList_sz <= table.getNoOfColumns() );
for(unsigned id= 0; ev->m_columns.size() < attributeList_sz; id++) {
if ( id >= table.getNoOfColumns())
if ( id >= (uint) table.getNoOfColumns())
{
m_error.code = 241;
DBUG_PRINT("error",("Invalid version, column %d out of range", id));


@ -58,7 +58,7 @@ print_std(const SubTableData * sdata, LinearSectionPtr ptr[3])
SubTableData::getOperation(sdata->requestInfo));
for (int i = 0; i <= 2; i++) {
printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz);
for (int j = 0; j < ptr[i].sz; j++)
for (int j = 0; (uint) j < ptr[i].sz; j++)
printf("%08x ", ptr[i].p[j]);
printf("\n");
}
@ -199,11 +199,11 @@ NdbEventOperationImpl::init(NdbEventImpl& evnt)
m_mergeEvents = false;
#endif
m_ref_count = 0;
DBUG_PRINT("info", ("m_ref_count = 0 for op: %p", this));
DBUG_PRINT("info", ("m_ref_count = 0 for op: 0x%lx", (long) this));
m_has_error= 0;
DBUG_PRINT("exit",("this: 0x%x oid: %u", this, m_oid));
DBUG_PRINT("exit",("this: 0x%lx oid: %u", (long) this, m_oid));
DBUG_VOID_RETURN;
}
@ -739,8 +739,8 @@ NdbEventOperationImpl::receive_event()
NdbTableImpl *tmp_table_impl= m_eventImpl->m_tableImpl;
m_eventImpl->m_tableImpl = at;
DBUG_PRINT("info", ("switching table impl 0x%x -> 0x%x",
tmp_table_impl, at));
DBUG_PRINT("info", ("switching table impl 0x%lx -> 0x%lx",
(long) tmp_table_impl, (long) at));
// change the rec attrs to refer to the new table object
int i;
@ -751,9 +751,9 @@ NdbEventOperationImpl::receive_event()
{
int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x "
"switching column impl 0x%x -> 0x%x",
p, p->m_column, tAttrInfo));
DBUG_PRINT("info", ("rec_attr: 0x%lx "
"switching column impl 0x%lx -> 0x%lx",
(long) p, (long) p->m_column, (long) tAttrInfo));
p->m_column = tAttrInfo;
p = p->next();
}
@ -765,9 +765,9 @@ NdbEventOperationImpl::receive_event()
{
int no = p->getColumn()->getColumnNo();
NdbColumnImpl *tAttrInfo = at->getColumn(no);
DBUG_PRINT("info", ("rec_attr: 0x%x "
"switching column impl 0x%x -> 0x%x",
p, p->m_column, tAttrInfo));
DBUG_PRINT("info", ("rec_attr: 0x%lx "
"switching column impl 0x%lx -> 0x%lx",
(long) p, (long) p->m_column, (long) tAttrInfo));
p->m_column = tAttrInfo;
p = p->next();
}
@ -1269,8 +1269,9 @@ NdbEventBuffer::getGCIEventOperations(Uint32* iter, Uint32* event_types)
EventBufData_list::Gci_op g = gci_ops->m_gci_op_list[(*iter)++];
if (event_types != NULL)
*event_types = g.event_types;
DBUG_PRINT("info", ("gci: %d g.op: %x g.event_types: %x",
(unsigned)gci_ops->m_gci, g.op, g.event_types));
DBUG_PRINT("info", ("gci: %u g.op: 0x%lx g.event_types: 0x%lx",
(unsigned)gci_ops->m_gci, (long) g.op,
(long) g.event_types));
DBUG_RETURN(g.op);
}
DBUG_RETURN(NULL);
@ -1563,8 +1564,8 @@ NdbEventBuffer::complete_outof_order_gcis()
#endif
m_complete_data.m_data.append_list(&bucket->m_data, start_gci);
#ifdef VM_TRACE
ndbout_c(" moved %lld rows -> %lld", bucket->m_data.m_count,
m_complete_data.m_data.m_count);
ndbout_c(" moved %ld rows -> %ld", (long) bucket->m_data.m_count,
(long) m_complete_data.m_data.m_count);
#else
ndbout_c("");
#endif
@ -2180,7 +2181,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata,
Ev_t* tp = 0;
int i;
for (i = 0; i < sizeof(ev_t)/sizeof(ev_t[0]); i++) {
for (i = 0; (uint) i < sizeof(ev_t)/sizeof(ev_t[0]); i++) {
if (ev_t[i].t1 == t1 && ev_t[i].t2 == t2) {
tp = &ev_t[i];
break;


@ -64,6 +64,9 @@ NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex,
case(NdbDictionary::Index::OrderedIndex):
setErrorCodeAbort(4003);
return -1;
default:
DBUG_ASSERT(0);
break;
}
m_theIndex = anIndex;
m_accessTable = anIndex->m_table;


@ -236,7 +236,7 @@ NdbIndexStat::stat_search(const Area& a, const Uint32* key, Uint32 keylen, Uint3
int
NdbIndexStat::stat_oldest(const Area& a)
{
Uint32 i, k, m;
Uint32 i, k= 0, m;
bool found = false;
m = ~(Uint32)0; // shut up incorrect CC warning
for (i = 0; i < a.m_entries; i++) {


@ -1091,53 +1091,61 @@ NdbOperation::branch_col(Uint32 type,
int
NdbOperation::branch_col_eq(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::EQ, ColId, val, len, nopad, Label);
}
int
NdbOperation::branch_col_ne(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::NE, ColId, val, len, nopad, Label);
}
int
NdbOperation::branch_col_lt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::LT, ColId, val, len, nopad, Label);
}
int
NdbOperation::branch_col_le(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::LE, ColId, val, len, nopad, Label);
}
int
NdbOperation::branch_col_gt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::GT, ColId, val, len, nopad, Label);
}
int
NdbOperation::branch_col_ge(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::GE, ColId, val, len, nopad, Label);
}
int
NdbOperation::branch_col_like(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::LIKE, ColId, val, len, nopad, Label);
}
int
NdbOperation::branch_col_notlike(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId,len,val,len,nopad,Label));
INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len,
nopad, Label));
return branch_col(Interpreter::NOT_LIKE, ColId, val, len, nopad, Label);
}


@ -372,7 +372,12 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
j = length;
}
break;
unknown:
case NdbDictionary::Column::Undefined:
case NdbDictionary::Column::Mediumint:
case NdbDictionary::Column::Mediumunsigned:
case NdbDictionary::Column::Longvarbinary:
unknown:
//default: /* no print functions for the rest, just print type */
out << (int) r.getType();
j = length;


@ -181,7 +181,8 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
}
bool rangeScan = false;
if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex)
if ( (int) m_accessTable->m_indexType ==
(int) NdbDictionary::Index::OrderedIndex)
{
if (m_currentTable == m_accessTable){
// Old way of scanning indexes, should not be allowed
@ -588,7 +589,7 @@ err4:
theNdbCon->theTransactionIsStarted = false;
theNdbCon->theReleaseOnClose = true;
if(DEBUG_NEXT_RESULT) ndbout_c("return -1", retVal);
if(DEBUG_NEXT_RESULT) ndbout_c("return %d", retVal);
return -1;
}


@ -84,7 +84,7 @@ NdbObjectIdMap::map(void * object){
// unlock();
DBUG_PRINT("info",("NdbObjectIdMap::map(0x%x) %u", object, ff<<2));
DBUG_PRINT("info",("NdbObjectIdMap::map(0x%lx) %u", (long) object, ff<<2));
return ff<<2;
}
@ -102,14 +102,16 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){
m_map[i].m_next = m_firstFree;
m_firstFree = i;
} else {
ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj);
DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%x) obj=0x%x", id, object, obj));
ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%lx) obj=0x%lx",
id, (long) object, (long) obj);
DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx",
id, (long) object, (long) obj));
return 0;
}
// unlock();
DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj));
DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%lx", id, (long) obj));
return obj;
}


@ -131,7 +131,7 @@ int desc_logfilegroup(Ndb *myndb, char* name)
assert(dict);
NdbDictionary::LogfileGroup lfg= dict->getLogfileGroup(name);
NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none)
if( (int) err.classification != (int) ndberror_cl_none)
return 0;
ndbout << "Type: LogfileGroup" << endl;
@ -153,7 +153,7 @@ int desc_tablespace(Ndb *myndb, char* name)
assert(dict);
NdbDictionary::Tablespace ts= dict->getTablespace(name);
NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none)
if ((int) err.classification != (int) ndberror_cl_none)
return 0;
ndbout << "Type: Tablespace" << endl;
@ -175,11 +175,11 @@ int desc_undofile(Ndb_cluster_connection &con, Ndb *myndb, char* name)
con.init_get_next_node(iter);
while(id= con.get_next_node(iter))
while ((id= con.get_next_node(iter)))
{
NdbDictionary::Undofile uf= dict->getUndofile(0, name);
NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none)
if ((int) err.classification != (int) ndberror_cl_none)
return 0;
ndbout << "Type: Undofile" << endl;
@ -211,11 +211,11 @@ int desc_datafile(Ndb_cluster_connection &con, Ndb *myndb, char* name)
con.init_get_next_node(iter);
while(id= con.get_next_node(iter))
while ((id= con.get_next_node(iter)))
{
NdbDictionary::Datafile df= dict->getDatafile(id, name);
NdbError err= dict->getNdbError();
if(err.classification!=ndberror_cl_none)
if ((int) err.classification != (int) ndberror_cl_none)
return 0;
ndbout << "Type: Datafile" << endl;
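
The doubled parentheses in while ((id= con.get_next_node(iter))) tell gcc that the assignment inside the condition is intentional rather than a mistyped ==; without them -Wparentheses suggests parentheses around an assignment used as a truth value. A small sketch with a made-up iterator:

#include <cstdio>

static unsigned next_id()   /* yields 3, 2, 1, then 0 to stop */
{
  static unsigned n= 4;
  return n ? --n : 0;
}

int main()
{
  unsigned id;

  /* while (id= next_id())   -- draws the -Wparentheses suggestion */
  while ((id= next_id()))    /* extra parens document the intent */
    printf("id: %u\n", id);
  return 0;
}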


@ -300,7 +300,13 @@ RestoreMetaData::markSysTables()
strcmp(tableName, "NDB$EVENTS_0") == 0 ||
strcmp(tableName, "sys/def/SYSTAB_0") == 0 ||
strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0 ||
/*
The following is for old MySQL versions,
before we changed the database name of the tables from
"cluster_replication" -> "cluster" -> "mysql"
*/
strcmp(tableName, "cluster_replication/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, "cluster/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 )
table->isSysTable = true;


@ -494,7 +494,7 @@ BackupRestore::object(Uint32 type, const void * ptr)
NdbDictionary::Tablespace curr = dict->getTablespace(old.getName());
NdbError errobj = dict->getNdbError();
if(errobj.classification == ndberror_cl_none)
if ((int) errobj.classification == (int) ndberror_cl_none)
{
NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr);
NdbDictionary::Tablespace * null = 0;
@ -533,7 +533,7 @@ BackupRestore::object(Uint32 type, const void * ptr)
NdbDictionary::LogfileGroup curr = dict->getLogfileGroup(old.getName());
NdbError errobj = dict->getNdbError();
if(errobj.classification == ndberror_cl_none)
if ((int) errobj.classification == (int) ndberror_cl_none)
{
NdbDictionary::LogfileGroup* currptr =
new NdbDictionary::LogfileGroup(curr);
@ -680,7 +680,7 @@ BackupRestore::table(const TableS & table){
return true;
const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
if(tmptab.m_indexType != NdbDictionary::Index::Undefined){
if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){
m_indexes.push_back(table.m_dictTable);
return true;
}


@ -7,7 +7,7 @@
int main() {
plan(5);
ok(1 == 1, "testing basic functions");
ok(2 == 2, "");
ok(2 == 2, " ");
ok(3 == 3, NULL);
if (1 == 1)
skip(2, "Sensa fragoli");


@ -235,6 +235,7 @@ skip(int how_many, char const *const fmt, ...)
while (how_many-- > 0)
{
va_list ap;
memset((char*) &ap, 0, sizeof(ap)); /* Keep compiler happy */
vemit_tap(1, NULL, ap);
emit_dir("skip", reason);
emit_endl();
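
The memset here only placates the compiler: when skip() forwards a NULL format, vemit_tap() never consumes the va_list, but gcc cannot see that and reports that ap may be used uninitialized. Zero-filling it costs nothing and gives it a defined value. A loose model of the situation; the callee below is invented and deliberately never reads ap when fmt is NULL:

#include <cstdarg>
#include <cstdio>
#include <cstring>

static void vemit(const char *fmt, va_list ap)
{
  if (fmt)
    vprintf(fmt, ap);   /* ap only touched when there is a format */
  else
    printf("ok");
  putchar('\n');
}

int main()
{
  va_list ap;
  /* Never consumed (fmt is NULL below), but zero-filled so the
     "may be used uninitialized" warning goes away. */
  memset((char*) &ap, 0, sizeof(ap));
  vemit(NULL, ap);
  return 0;
}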