commit 015cd1cd21
merge

27 changed files with 289 additions and 286 deletions

@@ -45,9 +45,9 @@ enum options_client
OPT_COMPATIBLE, OPT_RECONNECT, OPT_DELIMITER, OPT_SECURE_AUTH,
OPT_OPEN_FILES_LIMIT, OPT_SET_CHARSET, OPT_CREATE_OPTIONS,
OPT_START_POSITION, OPT_STOP_POSITION, OPT_START_DATETIME, OPT_STOP_DATETIME,
OPT_SIGINT_IGNORE, OPT_HEXBLOB, OPT_ORDER_BY_PRIMARY
OPT_SIGINT_IGNORE, OPT_HEXBLOB, OPT_ORDER_BY_PRIMARY, OPT_COUNT,
#ifdef HAVE_NDBCLUSTER_DB
,OPT_NDBCLUSTER,OPT_NDB_CONNECTSTRING
OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING,
#endif
,OPT_IGNORE_TABLE
OPT_IGNORE_TABLE
};

@@ -28,7 +28,7 @@
#include <sslopt-vars.h>

static my_string host=0,opt_password=0,user=0;
static my_bool opt_show_keys= 0, opt_compress= 0, opt_status= 0,
static my_bool opt_show_keys= 0, opt_compress= 0, opt_count=0, opt_status= 0,
tty_password= 0, opt_table_type= 0;
static uint opt_verbose=0;
static char *default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME;

@@ -71,8 +71,7 @@ int main(int argc, char **argv)
char *pos= argv[argc-1], *to;
for (to= pos ; *pos ; pos++, to++)
{
switch (*pos)
{
switch (*pos) {
case '*':
*pos= '%';
first_argument_uses_wildcards= 1;

@@ -163,6 +162,10 @@ static struct my_option my_long_options[] =
{"default-character-set", OPT_DEFAULT_CHARSET,
"Set the default character set.", (gptr*) &default_charset,
(gptr*) &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"count", OPT_COUNT,
"Show number of rows per table (may be slow for not MyISAM tables)",
(gptr*) &opt_count, (gptr*) &opt_count, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
{"compress", 'C', "Use compression in server/client protocol.",
(gptr*) &opt_compress, (gptr*) &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},

@@ -308,6 +311,14 @@ get_options(int *argc,char ***argv)

if (tty_password)
opt_password=get_tty_password(NullS);
if (opt_count)
{
/*
We need to set verbose to 2 as we need to change the output to include
the number-of-rows column
*/
opt_verbose= 2;
}
return;
}


@@ -322,7 +333,7 @@ list_dbs(MYSQL *mysql,const char *wild)
char query[255];
MYSQL_FIELD *field;
MYSQL_RES *result;
MYSQL_ROW row, trow, rrow;
MYSQL_ROW row, rrow;

if (!(result=mysql_list_dbs(mysql,wild)))
{

@@ -352,11 +363,6 @@ list_dbs(MYSQL *mysql,const char *wild)

if (opt_verbose)
{
/*
* Original code by MG16373; Slightly modified by Monty.
* Print now the count of tables and rows for each database.
*/

if (!(mysql_select_db(mysql,row[0])))
{
MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL);

@@ -366,6 +372,8 @@ list_dbs(MYSQL *mysql,const char *wild)
rowcount = 0;
if (opt_verbose > 1)
{
/* Print the count of tables and rows for each database */
MYSQL_ROW trow;
while ((trow = mysql_fetch_row(tresult)))
{
sprintf(query,"SELECT COUNT(*) FROM `%s`",trow[0]);

@@ -487,10 +495,6 @@ list_tables(MYSQL *mysql,const char *db,const char *table)

while ((row = mysql_fetch_row(result)))
{
/*
* Modified by MG16373
* Print now the count of rows for each table.
*/
counter++;
if (opt_verbose > 0)
{

@@ -510,6 +514,7 @@ list_tables(MYSQL *mysql,const char *db,const char *table)

if (opt_verbose > 1)
{
/* Print the count of rows for each table */
sprintf(query,"SELECT COUNT(*) FROM `%s`",row[0]);
if (!(mysql_query(mysql,query)))
{

@@ -574,7 +579,7 @@ list_table_status(MYSQL *mysql,const char *db,const char *wild)
MYSQL_RES *result;
MYSQL_ROW row;

end=strxmov(query,"show table status from ",db,NullS);
end=strxmov(query,"show table status from `",db,"`",NullS);
if (wild && wild[0])
strxmov(end," like '",wild,"'",NullS);
if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql)))

@@ -600,8 +605,8 @@ list_table_status(MYSQL *mysql,const char *db,const char *wild)
}

/*
** list fields uses field interface as an example of how to parse
** a MYSQL FIELD
list fields uses field interface as an example of how to parse
a MYSQL FIELD
*/

static int

@@ -612,6 +617,7 @@ list_fields(MYSQL *mysql,const char *db,const char *table,
MYSQL_RES *result;
MYSQL_ROW row;
ulong rows;
LINT_INIT(rows);

if (mysql_select_db(mysql,db))
{

@@ -619,16 +625,20 @@ list_fields(MYSQL *mysql,const char *db,const char *table,
mysql_error(mysql));
return 1;
}
sprintf(query,"select count(*) from `%s`", table);
if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql)))

if (opt_count)
{
fprintf(stderr,"%s: Cannot get record count for db: %s, table: %s: %s\n",
my_progname,db,table,mysql_error(mysql));
return 1;
sprintf(query,"select count(*) from `%s`", table);
if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql)))
{
fprintf(stderr,"%s: Cannot get record count for db: %s, table: %s: %s\n",
my_progname,db,table,mysql_error(mysql));
return 1;
}
row= mysql_fetch_row(result);
rows= (ulong) strtoull(row[0], (char**) 0, 10);
mysql_free_result(result);
}
row = mysql_fetch_row(result);
rows = (ulong) strtoull(row[0], (char**) 0, 10);
mysql_free_result(result);

end=strmov(strmov(strmov(query,"show /*!32332 FULL */ columns from `"),table),"`");
if (wild && wild[0])

@@ -640,8 +650,9 @@ list_fields(MYSQL *mysql,const char *db,const char *table,
return 1;
}

printf("Database: %s Table: %s Rows: %lu", db, table, rows);

printf("Database: %s Table: %s", db, table);
if (opt_count)
printf(" Rows: %lu", rows);
if (wild && wild[0])
printf(" Wildcard: %s",wild);
putchar('\n');

@@ -675,7 +686,7 @@ list_fields(MYSQL *mysql,const char *db,const char *table,


/*****************************************************************************
** General functions to print a nice ascii-table from data
General functions to print a nice ascii-table from data
*****************************************************************************/

static void

@@ -67,12 +67,11 @@ my_bool my_thread_global_init(void)
/*
Set mutex type to "fast" a.k.a "adaptive"

The mutex kind determines what happens if a thread attempts to lock
a mutex it already owns with pthread_mutex_lock(3). If the mutex
is of the ``fast'' kind, pthread_mutex_lock(3) simply suspends
the calling thread forever. If the mutex is of the ``error checking''
kind, pthread_mutex_lock(3) returns immediately with the error
code EDEADLK.
In this case the thread may steal the mutex from some other thread
that is waiting for the same mutex. This will save us some
context switches but may cause a thread to 'starve forever' while
waiting for the mutex (not likely if the code within the mutex is
short).
*/
pthread_mutexattr_init(&my_fast_mutexattr);
pthread_mutexattr_settype(&my_fast_mutexattr,

@@ -2012,9 +2012,7 @@ THR_LOCK_DATA **ha_berkeley::store_lock(THD *thd, THR_LOCK_DATA **to,
lock_type <= TL_WRITE) &&
!thd->in_lock_tables)
lock_type = TL_WRITE_ALLOW_WRITE;
lock.type=lock_type;
lock_on_read= ((table->reginfo.lock_type > TL_WRITE_ALLOW_READ) ? DB_RMW :
0);
lock.type= lock_type;
}
*to++= &lock;
return to;

@@ -57,7 +57,6 @@ class ha_berkeley: public handler
ulong alloced_rec_buff_length;
ulong changed_rows;
uint primary_key,last_dup_key, hidden_primary_key, version;
u_int32_t lock_on_read;
bool key_read, using_ignore;
bool fix_rec_buff_for_blob(ulong length);
byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH];

@@ -2452,18 +2452,6 @@ set_field_in_record_to_null(
record[null_offset] = record[null_offset] | field->null_bit;
}

/******************************************************************
Resets SQL NULL bits in a record to zero. */
inline
void
reset_null_bits(
/*============*/
TABLE* table, /* in: MySQL table object */
char* record) /* in: a row in MySQL format */
{
bzero(record, table->s->null_bytes);
}

extern "C" {
/*****************************************************************
InnoDB uses this function to compare two data fields for which the data type

@@ -1,4 +1,4 @@
/* Copyright (C) 2000-2003 MySQL AB
/* Copyright (C) 2000-2003 MySQL AB

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

@@ -1227,7 +1227,7 @@ static void shrink_varchar(Field* field, const byte* & ptr, char* buf)
if (ptr[1] == 0) {
buf[0]= ptr[0];
} else {
DBUG_ASSERT(false);
DBUG_ASSERT(FALSE);
buf[0]= 255;
}
memmove(buf + 1, ptr + 2, pack_len - 1);

@@ -1773,7 +1773,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
if (p.bound_type == -1)
{
DBUG_PRINT("error", ("key %d unknown flag %d", j, p.key->flag));
DBUG_ASSERT(false);
DBUG_ASSERT(FALSE);
// Stop setting bounds but continue with what we have
op->end_of_bound(range_no);
DBUG_RETURN(0);

@@ -1850,7 +1850,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,

if (m_active_cursor == 0)
{
restart= false;
restart= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *)

@@ -1860,7 +1860,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
ERR_RETURN(trans->getNdbError());
m_active_cursor= op;
} else {
restart= true;
restart= TRUE;
op= (NdbIndexScanOperation*)m_active_cursor;

DBUG_ASSERT(op->getSorted() == sorted);

@@ -2741,7 +2741,7 @@ int ha_ndbcluster::close_scan()
m_ops_pending= 0;
}

cursor->close(m_force_send, true);
cursor->close(m_force_send, TRUE);
m_active_cursor= m_multi_cursor= NULL;
DBUG_RETURN(0);
}

@@ -5554,7 +5554,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
/**
* blobs can't be batched currently
*/
m_disable_multi_read= true;
m_disable_multi_read= TRUE;
DBUG_RETURN(handler::read_multi_range_first(found_range_p,
ranges,
range_count,

@@ -5562,7 +5562,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
buffer));
}

m_disable_multi_read= false;
m_disable_multi_read= FALSE;

/**
* Copy arguments into member variables

@@ -5610,7 +5610,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
!op->readTuple(lm) &&
!set_primary_key(op, multi_range_curr->start_key.key) &&
!define_read_attrs(curr, op) &&
(op->setAbortOption(AO_IgnoreError), true))
(op->setAbortOption(AO_IgnoreError), TRUE))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());

@@ -5625,7 +5625,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
!op->readTuple(lm) &&
!set_index_key(op, key_info, multi_range_curr->start_key.key) &&
!define_read_attrs(curr, op) &&
(op->setAbortOption(AO_IgnoreError), true))
(op->setAbortOption(AO_IgnoreError), TRUE))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());

@@ -5660,7 +5660,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
end_of_buffer -= reclength;
}
else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab))
&&!scanOp->readTuples(lm, 0, parallelism, sorted, false, true)
&&!scanOp->readTuples(lm, 0, parallelism, sorted, FALSE, TRUE)
&&!generate_scan_filter(m_cond_stack, scanOp)
&&!define_read_attrs(end_of_buffer-reclength, scanOp))
{

@@ -5807,11 +5807,11 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
continue;
}

DBUG_ASSERT(false); // Should only get here via goto's
DBUG_ASSERT(FALSE); // Should only get here via goto's
close_scan:
if (res == 1)
{
m_multi_cursor->close(false, true);
m_multi_cursor->close(FALSE, TRUE);
m_active_cursor= m_multi_cursor= 0;
DBUG_MULTI_RANGE(8);
continue;

@@ -6997,7 +6997,7 @@ int
ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
{
uint level=0;
bool negated= false;
bool negated= FALSE;

DBUG_ENTER("build_scan_filter_group");
do

@@ -7013,7 +7013,7 @@ ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
if ((negated) ? filter->begin(NdbScanFilter::NAND)
: filter->begin(NdbScanFilter::AND) == -1)
DBUG_RETURN(1);
negated= false;
negated= FALSE;
cond= cond->next;
break;
}

@@ -7024,19 +7024,19 @@ ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
if ((negated) ? filter->begin(NdbScanFilter::NOR)
: filter->begin(NdbScanFilter::OR) == -1)
DBUG_RETURN(1);
negated= false;
negated= FALSE;
cond= cond->next;
break;
}
case(Item_func::NOT_FUNC): {
cond= cond->next;
negated= true;
negated= TRUE;
break;
}
default:
if (build_scan_filter_predicate(cond, filter, negated))
DBUG_RETURN(1);
negated= false;
negated= FALSE;
break;
}
break;

@@ -366,11 +366,11 @@ int ha_init()

if (opt_bin_log)
{
if (!(*ht= binlog_init()))
if (!(*ht= binlog_init())) // Always succeed
{
mysql_bin_log.close(LOG_CLOSE_INDEX);
opt_bin_log= 0;
error= 1;
mysql_bin_log.close(LOG_CLOSE_INDEX); // Never used
opt_bin_log= 0; // Never used
error= 1; // Never used
}
else
ha_was_inited_ok(ht++);

@@ -2417,6 +2417,7 @@ TYPELIB *ha_known_exts(void)
return &known_extensions;
}


#ifdef HAVE_REPLICATION
/*
Reports to table handlers up to which position we have sent the binlog

@@ -2424,19 +2425,16 @@ TYPELIB *ha_known_exts(void)

SYNOPSIS
ha_repl_report_sent_binlog()
thd thread doing the binlog communication to the slave
log_file_name binlog file name
end_offset the offset in the binlog file up to which we sent the
contents to the slave

NOTES
Only works for InnoDB at the moment

RETURN VALUE
Always 0 (= success)

PARAMETERS
THD *thd in: thread doing the binlog communication to
the slave
char *log_file_name in: binlog file name
my_off_t end_offset in: the offset in the binlog file up to
which we sent the contents to the slave
*/

int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,

@@ -2445,17 +2443,17 @@ int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,
#ifdef HAVE_INNOBASE_DB
return innobase_repl_report_sent_binlog(thd,log_file_name,end_offset);
#else
/* remove warnings about unused parameters */
thd=thd; log_file_name=log_file_name; end_offset=end_offset;
return 0;
#endif
}


/*
Reports to table handlers that we stop replication to a specific slave

SYNOPSIS
ha_repl_report_replication_stop()
thd thread doing the binlog communication to the slave

NOTES
Does nothing at the moment

@@ -2464,14 +2462,10 @@ int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,
Always 0 (= success)

PARAMETERS
THD *thd in: thread doing the binlog communication to
the slave
*/

int ha_repl_report_replication_stop(THD *thd)
{
thd = thd;

return 0;
}
#endif /* HAVE_REPLICATION */

@@ -510,10 +510,10 @@ bool Item_field::collect_item_field_processor(byte *arg)
while ((curr_item= item_list_it++))
{
if (curr_item->eq(this, 1))
DBUG_RETURN(false); /* Already in the set. */
DBUG_RETURN(FALSE); /* Already in the set. */
}
item_list->push_back(this);
DBUG_RETURN(false);
DBUG_RETURN(FALSE);
}


@@ -1737,8 +1737,7 @@ my_decimal *Item_func_coalesce::val_decimal(my_decimal *decimal_value)
void Item_func_coalesce::fix_length_and_dec()
{
agg_result_type(&cached_result_type, args, arg_count);
switch (cached_result_type)
{
switch (cached_result_type) {
case STRING_RESULT:
count_only_length();
decimals= NOT_FIXED_DEC;

@@ -412,6 +412,7 @@ public:
const char *func_name() const { return "between"; }
void fix_length_and_dec();
void print(String *str);
bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
uint decimal_precision() const { return 1; }
};

|
|||
Field *
|
||||
Item_func_sp::sp_result_field(void) const
|
||||
{
|
||||
Field *field= 0;
|
||||
THD *thd= current_thd;
|
||||
Field *field;
|
||||
DBUG_ENTER("Item_func_sp::sp_result_field");
|
||||
if (m_sp)
|
||||
|
||||
if (!m_sp)
|
||||
{
|
||||
if (dummy_table->s == NULL)
|
||||
if (!(m_sp= sp_find_function(current_thd, m_name, TRUE)))
|
||||
{
|
||||
char *empty_name= (char *) "";
|
||||
TABLE_SHARE *share;
|
||||
dummy_table->s= share= &dummy_table->share_not_to_be_used;
|
||||
dummy_table->alias = empty_name;
|
||||
dummy_table->maybe_null = maybe_null;
|
||||
dummy_table->in_use= current_thd;
|
||||
share->table_cache_key = empty_name;
|
||||
share->table_name = empty_name;
|
||||
share->table_name = empty_name;
|
||||
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
field= m_sp->make_field(max_length, name, dummy_table);
|
||||
}
|
||||
DBUG_RETURN(field);
|
||||
if (!dummy_table->s)
|
||||
{
|
||||
char *empty_name= (char *) "";
|
||||
TABLE_SHARE *share;
|
||||
dummy_table->s= share= &dummy_table->share_not_to_be_used;
|
||||
dummy_table->alias = empty_name;
|
||||
dummy_table->maybe_null = maybe_null;
|
||||
dummy_table->in_use= current_thd;
|
||||
share->table_cache_key = empty_name;
|
||||
share->table_name = empty_name;
|
||||
}
|
||||
DBUG_RETURN(m_sp->make_field(max_length, name, dummy_table));
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Execute function & store value in field
|
||||
|
||||
RETURN
|
||||
0 value <> NULL
|
||||
1 value = NULL or error
|
||||
*/
|
||||
|
||||
int
|
||||
Item_func_sp::execute(Field **flp)
|
||||
{
|
||||
|
@ -4706,7 +4717,7 @@ Item_func_sp::execute(Field **flp)
|
|||
f->null_bit= 1;
|
||||
}
|
||||
it->save_in_field(f, 1);
|
||||
return f->is_null();
|
||||
return null_value= f->is_null();
|
||||
}
|
||||
|
||||
|
||||
|
@ -4721,12 +4732,13 @@ Item_func_sp::execute(Item **itp)
|
|||
st_sp_security_context save_ctx;
|
||||
#endif
|
||||
|
||||
if (! m_sp)
|
||||
m_sp= sp_find_function(thd, m_name, TRUE); // cache only
|
||||
if (! m_sp)
|
||||
{
|
||||
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str);
|
||||
DBUG_RETURN(-1);
|
||||
if (!(m_sp= sp_find_function(thd, m_name, TRUE)))
|
||||
{
|
||||
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str);
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
}
|
||||
|
||||
old_client_capabilites= thd->client_capabilities;
|
||||
|
@ -4788,15 +4800,12 @@ Item_func_sp::make_field(Send_field *tmp_field)
|
|||
{
|
||||
Field *field;
|
||||
DBUG_ENTER("Item_func_sp::make_field");
|
||||
if (! m_sp)
|
||||
m_sp= sp_find_function(current_thd, m_name, TRUE); // cache only
|
||||
if ((field= sp_result_field()))
|
||||
{
|
||||
field->make_field(tmp_field);
|
||||
delete field;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str);
|
||||
init_make_field(tmp_field, MYSQL_TYPE_VARCHAR);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
@ -4805,20 +4814,17 @@ Item_func_sp::make_field(Send_field *tmp_field)
|
|||
enum enum_field_types
|
||||
Item_func_sp::field_type() const
|
||||
{
|
||||
Field *field= 0;
|
||||
Field *field;
|
||||
DBUG_ENTER("Item_func_sp::field_type");
|
||||
|
||||
if (result_field)
|
||||
DBUG_RETURN(result_field->type());
|
||||
if (! m_sp)
|
||||
m_sp= sp_find_function(current_thd, m_name, TRUE); // cache only
|
||||
if ((field= sp_result_field()))
|
||||
{
|
||||
enum_field_types result= field->type();
|
||||
delete field;
|
||||
DBUG_RETURN(result);
|
||||
}
|
||||
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str);
|
||||
DBUG_RETURN(MYSQL_TYPE_VARCHAR);
|
||||
}
|
||||
|
||||
|
@ -4826,28 +4832,25 @@ Item_func_sp::field_type() const
|
|||
Item_result
|
||||
Item_func_sp::result_type() const
|
||||
{
|
||||
Field *field= 0;
|
||||
Field *field;
|
||||
DBUG_ENTER("Item_func_sp::result_type");
|
||||
DBUG_PRINT("info", ("m_sp = %p", m_sp));
|
||||
|
||||
if (result_field)
|
||||
DBUG_RETURN(result_field->result_type());
|
||||
if (! m_sp)
|
||||
m_sp= sp_find_function(current_thd, m_name, TRUE); // cache only
|
||||
if ((field= sp_result_field()))
|
||||
{
|
||||
Item_result result= field->result_type();
|
||||
delete field;
|
||||
DBUG_RETURN(result);
|
||||
}
|
||||
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str);
|
||||
DBUG_RETURN(STRING_RESULT);
|
||||
}
|
||||
|
||||
void
|
||||
Item_func_sp::fix_length_and_dec()
|
||||
{
|
||||
Field *field= result_field;
|
||||
Field *field;
|
||||
DBUG_ENTER("Item_func_sp::fix_length_and_dec");
|
||||
|
||||
if (result_field)
|
||||
|
@ -4857,20 +4860,12 @@ Item_func_sp::fix_length_and_dec()
|
|||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
if (! m_sp)
|
||||
m_sp= sp_find_function(current_thd, m_name, TRUE); // cache only
|
||||
if (! m_sp)
|
||||
{
|
||||
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str);
|
||||
}
|
||||
else
|
||||
{
|
||||
field= sp_result_field();
|
||||
decimals= field->decimals();
|
||||
max_length= field->field_length;
|
||||
maybe_null= 1;
|
||||
}
|
||||
delete field;
|
||||
if (!(field= sp_result_field()))
|
||||
DBUG_VOID_RETURN;
|
||||
decimals= field->decimals();
|
||||
max_length= field->field_length;
|
||||
maybe_null= 1;
|
||||
delete field;
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
@ -4878,11 +4873,10 @@ Item_func_sp::fix_length_and_dec()
|
|||
longlong Item_func_found_rows::val_int()
|
||||
{
|
||||
DBUG_ASSERT(fixed == 1);
|
||||
THD *thd= current_thd;
|
||||
|
||||
return thd->found_rows();
|
||||
return current_thd->found_rows();
|
||||
}
|
||||
|
||||
|
||||
Field *
|
||||
Item_func_sp::tmp_table_field(TABLE *t_arg)
|
||||
{
|
||||
|
|
|
@@ -956,9 +956,9 @@ bool MYSQL_LOG::reset_logs(THD* thd)
my_delete(index_file_name, MYF(MY_WME)); // Reset (open will update)
if (!thd->slave_thread)
need_start_event=1;
open_index_file(index_file_name, 0);
open(save_name, save_log_type, 0,
io_cache_type, no_auto_events, max_size, 0);
if (!open_index_file(index_file_name, 0))
open(save_name, save_log_type, 0,
io_cache_type, no_auto_events, max_size, 0);
my_free((gptr) save_name, MYF(0));

err:

@@ -1589,7 +1589,7 @@ bool MYSQL_LOG::write(Log_event *event_info)
present event could be about a non-transactional table, but still we need
to write to the binlog cache in that case to handle updates to mixed
trans/non-trans table types the best possible in binlogging)
- or if the event asks for it (cache_stmt == true).
- or if the event asks for it (cache_stmt == TRUE).
*/
if (opt_using_transactions && thd)
{

@@ -183,6 +183,11 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
#else
#define IF_INNOBASE_DB(A, B) (B)
#endif
#ifdef __NETWARE__
#define IF_NETWARE(A,B) (A)
#else
#define IF_NETWARE(A,B) (B)
#endif

#if defined(__WIN__) || defined(OS2)
#define IF_WIN(A,B) (A)

|
|||
Update log is removed since 5.0. But we still accept the option.
|
||||
The idea is if the user already uses the binlog and the update log,
|
||||
we completely ignore any option/variable related to the update log, like
|
||||
if the update log did not exist. But if the user uses only the update log,
|
||||
then we translate everything into binlog for him (with warnings).
|
||||
if the update log did not exist. But if the user uses only the update
|
||||
log, then we translate everything into binlog for him (with warnings).
|
||||
Implementation of the above :
|
||||
- If mysqld is started with --log-update and --log-bin,
|
||||
ignore --log-update (print a warning), push a warning when SQL_LOG_UPDATE
|
||||
|
@ -2685,11 +2685,11 @@ static int init_server_components()
|
|||
|
||||
Note that we tell the user that --sql-bin-update-same is deprecated and
|
||||
does nothing, and we don't take into account if he used this option or
|
||||
not; but internally we give this variable a value to have the behaviour we
|
||||
want (i.e. have SQL_LOG_UPDATE influence SQL_LOG_BIN or not).
|
||||
not; but internally we give this variable a value to have the behaviour
|
||||
we want (i.e. have SQL_LOG_UPDATE influence SQL_LOG_BIN or not).
|
||||
As sql-bin-update-same, log-update and log-bin cannot be changed by the
|
||||
user after starting the server (they are not variables), the user will not
|
||||
later interfere with the settings we do here.
|
||||
user after starting the server (they are not variables), the user will
|
||||
not later interfere with the settings we do here.
|
||||
*/
|
||||
if (opt_bin_log)
|
||||
{
|
||||
|
@ -2703,7 +2703,7 @@ version 5.0 and above. It is replaced by the binary log.");
|
|||
opt_bin_log= 1;
|
||||
if (opt_update_logname)
|
||||
{
|
||||
// as opt_bin_log==0, no need to free opt_bin_logname
|
||||
/* as opt_bin_log==0, no need to free opt_bin_logname */
|
||||
if (!(opt_bin_logname= my_strdup(opt_update_logname, MYF(MY_WME))))
|
||||
exit(EXIT_OUT_OF_MEMORY);
|
||||
sql_print_error("The update log is no longer supported by MySQL in \
|
||||
|
@ -2718,8 +2718,8 @@ with --log-bin instead.");
|
|||
}
|
||||
if (opt_log_slave_updates && !opt_bin_log)
|
||||
{
|
||||
sql_print_warning("You need to use --log-bin to make "
|
||||
"--log-slave-updates work.");
|
||||
sql_print_warning("You need to use --log-bin to make "
|
||||
"--log-slave-updates work.");
|
||||
unireg_abort(1);
|
||||
}
|
||||
|
||||
|
@ -2781,7 +2781,15 @@ server.");
|
|||
my_free(opt_bin_logname, MYF(MY_ALLOW_ZERO_PTR));
|
||||
opt_bin_logname=my_strdup(buf, MYF(0));
|
||||
}
|
||||
mysql_bin_log.open_index_file(opt_binlog_index_name, ln);
|
||||
if (mysql_bin_log.open_index_file(opt_binlog_index_name, ln))
|
||||
{
|
||||
unireg_abort(1);
|
||||
}
|
||||
|
||||
/*
|
||||
Used to specify which type of lock we need to use for queries of type
|
||||
INSERT ... SELECT. This will change when we have row level logging.
|
||||
*/
|
||||
using_update_log=1;
|
||||
}
|
||||
|
||||
|
@ -2790,10 +2798,10 @@ server.");
|
|||
sql_print_error("Can't init databases");
|
||||
unireg_abort(1);
|
||||
}
|
||||
tc_log= total_ha_2pc > 1 ? opt_bin_log ?
|
||||
(TC_LOG *)&mysql_bin_log :
|
||||
(TC_LOG *)&tc_log_mmap :
|
||||
(TC_LOG *)&tc_log_dummy;
|
||||
tc_log= (total_ha_2pc > 1 ? (opt_bin_log ?
|
||||
(TC_LOG *) &mysql_bin_log :
|
||||
(TC_LOG *) &tc_log_mmap) :
|
||||
(TC_LOG *) &tc_log_dummy);
|
||||
|
||||
if (tc_log->open(opt_bin_logname))
|
||||
{
|
||||
|
@ -2808,7 +2816,7 @@ server.");
|
|||
|
||||
if (opt_bin_log && mysql_bin_log.open(opt_bin_logname, LOG_BIN, 0,
|
||||
WRITE_CACHE, 0, max_binlog_size, 0))
|
||||
unireg_abort(1);
|
||||
unireg_abort(1);
|
||||
|
||||
#ifdef HAVE_REPLICATION
|
||||
if (opt_bin_log && expire_logs_days)
|
||||
|
@ -3567,11 +3575,8 @@ inline void kill_broken_server()
|
|||
(!opt_disable_networking && ip_sock == INVALID_SOCKET))
|
||||
{
|
||||
select_thread_in_use = 0;
|
||||
#ifdef __NETWARE__
|
||||
kill_server(MYSQL_KILL_SIGNAL); /* never returns */
|
||||
#else
|
||||
kill_server((void*)MYSQL_KILL_SIGNAL); /* never returns */
|
||||
#endif /* __NETWARE__ */
|
||||
/* The following call will never return */
|
||||
kill_server(IF_NETWARE(MYSQL_KILL_SIGNAL, (void*) MYSQL_KILL_SIGNAL));
|
||||
}
|
||||
}
|
||||
#define MAYBE_BROKEN_SYSCALL kill_broken_server();
|
||||
|
@ -4512,12 +4517,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
|
|||
".",
|
||||
(gptr*) &innobase_fast_shutdown,
|
||||
(gptr*) &innobase_fast_shutdown, 0, GET_ULONG, OPT_ARG, 1, 0,
|
||||
#ifndef __NETWARE__
|
||||
2,
|
||||
#else
|
||||
1,
|
||||
#endif
|
||||
0, 0, 0},
|
||||
IF_NETWARE(1,2), 0, 0, 0},
|
||||
{"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE,
|
||||
"Stores each InnoDB table to an .ibd file in the database dir.",
|
||||
(gptr*) &innobase_file_per_table,
|
||||
|
|
|
@ -853,7 +853,7 @@ QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param,
|
|||
bool retrieve_full_rows,
|
||||
MEM_ROOT *parent_alloc)
|
||||
: cpk_quick(NULL), thd(thd_param), need_to_fetch_row(retrieve_full_rows),
|
||||
scans_inited(false)
|
||||
scans_inited(FALSE)
|
||||
{
|
||||
index= MAX_KEY;
|
||||
head= table;
|
||||
|
@ -1022,7 +1022,7 @@ int QUICK_ROR_INTERSECT_SELECT::reset()
|
|||
DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::reset");
|
||||
if (!scans_inited && init_ror_merged_scan(TRUE))
|
||||
DBUG_RETURN(1);
|
||||
scans_inited= true;
|
||||
scans_inited= TRUE;
|
||||
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
|
||||
QUICK_RANGE_SELECT *quick;
|
||||
while ((quick= it++))
|
||||
|
@ -1066,7 +1066,7 @@ QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT()
|
|||
|
||||
QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param,
|
||||
TABLE *table)
|
||||
: thd(thd_param), scans_inited(false)
|
||||
: thd(thd_param), scans_inited(FALSE)
|
||||
{
|
||||
index= MAX_KEY;
|
||||
head= table;
|
||||
|
@ -1148,7 +1148,7 @@ int QUICK_ROR_UNION_SELECT::reset()
|
|||
if (quick->init_ror_merged_scan(FALSE))
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
scans_inited= true;
|
||||
scans_inited= TRUE;
|
||||
}
|
||||
queue_remove_all(&queue);
|
||||
/*
|
||||
|
@ -2677,7 +2677,7 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
|
|||
{
|
||||
/* Don't add this scan if it doesn't improve selectivity. */
|
||||
DBUG_PRINT("info", ("The scan doesn't improve selectivity."));
|
||||
DBUG_RETURN(false);
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
||||
info->out_rows *= selectivity_mult;
|
||||
|
@ -2865,7 +2865,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
|
|||
while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering)
|
||||
{
|
||||
/* S= S + first(R); R= R - first(R); */
|
||||
if (!ror_intersect_add(intersect, *cur_ror_scan, false))
|
||||
if (!ror_intersect_add(intersect, *cur_ror_scan, FALSE))
|
||||
{
|
||||
cur_ror_scan++;
|
||||
continue;
|
||||
|
@ -8080,7 +8080,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
|
|||
}
|
||||
else
|
||||
{
|
||||
/* Apply the constant equality conditions to the non-group select fields. */
|
||||
/* Apply the constant equality conditions to the non-group select fields */
|
||||
if (key_infix_len > 0)
|
||||
{
|
||||
if ((result= file->index_read(record, group_prefix, real_prefix_len,
|
||||
|
@ -8114,9 +8114,10 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
|
|||
*/
|
||||
if (!result)
|
||||
{
|
||||
if(key_cmp(index_info->key_part, group_prefix, real_prefix_len))
|
||||
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
|
||||
key_restore(record, tmp_record, index_info, 0);
|
||||
} else if (result == HA_ERR_KEY_NOT_FOUND)
|
||||
}
|
||||
else if (result == HA_ERR_KEY_NOT_FOUND)
|
||||
result= 0; /* There is a result in any case. */
|
||||
}
|
||||
}
|
||||
|
|
|
@ -313,7 +313,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
|
|||
removed_tables is != 0 if we have used MIN() or MAX().
|
||||
*/
|
||||
if (removed_tables && used_tables != removed_tables)
|
||||
const_result= 0; // We didn't remove all tables
|
||||
const_result= 0; // We didn't remove all tables
|
||||
return const_result;
|
||||
}
|
||||
|
||||
|
@ -323,12 +323,14 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
|
|||
|
||||
SYNOPSIS
|
||||
simple_pred()
|
||||
func_item in: Predicate item
|
||||
func_item Predicate item
|
||||
args out: Here we store the field followed by constants
|
||||
inv_order out: Is set to 1 if the predicate is of the form 'const op field'
|
||||
inv_order out: Is set to 1 if the predicate is of the form
|
||||
'const op field'
|
||||
|
||||
RETURN
|
||||
0 func_item is a simple predicate: a field is compared with constants
|
||||
0 func_item is a simple predicate: a field is compared with
|
||||
constants
|
||||
1 Otherwise
|
||||
*/
|
||||
|
||||
|
|
|
@@ -385,32 +385,33 @@ sp_head::create_typelib(List<String> *src)
return 0;
result->type_lengths= (unsigned int *)(result->type_names + result->count+1);
List_iterator<String> it(*src);
String conv, *tmp;
uint32 dummy;
for (uint i=0; i<result->count; i++)
String conv;
for (uint i=0; i < result->count; i++)
{
tmp = it++;
uint32 dummy;
uint length;
String *tmp= it++;

if (String::needs_conversion(tmp->length(), tmp->charset(),
cs, &dummy))
{
uint cnv_errs;
conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs);
char *buf= (char*) alloc_root(mem_root,conv.length()+1);
memcpy(buf, conv.ptr(), conv.length());
buf[conv.length()]= '\0';
result->type_names[i]= buf;
result->type_lengths[i]= conv.length();

length= conv.length();
result->type_names[i]= (char*) strmake_root(mem_root, conv.ptr(),
length);
}
else {
result->type_names[i]= strdup_root(mem_root, tmp->c_ptr());
result->type_lengths[i]= tmp->length();
else
{
length= tmp->length();
result->type_names[i]= strmake_root(mem_root, tmp->ptr(), length);
}

// Strip trailing spaces.
uint lengthsp= cs->cset->lengthsp(cs, result->type_names[i],
result->type_lengths[i]);
result->type_lengths[i]= lengthsp;
((uchar *)result->type_names[i])[lengthsp]= '\0';
length= cs->cset->lengthsp(cs, result->type_names[i], length);
result->type_lengths[i]= length;
((uchar *)result->type_names[i])[length]= '\0';
}
result->type_names[result->count]= 0;
result->type_lengths[result->count]= 0;

|
|||
KEY_PART_INFO *key_part= table->key_info->key_part;
|
||||
DBUG_ENTER("replace_column_table");
|
||||
|
||||
table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
|
||||
table->field[1]->store(db,(uint) strlen(db), system_charset_info);
|
||||
table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
|
||||
table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info);
|
||||
table->field[0]->store(combo.host.str,combo.host.length,
|
||||
system_charset_info);
|
||||
table->field[1]->store(db,(uint) strlen(db),
|
||||
system_charset_info);
|
||||
table->field[2]->store(combo.user.str,combo.user.length,
|
||||
system_charset_info);
|
||||
table->field[3]->store(table_name,(uint) strlen(table_name),
|
||||
system_charset_info);
|
||||
|
||||
/* Get length of 3 first key parts */
|
||||
key_prefix_length= (key_part[0].store_length + key_part[1].store_length +
|
||||
|
@ -2188,17 +2192,17 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
/* first fix privileges for all columns in column list */
|
||||
|
||||
List_iterator <LEX_COLUMN> iter(columns);
|
||||
class LEX_COLUMN *xx;
|
||||
class LEX_COLUMN *column;
|
||||
table->file->ha_index_init(0);
|
||||
while ((xx=iter++))
|
||||
while ((column= iter++))
|
||||
{
|
||||
ulong privileges = xx->rights;
|
||||
ulong privileges= column->rights;
|
||||
bool old_row_exists=0;
|
||||
byte user_key[MAX_KEY_LENGTH];
|
||||
|
||||
key_restore(table->record[0],key,table->key_info,
|
||||
key_prefix_length);
|
||||
table->field[4]->store(xx->column.ptr(),xx->column.length(),
|
||||
table->field[4]->store(column->column.ptr(), column->column.length(),
|
||||
system_charset_info);
|
||||
/* Get key for the first 4 columns */
|
||||
key_copy(user_key, table->record[0], table->key_info,
|
||||
|
@ -2213,15 +2217,15 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
{
|
||||
my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0),
|
||||
combo.user.str, combo.host.str,
|
||||
table_name); /* purecov: inspected */
|
||||
result= -1; /* purecov: inspected */
|
||||
continue; /* purecov: inspected */
|
||||
table_name); /* purecov: inspected */
|
||||
result= -1; /* purecov: inspected */
|
||||
continue; /* purecov: inspected */
|
||||
}
|
||||
old_row_exists = 0;
|
||||
restore_record(table, s->default_values); // Get empty record
|
||||
key_restore(table->record[0],key,table->key_info,
|
||||
key_prefix_length);
|
||||
table->field[4]->store(xx->column.ptr(),xx->column.length(),
|
||||
table->field[4]->store(column->column.ptr(),column->column.length(),
|
||||
system_charset_info);
|
||||
}
|
||||
else
|
||||
|
@ -2241,6 +2245,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
|
||||
if (old_row_exists)
|
||||
{
|
||||
GRANT_COLUMN *grant_column;
|
||||
if (privileges)
|
||||
error=table->file->update_row(table->record[1],table->record[0]);
|
||||
else
|
||||
|
@ -2251,21 +2256,21 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
result= -1; /* purecov: inspected */
|
||||
goto end; /* purecov: inspected */
|
||||
}
|
||||
GRANT_COLUMN *grant_column = column_hash_search(g_t,
|
||||
xx->column.ptr(),
|
||||
xx->column.length());
|
||||
grant_column= column_hash_search(g_t, column->column.ptr(),
|
||||
column->column.length());
|
||||
if (grant_column) // Should always be true
|
||||
grant_column->rights = privileges; // Update hash
|
||||
grant_column->rights= privileges; // Update hash
|
||||
}
|
||||
else // new grant
|
||||
{
|
||||
GRANT_COLUMN *grant_column;
|
||||
if ((error=table->file->write_row(table->record[0])))
|
||||
{
|
||||
table->file->print_error(error,MYF(0)); /* purecov: inspected */
|
||||
result= -1; /* purecov: inspected */
|
||||
goto end; /* purecov: inspected */
|
||||
}
|
||||
GRANT_COLUMN *grant_column = new GRANT_COLUMN(xx->column,privileges);
|
||||
grant_column= new GRANT_COLUMN(column->column,privileges);
|
||||
my_hash_insert(&g_t->hash_columns,(byte*) grant_column);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1051,7 +1051,7 @@ pthread_handler_decl(handle_one_connection,arg)
|
|||
/* now that we've called my_thread_init(), it is safe to call DBUG_* */
|
||||
|
||||
#if defined(__WIN__)
|
||||
init_signals(); // IRENA; testing ?
|
||||
init_signals();
|
||||
#elif !defined(OS2) && !defined(__NETWARE__)
|
||||
sigset_t set;
|
||||
VOID(sigemptyset(&set)); // Get mask in use
|
||||
|
|
|
@ -11657,11 +11657,13 @@ cp_buffer_from_ref(THD *thd, TABLE_REF *ref)
|
|||
enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
|
||||
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
|
||||
for (store_key **copy=ref->key_copy ; *copy ; copy++)
|
||||
{
|
||||
if ((*copy)->copy())
|
||||
{
|
||||
thd->count_cuted_fields= save_count_cuted_fields;
|
||||
return 1; // Something went wrong
|
||||
}
|
||||
}
|
||||
thd->count_cuted_fields= save_count_cuted_fields;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -1600,6 +1600,8 @@ LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,

/* INFORMATION_SCHEMA name */
LEX_STRING information_schema_name= {(char*)"information_schema", 18};

/* This is only used internally, but we need it here as a forward reference */
extern ST_SCHEMA_TABLE schema_tables[];

typedef struct st_index_field_values

@@ -1693,8 +1695,8 @@ bool uses_only_table_name_fields(Item *item, TABLE_LIST *table)
CHARSET_INFO *cs= system_charset_info;
ST_SCHEMA_TABLE *schema_table= table->schema_table;
ST_FIELD_INFO *field_info= schema_table->fields_info;
const char *field_name1= field_info[schema_table->idx_field1].field_name;
const char *field_name2= field_info[schema_table->idx_field2].field_name;
const char *field_name1= schema_table->idx_field1 >= 0 ? field_info[schema_table->idx_field1].field_name : "";
const char *field_name2= schema_table->idx_field2 >= 0 ? field_info[schema_table->idx_field2].field_name : "";
if (table->table != item_field->field->table ||
(cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1),
(uchar *) item_field->field_name,

sql/sql_table.cc

@@ -38,6 +38,7 @@ static int copy_data_between_tables(TABLE *from,TABLE *to,
bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,ha_rows *deleted);
static bool prepare_blob_field(THD *thd, create_field *sql_field);

/*
delete (drop) tables.

@@ -700,21 +701,20 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
String conv, *tmp;
for (uint i= 0; (tmp= it++); i++)
{
uint lengthsp;
if (String::needs_conversion(tmp->length(), tmp->charset(),
cs, &dummy))
{
uint cnv_errs;
conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs);
char *buf= (char*) sql_alloc(conv.length()+1);
memcpy(buf, conv.ptr(), conv.length());
buf[conv.length()]= '\0';
interval->type_names[i]= buf;
interval->type_names[i]= strmake_root(thd->mem_root, conv.ptr(),
conv.length());
interval->type_lengths[i]= conv.length();
}

// Strip trailing spaces.
uint lengthsp= cs->cset->lengthsp(cs, interval->type_names[i],
interval->type_lengths[i]);
lengthsp= cs->cset->lengthsp(cs, interval->type_names[i],
interval->type_lengths[i]);
interval->type_lengths[i]= lengthsp;
((uchar *)interval->type_names[i])[lengthsp]= '\0';
}

@@ -781,37 +781,8 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
}

sql_field->create_length_to_internal_length();
if (sql_field->length > MAX_FIELD_VARCHARLENGTH &&
!(sql_field->flags & BLOB_FLAG))
{
/* Convert long VARCHAR columns to TEXT or BLOB */
char warn_buff[MYSQL_ERRMSG_SIZE];

if (sql_field->def)
{
my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name,
MAX_FIELD_VARCHARLENGTH / sql_field->charset->mbmaxlen);
DBUG_RETURN(-1);
}
sql_field->sql_type= FIELD_TYPE_BLOB;
sql_field->flags|= BLOB_FLAG;
sprintf(warn_buff, ER(ER_AUTO_CONVERT), sql_field->field_name,
"VARCHAR",
(sql_field->charset == &my_charset_bin) ? "BLOB" : "TEXT");
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT,
warn_buff);
}

if ((sql_field->flags & BLOB_FLAG) && sql_field->length)
{
if (sql_field->sql_type == FIELD_TYPE_BLOB)
{
/* The user has given a length to the blob column */
sql_field->sql_type= get_blob_type_from_length(sql_field->length);
sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0);
}
sql_field->length= 0; // Probably from an item
}
if (prepare_blob_field(thd, sql_field))
DBUG_RETURN(-1);

if (!(sql_field->flags & NOT_NULL_FLAG))
null_fields++;

@@ -1351,6 +1322,58 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
}


/*
Extend long VARCHAR fields to blob & prepare field if it's a blob

SYNOPSIS
prepare_blob_field()
sql_field Field to check

RETURN
0 ok
1 Error (sql_field can't be converted to blob)
In this case the error is given
*/

static bool prepare_blob_field(THD *thd, create_field *sql_field)
{
DBUG_ENTER("prepare_blob_field");

if (sql_field->length > MAX_FIELD_VARCHARLENGTH &&
!(sql_field->flags & BLOB_FLAG))
{
/* Convert long VARCHAR columns to TEXT or BLOB */
char warn_buff[MYSQL_ERRMSG_SIZE];

if (sql_field->def)
{
my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name,
MAX_FIELD_VARCHARLENGTH / sql_field->charset->mbmaxlen);
DBUG_RETURN(1);
}
sql_field->sql_type= FIELD_TYPE_BLOB;
sql_field->flags|= BLOB_FLAG;
sprintf(warn_buff, ER(ER_AUTO_CONVERT), sql_field->field_name,
"VARCHAR",
(sql_field->charset == &my_charset_bin) ? "BLOB" : "TEXT");
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT,
warn_buff);
}

if ((sql_field->flags & BLOB_FLAG) && sql_field->length)
{
if (sql_field->sql_type == FIELD_TYPE_BLOB)
{
/* The user has given a length to the blob column */
sql_field->sql_type= get_blob_type_from_length(sql_field->length);
sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0);
}
sql_field->length= 0;
}
DBUG_RETURN(0);
}


/*
Preparation of create_field for SP function return values.
Based on code used in the inner loop of mysql_prepare_table() above

@@ -1395,33 +1418,12 @@ void sp_prepare_create_field(THD *thd, create_field *sql_field)
FIELDFLAG_TREAT_BIT_AS_CHAR;
}
sql_field->create_length_to_internal_length();

if (sql_field->length > MAX_FIELD_VARCHARLENGTH &&
!(sql_field->flags & BLOB_FLAG))
{
/* Convert long VARCHAR columns to TEXT or BLOB */
char warn_buff[MYSQL_ERRMSG_SIZE];

sql_field->sql_type= FIELD_TYPE_BLOB;
sql_field->flags|= BLOB_FLAG;
sprintf(warn_buff, ER(ER_AUTO_CONVERT), sql_field->field_name,
"VARCHAR",
(sql_field->charset == &my_charset_bin) ? "BLOB" : "TEXT");
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT,
warn_buff);
}

if ((sql_field->flags & BLOB_FLAG) && sql_field->length)
{
if (sql_field->sql_type == FIELD_TYPE_BLOB)
{
/* The user has given a length to the blob column */
sql_field->sql_type= get_blob_type_from_length(sql_field->length);
sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0);
}
sql_field->length= 0; // Probably from an item
}
DBUG_ASSERT(sql_field->def == 0);
/* Can't go wrong as sql_field->def is not defined */
(void) prepare_blob_field(thd, sql_field);
}


/*
Create a table

|
|||
else if (tab->select->quick)
|
||||
{
|
||||
fprintf(DBUG_FILE, " quick select used:\n");
|
||||
tab->select->quick->dbug_dump(18, false);
|
||||
tab->select->quick->dbug_dump(18, FALSE);
|
||||
}
|
||||
else
|
||||
VOID(fputs(" select used\n",DBUG_FILE));
|
||||
|
|
|
@@ -28,7 +28,7 @@ static File_option triggers_file_parameters[]=
mysql_create_or_drop_trigger()
thd - current thread context (including trigger definition in LEX)
tables - table list containing one table for which trigger is created.
create - whenever we create (true) or drop (false) trigger
create - whenever we create (TRUE) or drop (FALSE) trigger

NOTE
This function is mainly responsible for opening and locking of table and

|
|||
|
||||
sp->m_returns_cs= new_field->charset;
|
||||
|
||||
if (new_field->sql_type == FIELD_TYPE_SET ||
|
||||
new_field->sql_type == FIELD_TYPE_ENUM)
|
||||
if (new_field->interval_list.elements)
|
||||
{
|
||||
new_field->interval=
|
||||
sp->create_typelib(&new_field->interval_list);
|
||||
|
|
|
@@ -504,6 +504,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type,
int2store(forminfo+280,22); /* Rows needed */
int2store(forminfo+282,null_fields);
int2store(forminfo+284,com_length);
/* Up to forminfo+288 is free to use for additional information */
DBUG_RETURN(0);
} /* pack_header */