Merge joreland@bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/home/jonas/src/mysql-5.0

sql/ha_ndbcluster.cc:
  Auto merged

Author: unknown
Date:   2004-11-22 07:49:59 +01:00
Commit: 2c15c8e332
121 changed files with 2616 additions and 583 deletions


@@ -102,6 +102,7 @@ Makefile.in
Makefile.in'
PENDING/*
TAGS
ac_available_languages_fragment
aclocal.m4
autom4te-2.53.cache/*
autom4te-2.53.cache/output.0
@@ -315,6 +316,7 @@ gmon.out
hardcopy.0
heap/hp_test1
heap/hp_test2
help
help.c
help.h
include/my_config.h
@@ -756,11 +758,25 @@ ndb/tools/ndb_delete_all
ndb/tools/ndb_desc
ndb/tools/ndb_drop_index
ndb/tools/ndb_drop_table
ndb/tools/ndb_restore
ndb/tools/ndb_select_all
ndb/tools/ndb_select_count
ndb/tools/ndb_show_tables
ndb/tools/ndb_test_platform
ndb/tools/ndb_waiter
ndbcluster-1186
ndbcluster-1186/SCCS
ndbcluster-1186/config.ini
ndbcluster-1186/ndb_1.pid
ndbcluster-1186/ndb_1_out.log
ndbcluster-1186/ndb_1_signal.log
ndbcluster-1186/ndb_2.pid
ndbcluster-1186/ndb_2_out.log
ndbcluster-1186/ndb_2_signal.log
ndbcluster-1186/ndb_3.pid
ndbcluster-1186/ndb_3_cluster.log
ndbcluster-1186/ndb_3_out.log
ndbcluster-1186/ndbcluster.pid
pull.log
regex/re
repl-tests/test-repl-ts/repl-timestamp.master.reject
@@ -959,5 +975,3 @@ vio/test-ssl
vio/test-sslclient
vio/test-sslserver
vio/viotest-ssl
ac_available_languages_fragment
ndb/tools/ndb_restore


@@ -20,13 +20,13 @@
INCLUDES = -I$(top_srcdir)/include -I$(top_srcdir)/regex \
$(openssl_includes)
LIBS = @CLIENT_LIBS@
DEPLIB= @ndb_mgmclient_libs@ \
../libmysql/libmysqlclient.la
DEPLIB= ../libmysql/libmysqlclient.la
LDADD = @CLIENT_EXTRA_LDFLAGS@ $(DEPLIB)
bin_PROGRAMS = mysql mysqladmin mysqlcheck mysqlshow \
mysqldump mysqlimport mysqltest mysqlbinlog mysqlmanagerc mysqlmanager-pwgen
noinst_HEADERS = sql_string.h completion_hash.h my_readline.h \
client_priv.h
mysqladmin_SOURCES = mysqladmin.cc
mysql_SOURCES = mysql.cc readline.cc sql_string.cc completion_hash.cc
mysql_LDADD = @readline_link@ @TERMCAP_LIB@ $(LDADD) $(CXXLDFLAGS)
mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS)


@@ -1609,7 +1609,7 @@ static void print_help_item(MYSQL_ROW *cur, int num_name, int num_cat, char *las
char ccat= (*cur)[num_cat][0];
if (*last_char != ccat)
{
put_info(ccat == 'Y' ? "categories :" : "topics :", INFO_INFO);
put_info(ccat == 'Y' ? "categories:" : "topics:", INFO_INFO);
*last_char= ccat;
}
tee_fprintf(PAGER, " %s\n", (*cur)[num_name]);
@@ -1676,8 +1676,8 @@ static int com_server_help(String *buffer __attribute__((unused)),
if (num_fields == 2)
{
put_info("Many help items for your request exist", INFO_INFO);
put_info("To make a more specific request, please type 'help <item>',\nwhere item is one of next", INFO_INFO);
put_info("Many help items for your request exist.", INFO_INFO);
put_info("To make a more specific request, please type 'help <item>',\nwhere item is one of the following", INFO_INFO);
num_name= 0;
num_cat= 1;
last_char= '_';


@@ -24,7 +24,7 @@
#include <sys/stat.h>
#include <mysql.h>
#ifdef HAVE_NDBCLUSTER_DB
#ifdef LATER_HAVE_NDBCLUSTER_DB
#include "../ndb/src/mgmclient/ndb_mgmclient.h"
#endif
@@ -45,7 +45,7 @@ static uint tcp_port = 0, option_wait = 0, option_silent=0, nr_iterations,
opt_count_iterations= 0;
static ulong opt_connect_timeout, opt_shutdown_timeout;
static my_string unix_port=0;
#ifdef HAVE_NDBCLUSTER_DB
#ifdef LATER_HAVE_NDBCLUSTER_DB
static my_bool opt_ndbcluster=0;
static char *opt_ndb_connectstring=0;
#endif
@@ -101,7 +101,7 @@ enum commands {
ADMIN_PING, ADMIN_EXTENDED_STATUS, ADMIN_FLUSH_STATUS,
ADMIN_FLUSH_PRIVILEGES, ADMIN_START_SLAVE, ADMIN_STOP_SLAVE,
ADMIN_FLUSH_THREADS, ADMIN_OLD_PASSWORD
#ifdef HAVE_NDBCLUSTER_DB
#ifdef LATER_HAVE_NDBCLUSTER_DB
,ADMIN_NDB_MGM
#endif
};
@@ -114,7 +114,7 @@ static const char *command_names[]= {
"ping", "extended-status", "flush-status",
"flush-privileges", "start-slave", "stop-slave",
"flush-threads","old-password",
#ifdef HAVE_NDBCLUSTER_DB
#ifdef LATER_HAVE_NDBCLUSTER_DB
"ndb-mgm",
#endif
NullS
@@ -197,7 +197,7 @@ static struct my_option my_long_options[] =
{"shutdown_timeout", OPT_SHUTDOWN_TIMEOUT, "", (gptr*) &opt_shutdown_timeout,
(gptr*) &opt_shutdown_timeout, 0, GET_ULONG, REQUIRED_ARG,
SHUTDOWN_DEF_TIMEOUT, 0, 3600*12, 0, 1, 0},
#ifdef HAVE_NDBCLUSTER_DB
#ifdef LATER_HAVE_NDBCLUSTER_DB
{"ndbcluster", OPT_NDBCLUSTER, ""
"", (gptr*) &opt_ndbcluster,
(gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -903,7 +903,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
mysql->reconnect=1; /* Automatic reconnect is default */
break;
#ifdef HAVE_NDBCLUSTER_DB
#ifdef LATER_HAVE_NDBCLUSTER_DB
case ADMIN_NDB_MGM:
{
if (argc < 2)
@@ -1287,9 +1287,6 @@ static my_bool wait_pidfile(char *pidfile, time_t last_modified,
}
DBUG_RETURN(error);
}
#ifdef HAVE_NDBCLUSTER_DB
/* lib linked in contains c++ code */
#ifdef __GNUC__
FIX_GCC_LINKING_PROBLEM
#endif
#endif


@@ -61,7 +61,7 @@ static const char *load_default_groups[]= { "mysqlbinlog","client",0 };
void sql_print_error(const char *format, ...);
static bool one_database=0, to_last_remote_log= 0;
static bool one_database=0, to_last_remote_log= 0, disable_log_bin= 0;
static const char* database= 0;
static my_bool force_opt= 0, short_form= 0, remote_opt= 0;
static ulonglong offset = 0;
@@ -495,6 +495,13 @@ static struct my_option my_long_options[] =
{"database", 'd', "List entries for just this database (local log only).",
(gptr*) &database, (gptr*) &database, 0, GET_STR_ALLOC, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"disable-log-bin", 'D', "Disable binary log. This is useful, if you "
"enabled --to-last-log and are sending the output to the same MySQL server. "
"This way you could avoid an endless loop. You would also like to use it "
"when restoring after a crash to avoid duplication of the statements you "
"already have. NOTE: you will need a SUPER privilege to use this option.",
(gptr*) &disable_log_bin, (gptr*) &disable_log_bin, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
{"force-read", 'f', "Force reading unknown binlog events.",
(gptr*) &force_opt, (gptr*) &force_opt, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
@@ -1218,6 +1225,11 @@ int main(int argc, char** argv)
fprintf(result_file,
"/*!40019 SET @@session.max_insert_delayed_threads=0*/;\n");
if (disable_log_bin)
fprintf(result_file,
"/*!32316 SET @OLD_SQL_LOG_BIN=@@SQL_LOG_BIN, SQL_LOG_BIN=0*/;\n");
for (save_stop_position= stop_position, stop_position= ~(my_off_t)0 ;
(--argc >= 0) && !stop_passed ; )
{
@@ -1232,6 +1244,9 @@ int main(int argc, char** argv)
start_position= BIN_LOG_HEADER_SIZE;
}
if (disable_log_bin)
fprintf(result_file, "/*!32316 SET SQL_LOG_BIN=@OLD_SQL_LOG_BIN*/;\n");
if (tmpdir.list)
free_tmpdir(&tmpdir);
if (result_file != stdout)
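
These two hunks bracket the whole dump when --disable-log-bin is given. A minimal standalone sketch of that framing, mirroring the variable names above (the event payload line is a placeholder, not mysqlbinlog's real output path):

#include <stdio.h>

int main(void)
{
  int disable_log_bin = 1;        /* as if -D / --disable-log-bin was given */
  FILE *result_file = stdout;

  if (disable_log_bin)            /* save the session setting, then turn it off */
    fprintf(result_file,
            "/*!32316 SET @OLD_SQL_LOG_BIN=@@SQL_LOG_BIN, SQL_LOG_BIN=0*/;\n");

  fprintf(result_file, "/* ... printed binlog events ... */\n");

  if (disable_log_bin)            /* restore the saved setting at the end */
    fprintf(result_file, "/*!32316 SET SQL_LOG_BIN=@OLD_SQL_LOG_BIN*/;\n");
  return 0;
}

Replaying such a dump into the server it came from can no longer loop back into the binary log, and the /*!32316 ... */ versioned comments keep the statements harmless on servers too old to understand SQL_LOG_BIN.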


@@ -399,7 +399,6 @@ then
then
if $CXX -v 2>&1 | grep 'version 3' > /dev/null 2>&1
then
CFLAGS="$CFLAGS -DDEFINE_CXA_PURE_VIRTUAL"
CXXFLAGS="$CXXFLAGS -DUSE_MYSYS_NEW -DDEFINE_CXA_PURE_VIRTUAL"
fi
fi


@@ -64,14 +64,15 @@ void init_time(void);
my_time_t
my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap);
void set_zero_time(MYSQL_TIME *tm);
void set_zero_time(MYSQL_TIME *tm, enum enum_mysql_timestamp_type time_type);
/*
Required buffer length for the my_time_to_str, my_date_to_str,
my_datetime_to_str and TIME_to_string functions. Note that the
caller is still responsible for checking that the given TIME
structure has values in valid ranges; otherwise the buffer may
not be large enough.
not be large enough. We also rely on the fact that even wrong
values sent using the binary protocol fit in this buffer.
*/
#define MAX_DATE_STRING_REP_LENGTH 30
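
A rough check of why 30 bytes suffice. The two literals below are illustrative worst cases (a full datetime with microseconds, and a TIME whose hour field has absorbed a huge day count from the binary protocol), not values taken from the server:

#include <stdio.h>
#include <string.h>

int main(void)
{
  const char *datetime = "9999-12-31 23:59:59.999999";  /* 26 chars + NUL */
  const char *big_time = "-4294967295:59:59.999999";    /* 24 chars + NUL */
  printf("%lu and %lu, both <= 30\n",
         (unsigned long) strlen(datetime) + 1,
         (unsigned long) strlen(big_time) + 1);
  return 0;
}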


@@ -33,6 +33,18 @@ enum enum_mysql_timestamp_type
};
/*
Structure which is used to represent datetime values inside MySQL.
We assume that values in this structure are normalized, i.e. year <= 9999,
month <= 12, day <= 31, hour <= 23, minute <= 59, second <= 59. Many
functions in the server, such as my_system_gmt_sec() or the make_time()
family of functions, rely on this (actually, current usage of the make_*()
family relies on a slightly weaker restriction). Functions that produce a
MYSQL_TIME as their result also ensure this. There is one exception to this
rule, though: if this structure holds a time value
(time_type == MYSQL_TIMESTAMP_TIME), the day and hour members may hold
bigger values.
*/
typedef struct st_mysql_time
{
unsigned int year, month, day, hour, minute, second;
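
A hypothetical illustration of the exception just described; the function name is made up and mysql_time.h is assumed to be on the include path:

#include <string.h>
#include "mysql_time.h"   /* MYSQL_TIME, MYSQL_TIMESTAMP_TIME */

/* Fill a TIME value of 841 hours; hour > 23 is legal here only because
   time_type is MYSQL_TIMESTAMP_TIME. Dates and datetimes stay normalized. */
void fill_interval(MYSQL_TIME *tm)
{
  memset(tm, 0, sizeof(*tm));
  tm->hour=   841;
  tm->minute= 22;
  tm->second= 33;
  tm->time_type= MYSQL_TIMESTAMP_TIME;
}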


@@ -408,4 +408,5 @@
#define ER_WRONG_MAGIC 1389
#define ER_PS_MANY_PARAM 1390
#define ER_KEY_PART_0 1391
#define ER_ERROR_MESSAGES 392
#define ER_VIEW_CHECKSUM 1392
#define ER_ERROR_MESSAGES 393
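
The count bump tracks the new highest code: assuming the codes are dense and start at 1000 (ER_HASHCHK), 1392 - 1000 + 1 = 393. A compile-time sketch of that invariant, with stand-in macro names so it stays self-contained:

#define ER_FIRST_CODE     1000  /* assumed first error code (ER_HASHCHK) */
#define ER_LAST_CODE      1392  /* ER_VIEW_CHECKSUM above */
#define ER_MESSAGE_COUNT   393

/* the array size collapses to -1 (a compile error) if the count drifts */
typedef char er_count_check[(ER_LAST_CODE - ER_FIRST_CODE + 1
                             == ER_MESSAGE_COUNT) ? 1 : -1];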


@@ -109,7 +109,7 @@ struct fil_node_struct {
device or a raw disk partition */
ulint size; /* size of the file in database pages, 0 if
not known yet; the possible last incomplete
megabyte is ignored if space == 0 */
megabyte may be ignored if space == 0 */
ulint n_pending;
/* count of pending i/o's on this file;
closing of the file is not allowed if
@@ -163,7 +163,9 @@ struct fil_space_struct {
UT_LIST_BASE_NODE_T(fil_node_t) chain;
/* base node for the file chain */
ulint size; /* space size in pages; 0 if a single-table
tablespace whose size we do not know yet */
tablespace whose size we do not know yet;
last incomplete megabytes in data files may be
ignored if space == 0 */
ulint n_reserved_extents;
/* number of reserved free extents for
ongoing operations like B-tree page split */
@@ -3258,7 +3260,7 @@ fil_extend_space_to_desired_size(
ulint* actual_size, /* out: size of the space after extension;
if we ran out of disk space this may be lower
than the desired size */
ulint space_id, /* in: space id, must be != 0 */
ulint space_id, /* in: space id */
ulint size_after_extend)/* in: desired size in pages after the
extension; if the current space size is bigger
than this already, the function does nothing */
@@ -3355,6 +3357,17 @@ fil_extend_space_to_desired_size(
fil_node_complete_io(node, system, OS_FILE_WRITE);
*actual_size = space->size;
if (space_id == 0) {
ulint pages_per_mb = (1024 * 1024) / UNIV_PAGE_SIZE;
/* Keep the last data file size info up to date, rounded to
full megabytes */
srv_data_file_sizes[srv_n_data_files - 1] =
(node->size / pages_per_mb) * pages_per_mb;
}
/*
printf("Extended %s to %lu, actual size %lu pages\n", space->name,
size_after_extend, *actual_size); */
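
A worked example of the megabyte rounding added above, assuming the usual 16KB InnoDB page size (so pages_per_mb is 64); the 1000-page file size is an illustrative value:

#include <stdio.h>

typedef unsigned long ulint;

int main(void)
{
  ulint pages_per_mb = (1024 * 1024) / (16 * 1024);           /* 64 */
  ulint node_size    = 1000;                                  /* pages */
  /* a 1000-page file is recorded as 960 pages: 15 full megabytes,
     the trailing partial megabyte is dropped */
  printf("%lu\n", (node_size / pages_per_mb) * pages_per_mb); /* 960 */
  return 0;
}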


@@ -480,7 +480,7 @@ fil_extend_space_to_desired_size(
ulint* actual_size, /* out: size of the space after extension;
if we ran out of disk space this may be lower
than the desired size */
ulint space_id, /* in: space id, must be != 0 */
ulint space_id, /* in: space id */
ulint size_after_extend);/* in: desired size in pages after the
extension; if the current space size is bigger
than this already, the function does nothing */


@@ -1509,7 +1509,6 @@ row_ins_scan_sec_index_for_duplicate(
ibool moved;
mtr_t mtr;
trx_t* trx;
const char* ptr;
n_unique = dict_index_get_n_unique(index);
@@ -1630,7 +1629,6 @@ row_ins_duplicate_error_in_clust(
page_t* page;
ulint n_unique;
trx_t* trx = thr_get_trx(thr);
const char* ptr;
UT_NOT_USED(mtr);


@@ -3326,11 +3326,12 @@ static void read_binary_time(MYSQL_TIME *tm, uchar **pos)
tm->hour+= tm->day*24;
tm->day= 0;
}
tm->time_type= MYSQL_TIMESTAMP_TIME;
*pos+= length;
}
else
set_zero_time(tm);
tm->time_type= MYSQL_TIMESTAMP_TIME;
set_zero_time(tm, MYSQL_TIMESTAMP_TIME);
}
static void read_binary_datetime(MYSQL_TIME *tm, uchar **pos)
@@ -3355,12 +3356,12 @@ static void read_binary_datetime(MYSQL_TIME *tm, uchar **pos)
else
tm->hour= tm->minute= tm->second= 0;
tm->second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0;
tm->time_type= MYSQL_TIMESTAMP_DATETIME;
*pos+= length;
}
else
set_zero_time(tm);
tm->time_type= MYSQL_TIMESTAMP_DATETIME;
set_zero_time(tm, MYSQL_TIMESTAMP_DATETIME);
}
static void read_binary_date(MYSQL_TIME *tm, uchar **pos)
@@ -3377,12 +3378,12 @@ static void read_binary_date(MYSQL_TIME *tm, uchar **pos)
tm->hour= tm->minute= tm->second= 0;
tm->second_part= 0;
tm->neg= 0;
tm->time_type= MYSQL_TIMESTAMP_DATE;
*pos+= length;
}
else
set_zero_time(tm);
tm->time_type= MYSQL_TIMESTAMP_DATE;
set_zero_time(tm, MYSQL_TIMESTAMP_DATE);
}


@@ -0,0 +1,56 @@
#
# Common tests for all character sets and collations.
# Include this file from a test with @test_character_set
# and @test_collation set to desired values.
#
# Please don't use SHOW CREATE TABLE in this file,
# we want it to be HANDLER independent. You can
# use SHOW FULL COLUMNS instead.
#
# Please surround all CREATE TABLE with --disable_warnings
# and --enable_warnings to be able to set storage_engine
# without having to check if the handler exists.
SET @safe_character_set_server= @@character_set_server;
SET @safe_collation_server= @@collation_server;
SET character_set_server= @test_character_set;
SET collation_server= @test_collation;
CREATE DATABASE d1;
USE d1;
#
# Bug 1883: LIKE did not work in some cases with a key.
#
--disable_warnings
CREATE TABLE t1 (c CHAR(10), KEY(c));
--enable_warnings
# check the column was created with the expected charset/collation
SHOW FULL COLUMNS FROM t1;
INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa');
SELECT c as want3results FROM t1 WHERE c LIKE 'aaa%';
DROP TABLE t1;
#
# Bug 6643 incorrect response with partial utf8 index
#
--disable_warnings
CREATE TABLE t1 (c1 varchar(15), KEY c1 (c1(2)));
--enable_warnings
# check the column was created with the expected charset/collation
SHOW FULL COLUMNS FROM t1;
INSERT INTO t1 VALUES ('location'),('loberge'),('lotre'),('boabab');
SELECT c1 as want3results from t1 where c1 like 'l%';
SELECT c1 as want3results from t1 where c1 like 'lo%';
SELECT c1 as want1result from t1 where c1 like 'loc%';
SELECT c1 as want1result from t1 where c1 like 'loca%';
SELECT c1 as want1result from t1 where c1 like 'locat%';
SELECT c1 as want1result from t1 where c1 like 'locati%';
SELECT c1 as want1result from t1 where c1 like 'locatio%';
SELECT c1 as want1result from t1 where c1 like 'location%';
DROP TABLE t1;
DROP DATABASE d1;
# Restore settings
USE test;
SET character_set_server= @safe_character_set_server;
SET collation_server= @safe_collation_server;


@@ -1,10 +1,58 @@
drop table if exists t1;
SET NAMES big5;
CREATE TABLE t1 (c CHAR(10) CHARACTER SET big5, KEY(c));
SET @test_character_set= 'big5';
SET @test_collation= 'big5_chinese_ci';
SET @safe_character_set_server= @@character_set_server;
SET @safe_collation_server= @@collation_server;
SET character_set_server= @test_character_set;
SET collation_server= @test_collation;
CREATE DATABASE d1;
USE d1;
CREATE TABLE t1 (c CHAR(10), KEY(c));
SHOW FULL COLUMNS FROM t1;
Field Type Collation Null Key Default Extra Privileges Comment
c char(10) big5_chinese_ci YES MUL NULL select,insert,update,references
INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa');
SELECT * FROM t1 WHERE c LIKE 'aaa%';
c
SELECT c as want3results FROM t1 WHERE c LIKE 'aaa%';
want3results
aaa
aaaa
aaaaa
DROP TABLE t1;
CREATE TABLE t1 (c1 varchar(15), KEY c1 (c1(2)));
SHOW FULL COLUMNS FROM t1;
Field Type Collation Null Key Default Extra Privileges Comment
c1 varchar(15) big5_chinese_ci YES MUL NULL select,insert,update,references
INSERT INTO t1 VALUES ('location'),('loberge'),('lotre'),('boabab');
SELECT c1 as want3results from t1 where c1 like 'l%';
want3results
location
loberge
lotre
SELECT c1 as want3results from t1 where c1 like 'lo%';
want3results
location
loberge
lotre
SELECT c1 as want1result from t1 where c1 like 'loc%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'loca%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locat%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locati%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locatio%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'location%';
want1result
location
DROP TABLE t1;
DROP DATABASE d1;
USE test;
SET character_set_server= @safe_character_set_server;
SET collation_server= @safe_collation_server;


@@ -2315,3 +2315,60 @@ HEX(CONVERT(col1 USING ucs2))
064A06A9062F064A06AF0631
064A06A9064A
DROP TABLE t1;
SET @test_character_set= 'utf8';
SET @test_collation= 'utf8_swedish_ci';
SET @safe_character_set_server= @@character_set_server;
SET @safe_collation_server= @@collation_server;
SET character_set_server= @test_character_set;
SET collation_server= @test_collation;
CREATE DATABASE d1;
USE d1;
CREATE TABLE t1 (c CHAR(10), KEY(c));
SHOW FULL COLUMNS FROM t1;
Field Type Collation Null Key Default Extra Privileges Comment
c char(10) utf8_swedish_ci YES MUL NULL select,insert,update,references
INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa');
SELECT c as want3results FROM t1 WHERE c LIKE 'aaa%';
want3results
aaa
aaaa
aaaaa
DROP TABLE t1;
CREATE TABLE t1 (c1 varchar(15), KEY c1 (c1(2)));
SHOW FULL COLUMNS FROM t1;
Field Type Collation Null Key Default Extra Privileges Comment
c1 varchar(15) utf8_swedish_ci YES MUL NULL select,insert,update,references
INSERT INTO t1 VALUES ('location'),('loberge'),('lotre'),('boabab');
SELECT c1 as want3results from t1 where c1 like 'l%';
want3results
location
loberge
lotre
SELECT c1 as want3results from t1 where c1 like 'lo%';
want3results
location
loberge
lotre
SELECT c1 as want1result from t1 where c1 like 'loc%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'loca%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locat%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locati%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'locatio%';
want1result
location
SELECT c1 as want1result from t1 where c1 like 'location%';
want1result
location
DROP TABLE t1;
DROP DATABASE d1;
USE test;
SET character_set_server= @safe_character_set_server;
SET collation_server= @safe_collation_server;


@@ -639,8 +639,22 @@ create table t1 (a char(10));
insert into t1 values ('a'),('b'),('c');
select coercibility(max(a)) from t1;
coercibility(max(a))
3
2
drop table t1;
create table t1 (a char character set latin2);
insert into t1 values ('a'),('b');
select charset(max(a)), coercibility(max(a)),
charset(min(a)), coercibility(min(a)) from t1;
charset(max(a)) coercibility(max(a)) charset(min(a)) coercibility(min(a))
latin2 2 latin2 2
create table t2 select max(a),min(a) from t1;
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
`max(a)` char(1) character set latin2 default NULL,
`min(a)` char(1) character set latin2 default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t2,t1;
create table t1 (a int);
insert into t1 values (1);
select max(a) as b from t1 having b=1;


@@ -474,6 +474,12 @@ unix_timestamp(@a)
select unix_timestamp('1969-12-01 19:00:01');
unix_timestamp('1969-12-01 19:00:01')
0
select from_unixtime(0);
from_unixtime(0)
NULL
select from_unixtime(2145916800);
from_unixtime(2145916800)
NULL
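
The two NULLs above are deliberate range checks (see the matching test change for bug #6439 further down): 0 falls below the 1970-01-01 00:00:01 minimum for TIMESTAMP values, and 2145916800 is exactly 2038-01-01 00:00:00 UTC, evidently just past the upper bound this tree accepts. The epoch arithmetic can be checked directly:

#include <stdio.h>
#include <time.h>

int main(void)
{
  time_t t = (time_t) 2145916800;
  /* prints Jan  1 00:00:00 2038 (UTC) */
  printf("%s", asctime(gmtime(&t)));
  return 0;
}
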
CREATE TABLE t1 (datetime datetime, timestamp timestamp, date date, time time);
INSERT INTO t1 values ("2001-01-02 03:04:05", "2002-01-02 03:04:05", "2003-01-02", "06:07:08");
SELECT * from t1;


@@ -1,16 +1,16 @@
grant all privileges on test.* to mysqltest_1@localhost;
select * from information_schema.SCHEMATA where schema_name > 'm';
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME
NULL mysql latin1
NULL test latin1
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME SQL_PATH
NULL mysql latin1 NULL
NULL test latin1 NULL
select schema_name from information_schema.schemata;
schema_name
mysql
test
show databases *;
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME
NULL mysql latin1
NULL test latin1
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME SQL_PATH
NULL mysql latin1 NULL
NULL test latin1 NULL
show databases like 't%';
Database (t%)
test
@@ -19,10 +19,10 @@ Database
mysql
test
show databases * where schema_name like 't%';
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME
NULL test latin1
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME SQL_PATH
NULL test latin1 NULL
show databases * where schema_name = 't%';
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME SQL_PATH
create database testtets;
create table testtets.t1(a int, b VARCHAR(30), KEY string_data (b));
create table test.t2(a int);
@@ -119,24 +119,24 @@ Field Type Collation Null Key Default Extra Privileges Comment
c char(64) utf8_general_ci select,insert,update,references
select * from information_schema.COLUMNS where table_name="t1"
and column_name= "a";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME TYPE COLLATION_NAME IS_NULLABLE KEY COLUMN_DEFAULT EXTRA PRIVILEGES COMMENT
NULL testtets t1 a 1 int 0 11 4 0 NULL int(11) NULL YES NULL select,insert,update,references
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT
NULL testtets t1 a 1 NULL YES int 11 11 11 0 NULL NULL int(11) select,insert,update,references
show columns * where table_name = "t1";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME TYPE COLLATION_NAME IS_NULLABLE KEY COLUMN_DEFAULT EXTRA PRIVILEGES COMMENT
NULL testtets t1 a 1 int 0 11 4 0 NULL int(11) NULL YES NULL select,insert,update,references
NULL testtets t1 b 2 varchar 30 30 30 31 latin1 varchar(30) latin1_swedish_ci YES MUL NULL select,insert,update,references
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT
NULL testtets t1 a 1 NULL YES int 11 11 11 0 NULL NULL int(11) select,insert,update,references
NULL testtets t1 b 2 NULL YES varchar 30 30 NULL NULL latin1 latin1_swedish_ci varchar(30) MUL select,insert,update,references
drop view v1;
drop tables testtets.t4, testtets.t1, t2, t3;
drop database testtets;
select * from information_schema.CHARACTER_SETS
where CHARACTER_SET_NAME like 'latin1%';
CHARACTER_SET_NAME Description DEFAULT_COLLATE_NAME Maxlen
CHARACTER_SET_NAME DESCRIPTION DEFAULT_COLLATE_NAME MAXLEN
latin1 ISO 8859-1 West European latin1_swedish_ci 1
SHOW CHARACTER SET LIKE 'latin1%';
Charset Description Default collation Maxlen
latin1 ISO 8859-1 West European latin1_swedish_ci 1
SHOW CHARACTER SET * LIKE 'latin1%';
CHARACTER_SET_NAME Description DEFAULT_COLLATE_NAME Maxlen
CHARACTER_SET_NAME DESCRIPTION DEFAULT_COLLATE_NAME MAXLEN
latin1 ISO 8859-1 West European latin1_swedish_ci 1
SHOW CHARACTER SET WHERE CHARACTER_SET_NAME like 'latin1%';
Charset Description Default collation Maxlen
@@ -145,11 +145,11 @@ SHOW CHARACTER SET CHARACTER_SET_NAME WHERE CHARACTER_SET_NAME like 'latin1%';
CHARACTER_SET_NAME
latin1
SHOW CHARACTER SET * WHERE CHARACTER_SET_NAME like 'latin1%';
CHARACTER_SET_NAME Description DEFAULT_COLLATE_NAME Maxlen
CHARACTER_SET_NAME DESCRIPTION DEFAULT_COLLATE_NAME MAXLEN
latin1 ISO 8859-1 West European latin1_swedish_ci 1
select * from information_schema.COLLATIONS
where COLLATION_NAME like 'latin1%';
COLLATION_NAME Charset Id Default Compiled Sortlen
COLLATION_NAME CHARSET ID DEFAULT COMPILED SORTLEN
latin1_german1_ci latin1 5 0
latin1_swedish_ci latin1 8 Yes Yes 1
latin1_danish_ci latin1 15 0
@@ -169,7 +169,7 @@ latin1_general_ci latin1 48 0
latin1_general_cs latin1 49 0
latin1_spanish_ci latin1 94 0
SHOW COLLATION * LIKE 'latin1%';
COLLATION_NAME Charset Id Default Compiled Sortlen
COLLATION_NAME CHARSET ID DEFAULT COMPILED SORTLEN
latin1_german1_ci latin1 5 0
latin1_swedish_ci latin1 8 Yes Yes 1
latin1_danish_ci latin1 15 0
@@ -199,7 +199,7 @@ latin1_general_ci
latin1_general_cs
latin1_spanish_ci
SHOW COLLATION * WHERE COLLATION_NAME like 'latin1%';
COLLATION_NAME Charset Id Default Compiled Sortlen
COLLATION_NAME CHARSET ID DEFAULT COMPILED SORTLEN
latin1_german1_ci latin1 5 0
latin1_swedish_ci latin1 8 Yes Yes 1
latin1_danish_ci latin1 15 0
@@ -358,11 +358,11 @@ NULL test key_1 test t1 UNIQUE NULL
NULL test key_2 test t1 UNIQUE NULL
select * from information_schema.KEY_COLUMN_USAGE where
TABLE_SCHEMA= "test";
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
NULL test PRIMARY test t1 a 1 NULL NULL NULL
NULL test constraint_1 test t1 a 1 NULL NULL NULL
NULL test key_1 test t1 a 1 NULL NULL NULL
NULL test key_2 test t1 a 1 NULL NULL NULL
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
NULL test PRIMARY NULL test t1 a 1 NULL NULL NULL
NULL test constraint_1 NULL test t1 a 1 NULL NULL NULL
NULL test key_1 NULL test t1 a 1 NULL NULL NULL
NULL test key_2 NULL test t1 a 1 NULL NULL NULL
drop table t1;
CREATE TABLE t1 (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
CREATE TABLE t2 (id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id),
@@ -377,11 +377,11 @@ NULL test t2_ibfk_1 test t2 FOREIGN KEY ON DELETE CASCADE
NULL test t2_ibfk_2 test t2 FOREIGN KEY ON UPDATE CASCADE
select * from information_schema.KEY_COLUMN_USAGE where
TABLE_SCHEMA= "test";
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
NULL test PRIMARY test t1 id 1 NULL NULL NULL
NULL test PRIMARY test t2 id 1 NULL NULL NULL
NULL test t2_ibfk_1 test t2 t1_id 1 test t1 id
NULL test t2_ibfk_2 test t2 t1_id 1 test t1 id
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
NULL test PRIMARY NULL test t1 id 1 NULL NULL NULL
NULL test PRIMARY NULL test t2 id 1 NULL NULL NULL
NULL test t2_ibfk_1 NULL test t2 t1_id 1 NULL id
NULL test t2_ibfk_2 NULL test t2 t1_id 1 NULL id
select table_name from information_schema.TABLES where table_schema like "test%";
table_name
t1
@@ -439,6 +439,9 @@ v
call px5()//
v
9
select sql_mode from information_schema.ROUTINES;
sql_mode
create table t1 (a int not null auto_increment,b int, primary key (a));
insert into t1 values (1,1),(NULL,3),(NULL,4);
select AUTO_INCREMENT from information_schema.tables where table_name = 't1';
@@ -457,32 +460,51 @@ SHOW CREATE TABLE INFORMATION_SCHEMA.CHARACTER_SETS;
Table Create Table
CHARACTER_SETS CREATE TEMPORARY TABLE `CHARACTER_SETS` (
`CHARACTER_SET_NAME` char(30) NOT NULL default '',
`Description` char(60) NOT NULL default '',
`DESCRIPTION` char(60) NOT NULL default '',
`DEFAULT_COLLATE_NAME` char(60) NOT NULL default '',
`Maxlen` bigint(3) NOT NULL default '0'
`MAXLEN` bigint(3) NOT NULL default '0'
) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2282
set names latin2;
SHOW CREATE TABLE INFORMATION_SCHEMA.CHARACTER_SETS;
Table Create Table
CHARACTER_SETS CREATE TEMPORARY TABLE `CHARACTER_SETS` (
`CHARACTER_SET_NAME` char(30) NOT NULL default '',
`Description` char(60) NOT NULL default '',
`DESCRIPTION` char(60) NOT NULL default '',
`DEFAULT_COLLATE_NAME` char(60) NOT NULL default '',
`Maxlen` bigint(3) NOT NULL default '0'
`MAXLEN` bigint(3) NOT NULL default '0'
) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2282
set names latin1;
create table t1 select * from information_schema.CHARACTER_SETS
where CHARACTER_SET_NAME like "latin1";
select * from t1;
CHARACTER_SET_NAME Description DEFAULT_COLLATE_NAME Maxlen
CHARACTER_SET_NAME DESCRIPTION DEFAULT_COLLATE_NAME MAXLEN
latin1 ISO 8859-1 West European latin1_swedish_ci 1
alter table t1 default character set utf8;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`CHARACTER_SET_NAME` char(30) NOT NULL default '',
`Description` char(60) NOT NULL default '',
`DESCRIPTION` char(60) NOT NULL default '',
`DEFAULT_COLLATE_NAME` char(60) NOT NULL default '',
`Maxlen` bigint(3) NOT NULL default '0'
`MAXLEN` bigint(3) NOT NULL default '0'
) ENGINE=MyISAM DEFAULT CHARSET=utf8
drop table t1;
create view v1 as select * from information_schema.TABLES;
drop view v1;
create table t1(a NUMERIC(5,3), b NUMERIC(5,1), c float(5,2),
d NUMERIC(6,4), e float, f DECIMAL(6,3), g int(11), h DOUBLE(10,3),
i DOUBLE);
select COLUMN_NAME,COLUMN_TYPE, CHARACTER_MAXIMUM_LENGTH,
CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE
from information_schema.columns where table_name= 't1';
COLUMN_NAME COLUMN_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE
a decimal(5,3) 7 7 5 3
b decimal(5,1) 7 7 5 1
c float(5,2) 5 5 5 2
d decimal(6,4) 8 8 6 4
e float 12 12 12 NULL
f decimal(6,3) 8 8 6 3
g int(11) 11 11 11 0
h double(10,3) 10 10 10 3
i double 22 22 22 NULL
drop table t1;


@@ -529,6 +529,7 @@ show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a A NULL NULL NULL YES BTREE disabled
create table t2 (a int);
set @@rand_seed1=31415926,@@rand_seed2=2718281828;
insert t1 select * from t2;
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment


@@ -78,9 +78,9 @@ unique key(a)
) engine=ndb;
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '2' for key 1
insert into t1 values(3, 'AAA');
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '3' for key 1
select * from t1 order by p;
p a
1 aAa


@@ -0,0 +1,416 @@
drop table if exists t1;
SET NAMES binary;
use mysql;
alter table columns_priv engine=ndb;
alter table db engine=ndb;
alter table func engine=ndb;
alter table help_category engine=ndb;
alter table help_keyword engine=ndb;
alter table help_relation engine=ndb;
alter table help_topic engine=ndb;
alter table host engine=ndb;
alter table tables_priv engine=ndb;
alter table time_zone engine=ndb;
alter table time_zone_leap_second engine=ndb;
alter table time_zone_name engine=ndb;
alter table time_zone_transition engine=ndb;
alter table time_zone_transition_type engine=ndb;
alter table user engine=ndb;
use test;
delete from mysql.user where user='mysqltest_1';
delete from mysql.db where user='mysqltest_1';
flush privileges;
begin;
grant select on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA";
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'
GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
begin;
grant delete on mysqltest.* to mysqltest_1@localhost;
commit;
select * from mysql.user where user="mysqltest_1";
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections
localhost mysqltest_1 N N N N N N N N N N N N N N N N N N N N N SPECIFIED EDH-RSA-DES-CBC3-SHA 0 0 0
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'
GRANT SELECT, DELETE ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
begin;
revoke delete on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'
GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
begin;
grant select on mysqltest.* to mysqltest_1@localhost require NONE;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
begin;
grant USAGE on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA" AND SUBJECT "testsubject" ISSUER "MySQL AB";
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE ISSUER 'MySQL AB' SUBJECT 'testsubject' CIPHER 'EDH-RSA-DES-CBC3-SHA'
GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
begin;
revoke all privileges on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE ISSUER 'MySQL AB' SUBJECT 'testsubject' CIPHER 'EDH-RSA-DES-CBC3-SHA'
delete from mysql.user where user='mysqltest_1';
flush privileges;
begin;
grant CREATE TEMPORARY TABLES, LOCK TABLES on mysqltest.* to mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT CREATE TEMPORARY TABLES, LOCK TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
flush privileges;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT CREATE TEMPORARY TABLES, LOCK TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
begin;
revoke CREATE TEMPORARY TABLES on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT LOCK TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost'
begin;
grant ALL PRIVILEGES on mysqltest.* to mysqltest_1@localhost with GRANT OPTION;
commit;
flush privileges;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT ALL PRIVILEGES ON `mysqltest`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION
begin;
revoke LOCK TABLES, ALTER on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, REFERENCES, INDEX, CREATE TEMPORARY TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION
begin;
revoke all privileges on mysqltest.* from mysqltest_1@localhost;
commit;
delete from mysql.user where user='mysqltest_1';
flush privileges;
begin;
grant usage on test.* to mysqltest_1@localhost with grant option;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT USAGE ON `mysqltest`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION
GRANT USAGE ON `test`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION
delete from mysql.user where user='mysqltest_1';
delete from mysql.db where user='mysqltest_1';
delete from mysql.tables_priv where user='mysqltest_1';
delete from mysql.columns_priv where user='mysqltest_1';
flush privileges;
show grants for mysqltest_1@localhost;
ERROR 42000: There is no such grant defined for user 'mysqltest_1' on host 'localhost'
create table t1 (a int);
begin;
GRANT select,update,insert on t1 to mysqltest_1@localhost;
GRANT select (a), update (a),insert(a), references(a) on t1 to mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT SELECT, SELECT (a), INSERT, INSERT (a), UPDATE, UPDATE (a), REFERENCES (a) ON `test`.`t1` TO 'mysqltest_1'@'localhost'
select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1";
table_priv column_priv
Select,Insert,Update Select,Insert,Update,References
begin;
REVOKE select (a), update on t1 from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT SELECT, INSERT, INSERT (a), REFERENCES (a) ON `test`.`t1` TO 'mysqltest_1'@'localhost'
begin;
REVOKE select,update,insert,insert (a) on t1 from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost'
GRANT REFERENCES (a) ON `test`.`t1` TO 'mysqltest_1'@'localhost'
begin;
GRANT select,references on t1 to mysqltest_1@localhost;
commit;
select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1";
table_priv column_priv
Select,References References
begin;
grant all on test.* to mysqltest_3@localhost with grant option;
revoke all on test.* from mysqltest_3@localhost;
commit;
show grants for mysqltest_3@localhost;
Grants for mysqltest_3@localhost
GRANT USAGE ON *.* TO 'mysqltest_3'@'localhost'
GRANT USAGE ON `test`.* TO 'mysqltest_3'@'localhost' WITH GRANT OPTION
begin;
revoke grant option on test.* from mysqltest_3@localhost;
commit;
show grants for mysqltest_3@localhost;
Grants for mysqltest_3@localhost
GRANT USAGE ON *.* TO 'mysqltest_3'@'localhost'
begin;
grant all on test.t1 to mysqltest_2@localhost with grant option;
revoke all on test.t1 from mysqltest_2@localhost;
commit;
show grants for mysqltest_2@localhost;
Grants for mysqltest_2@localhost
GRANT USAGE ON *.* TO 'mysqltest_2'@'localhost'
GRANT USAGE ON `test`.`t1` TO 'mysqltest_2'@'localhost' WITH GRANT OPTION
begin;
revoke grant option on test.t1 from mysqltest_2@localhost;
commit;
show grants for mysqltest_2@localhost;
Grants for mysqltest_2@localhost
GRANT USAGE ON *.* TO 'mysqltest_2'@'localhost'
delete from mysql.user where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
delete from mysql.db where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
delete from mysql.tables_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
delete from mysql.columns_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
flush privileges;
drop table t1;
begin;
GRANT FILE on mysqltest.* to mysqltest_1@localhost;
ERROR HY000: Incorrect usage of DB GRANT and GLOBAL PRIVILEGES
commit;
select 1;
1
1
create database mysqltest1;
begin;
grant usage on mysqltest1.* to test6123 identified by 'magic123';
commit;
select host,db,user,select_priv,insert_priv from mysql.db where db="mysqltest1";
host db user select_priv insert_priv
delete from mysql.user where user='test6123';
drop database mysqltest1;
create table t1 (a int);
begin;
grant ALL PRIVILEGES on *.* to drop_user2@localhost with GRANT OPTION;
commit;
show grants for drop_user2@localhost;
Grants for drop_user2@localhost
GRANT ALL PRIVILEGES ON *.* TO 'drop_user2'@'localhost' WITH GRANT OPTION
begin;
revoke all privileges, grant option from drop_user2@localhost;
commit;
drop user drop_user2@localhost;
begin;
grant ALL PRIVILEGES on *.* to drop_user@localhost with GRANT OPTION;
grant ALL PRIVILEGES on test.* to drop_user@localhost with GRANT OPTION;
grant select(a) on test.t1 to drop_user@localhost;
commit;
show grants for drop_user@localhost;
Grants for drop_user@localhost
GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT ALL PRIVILEGES ON `test`.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT SELECT (a) ON `test`.`t1` TO 'drop_user'@'localhost'
set sql_mode=ansi_quotes;
show grants for drop_user@localhost;
Grants for drop_user@localhost
GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT ALL PRIVILEGES ON "test".* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT SELECT (a) ON "test"."t1" TO 'drop_user'@'localhost'
set sql_mode=default;
set sql_quote_show_create=0;
show grants for drop_user@localhost;
Grants for drop_user@localhost
GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT ALL PRIVILEGES ON test.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT SELECT (a) ON test.t1 TO 'drop_user'@'localhost'
set sql_mode="ansi_quotes";
show grants for drop_user@localhost;
Grants for drop_user@localhost
GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT ALL PRIVILEGES ON test.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT SELECT (a) ON test.t1 TO 'drop_user'@'localhost'
set sql_quote_show_create=1;
show grants for drop_user@localhost;
Grants for drop_user@localhost
GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT ALL PRIVILEGES ON "test".* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT SELECT (a) ON "test"."t1" TO 'drop_user'@'localhost'
set sql_mode="";
show grants for drop_user@localhost;
Grants for drop_user@localhost
GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT ALL PRIVILEGES ON `test`.* TO 'drop_user'@'localhost' WITH GRANT OPTION
GRANT SELECT (a) ON `test`.`t1` TO 'drop_user'@'localhost'
revoke all privileges, grant option from drop_user@localhost;
show grants for drop_user@localhost;
Grants for drop_user@localhost
GRANT USAGE ON *.* TO 'drop_user'@'localhost'
drop user drop_user@localhost;
begin;
revoke all privileges, grant option from drop_user@localhost;
ERROR HY000: Can't revoke all privileges, grant for one or more of the requested users
commit;
begin;
grant select(a) on test.t1 to drop_user1@localhost;
commit;
flush privileges;
begin;
grant select on test.t1 to drop_user2@localhost;
grant select on test.* to drop_user3@localhost;
grant select on *.* to drop_user4@localhost;
commit;
drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost,
drop_user4@localhost;
ERROR HY000: Can't drop one or more of the requested users
begin;
revoke all privileges, grant option from drop_user1@localhost, drop_user2@localhost,
drop_user3@localhost, drop_user4@localhost;
commit;
drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost,
drop_user4@localhost;
drop table t1;
begin;
grant usage on *.* to mysqltest_1@localhost identified by "password";
grant select, update, insert on test.* to mysqltest@localhost;
commit;
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' IDENTIFIED BY PASSWORD '*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19'
drop user mysqltest_1@localhost;
SET NAMES koi8r;
CREATE DATABASE ÂÄ;
USE ÂÄ;
CREATE TABLE ÔÁÂ (ËÏÌ int);
begin;
GRANT SELECT ON ÂÄ.* TO ÀÚÅÒ@localhost;
commit;
SHOW GRANTS FOR ÀÚÅÒ@localhost;
Grants for ÀÚÅÒ@localhost
GRANT USAGE ON *.* TO 'ÀÚÅÒ'@'localhost'
GRANT SELECT ON `ÂÄ`.* TO 'ÀÚÅÒ'@'localhost'
begin;
REVOKE SELECT ON ÂÄ.* FROM ÀÚÅÒ@localhost;
commit;
begin;
GRANT SELECT ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost;
commit;
SHOW GRANTS FOR ÀÚÅÒ@localhost;
Grants for ÀÚÅÒ@localhost
GRANT USAGE ON *.* TO 'ÀÚÅÒ'@'localhost'
GRANT SELECT ON `ÂÄ`.`ÔÁÂ` TO 'ÀÚÅÒ'@'localhost'
begin;
REVOKE SELECT ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost;
commit;
begin;
GRANT SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost;
commit;
SHOW GRANTS FOR ÀÚÅÒ@localhost;
Grants for ÀÚÅÒ@localhost
GRANT USAGE ON *.* TO 'ÀÚÅÒ'@'localhost'
GRANT SELECT (ËÏÌ) ON `ÂÄ`.`ÔÁÂ` TO 'ÀÚÅÒ'@'localhost'
begin;
REVOKE SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost;
commit;
DROP DATABASE ÂÄ;
SET NAMES latin1;
USE test;
CREATE TABLE t1 (a int );
CREATE TABLE t2 LIKE t1;
CREATE TABLE t3 LIKE t1;
CREATE TABLE t4 LIKE t1;
CREATE TABLE t5 LIKE t1;
CREATE TABLE t6 LIKE t1;
CREATE TABLE t7 LIKE t1;
CREATE TABLE t8 LIKE t1;
CREATE TABLE t9 LIKE t1;
CREATE TABLE t10 LIKE t1;
CREATE DATABASE testdb1;
CREATE DATABASE testdb2;
CREATE DATABASE testdb3;
CREATE DATABASE testdb4;
CREATE DATABASE testdb5;
CREATE DATABASE testdb6;
CREATE DATABASE testdb7;
CREATE DATABASE testdb8;
CREATE DATABASE testdb9;
CREATE DATABASE testdb10;
begin;
GRANT ALL ON testdb1.* TO testuser@localhost;
GRANT ALL ON testdb2.* TO testuser@localhost;
GRANT ALL ON testdb3.* TO testuser@localhost;
GRANT ALL ON testdb4.* TO testuser@localhost;
GRANT ALL ON testdb5.* TO testuser@localhost;
GRANT ALL ON testdb6.* TO testuser@localhost;
GRANT ALL ON testdb7.* TO testuser@localhost;
GRANT ALL ON testdb8.* TO testuser@localhost;
GRANT ALL ON testdb9.* TO testuser@localhost;
GRANT ALL ON testdb10.* TO testuser@localhost;
GRANT SELECT ON test.t1 TO testuser@localhost;
GRANT SELECT ON test.t2 TO testuser@localhost;
GRANT SELECT ON test.t3 TO testuser@localhost;
GRANT SELECT ON test.t4 TO testuser@localhost;
GRANT SELECT ON test.t5 TO testuser@localhost;
GRANT SELECT ON test.t6 TO testuser@localhost;
GRANT SELECT ON test.t7 TO testuser@localhost;
GRANT SELECT ON test.t8 TO testuser@localhost;
GRANT SELECT ON test.t9 TO testuser@localhost;
GRANT SELECT ON test.t10 TO testuser@localhost;
GRANT SELECT (a) ON test.t1 TO testuser@localhost;
GRANT SELECT (a) ON test.t2 TO testuser@localhost;
GRANT SELECT (a) ON test.t3 TO testuser@localhost;
GRANT SELECT (a) ON test.t4 TO testuser@localhost;
GRANT SELECT (a) ON test.t5 TO testuser@localhost;
GRANT SELECT (a) ON test.t6 TO testuser@localhost;
GRANT SELECT (a) ON test.t7 TO testuser@localhost;
GRANT SELECT (a) ON test.t8 TO testuser@localhost;
GRANT SELECT (a) ON test.t9 TO testuser@localhost;
GRANT SELECT (a) ON test.t10 TO testuser@localhost;
commit;
begin;
REVOKE ALL PRIVILEGES, GRANT OPTION FROM testuser@localhost;
commit;
SHOW GRANTS FOR testuser@localhost;
Grants for testuser@localhost
GRANT USAGE ON *.* TO 'testuser'@'localhost'
DROP USER testuser@localhost;
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
DROP DATABASE testdb1;
DROP DATABASE testdb2;
DROP DATABASE testdb3;
DROP DATABASE testdb4;
DROP DATABASE testdb5;
DROP DATABASE testdb6;
DROP DATABASE testdb7;
DROP DATABASE testdb8;
DROP DATABASE testdb9;
DROP DATABASE testdb10;
use mysql;
alter table columns_priv engine=myisam;
alter table db engine=myisam;
alter table func engine=myisam;
alter table help_category engine=myisam;
alter table help_keyword engine=myisam;
alter table help_relation engine=myisam;
alter table help_topic engine=myisam;
alter table host engine=myisam;
alter table tables_priv engine=myisam;
alter table time_zone engine=myisam;
alter table time_zone_leap_second engine=myisam;
alter table time_zone_name engine=myisam;
alter table time_zone_transition engine=myisam;
alter table time_zone_transition_type engine=myisam;
alter table user engine=myisam;
use test;
flush privileges;


@@ -22,7 +22,7 @@ select * from t1 where b = 4 order by a;
a b c
3 4 6
insert into t1 values(8, 2, 3);
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '8' for key 1
select * from t1 order by a;
a b c
1 2 3
@@ -65,7 +65,7 @@ select * from t2 where b = 4 order by a;
a b c
3 4 6
insert into t2 values(8, 2, 3);
ERROR 23000: Can't write, because of unique constraint, to table 't2'
ERROR 23000: Duplicate entry '8' for key 1
select * from t2 order by a;
a b c
1 2 3
@@ -123,7 +123,7 @@ pk a
3 NULL
4 4
insert into t1 values (5,0);
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '5' for key 1
select * from t1 order by pk;
pk a
-1 NULL
@@ -156,7 +156,7 @@ pk a b c
0 NULL 18 NULL
1 3 19 abc
insert into t2 values(2,3,19,'abc');
ERROR 23000: Can't write, because of unique constraint, to table 't2'
ERROR 23000: Duplicate entry '2' for key 1
select * from t2 order by pk;
pk a b c
-1 1 17 NULL


@@ -535,27 +535,46 @@ count(*)
2000
insert into t1 select * from t1 where b < 10 order by pk1;
ERROR 23000: Duplicate entry '9' for key 1
DELETE FROM t1 WHERE pk1=2;
begin;
INSERT IGNORE INTO t1 VALUES(1,2,3);
ERROR HY000: Table storage engine for 't1' doesn't have this option
commit;
select * from t1 where pk1=1;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
pk1 b c
0 0 0
1 1 1
INSERT IGNORE INTO t1 VALUES(1,2,3);
ERROR HY000: Table storage engine for 't1' doesn't have this option
select * from t1 where pk1=1;
2 3 4
rollback;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
pk1 b c
0 0 0
1 1 1
REPLACE INTO t1 values(1, 2, 3);
2 3 4
REPLACE INTO t1 values(1, 78, 3);
select * from t1 where pk1=1;
pk1 b c
1 2 3
INSERT INTO t1 VALUES(1,1,1) ON DUPLICATE KEY UPDATE b=79;
ERROR HY000: Table storage engine for 't1' doesn't have this option
select * from t1 where pk1=1;
1 78 3
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=79;
select * from t1 where pk1 < 4 order by pk1;
pk1 b c
1 2 3
0 0 0
1 79 3
2 3 4
3 79 3
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=pk1+c;
select * from t1 where pk1 < 4 order by pk1;
pk1 b c
0 0 0
1 4 3
2 3 4
3 6 3
DELETE FROM t1 WHERE pk1 = 2 OR pk1 = 4 OR pk1 = 6;
INSERT INTO t1 VALUES(1,1,1),(2,2,17),(3,4,5) ON DUPLICATE KEY UPDATE pk1=b;
select * from t1 where pk1 = b and b != c order by pk1;
pk1 b c
2 2 17
4 4 3
6 6 3
DROP TABLE t1;
CREATE TABLE t1(a INT) ENGINE=ndb;
INSERT IGNORE INTO t1 VALUES (1);


@@ -37,11 +37,10 @@ a b
32 6
drop table t1;
set @@session.auto_increment_increment=100, @@session.auto_increment_offset=10;
show variables like "%auto%";
show variables like "%auto_inc%";
Variable_name Value
auto_increment_increment 100
auto_increment_offset 10
innodb_autoextend_increment 8
create table t1 (a int not null auto_increment, primary key (a)) engine=myisam;
insert into t1 values (NULL),(5),(NULL);
insert into t1 values (250),(NULL);


@@ -97,13 +97,15 @@ select * from t1 where a is null or b is null;
a b
drop table t1;
create table t1 (t datetime);
insert into t1 values (20030102030460),(20030102036301),(20030102240401),(20030132030401),(20031302030460);
insert into t1 values (20030102030460),(20030102036301),(20030102240401),
(20030132030401),(20031302030401),(100001202030401);
Warnings:
Warning 1265 Data truncated for column 't' at row 1
Warning 1265 Data truncated for column 't' at row 2
Warning 1265 Data truncated for column 't' at row 3
Warning 1265 Data truncated for column 't' at row 4
Warning 1265 Data truncated for column 't' at row 5
Warning 1265 Data truncated for column 't' at row 6
select * from t1;
t
0000-00-00 00:00:00
@@ -111,14 +113,18 @@ t
0000-00-00 00:00:00
0000-00-00 00:00:00
0000-00-00 00:00:00
0000-00-00 00:00:00
delete from t1;
insert into t1 values ("20030102030460"),("20030102036301"),("20030102240401"),("20030132030401"),("20031302030460");
insert into t1 values
("2003-01-02 03:04:60"),("2003-01-02 03:63:01"),("2003-01-02 24:04:01"),
("2003-01-32 03:04:01"),("2003-13-02 03:04:01"), ("10000-12-02 03:04:00");
Warnings:
Warning 1264 Out of range value adjusted for column 't' at row 1
Warning 1264 Out of range value adjusted for column 't' at row 2
Warning 1264 Out of range value adjusted for column 't' at row 3
Warning 1264 Out of range value adjusted for column 't' at row 4
Warning 1264 Out of range value adjusted for column 't' at row 5
Warning 1264 Out of range value adjusted for column 't' at row 6
select * from t1;
t
0000-00-00 00:00:00
@@ -126,6 +132,7 @@ t
0000-00-00 00:00:00
0000-00-00 00:00:00
0000-00-00 00:00:00
0000-00-00 00:00:00
delete from t1;
insert into t1 values ("0000-00-00 00:00:00 some trailer"),("2003-01-01 00:00:00 some trailer");
Warnings:


@@ -1110,4 +1110,14 @@ t1 CREATE TABLE `t1` (
`a` char(1) character set latin1 collate latin1_german1_ci default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 as
(select a from t2) union
(select b from t2) union
(select 'c' collate latin1_german1_ci from t2);
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` char(1) character set latin1 collate latin1_german1_ci default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
drop table t2;


@@ -1646,3 +1646,18 @@ insert into v4 values (30);
ERROR HY000: You can't specify target table 'v4' for update in FROM clause
drop view v4, v3, v2, v1;
drop table t1;
create table t1 (a int);
create view v1 as select * from t1;
check table t1,v1;
Table Op Msg_type Msg_text
test.t1 check status OK
test.v1 check status OK
check table v1,t1;
Table Op Msg_type Msg_text
test.v1 check status OK
test.t1 check status OK
drop table t1;
check table v1;
Table Op Msg_type Msg_text
test.v1 check error View 'test.v1' references invalid table(s) or column(s)
drop view v1;


@@ -7,12 +7,6 @@
drop table if exists t1;
--enable_warnings
SET NAMES big5;
#
# Bug 1883: LIKE did not work in some cases with a key.
#
CREATE TABLE t1 (c CHAR(10) CHARACTER SET big5, KEY(c));
INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa');
SELECT * FROM t1 WHERE c LIKE 'aaa%';
DROP TABLE t1;
SET @test_character_set= 'big5';
SET @test_collation= 'big5_chinese_ci';
-- source include/ctype_common.inc


@@ -435,3 +435,7 @@ INSERT INTO t1 VALUES (CONVERT(_ucs2 0x06280648062F0646062F USING utf8));
INSERT INTO t1 VALUES (CONVERT(_ucs2 0x06450647064506270646 USING utf8));
SELECT HEX(CONVERT(col1 USING ucs2)) FROM t1 ORDER BY col1 COLLATE utf8_persian_ci, col1 COLLATE utf8_bin;
DROP TABLE t1;
SET @test_character_set= 'utf8';
SET @test_collation= 'utf8_swedish_ci';
-- source include/ctype_common.inc


@@ -384,6 +384,17 @@ insert into t1 values ('a'),('b'),('c');
select coercibility(max(a)) from t1;
drop table t1;
#
# Bug #6658 MAX(column) returns incorrect coercibility
#
create table t1 (a char character set latin2);
insert into t1 values ('a'),('b');
select charset(max(a)), coercibility(max(a)),
charset(min(a)), coercibility(min(a)) from t1;
create table t2 select max(a),min(a) from t1;
show create table t2;
drop table t2,t1;
#
# aggregate functions on static tables
#


@@ -229,6 +229,13 @@ select @a:=FROM_UNIXTIME(1);
select unix_timestamp(@a);
select unix_timestamp('1969-12-01 19:00:01');
#
# Test for bug #6439 "unix_timestamp() function returns wrong datetime
# values for too big argument". It should return error instead.
#
select from_unixtime(0);
select from_unixtime(2145916800);
#
# Test types from + INTERVAL
#


@@ -217,6 +217,7 @@ end;//
call px5()//
call px5()//
delimiter ;//
select sql_mode from information_schema.ROUTINES;
create table t1 (a int not null auto_increment,b int, primary key (a));
insert into t1 values (1,1),(NULL,3),(NULL,4);
@@ -240,5 +241,14 @@ where CHARACTER_SET_NAME like "latin1";
select * from t1;
alter table t1 default character set utf8;
show create table t1;
drop table t1;
drop table t1;
create view v1 as select * from information_schema.TABLES;
drop view v1;
create table t1(a NUMERIC(5,3), b NUMERIC(5,1), c float(5,2),
d NUMERIC(6,4), e float, f DECIMAL(6,3), g int(11), h DOUBLE(10,3),
i DOUBLE);
select COLUMN_NAME,COLUMN_TYPE, CHARACTER_MAXIMUM_LENGTH,
CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE
from information_schema.columns where table_name= 't1';
drop table t1;


@@ -498,11 +498,12 @@ alter table t1 disable keys;
show keys from t1;
create table t2 (a int);
let $i=1000;
set @@rand_seed1=31415926,@@rand_seed2=2718281828;
--disable_query_log
while ($i)
{
dec $i;
eval insert t2 values (rand()*100000);
insert t2 values (rand()*100000);
}
--enable_query_log
insert t1 select * from t2;


@@ -86,9 +86,9 @@ create table t1 (
# ok
insert into t1 values(1, 'aAa');
# fail
--error 1169
--error 1062
insert into t1 values(2, 'aaa');
--error 1169
--error 1062
insert into t1 values(3, 'AAA');
# 1
select * from t1 order by p;

mysql-test/t/ndb_grant.test (new file, 374 lines)

@@ -0,0 +1,374 @@
-- source include/have_ndb.inc
# Test of GRANT commands
# Cleanup
--disable_warnings
drop table if exists t1;
--enable_warnings
SET NAMES binary;
#
# Alter mysql system tables to ndb
# make sure to alter them all back at the end
#
use mysql;
alter table columns_priv engine=ndb;
alter table db engine=ndb;
alter table func engine=ndb;
alter table help_category engine=ndb;
alter table help_keyword engine=ndb;
alter table help_relation engine=ndb;
alter table help_topic engine=ndb;
alter table host engine=ndb;
alter table tables_priv engine=ndb;
alter table time_zone engine=ndb;
alter table time_zone_leap_second engine=ndb;
alter table time_zone_name engine=ndb;
alter table time_zone_transition engine=ndb;
alter table time_zone_transition_type engine=ndb;
alter table user engine=ndb;
use test;
#
# Test that SSL options works properly
#
delete from mysql.user where user='mysqltest_1';
delete from mysql.db where user='mysqltest_1';
flush privileges;
begin;
grant select on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA";
commit;
show grants for mysqltest_1@localhost;
begin;
grant delete on mysqltest.* to mysqltest_1@localhost;
commit;
select * from mysql.user where user="mysqltest_1";
show grants for mysqltest_1@localhost;
begin;
revoke delete on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
begin;
grant select on mysqltest.* to mysqltest_1@localhost require NONE;
commit;
show grants for mysqltest_1@localhost;
begin;
grant USAGE on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA" AND SUBJECT "testsubject" ISSUER "MySQL AB";
commit;
show grants for mysqltest_1@localhost;
begin;
revoke all privileges on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
delete from mysql.user where user='mysqltest_1';
flush privileges;
#
# Test that the new db privileges are stored/retrieved correctly
#
begin;
grant CREATE TEMPORARY TABLES, LOCK TABLES on mysqltest.* to mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
flush privileges;
show grants for mysqltest_1@localhost;
begin;
revoke CREATE TEMPORARY TABLES on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
begin;
grant ALL PRIVILEGES on mysqltest.* to mysqltest_1@localhost with GRANT OPTION;
commit;
flush privileges;
show grants for mysqltest_1@localhost;
begin;
revoke LOCK TABLES, ALTER on mysqltest.* from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
begin;
revoke all privileges on mysqltest.* from mysqltest_1@localhost;
commit;
delete from mysql.user where user='mysqltest_1';
flush privileges;
begin;
grant usage on test.* to mysqltest_1@localhost with grant option;
commit;
show grants for mysqltest_1@localhost;
delete from mysql.user where user='mysqltest_1';
delete from mysql.db where user='mysqltest_1';
delete from mysql.tables_priv where user='mysqltest_1';
delete from mysql.columns_priv where user='mysqltest_1';
flush privileges;
--error 1141
show grants for mysqltest_1@localhost;
#
# Test what happens when you have the same table and column level grants
#
create table t1 (a int);
begin;
GRANT select,update,insert on t1 to mysqltest_1@localhost;
GRANT select (a), update (a),insert(a), references(a) on t1 to mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1";
begin;
REVOKE select (a), update on t1 from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
begin;
REVOKE select,update,insert,insert (a) on t1 from mysqltest_1@localhost;
commit;
show grants for mysqltest_1@localhost;
begin;
GRANT select,references on t1 to mysqltest_1@localhost;
commit;
select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1";
begin;
grant all on test.* to mysqltest_3@localhost with grant option;
revoke all on test.* from mysqltest_3@localhost;
commit;
show grants for mysqltest_3@localhost;
begin;
revoke grant option on test.* from mysqltest_3@localhost;
commit;
show grants for mysqltest_3@localhost;
begin;
grant all on test.t1 to mysqltest_2@localhost with grant option;
revoke all on test.t1 from mysqltest_2@localhost;
commit;
show grants for mysqltest_2@localhost;
begin;
revoke grant option on test.t1 from mysqltest_2@localhost;
commit;
show grants for mysqltest_2@localhost;
delete from mysql.user where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
delete from mysql.db where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
delete from mysql.tables_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
delete from mysql.columns_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3";
flush privileges;
drop table t1;
#
# Test some error conditions
#
begin;
--error 1221
GRANT FILE on mysqltest.* to mysqltest_1@localhost;
commit;
select 1; -- To test that the previous command didn't cause problems
#
# Bug#6123: GRANT USAGE inserts useless Db row
#
create database mysqltest1;
begin;
grant usage on mysqltest1.* to test6123 identified by 'magic123';
commit;
select host,db,user,select_priv,insert_priv from mysql.db where db="mysqltest1";
delete from mysql.user where user='test6123';
drop database mysqltest1;
#
# Test for 'drop user', 'revoke privileges, grant'
#
create table t1 (a int);
begin;
grant ALL PRIVILEGES on *.* to drop_user2@localhost with GRANT OPTION;
commit;
show grants for drop_user2@localhost;
begin;
revoke all privileges, grant option from drop_user2@localhost;
commit;
drop user drop_user2@localhost;
begin;
grant ALL PRIVILEGES on *.* to drop_user@localhost with GRANT OPTION;
grant ALL PRIVILEGES on test.* to drop_user@localhost with GRANT OPTION;
grant select(a) on test.t1 to drop_user@localhost;
commit;
show grants for drop_user@localhost;
#
# Bug #3086
#
set sql_mode=ansi_quotes;
show grants for drop_user@localhost;
set sql_mode=default;
set sql_quote_show_create=0;
show grants for drop_user@localhost;
set sql_mode="ansi_quotes";
show grants for drop_user@localhost;
set sql_quote_show_create=1;
show grants for drop_user@localhost;
set sql_mode="";
show grants for drop_user@localhost;
revoke all privileges, grant option from drop_user@localhost;
show grants for drop_user@localhost;
drop user drop_user@localhost;
begin;
--error 1269
revoke all privileges, grant option from drop_user@localhost;
commit;
begin;
grant select(a) on test.t1 to drop_user1@localhost;
commit;
flush privileges;
begin;
grant select on test.t1 to drop_user2@localhost;
grant select on test.* to drop_user3@localhost;
grant select on *.* to drop_user4@localhost;
commit;
--error 1268
drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost,
drop_user4@localhost;
begin;
revoke all privileges, grant option from drop_user1@localhost, drop_user2@localhost,
drop_user3@localhost, drop_user4@localhost;
commit;
drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost,
drop_user4@localhost;
drop table t1;
begin;
grant usage on *.* to mysqltest_1@localhost identified by "password";
grant select, update, insert on test.* to mysqltest@localhost;
commit;
show grants for mysqltest_1@localhost;
drop user mysqltest_1@localhost;
#
# Bug #3403 Wrong encoding in SHOW GRANTS output
#
SET NAMES koi8r;
CREATE DATABASE ÂÄ;
USE ÂÄ;
CREATE TABLE ÔÁÂ (ËÏÌ int);
begin;
GRANT SELECT ON ÂÄ.* TO ÀÚÅÒ@localhost;
commit;
SHOW GRANTS FOR ÀÚÅÒ@localhost;
begin;
REVOKE SELECT ON ÂÄ.* FROM ÀÚÅÒ@localhost;
commit;
begin;
GRANT SELECT ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost;
commit;
SHOW GRANTS FOR ÀÚÅÒ@localhost;
begin;
REVOKE SELECT ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost;
commit;
begin;
GRANT SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost;
commit;
SHOW GRANTS FOR ÀÚÅÒ@localhost;
begin;
REVOKE SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost;
commit;
DROP DATABASE ÂÄ;
SET NAMES latin1;
#
# Bug #5831: REVOKE ALL PRIVILEGES, GRANT OPTION does not revoke everything
#
USE test;
CREATE TABLE t1 (a int );
CREATE TABLE t2 LIKE t1;
CREATE TABLE t3 LIKE t1;
CREATE TABLE t4 LIKE t1;
CREATE TABLE t5 LIKE t1;
CREATE TABLE t6 LIKE t1;
CREATE TABLE t7 LIKE t1;
CREATE TABLE t8 LIKE t1;
CREATE TABLE t9 LIKE t1;
CREATE TABLE t10 LIKE t1;
CREATE DATABASE testdb1;
CREATE DATABASE testdb2;
CREATE DATABASE testdb3;
CREATE DATABASE testdb4;
CREATE DATABASE testdb5;
CREATE DATABASE testdb6;
CREATE DATABASE testdb7;
CREATE DATABASE testdb8;
CREATE DATABASE testdb9;
CREATE DATABASE testdb10;
begin;
GRANT ALL ON testdb1.* TO testuser@localhost;
GRANT ALL ON testdb2.* TO testuser@localhost;
GRANT ALL ON testdb3.* TO testuser@localhost;
GRANT ALL ON testdb4.* TO testuser@localhost;
GRANT ALL ON testdb5.* TO testuser@localhost;
GRANT ALL ON testdb6.* TO testuser@localhost;
GRANT ALL ON testdb7.* TO testuser@localhost;
GRANT ALL ON testdb8.* TO testuser@localhost;
GRANT ALL ON testdb9.* TO testuser@localhost;
GRANT ALL ON testdb10.* TO testuser@localhost;
GRANT SELECT ON test.t1 TO testuser@localhost;
GRANT SELECT ON test.t2 TO testuser@localhost;
GRANT SELECT ON test.t3 TO testuser@localhost;
GRANT SELECT ON test.t4 TO testuser@localhost;
GRANT SELECT ON test.t5 TO testuser@localhost;
GRANT SELECT ON test.t6 TO testuser@localhost;
GRANT SELECT ON test.t7 TO testuser@localhost;
GRANT SELECT ON test.t8 TO testuser@localhost;
GRANT SELECT ON test.t9 TO testuser@localhost;
GRANT SELECT ON test.t10 TO testuser@localhost;
GRANT SELECT (a) ON test.t1 TO testuser@localhost;
GRANT SELECT (a) ON test.t2 TO testuser@localhost;
GRANT SELECT (a) ON test.t3 TO testuser@localhost;
GRANT SELECT (a) ON test.t4 TO testuser@localhost;
GRANT SELECT (a) ON test.t5 TO testuser@localhost;
GRANT SELECT (a) ON test.t6 TO testuser@localhost;
GRANT SELECT (a) ON test.t7 TO testuser@localhost;
GRANT SELECT (a) ON test.t8 TO testuser@localhost;
GRANT SELECT (a) ON test.t9 TO testuser@localhost;
GRANT SELECT (a) ON test.t10 TO testuser@localhost;
commit;
begin;
REVOKE ALL PRIVILEGES, GRANT OPTION FROM testuser@localhost;
commit;
SHOW GRANTS FOR testuser@localhost;
DROP USER testuser@localhost;
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
DROP DATABASE testdb1;
DROP DATABASE testdb2;
DROP DATABASE testdb3;
DROP DATABASE testdb4;
DROP DATABASE testdb5;
DROP DATABASE testdb6;
DROP DATABASE testdb7;
DROP DATABASE testdb8;
DROP DATABASE testdb9;
DROP DATABASE testdb10;
#
# Alter mysql system tables back to myisam
#
use mysql;
alter table columns_priv engine=myisam;
alter table db engine=myisam;
alter table func engine=myisam;
alter table help_category engine=myisam;
alter table help_keyword engine=myisam;
alter table help_relation engine=myisam;
alter table help_topic engine=myisam;
alter table host engine=myisam;
alter table tables_priv engine=myisam;
alter table time_zone engine=myisam;
alter table time_zone_leap_second engine=myisam;
alter table time_zone_name engine=myisam;
alter table time_zone_transition engine=myisam;
alter table time_zone_transition_type engine=myisam;
alter table user engine=myisam;
use test;
flush privileges;

View file

@ -21,7 +21,7 @@ select * from t1 where b = 4 order by b;
insert into t1 values(7,8,3);
select * from t1 where b = 4 order by a;
-- error 1169
-- error 1062
insert into t1 values(8, 2, 3);
select * from t1 order by a;
delete from t1 where a = 1;
@ -49,7 +49,7 @@ select * from t2 where c = 6;
insert into t2 values(7,8,3);
select * from t2 where b = 4 order by a;
-- error 1169
-- error 1062
insert into t2 values(8, 2, 3);
select * from t2 order by a;
delete from t2 where a = 1;
@ -92,7 +92,7 @@ insert into t1 values (-1,NULL), (0,0), (1,NULL),(2,2),(3,NULL),(4,4);
select * from t1 order by pk;
--error 1169
--error 1062
insert into t1 values (5,0);
select * from t1 order by pk;
delete from t1 where a = 0;
@ -111,7 +111,7 @@ insert into t2 values (-1,1,17,NULL),(0,NULL,18,NULL),(1,3,19,'abc');
select * from t2 order by pk;
--error 1169
--error 1062
insert into t2 values(2,3,19,'abc');
select * from t2 order by pk;
delete from t2 where c IS NOT NULL;

View file

@ -564,23 +564,37 @@ select count(*) from t1;
--error 1062
insert into t1 select * from t1 where b < 10 order by pk1;
DELETE FROM t1 WHERE pk1=2;
begin;
--error 1031
INSERT IGNORE INTO t1 VALUES(1,2,3);
commit;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
rollback;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
REPLACE INTO t1 values(1, 78, 3);
select * from t1 where pk1=1;
--error 1031
INSERT IGNORE INTO t1 VALUES(1,2,3);
select * from t1 where pk1=1;
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=79;
select * from t1 where pk1 < 4 order by pk1;
REPLACE INTO t1 values(1, 2, 3);
select * from t1 where pk1=1;
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=pk1+c;
select * from t1 where pk1 < 4 order by pk1;
--error 1031
INSERT INTO t1 VALUES(1,1,1) ON DUPLICATE KEY UPDATE b=79;
select * from t1 where pk1=1;
DELETE FROM t1 WHERE pk1 = 2 OR pk1 = 4 OR pk1 = 6;
INSERT INTO t1 VALUES(1,1,1),(2,2,17),(3,4,5) ON DUPLICATE KEY UPDATE pk1=b;
select * from t1 where pk1 = b and b != c order by pk1;
# The following test case currently does not work
#DELETE FROM t1;
#CREATE UNIQUE INDEX bi ON t1(b);
#INSERT INTO t1 VALUES
#(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
#(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
#INSERT INTO t1 VALUES(0,1,0),(21,21,21) ON DUPLICATE KEY UPDATE pk1=b+10,c=b+10;
#select * from t1 order by pk1;
DROP TABLE t1;

View file

@ -26,7 +26,7 @@ connection master;
drop table t1;
set @@session.auto_increment_increment=100, @@session.auto_increment_offset=10;
show variables like "%auto%";
show variables like "%auto_inc%";
create table t1 (a int not null auto_increment, primary key (a)) engine=myisam;
# Insert with 2 insert statements to get better testing of logging

View file

@ -77,10 +77,13 @@ drop table t1;
# warnings (for both strings and numbers)
#
create table t1 (t datetime);
insert into t1 values (20030102030460),(20030102036301),(20030102240401),(20030132030401),(20031302030460);
insert into t1 values (20030102030460),(20030102036301),(20030102240401),
(20030132030401),(20031302030401),(100001202030401);
select * from t1;
delete from t1;
insert into t1 values ("20030102030460"),("20030102036301"),("20030102240401"),("20030132030401"),("20031302030460");
insert into t1 values
("2003-01-02 03:04:60"),("2003-01-02 03:63:01"),("2003-01-02 24:04:01"),
("2003-01-32 03:04:01"),("2003-13-02 03:04:01"), ("10000-12-02 03:04:00");
select * from t1;
delete from t1;
insert into t1 values ("0000-00-00 00:00:00 some trailer"),("2003-01-01 00:00:00 some trailer");

View file

@ -652,5 +652,11 @@ create table t1 as
(select b collate latin1_german1_ci from t2);
show create table t1;
drop table t1;
create table t1 as
(select a from t2) union
(select b from t2) union
(select 'c' collate latin1_german1_ci from t2);
show create table t1;
drop table t1;
drop table t2;

View file

@ -1588,3 +1588,14 @@ create view v4 as select * from v2 where 20 < (select (s1) from t1);
insert into v4 values (30);
drop view v4, v3, v2, v1;
drop table t1;
#
# CHECK TABLE with VIEW
#
create table t1 (a int);
create view v1 as select * from t1;
check table t1,v1;
check table v1,t1;
drop table t1;
check table v1;
drop view v1;

View file

@ -309,7 +309,7 @@ void multi_keycache_free(void)
Get a key cache to be used for a specific table.
SYNOPSIS
multi_key_cache_get()
multi_key_cache_search()
key key to find (usually table path)
uint length Length of key.

View file

@ -28,6 +28,7 @@ ndbapi/NdbIndexScanOperation.hpp \
ndbapi/ndberror.h
mgmapiinclude_HEADERS = \
mgmapi/LocalConfig.hpp \
mgmapi/mgmapi.h \
mgmapi/mgmapi_debug.h

View file

@ -94,13 +94,14 @@ public:
// arbitration result
LoseNodes = 41, // lose on ndb node count
WinGroups = 42, // we win, no need for arbitration
LoseGroups = 43, // we lose, missing node group
Partitioning = 44, // possible network partitioning
WinChoose = 45, // positive reply
LoseChoose = 46, // negative reply
LoseNorun = 47, // arbitrator required but not running
LoseNocfg = 48, // arbitrator required but none configured
WinNodes = 42, // win on ndb node count
WinGroups = 43, // we win, no need for arbitration
LoseGroups = 44, // we lose, missing node group
Partitioning = 45, // possible network partitioning
WinChoose = 46, // positive reply
LoseChoose = 47, // negative reply
LoseNorun = 48, // arbitrator required but not running
LoseNocfg = 49, // arbitrator required but none configured
// general error codes
ErrTicket = 91, // invalid arbitrator-ticket

View file

@ -132,9 +132,10 @@ class TupAddAttrConf {
friend class Dblqh;
friend class Dbtup;
public:
STATIC_CONST( SignalLength = 1 );
STATIC_CONST( SignalLength = 2 );
private:
Uint32 userPtr;
Uint32 lastAttr; // bool: got last attr and closed frag op
};
class TupAddAttrRef {
@ -171,9 +172,10 @@ class TuxAddAttrConf {
friend class Dblqh;
friend class Dbtux;
public:
STATIC_CONST( SignalLength = 1 );
STATIC_CONST( SignalLength = 2 );
private:
Uint32 userPtr;
Uint32 lastAttr; // bool: got last attr and closed frag op
};
class TuxAddAttrRef {

View file

@ -421,6 +421,11 @@ EventLogger::getText(char * m_text, size_t m_text_len,
"%sArbitration check lost - less than 1/2 nodes left",
theNodeId);
break;
case ArbitCode::WinNodes:
BaseString::snprintf(m_text, m_text_len,
"%sArbitration check won - all node groups and more than 1/2 nodes left",
theNodeId);
break;
case ArbitCode::WinGroups:
BaseString::snprintf(m_text, m_text_len,
"%sArbitration check won - node group majority",

View file

@ -582,7 +582,7 @@ NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
cs->cset != 0 &&
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->strxfrm_multiply == 1; // current limitation
cs->strxfrm_multiply <= 1; // current limitation
}
break;
case Type::Varchar:
@ -618,7 +618,7 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->coll->strnncollsp != 0 &&
cs->strxfrm_multiply == 1; // current limitation
cs->strxfrm_multiply <= 1; // current limitation
}
break;
case Type::Varchar:
@ -633,7 +633,7 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->coll->strnncollsp != 0 &&
cs->strxfrm_multiply == 1; // current limitation
cs->strxfrm_multiply <= 1; // current limitation
}
break;
default:

View file

@ -2,7 +2,7 @@ Next QMGR 1
Next NDBCNTR 1000
Next NDBFS 2000
Next DBACC 3001
Next DBTUP 4007
Next DBTUP 4013
Next DBLQH 5042
Next DBDICT 6006
Next DBDIH 7174
@ -10,7 +10,7 @@ Next DBTC 8035
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
Next DBTUX 12001
Next DBTUX 12007
Next SUMA 13001
TESTING NODE FAILURE, ARBITRATION
@ -393,6 +393,12 @@ Failed Create Table:
--------------------
7173: Create table failed due to an insufficient number of fragment or
replica records.
4007 12001: Fail create 1st fragment
4008 12002: Fail create 2nd fragment
4009 12003: Fail create 1st attribute in 1st fragment
4010 12004: Fail create last attribute in 1st fragment
4011 12005: Fail create 1st attribute in 2nd fragment
4012 12006: Fail create last attribute in 2nd fragment
Drop Table/Index:
-----------------

View file

@ -239,7 +239,11 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::TableName, tablePtr.p->tableName);
w.add(DictTabInfo::TableId, tablePtr.i);
#ifdef HAVE_TABLE_REORG
w.add(DictTabInfo::SecondTableId, tablePtr.p->secondTable);
#else
w.add(DictTabInfo::SecondTableId, (Uint32)0);
#endif
w.add(DictTabInfo::TableVersion, tablePtr.p->tableVersion);
w.add(DictTabInfo::NoOfKeyAttr, tablePtr.p->noOfPrimkey);
w.add(DictTabInfo::NoOfAttributes, tablePtr.p->noOfAttributes);
@ -1436,6 +1440,7 @@ Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
jam();
return RNIL;
}//if
#ifdef HAVE_TABLE_REORG
bool secondFound = false;
for (tablePtr.i = firstTablePtr.i + 1; tablePtr.i < tabSize ; tablePtr.i++) {
jam();
@ -1455,6 +1460,7 @@ Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
firstTablePtr.p->tabState = TableRecord::NOT_DEFINED;
return RNIL;
}//if
#endif
return firstTablePtr.i;
}//Dbdict::getFreeTableRecord()
@ -4623,7 +4629,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
jam();
tablePtr.p->tabState = TableRecord::DEFINING;
}//if
#ifdef HAVE_TABLE_REORG
/* ---------------------------------------------------------------- */
// Get id of second table id and check that table doesn't already exist
// and set up links between first and second table.
@ -4637,7 +4643,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
secondTablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
secondTablePtr.p->secondTable = tablePtr.i;
tablePtr.p->secondTable = secondTablePtr.i;
#endif
/* ---------------------------------------------------------------- */
// Set table version
/* ---------------------------------------------------------------- */
@ -5535,10 +5541,12 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
nextAttrRecord = attrPtr.p->nextAttrInTable;
c_attributeRecordPool.release(attrPtr);
}//if
#ifdef HAVE_TABLE_REORG
Uint32 secondTableId = tablePtr.p->secondTable;
initialiseTableRecord(tablePtr);
c_tableRecordPool.getPtr(tablePtr, secondTableId);
initialiseTableRecord(tablePtr);
#endif
return;
}//releaseTableObject()

View file

@ -151,10 +151,10 @@ public:
/* Temporary record used during add/drop table */
Uint32 myConnect;
#ifdef HAVE_TABLE_REORG
/* Second table used by this table (for table reorg) */
Uint32 secondTable;
#endif
/* Next record in Pool */
Uint32 nextPool;

View file

@ -2474,7 +2474,7 @@ private:
void sendExecFragRefLab(Signal* signal);
void fragrefLab(Signal* signal, BlockReference retRef,
Uint32 retPtr, Uint32 errorCode);
void accFragRefLab(Signal* signal);
void abortAddFragOps(Signal* signal);
void rwConcludedLab(Signal* signal);
void sendsttorryLab(Signal* signal);
void initialiseRecordsLab(Signal* signal, Uint32 data, Uint32, Uint32);

View file

@ -912,6 +912,10 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
/* *********************************************************> */
/* LQHFRAGREQ: Create new fragments for a table. Sender DICT */
/* *********************************************************> */
// this unbelievable mess could be replaced by one signal to LQH
// and execute direct to local DICT to get everything at once
void Dblqh::execLQHFRAGREQ(Signal* signal)
{
jamEntry();
@ -1049,6 +1053,11 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->lh3DistrBits = tlhstar;
addfragptr.p->tableType = tableType;
addfragptr.p->primaryTableId = primaryTableId;
//
addfragptr.p->tup1Connectptr = RNIL;
addfragptr.p->tup2Connectptr = RNIL;
addfragptr.p->tux1Connectptr = RNIL;
addfragptr.p->tux2Connectptr = RNIL;
if (DictTabInfo::isTable(tableType) ||
DictTabInfo::isHashIndex(tableType)) {
@ -1329,15 +1338,21 @@ void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
{
jamEntry();
addfragptr.i = signal->theData[0];
// implies that operation was released on the other side
const bool lastAttr = signal->theData[1];
ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
switch (addfragptr.p->addfragStatus) {
case AddFragRecord::TUP_ATTR_WAIT1:
jam();
if (lastAttr)
addfragptr.p->tup1Connectptr = RNIL;
addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT2;
sendAddAttrReq(signal);
break;
case AddFragRecord::TUP_ATTR_WAIT2:
jam();
if (lastAttr)
addfragptr.p->tup2Connectptr = RNIL;
if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT1;
sendAddAttrReq(signal);
@ -1347,11 +1362,15 @@ void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
break;
case AddFragRecord::TUX_ATTR_WAIT1:
jam();
if (lastAttr)
addfragptr.p->tux1Connectptr = RNIL;
addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT2;
sendAddAttrReq(signal);
break;
case AddFragRecord::TUX_ATTR_WAIT2:
jam();
if (lastAttr)
addfragptr.p->tux2Connectptr = RNIL;
goto done_with_attr;
break;
done_with_attr:
@ -1455,6 +1474,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
jam();
TupAddAttrConf* tupconf = (TupAddAttrConf*)signal->getDataPtrSend();
tupconf->userPtr = addfragptr.i;
tupconf->lastAttr = false;
sendSignal(reference(), GSN_TUP_ADD_ATTCONF,
signal, TupAddAttrConf::SignalLength, JBB);
return;
@ -1485,6 +1505,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
jam();
TuxAddAttrConf* tuxconf = (TuxAddAttrConf*)signal->getDataPtrSend();
tuxconf->userPtr = addfragptr.i;
tuxconf->lastAttr = false;
sendSignal(reference(), GSN_TUX_ADD_ATTRCONF,
signal, TuxAddAttrConf::SignalLength, JBB);
return;
@ -1549,6 +1570,40 @@ void Dblqh::fragrefLab(Signal* signal,
return;
}//Dblqh::fragrefLab()
/*
* Abort on-going ops.
*/
void Dblqh::abortAddFragOps(Signal* signal)
{
fragptr.i = addfragptr.p->fragmentPtr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
signal->theData[0] = (Uint32)-1;
if (addfragptr.p->tup1Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tup1Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup1Connectptr = RNIL;
}
if (addfragptr.p->tup2Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tup2Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup2Connectptr = RNIL;
}
if (addfragptr.p->tux1Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tux1Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux1Connectptr = RNIL;
}
if (addfragptr.p->tux2Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tux2Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux2Connectptr = RNIL;
}
}
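Both TUP and TUX recognize theData[0] == (Uint32)-1 as the abort marker sent here (see the execTUPFRAGREQ/execTUXFRAGREQ hunks further down). A minimal sketch of that convention, with the signal layout simplified and every name invented:
#include <cstdint>
#include <cstdio>
// Simplified stand-in for an NDB signal; not the real class.
struct Signal { uint32_t theData[2]; };
void execFRAGREQ(Signal* signal) {
  if (signal->theData[0] == (uint32_t)-1) {
    // Abort path: theData[1] carries the operation record to release.
    printf("abort, release op %u\n", signal->theData[1]);
    return;
  }
  printf("normal create-fragment path\n");
}
int main() {
  Signal s = { { (uint32_t)-1, 7 } };
  execFRAGREQ(&s); // prints: abort, release op 7
  return 0;
}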
/* ************>> */
/* ACCFRAGREF > */
/* ************>> */
@ -1582,6 +1637,27 @@ void Dblqh::execTUPFRAGREF(Signal* signal)
fragptr.i = addfragptr.p->fragmentPtr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
addfragptr.p->addfragErrorCode = terrorCode;
// no operation to release, just add some jams
switch (addfragptr.p->addfragStatus) {
case AddFragRecord::WAIT_TWO_TUP:
jam();
break;
case AddFragRecord::WAIT_ONE_TUP:
jam();
break;
case AddFragRecord::WAIT_TWO_TUX:
jam();
break;
case AddFragRecord::WAIT_ONE_TUX:
jam();
break;
default:
ndbrequire(false);
break;
}
abortAddFragOps(signal);
const Uint32 ref = addfragptr.p->dictBlockref;
const Uint32 senderData = addfragptr.p->dictConnectptr;
const Uint32 errorCode = addfragptr.p->addfragErrorCode;
@ -1605,11 +1681,38 @@ void Dblqh::execTUXFRAGREF(Signal* signal)
void Dblqh::execTUP_ADD_ATTRREF(Signal* signal)
{
jamEntry();
addfragptr.i = signal->theData[0];
ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
terrorCode = signal->theData[1];
addfragptr.p->addfragErrorCode = terrorCode;
// operation was released on the other side
switch (addfragptr.p->addfragStatus) {
case AddFragRecord::TUP_ATTR_WAIT1:
jam();
ndbrequire(addfragptr.p->tup1Connectptr != RNIL);
addfragptr.p->tup1Connectptr = RNIL;
break;
case AddFragRecord::TUP_ATTR_WAIT2:
jam();
ndbrequire(addfragptr.p->tup2Connectptr != RNIL);
addfragptr.p->tup2Connectptr = RNIL;
break;
case AddFragRecord::TUX_ATTR_WAIT1:
jam();
ndbrequire(addfragptr.p->tux1Connectptr != RNIL);
addfragptr.p->tux1Connectptr = RNIL;
break;
case AddFragRecord::TUX_ATTR_WAIT2:
jam();
ndbrequire(addfragptr.p->tux2Connectptr != RNIL);
addfragptr.p->tux2Connectptr = RNIL;
break;
default:
ndbrequire(false);
break;
}
abortAddFragOps(signal);
const Uint32 Ref = addfragptr.p->dictBlockref;
const Uint32 senderData = addfragptr.p->dictConnectptr;

View file

@ -504,6 +504,7 @@ struct Fragoperrec {
Uint32 noOfNewAttrCount;
Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
bool inUse;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
@ -1936,6 +1937,7 @@ private:
void setUpKeyArray(Tablerec* const regTabPtr);
bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
void abortAddFragOp(Signal* signal);
void releaseTabDescr(Tablerec* const regTabPtr);
void getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr);

View file

@ -39,11 +39,18 @@
/* ---------------------------------------------------------------- */
void Dbtup::execTUPFRAGREQ(Signal* signal)
{
ljamEntry();
if (signal->theData[0] == (Uint32)-1) {
ljam();
abortAddFragOp(signal);
return;
}
FragoperrecPtr fragOperPtr;
FragrecordPtr regFragPtr;
TablerecPtr regTabPtr;
ljamEntry();
Uint32 userptr = signal->theData[0];
Uint32 userblockref = signal->theData[1];
Uint32 reqinfo = signal->theData[2];
@ -132,6 +139,15 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
return;
}//if
if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
ljam();
terrorCode = 1;
fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
CLEAR_ERROR_INSERT_VALUE;
return;
}
if (regTabPtr.p->tableStatus == NOT_DEFINED) {
ljam();
//-------------------------------------------------------------------------------------
@ -243,6 +259,7 @@ void Dbtup::seizeFragoperrec(FragoperrecPtr& fragOperPtr)
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
cfirstfreeFragopr = fragOperPtr.p->nextFragoprec;
fragOperPtr.p->nextFragoprec = RNIL;
fragOperPtr.p->inUse = true;
}//Dbtup::seizeFragoperrec()
/* **************************************************************** */
@ -273,6 +290,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ndbrequire(fragOperPtr.p->attributeCount > 0);
fragOperPtr.p->attributeCount--;
const bool lastAttr = (fragOperPtr.p->attributeCount == 0);
if ((regTabPtr.p->tableStatus == DEFINING) &&
(fragOperPtr.p->definingFragment)) {
@ -346,20 +364,30 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}//if
if ((fragOperPtr.p->attributeCount == 0) &&
(fragOperPtr.p->freeNullBit != 0)) {
if (lastAttr && (fragOperPtr.p->freeNullBit != 0)) {
ljam();
terrorCode = ZINCONSISTENT_NULL_ATTRIBUTE_COUNT;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}//if
}//if
if (ERROR_INSERTED(4009) && regTabPtr.p->fragid[0] == fragId && attrId == 0 ||
ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr ||
ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0 ||
ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr) {
ljam();
terrorCode = 1;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
CLEAR_ERROR_INSERT_VALUE;
return;
}
/* **************************************************************** */
/* ************** TUP_ADD_ATTCONF ****************** */
/* **************************************************************** */
signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, signal, 1, JBB);
if (fragOperPtr.p->attributeCount > 0) {
signal->theData[1] = lastAttr;
sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, signal, 2, JBB);
if (! lastAttr) {
ljam();
return; /* EXIT AND WAIT FOR MORE */
}//if
@ -491,11 +519,11 @@ void Dbtup::fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr)
void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
{
fragOperPtr.p->inUse = false;
fragOperPtr.p->nextFragoprec = cfirstfreeFragopr;
cfirstfreeFragopr = fragOperPtr.i;
}//Dbtup::releaseFragoperrec()
void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
@ -510,6 +538,20 @@ void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
ndbrequire(false);
}//Dbtup::deleteFragTab()
/*
* LQH aborts on-going create table operation. The table is later
* dropped by DICT.
*/
void Dbtup::abortAddFragOp(Signal* signal)
{
FragoperrecPtr fragOperPtr;
fragOperPtr.i = signal->theData[1];
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
ndbrequire(fragOperPtr.p->inUse);
releaseFragoperrec(fragOperPtr);
}
void
Dbtup::execDROP_TAB_REQ(Signal* signal)
{

View file

@ -575,6 +575,7 @@ private:
void execDROP_TAB_REQ(Signal* signal);
bool allocDescEnt(IndexPtr indexPtr);
void freeDescEnt(IndexPtr indexPtr);
void abortAddFragOp(Signal* signal);
void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData);
/*
@ -684,6 +685,7 @@ private:
friend class NdbOut& operator<<(NdbOut&, const ScanOp&);
friend class NdbOut& operator<<(NdbOut&, const Index&);
friend class NdbOut& operator<<(NdbOut&, const Frag&);
friend class NdbOut& operator<<(NdbOut&, const FragOp&);
friend class NdbOut& operator<<(NdbOut&, const NodeHandle&);
FILE* debugFile;
NdbOut debugOut;

View file

@ -403,6 +403,19 @@ operator<<(NdbOut& out, const Dbtux::Frag& frag)
return out;
}
NdbOut&
operator<<(NdbOut& out, const Dbtux::FragOp& fragOp)
{
out << "[FragOp " << hex << &fragOp;
out << " [userPtr " << dec << fragOp.m_userPtr << "]";
out << " [indexId " << dec << fragOp.m_indexId << "]";
out << " [fragId " << dec << fragOp.m_fragId << "]";
out << " [fragNo " << dec << fragOp.m_fragNo << "]";
out << " numAttrsRecvd " << dec << fragOp.m_numAttrsRecvd << "]";
out << "]";
return out;
}
NdbOut&
operator<<(NdbOut& out, const Dbtux::NodeHandle& node)
{

View file

@ -24,12 +24,7 @@ Dbtux::Dbtux(const Configuration& conf) :
#ifdef VM_TRACE
debugFile(0),
debugOut(*new NullOutputStream()),
// until ndb_mgm supports dump
#ifdef DBTUX_DEBUG_TREE
debugFlags(DebugTree),
#else
debugFlags(0),
#endif
#endif
c_internalStartPhase(0),
c_typeOfStart(NodeState::ST_ILLEGAL_TYPE),
@ -86,7 +81,7 @@ Dbtux::execCONTINUEB(Signal* signal)
jamEntry();
const Uint32* data = signal->getDataPtr();
switch (data[0]) {
case TuxContinueB::DropIndex:
case TuxContinueB::DropIndex: // currently unused
{
IndexPtr indexPtr;
c_indexPool.getPtr(indexPtr, data[1]);
@ -174,7 +169,7 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
const Uint32 nDescPage = (nIndex + nAttribute + DescPageSize - 1) / DescPageSize;
const Uint32 nDescPage = (nIndex * DescHeadSize + nAttribute * DescAttrSize + DescPageSize - 1) / DescPageSize;
const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4;
c_indexPool.setSize(nIndex);
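The corrected nDescPage computation above sizes the descriptor pool by words, rounded up to whole pages. A minimal standalone sketch of that ceiling division, with all constants assumed for illustration (the real DescPageSize, DescHeadSize and DescAttrSize live in the TUX headers):
#include <cstdio>
int main() {
  // Assumed stand-in values, not the real TUX constants.
  const unsigned DescPageSize = 256, DescHeadSize = 4, DescAttrSize = 4;
  const unsigned nIndex = 10, nAttribute = 100;
  // Total descriptor words, rounded up to whole pages (ceiling division).
  const unsigned nDescPage =
    (nIndex * DescHeadSize + nAttribute * DescAttrSize + DescPageSize - 1)
    / DescPageSize;
  printf("nDescPage = %u\n", nDescPage); // (40 + 400 + 255) / 256 = 2
  return 0;
}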

View file

@ -29,6 +29,11 @@ void
Dbtux::execTUXFRAGREQ(Signal* signal)
{
jamEntry();
if (signal->theData[0] == (Uint32)-1) {
jam();
abortAddFragOp(signal);
return;
}
const TuxFragReq reqCopy = *(const TuxFragReq*)signal->getDataPtr();
const TuxFragReq* const req = &reqCopy;
IndexPtr indexPtr;
@ -61,6 +66,11 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
fragOpPtr.p->m_fragId = req->fragId;
fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags;
fragOpPtr.p->m_numAttrsRecvd = 0;
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Seize frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
// check if index has place for more fragments
ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
// seize new fragment record
@ -129,6 +139,14 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
debugOut << "Add frag " << fragPtr.i << " " << *fragPtr.p << endl;
}
#endif
// error inserts
if (ERROR_INSERTED(12001) && fragOpPtr.p->m_fragNo == 0 ||
ERROR_INSERTED(12002) && fragOpPtr.p->m_fragNo == 1) {
jam();
errorCode = (TuxFragRef::ErrorCode)1;
CLEAR_ERROR_INSERT_VALUE;
break;
}
// success
TuxFragConf* const conf = (TuxFragConf*)signal->getDataPtrSend();
conf->userPtr = req->userPtr;
@ -145,10 +163,18 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
ref->errorCode = errorCode;
sendSignal(req->userRef, GSN_TUXFRAGREF,
signal, TuxFragRef::SignalLength, JBB);
if (fragOpPtr.i != RNIL)
if (fragOpPtr.i != RNIL) {
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release on frag error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
if (indexPtr.i != RNIL)
dropIndex(signal, indexPtr, 0, 0);
}
if (indexPtr.i != RNIL) {
jam();
// let DICT drop the unfinished index
}
}
void
@ -203,7 +229,16 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
}
}
#endif
if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) {
const bool lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd);
if (ERROR_INSERTED(12003) && fragOpPtr.p->m_fragNo == 0 && attrId == 0 ||
ERROR_INSERTED(12004) && fragOpPtr.p->m_fragNo == 0 && lastAttr ||
ERROR_INSERTED(12005) && fragOpPtr.p->m_fragNo == 1 && attrId == 0 ||
ERROR_INSERTED(12006) && fragOpPtr.p->m_fragNo == 1 && lastAttr) {
errorCode = (TuxAddAttrRef::ErrorCode)1;
CLEAR_ERROR_INSERT_VALUE;
break;
}
if (lastAttr) {
jam();
// initialize tree header
TreeHead& tree = fragPtr.p->m_tree;
@ -246,11 +281,17 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
}
#endif
// fragment is defined
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
}
// success
TuxAddAttrConf* conf = (TuxAddAttrConf*)signal->getDataPtrSend();
conf->userPtr = fragOpPtr.p->m_userPtr;
conf->lastAttr = lastAttr;
sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRCONF,
signal, TuxAddAttrConf::SignalLength, JBB);
return;
@ -261,8 +302,32 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
ref->errorCode = errorCode;
sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRREF,
signal, TuxAddAttrRef::SignalLength, JBB);
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release on attr error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
dropIndex(signal, indexPtr, 0, 0);
// let DICT drop the unfinished index
}
/*
* LQH aborts on-going create index operation.
*/
void
Dbtux::abortAddFragOp(Signal* signal)
{
FragOpPtr fragOpPtr;
IndexPtr indexPtr;
c_fragOpPool.getPtr(fragOpPtr, signal->theData[1]);
c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release on abort frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
// let DICT drop the unfinished index
}
/*
@ -341,20 +406,13 @@ Dbtux::dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 sen
{
jam();
indexPtr.p->m_state = Index::Dropping;
// drop one fragment at a time
if (indexPtr.p->m_numFrags > 0) {
// drop fragments
while (indexPtr.p->m_numFrags > 0) {
jam();
unsigned i = --indexPtr.p->m_numFrags;
Uint32 i = --indexPtr.p->m_numFrags;
FragPtr fragPtr;
c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
c_fragPool.release(fragPtr);
// the real time break is not used for anything currently
signal->theData[0] = TuxContinueB::DropIndex;
signal->theData[1] = indexPtr.i;
signal->theData[2] = senderRef;
signal->theData[3] = senderData;
sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
return;
}
// drop attributes
if (indexPtr.p->m_descPage != RNIL) {

View file

@ -2946,6 +2946,12 @@ void Qmgr::sendPrepFailReq(Signal* signal, Uint16 aNode)
* the "handle" routines.
*/
/**
* Whether a surviving set of < 1/2 of the nodes should die unconditionally.
* Affects only >= 3-way replication.
*/
static const bool g_ndb_arbit_one_half_rule = false;
/**
* Config signals are logically part of CM_INIT.
*/
@ -3157,7 +3163,8 @@ Qmgr::handleArbitCheck(Signal* signal)
ndbrequire(cpresident == getOwnNodeId());
NodeBitmask ndbMask;
computeArbitNdbMask(ndbMask);
if (2 * ndbMask.count() < cnoOfNodes) {
if (g_ndb_arbit_one_half_rule &&
2 * ndbMask.count() < cnoOfNodes) {
jam();
arbitRec.code = ArbitCode::LoseNodes;
} else {
@ -3181,6 +3188,11 @@ Qmgr::handleArbitCheck(Signal* signal)
case CheckNodeGroups::Partitioning:
jam();
arbitRec.code = ArbitCode::Partitioning;
if (g_ndb_arbit_one_half_rule &&
2 * ndbMask.count() > cnoOfNodes) {
jam();
arbitRec.code = ArbitCode::WinNodes;
}
break;
default:
ndbrequire(false);
@ -3190,8 +3202,12 @@ Qmgr::handleArbitCheck(Signal* signal)
switch (arbitRec.code) {
case ArbitCode::LoseNodes:
jam();
case ArbitCode::LoseGroups:
jam();
goto crashme;
case ArbitCode::WinGroups:
case ArbitCode::WinNodes:
jam();
case ArbitCode::WinGroups:
jam();
if (arbitRec.state == ARBIT_RUN) {
jam();
@ -3200,9 +3216,6 @@ Qmgr::handleArbitCheck(Signal* signal)
arbitRec.state = ARBIT_INIT;
arbitRec.newstate = true;
break;
case ArbitCode::LoseGroups:
jam();
goto crashme;
case ArbitCode::Partitioning:
if (arbitRec.state == ARBIT_RUN) {
jam();
@ -3762,8 +3775,7 @@ Qmgr::execARBIT_CHOOSEREF(Signal* signal)
}
/**
* Handle CRASH state. We must crash immediately. But it
* would be nice to wait until event reports have been sent.
* Handle CRASH state. We must crash immediately.
* XXX tell other nodes in our party to crash too.
*/
void
@ -3773,12 +3785,11 @@ Qmgr::stateArbitCrash(Signal* signal)
if (arbitRec.newstate) {
jam();
CRASH_INSERTION((Uint32)910 + arbitRec.state);
arbitRec.setTimestamp();
arbitRec.code = 0;
arbitRec.newstate = false;
}
#if 0
#ifdef ndb_arbit_crash_wait_for_event_report_to_get_out
if (! (arbitRec.getTimediff() > getArbitTimeout()))
return;
#endif

View file

@ -590,6 +590,23 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
*/
ConfigValuesFactory cfg(ownConfig);
Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes +
noOfUniqueHashIndexes;
if (noOfMetaTables > MAX_TABLES)
noOfMetaTables= MAX_TABLES;
{
/**
* Dict Size Alt values
*/
cfg.put(CFG_DICT_ATTRIBUTE,
noOfAttributes);
cfg.put(CFG_DICT_TABLE,
noOfMetaTables);
}
if (noOfLocalScanRecords == 0) {
noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
}
@ -599,7 +616,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
Uint32 noOfTCScanRecords = noOfScanRecords;
{
Uint32 noOfAccTables= noOfTables + noOfUniqueHashIndexes;
Uint32 noOfAccTables= noOfMetaTables/*noOfTables+noOfUniqueHashIndexes*/;
/**
* Acc Size Alt values
*/
@ -641,19 +658,6 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
cfg.put(CFG_ACC_SCAN, noOfLocalScanRecords);
}
Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes +
noOfUniqueHashIndexes;
{
/**
* Dict Size Alt values
*/
cfg.put(CFG_DICT_ATTRIBUTE,
noOfAttributes);
cfg.put(CFG_DICT_TABLE,
noOfMetaTables);
}
{
/**
* Dih Size Alt values
@ -746,8 +750,8 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfMetaTables);
cfg.put(CFG_TUP_TABLE_DESC,
4 * NO_OF_FRAG_PER_NODE * noOfAttributes* noOfReplicas +
12 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas );
2 * 6 * NO_OF_FRAG_PER_NODE * noOfAttributes * noOfReplicas +
2 * 10 * NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfReplicas );
cfg.put(CFG_TUP_STORED_PROC,
noOfLocalScanRecords);
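As a hedged back-of-envelope check of the doubled CFG_TUP_TABLE_DESC sizing above (all inputs below are assumptions, not values from any real config):
#include <cstdio>
int main() {
  // Assumed inputs, for illustration only.
  const unsigned NO_OF_FRAG_PER_NODE = 8, noOfAttributes = 1000,
                 noOfMetaTables = 128, noOfReplicas = 2;
  const unsigned words =
    2 * 6 * NO_OF_FRAG_PER_NODE * noOfAttributes * noOfReplicas +
    2 * 10 * NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfReplicas;
  printf("table descriptor words = %u\n", words); // 192000 + 40960 = 232960
  return 0;
}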
@ -758,9 +762,9 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tux Size Alt values
*/
cfg.put(CFG_TUX_INDEX,
noOfOrderedIndexes);
noOfMetaTables /*noOfOrderedIndexes*/);
cfg.put(CFG_TUX_FRAGMENT,
cfg.put(CFG_TUX_FRAGMENT,
2 * NO_OF_FRAG_PER_NODE * noOfOrderedIndexes * noOfReplicas);
cfg.put(CFG_TUX_ATTRIBUTE,

View file

@ -193,7 +193,7 @@ extern "C" {
{
return (Ndb_mgmclient_handle) new Ndb_mgmclient(connect_string);
}
int ndb_mgmclient_execute(Ndb_mgmclient_handle h, int argc, const char** argv)
int ndb_mgmclient_execute(Ndb_mgmclient_handle h, int argc, char** argv)
{
return ((Ndb_mgmclient*)h)->execute(argc, argv, 1);
}
@ -226,7 +226,7 @@ extern "C" {
#include <util/InputStream.hpp>
#include <util/OutputStream.hpp>
int Ndb_mgmclient::execute(int argc, const char** argv, int _try_reconnect)
int Ndb_mgmclient::execute(int argc, char** argv, int _try_reconnect)
{
if (argc <= 0)
return 0;
@ -1386,7 +1386,7 @@ void
CommandInterpreter::executeDumpState(int processId, const char* parameters,
bool all)
{
if(parameters == 0 || strlen(parameters) == 0){
if(emptyString(parameters)){
ndbout << "Expected argument" << endl;
return;
}
@ -1806,6 +1806,10 @@ CommandInterpreter::executeEventReporting(int processId,
const char* parameters,
bool all)
{
if (emptyString(parameters)) {
ndbout << "Expected argument" << endl;
return;
}
connect();
BaseString tmp(parameters);
@ -1906,6 +1910,7 @@ void
CommandInterpreter::executeAbortBackup(char* parameters)
{
connect();
strtok(parameters, " ");
struct ndb_mgm_reply reply;
char* id = strtok(NULL, "\0");

View file

@ -23,7 +23,7 @@ extern "C" {
typedef void* Ndb_mgmclient_handle;
Ndb_mgmclient_handle ndb_mgmclient_handle_create(const char *connect_string);
int ndb_mgmclient_execute(Ndb_mgmclient_handle, int argc, const char** argv);
int ndb_mgmclient_execute(Ndb_mgmclient_handle, int argc, char** argv);
int ndb_mgmclient_handle_destroy(Ndb_mgmclient_handle);
#ifdef __cplusplus

View file

@ -24,7 +24,7 @@ public:
Ndb_mgmclient(const char*);
~Ndb_mgmclient();
int execute(const char *_line, int _try_reconnect=-1);
int execute(int argc, const char** argv, int _try_reconnect=-1);
int execute(int argc, char** argv, int _try_reconnect=-1);
int disconnect();
private:
CommandInterpreter *m_cmd;

View file

@ -1571,7 +1571,13 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
NdbApiSignal tSignal(m_reference);
tSignal.theReceiversBlockNumber = DBDICT;
if (alter) {
LinearSectionPtr ptr[3];
ptr[0].p = (Uint32*)m_buffer.get_data();
ptr[0].sz = m_buffer.length() / 4;
int ret;
if (alter)
{
AlterTableReq * const req =
CAST_PTR(AlterTableReq, tSignal.getDataPtrSend());
@ -1582,8 +1588,10 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
req->tableVersion = impl.m_version;
tSignal.theVerId_signalNumber = GSN_ALTER_TABLE_REQ;
tSignal.theLength = AlterTableReq::SignalLength;
ret= alterTable(&tSignal, ptr);
}
else {
else
{
CreateTableReq * const req =
CAST_PTR(CreateTableReq, tSignal.getDataPtrSend());
@ -1591,25 +1599,21 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
req->senderData = 0;
tSignal.theVerId_signalNumber = GSN_CREATE_TABLE_REQ;
tSignal.theLength = CreateTableReq::SignalLength;
}
LinearSectionPtr ptr[3];
ptr[0].p = (Uint32*)m_buffer.get_data();
ptr[0].sz = m_buffer.length() / 4;
ret= createTable(&tSignal, ptr);
int ret = (alter) ?
alterTable(&tSignal, ptr)
: createTable(&tSignal, ptr);
if (ret)
return ret;
if (!alter && haveAutoIncrement) {
if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(),
autoIncrementValue)) {
if (ndb.theError.code == 0) {
m_error.code = 4336;
ndb.theError = m_error;
} else
m_error= ndb.theError;
ret = -1; // errorcode set in initialize_autoincrement
if (haveAutoIncrement) {
if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(),
autoIncrementValue)) {
if (ndb.theError.code == 0) {
m_error.code = 4336;
ndb.theError = m_error;
} else
m_error= ndb.theError;
ret = -1; // errorcode set in initialize_autoincrement
}
}
}
return ret;

View file

@ -272,7 +272,7 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
CHARSET_INFO* cs = tAttrInfo->m_cs;
if (cs != 0) {
// current limitation: strxfrm does not increase length
assert(cs->strxfrm_multiply == 1);
assert(cs->strxfrm_multiply <= 1);
unsigned n =
(*cs->coll->strnxfrm)(cs,
(uchar*)xfrmData, sizeof(xfrmData),

View file

@ -142,7 +142,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
CHARSET_INFO* cs = tAttrInfo->m_cs;
if (cs != 0) {
// current limitation: strxfrm does not increase length
assert(cs->strxfrm_multiply == 1);
assert(cs->strxfrm_multiply <= 1);
unsigned n =
(*cs->coll->strnxfrm)(cs,
(uchar*)xfrmData, sizeof(xfrmData),

View file

@ -1091,7 +1091,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
Uint32 xfrmData[2000];
if (cs != NULL && aValue != NULL) {
// current limitation: strxfrm does not increase length
assert(cs->strxfrm_multiply == 1);
assert(cs->strxfrm_multiply <= 1);
unsigned n =
(*cs->coll->strnxfrm)(cs,
(uchar*)xfrmData, sizeof(xfrmData),

View file

@ -53,6 +53,9 @@ typedef struct ErrorBundle {
#define NI ndberror_cl_function_not_implemented
#define UE ndberror_cl_unknown_error_code
static const char REDO_BUFFER_MSG[]=
"REDO log buffers overloaded, consult online manual (increase RedoBuffer, and|or decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)";
static const char* empty_string = "";
static
@ -137,9 +140,8 @@ ErrorBundle ErrorCodes[] = {
{ 805, TR, "Out of attrinfo records in tuple manager" },
{ 830, TR, "Out of add fragment operation records" },
{ 873, TR, "Out of attrinfo records for scan in tuple manager" },
{ 1217, TR, "1217" },
{ 1219, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
{ 1220, TR, "1220" },
{ 1217, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
{ 1220, TR, REDO_BUFFER_MSG },
{ 1222, TR, "Out of transaction markers in LQH" },
{ 4021, TR, "Out of Send Buffer space in NDB API" },
{ 4022, TR, "Out of Send Buffer space in NDB API" },
@ -165,14 +167,13 @@ ErrorBundle ErrorCodes[] = {
{ 297, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */
{ 237, TO, "Transaction had timed out when trying to commit it" },
/**
* OverloadError
*/
{ 410, OL, "Out of log file space temporarily" },
{ 410, OL, REDO_BUFFER_MSG },
{ 677, OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" },
{ 891, OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" },
{ 1221, OL, "REDO log buffers overloaded (increase RedoBuffer)" },
{ 1221, OL, REDO_BUFFER_MSG },
{ 4006, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" },

View file

@ -1479,6 +1479,69 @@ runTestDictionaryPerf(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
int runFailAddFragment(NDBT_Context* ctx, NDBT_Step* step){
static int tuplst[] = { 4007, 4008, 4009, 4010, 4011, 4012 };
static int tuxlst[] = { 12001, 12002, 12003, 12004, 12005, 12006 };
static unsigned tupcnt = sizeof(tuplst)/sizeof(tuplst[0]);
static unsigned tuxcnt = sizeof(tuxlst)/sizeof(tuxlst[0]);
NdbRestarter restarter;
int nodeId = restarter.getMasterNodeId();
Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
NdbDictionary::Table tab(*ctx->getTab());
tab.setFragmentType(NdbDictionary::Object::FragAllLarge);
// ordered index on first few columns
NdbDictionary::Index idx("X");
idx.setTable(tab.getName());
idx.setType(NdbDictionary::Index::OrderedIndex);
idx.setLogging(false);
for (int i_hate_broken_compilers = 0;
i_hate_broken_compilers < 3 &&
i_hate_broken_compilers < tab.getNoOfColumns();
i_hate_broken_compilers++) {
idx.addColumn(*tab.getColumn(i_hate_broken_compilers));
}
const int loops = ctx->getNumLoops();
int result = NDBT_OK;
(void)pDic->dropTable(tab.getName());
for (int l = 0; l < loops; l++) {
for (unsigned i1 = 0; i1 < tupcnt; i1++) {
unsigned j = (l == 0 ? i1 : myRandom48(tupcnt));
int errval = tuplst[j];
g_info << "insert error node=" << nodeId << " value=" << errval << endl;
CHECK2(restarter.insertErrorInNode(nodeId, errval) == 0,
"failed to set error insert");
CHECK2(pDic->createTable(tab) != 0,
"failed to fail after error insert " << errval);
CHECK2(pDic->createTable(tab) == 0,
pDic->getNdbError());
CHECK2(pDic->dropTable(tab.getName()) == 0,
pDic->getNdbError());
}
for (unsigned i2 = 0; i2 < tuxcnt; i2++) {
unsigned j = (l == 0 ? i2 : myRandom48(tuxcnt));
int errval = tuxlst[j];
g_info << "insert error node=" << nodeId << " value=" << errval << endl;
CHECK2(restarter.insertErrorInNode(nodeId, errval) == 0,
"failed to set error insert");
CHECK2(pDic->createTable(tab) == 0,
pDic->getNdbError());
CHECK2(pDic->createIndex(idx) != 0,
"failed to fail after error insert " << errval);
CHECK2(pDic->createIndex(idx) == 0,
pDic->getNdbError());
CHECK2(pDic->dropTable(tab.getName()) == 0,
pDic->getNdbError());
}
}
end:
return result;
}
NDBT_TESTSUITE(testDict);
TESTCASE("CreateAndDrop",
"Try to create and drop the table loop number of times\n"){
@ -1574,6 +1637,10 @@ TESTCASE("DictionaryPerf",
""){
INITIALIZER(runTestDictionaryPerf);
}
TESTCASE("FailAddFragment",
"Fail add fragment or attribute in TUP or TUX\n"){
INITIALIZER(runFailAddFragment);
}
NDBT_TESTSUITE_END(testDict);
int main(int argc, const char** argv){

View file

@ -26,7 +26,7 @@ ndb_select_all_SOURCES = select_all.cpp \
../test/src/NDBT_ResultRow.cpp \
$(tools_common_sources)
ndb_select_count_SOURCES = select_count.cpp $(tools_common_sources)
ndb_restore_SOURCES = restore/main.cpp \
ndb_restore_SOURCES = restore/restore_main.cpp \
restore/consumer.cpp \
restore/consumer_restore.cpp \
restore/consumer_printer.cpp \

View file

@ -74,7 +74,7 @@ static struct my_option my_long_options[] =
"No of parallel transactions during restore of data."
"(parallelism can be 1 to 1024)",
(gptr*) &ga_nParallelism, (gptr*) &ga_nParallelism, 0,
GET_INT, REQUIRED_ARG, 128, 0, 0, 0, 0, 0 },
GET_INT, REQUIRED_ARG, 128, 1, 1024, 0, 1, 0 },
{ "print", 256, "Print data and log to stdout",
(gptr*) &_print, (gptr*) &_print, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
@ -120,6 +120,20 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case 'V':
print_version();
exit(0);
case 'n':
if (ga_nodeId == 0)
{
printf("Error in --nodeid,-n setting, see --help\n");
exit(1);
}
break;
case 'b':
if (ga_backupId == 0)
{
printf("Error in --backupid,-b setting, see --help\n");
exit(1);
}
break;
case '?':
usage();
exit(0);
@ -131,11 +145,8 @@ readArguments(int *pargc, char*** pargv)
{
const char *load_default_groups[]= { "ndb_tools","ndb_restore",0 };
load_defaults("my",load_default_groups,pargc,pargv);
if (handle_options(pargc, pargv, my_long_options, get_one_option) ||
ga_nodeId == 0 ||
ga_backupId == 0 ||
ga_nParallelism < 1 ||
ga_nParallelism >1024) {
if (handle_options(pargc, pargv, my_long_options, get_one_option))
{
exit(1);
}
@ -343,7 +354,8 @@ main(int argc, char** argv)
if (res < 0)
{
err << "Restore: An error occured while restoring data. Exiting... res=" << res << endl;
err << "Restore: An error occured while restoring data. Exiting... "
<< "res=" << res << endl;
return -1;
}
@ -369,7 +381,8 @@ main(int argc, char** argv)
}
if (res < 0)
{
err << "Restore: An restoring the data log. Exiting... res=" << res << endl;
err << "Restore: An restoring the data log. Exiting... res="
<< res << endl;
return -1;
}
logIter.validateFooter(); //not implemented

View file

@ -148,10 +148,11 @@ void Thread_repository::deliver_shutdown()
{
struct timespec shutdown_time;
set_timespec(shutdown_time, 1);
Thread_info *info;
pthread_mutex_lock(&LOCK_thread_repository);
shutdown_in_progress= true;
for (Thread_info *info= head.next; info != &head; info= info->next)
for (info= head.next; info != &head; info= info->next)
{
pthread_kill(info->thread_id, THREAD_KICK_OFF_SIGNAL);
/*
@ -173,7 +174,7 @@ void Thread_repository::deliver_shutdown()
so this time everybody should be informed (presumably each worker can
get CPU during shutdown_time.)
*/
for (Thread_info *info= head.next; info != &head; info= info->next)
for (info= head.next; info != &head; info= info->next)
{
pthread_kill(info->thread_id, THREAD_KICK_OFF_SIGNAL);
if (info->current_cond)

View file

@ -395,7 +395,8 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
goto err;
}
if (number_of_fields < 3 || l_time->month > 12 ||
if (number_of_fields < 3 ||
l_time->year > 9999 || l_time->month > 12 ||
l_time->day > 31 || l_time->hour > 23 ||
l_time->minute > 59 || l_time->second > 59 ||
(!(flags & TIME_FUZZY_DATE) && (l_time->month == 0 ||
@ -792,10 +793,10 @@ my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap)
/* Set MYSQL_TIME structure to 0000-00-00 00:00:00.000000 */
void set_zero_time(MYSQL_TIME *tm)
void set_zero_time(MYSQL_TIME *tm, enum enum_mysql_timestamp_type time_type)
{
bzero((void*) tm, sizeof(*tm));
tm->time_type= MYSQL_TIMESTAMP_NONE;
tm->time_type= time_type;
}

View file

@ -4112,6 +4112,10 @@ int Field_datetime::store(longlong nr)
void Field_datetime::store_time(TIME *ltime,timestamp_type type)
{
longlong tmp;
/*
We don't perform range checking here since values stored in the TIME
structure always fit into the DATETIME range.
*/
if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+
(ltime->hour*10000L+ltime->minute*100+ltime->second));

View file

@ -39,9 +39,6 @@ static const int parallelism= 240;
// createable against NDB from this handler
static const int max_transactions= 256;
// Default value for prefetch of autoincrement values
static const ha_rows autoincrement_prefetch= 32;
// connectstring to cluster if given by mysqld
const char *ndbcluster_connectstring= 0;
@ -103,51 +100,52 @@ struct err_code_mapping
{
int ndb_err;
int my_err;
int show_warning;
};
static const err_code_mapping err_map[]=
{
{ 626, HA_ERR_KEY_NOT_FOUND },
{ 630, HA_ERR_FOUND_DUPP_KEY },
{ 893, HA_ERR_FOUND_DUPP_UNIQUE },
{ 721, HA_ERR_TABLE_EXIST },
{ 4244, HA_ERR_TABLE_EXIST },
{ 626, HA_ERR_KEY_NOT_FOUND, 0 },
{ 630, HA_ERR_FOUND_DUPP_KEY, 0 },
{ 893, HA_ERR_FOUND_DUPP_KEY, 0 },
{ 721, HA_ERR_TABLE_EXIST, 1 },
{ 4244, HA_ERR_TABLE_EXIST, 1 },
{ 709, HA_ERR_NO_SUCH_TABLE },
{ 284, HA_ERR_NO_SUCH_TABLE },
{ 709, HA_ERR_NO_SUCH_TABLE, 1 },
{ 284, HA_ERR_NO_SUCH_TABLE, 1 },
{ 266, HA_ERR_LOCK_WAIT_TIMEOUT },
{ 274, HA_ERR_LOCK_WAIT_TIMEOUT },
{ 296, HA_ERR_LOCK_WAIT_TIMEOUT },
{ 297, HA_ERR_LOCK_WAIT_TIMEOUT },
{ 237, HA_ERR_LOCK_WAIT_TIMEOUT },
{ 266, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
{ 274, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
{ 296, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
{ 297, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
{ 237, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
{ 623, HA_ERR_RECORD_FILE_FULL },
{ 624, HA_ERR_RECORD_FILE_FULL },
{ 625, HA_ERR_RECORD_FILE_FULL },
{ 826, HA_ERR_RECORD_FILE_FULL },
{ 827, HA_ERR_RECORD_FILE_FULL },
{ 832, HA_ERR_RECORD_FILE_FULL },
{ 623, HA_ERR_RECORD_FILE_FULL, 1 },
{ 624, HA_ERR_RECORD_FILE_FULL, 1 },
{ 625, HA_ERR_RECORD_FILE_FULL, 1 },
{ 826, HA_ERR_RECORD_FILE_FULL, 1 },
{ 827, HA_ERR_RECORD_FILE_FULL, 1 },
{ 832, HA_ERR_RECORD_FILE_FULL, 1 },
{ 0, 1 },
{ 0, 1, 0 },
{ -1, -1 }
{ -1, -1, 1 }
};
static int ndb_to_mysql_error(const NdbError *err)
{
uint i;
for (i=0 ; err_map[i].ndb_err != err->code ; i++)
for (i=0; err_map[i].ndb_err != err->code && err_map[i].my_err != -1; i++);
if (err_map[i].show_warning)
{
if (err_map[i].my_err == -1){
// Push the NDB error message as warning
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
err->code, err->message, "NDB");
return err->code;
}
// Push the NDB error message as warning
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
err->code, err->message, "NDB");
}
if (err_map[i].my_err == -1)
return err->code;
return err_map[i].my_err;
}
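
The rewritten lookup walks the table until it either finds the NDB code or hits the { -1, -1, 1 } sentinel, and the new show_warning column decides whether the raw NDB message is pushed to the client as a warning. A self-contained sketch of the same pattern; the MY_ERR_* values and the printf (standing in for push_warning_printf()) are inventions of this sketch:

#include <cstdio>

struct err_code_mapping { int ndb_err; int my_err; int show_warning; };

/* MY_ERR_* values are stand-ins for the HA_ERR_* codes in my_base.h */
enum { MY_ERR_KEY_NOT_FOUND= 1, MY_ERR_TABLE_EXIST= 2 };

static const err_code_mapping err_map_sketch[]=
{
  { 626, MY_ERR_KEY_NOT_FOUND, 0 },
  { 721, MY_ERR_TABLE_EXIST,   1 },
  {   0, 1,  0 },   /* NDB code 0: success */
  {  -1, -1, 1 }    /* sentinel: unknown codes map to themselves */
};

int ndb_to_mysql_error_sketch(int ndb_code, const char *ndb_msg)
{
  unsigned i;
  /* Stop at a match or at the -1 sentinel, never past the table */
  for (i= 0;
       err_map_sketch[i].ndb_err != ndb_code &&
       err_map_sketch[i].my_err != -1; i++) ;
  if (err_map_sketch[i].show_warning)
    std::printf("warning: NDB %d: %s\n", ndb_code, ndb_msg);
  return (err_map_sketch[i].my_err == -1) ? ndb_code
                                          : err_map_sketch[i].my_err;
}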
@ -161,7 +159,7 @@ int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans)
if (m_batch_execute)
return 0;
#endif
return trans->execute(NoCommit,AbortOnError,1);
return trans->execute(NoCommit,AbortOnError,h->m_force_send);
}
inline
@ -172,7 +170,18 @@ int execute_commit(ha_ndbcluster *h, NdbConnection *trans)
if (m_batch_execute)
return 0;
#endif
return trans->execute(Commit,AbortOnError,1);
return trans->execute(Commit,AbortOnError,h->m_force_send);
}
inline
int execute_commit(THD *thd, NdbConnection *trans)
{
int m_batch_execute= 0;
#ifdef NOT_USED
if (m_batch_execute)
return 0;
#endif
return trans->execute(Commit,AbortOnError,thd->variables.ndb_force_send);
}
inline
@ -183,7 +192,7 @@ int execute_no_commit_ie(ha_ndbcluster *h, NdbConnection *trans)
if (m_batch_execute)
return 0;
#endif
return trans->execute(NoCommit,IgnoreError,1);
return trans->execute(NoCommit,IgnoreError,h->m_force_send);
}
/*
@ -226,6 +235,8 @@ void ha_ndbcluster::set_rec_per_key()
void ha_ndbcluster::records_update()
{
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::records_update");
struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
@ -249,6 +260,8 @@ void ha_ndbcluster::records_update()
void ha_ndbcluster::no_uncommitted_rows_execute_failure()
{
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure");
THD *thd= current_thd;
((Thd_ndb*)(thd->transaction.thd_ndb))->error= 1;
@ -257,6 +270,8 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure()
void ha_ndbcluster::no_uncommitted_rows_init(THD *thd)
{
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init");
struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb;
@ -274,6 +289,8 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd)
void ha_ndbcluster::no_uncommitted_rows_update(int c)
{
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update");
struct Ndb_table_local_info *info=
(struct Ndb_table_local_info *)m_table_info;
@ -286,6 +303,8 @@ void ha_ndbcluster::no_uncommitted_rows_update(int c)
void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
{
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset");
((Thd_ndb*)(thd->transaction.thd_ndb))->count++;
((Thd_ndb*)(thd->transaction.thd_ndb))->error= 0;
@ -1018,7 +1037,8 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
m_retrieve_all_fields)
m_retrieve_all_fields ||
(field->flags & PRI_KEY_FLAG) && m_retrieve_primary_key)
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(trans->getNdbError());
@ -1093,6 +1113,34 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
DBUG_RETURN(0);
}
/*
Peek to check if a particular row already exists
*/
int ha_ndbcluster::peek_row()
{
NdbConnection *trans= m_active_trans;
NdbOperation *op;
THD *thd= current_thd;
DBUG_ENTER("peek_row");
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
int res;
if ((res= set_primary_key(op)))
ERR_RETURN(trans->getNdbError());
if (execute_no_commit_ie(this,trans) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
}
DBUG_RETURN(0);
}
/*
Read one record from NDB using unique secondary index
@ -1138,7 +1186,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
(field->flags & PRI_KEY_FLAG))
(field->flags & PRI_KEY_FLAG)) // && m_retrieve_primary_key ??
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
@ -1222,7 +1270,8 @@ inline int ha_ndbcluster::next_result(byte *buf)
DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending));
if (m_ops_pending)
{
if (current_thd->transaction.on)
// if (current_thd->transaction.on)
if (m_transaction_on)
{
if (execute_no_commit(this,trans) != 0)
DBUG_RETURN(ndb_err(trans));
@ -1566,7 +1615,7 @@ int ha_ndbcluster::filtered_scan(const byte *key, uint key_len,
Field* field= key_part->field;
uint ndb_fieldnr= key_part->fieldnr-1;
DBUG_PRINT("key_part", ("fieldnr: %d", ndb_fieldnr));
// const NDBCOL *col= tab->getColumn(ndb_fieldnr);
//const NDBCOL *col= ((const NDBTAB *) m_table)->getColumn(ndb_fieldnr);
uint32 field_len= field->pack_length();
DBUG_DUMP("key", (char*)key, field_len);
@ -1637,9 +1686,17 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_ENTER("write_row");
if(m_ignore_dup_key_not_supported)
if(m_ignore_dup_key && table->primary_key != MAX_KEY)
{
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
int peek_res= peek_row();
if (!peek_res)
{
m_dupkey= table->primary_key;
DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
}
if (peek_res != HA_ERR_KEY_NOT_FOUND)
DBUG_RETURN(peek_res);
}
statistic_increment(thd->status_var.ha_write_count, &LOCK_status);
@ -1710,7 +1767,8 @@ int ha_ndbcluster::write_row(byte *record)
(int)m_rows_inserted, (int)m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
if (thd->transaction.on)
// if (thd->transaction.on)
if (m_transaction_on)
{
if (execute_no_commit(this,trans) != 0)
{
@ -1883,7 +1941,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
for (i= 0; i < table->fields; i++)
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) &&
if (((thd->query_id == field->query_id) || m_retrieve_all_fields) &&
(!(field->flags & PRI_KEY_FLAG)) &&
set_ndb_value(op, field, i))
ERR_RETURN(op->getNdbError());
@ -2152,17 +2210,15 @@ void ha_ndbcluster::print_results()
break;
}
case NdbDictionary::Column::Char:{
char buf[field->pack_length()+1];
char *value= (char *) field->ptr;
snprintf(buf, field->pack_length(), "%s", value);
fprintf(DBUG_FILE, "Char\t'%s'", buf);
const char *value= (char *) field->ptr;
fprintf(DBUG_FILE, "Char\t'%.*s'", field->pack_length(), value);
break;
}
case NdbDictionary::Column::Varchar:
case NdbDictionary::Column::Binary:
case NdbDictionary::Column::Varbinary: {
char *value= (char *) field->ptr;
fprintf(DBUG_FILE, "'%s'", value);
const char *value= (char *) field->ptr;
fprintf(DBUG_FILE, "Var\t'%.*s'", field->pack_length(), value);
break;
}
case NdbDictionary::Column::Datetime: {
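
The replaced snprintf() into a stack buffer existed only to bound the output length; the printf-family "%.*s" conversion does the same thing directly, taking the maximum character count as an int argument, and never requires the source bytes to be NUL-terminated, which fixed-width CHAR fields are not. A minimal demonstration:

#include <cstdio>

int main()
{
  /* A fixed-width CHAR(4)-style value: no terminating NUL in the field */
  const char field[4]= { 'a', 'b', 'c', 'd' };
  int pack_length= 4;
  /* "%.*s" consumes an int giving the maximum length, then the pointer */
  std::fprintf(stdout, "Char\t'%.*s'\n", pack_length, field);
  return 0;
}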
@ -2548,14 +2604,17 @@ void ha_ndbcluster::info(uint flag)
DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
if (m_table_info)
{
records_update();
if (m_ha_not_exact_count)
records= 100;
else
records_update();
}
else
{
Uint64 rows;
if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
records= rows;
}
Uint64 rows= 100;
if (current_thd->variables.ndb_use_exact_count)
ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0);
records= rows;
}
}
if (flag & HA_STATUS_CONST)
@ -2661,15 +2720,15 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
m_use_write= TRUE;
} else
{
if (table->keys)
m_ignore_dup_key_not_supported= TRUE;
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
}
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
m_ignore_dup_key_not_supported= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
@ -2688,6 +2747,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
m_retrieve_primary_key= TRUE;
break;
case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
@ -2946,12 +3006,22 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
pointer to point to the NDB transaction.
*/
// store thread specific data first to set the right context
m_force_send= thd->variables.ndb_force_send;
m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
m_autoincrement_prefetch= thd->variables.ndb_autoincrement_prefetch_sz;
if (!thd->transaction.on)
m_transaction_on= FALSE;
else
m_transaction_on= thd->variables.ndb_use_transactions;
m_active_trans= thd->transaction.all.ndb_tid ?
(NdbConnection*)thd->transaction.all.ndb_tid:
(NdbConnection*)thd->transaction.stmt.ndb_tid;
DBUG_ASSERT(m_active_trans);
// Start of transaction
m_retrieve_all_fields= FALSE;
m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
{
NDBDICT *dict= m_ndb->getDictionary();
@ -3044,6 +3114,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
// Start of statement
m_retrieve_all_fields= FALSE;
m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
DBUG_RETURN(error);
@ -3066,7 +3137,7 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction)
"stmt" : "all"));
DBUG_ASSERT(ndb && trans);
if (execute_commit(0,trans) != 0)
if (execute_commit(thd,trans) != 0)
{
const NdbError err= trans->getNdbError();
const NdbOperation *error_op= trans->getNdbErrorOperation();
@ -3621,13 +3692,17 @@ ulonglong ha_ndbcluster::get_auto_increment()
Uint64 auto_value;
DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
cache_size= ((m_rows_to_insert - m_rows_inserted < autoincrement_prefetch) ?
m_rows_to_insert - m_rows_inserted :
max(m_rows_to_insert, autoincrement_prefetch));
auto_value= ((m_skip_auto_increment) ?
m_ndb->readAutoIncrementValue((const NDBTAB *) m_table) :
m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size));
DBUG_RETURN((ulonglong) auto_value);
cache_size=
(m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
m_rows_to_insert - m_rows_inserted
: (m_rows_to_insert > m_autoincrement_prefetch) ?
m_rows_to_insert
: m_autoincrement_prefetch;
auto_value=
(m_skip_auto_increment) ?
m_ndb->readAutoIncrementValue((const NDBTAB *) m_table)
: m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size);
DBUG_RETURN((longlong)auto_value);
}
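
Spelled out, the new cache_size expression asks NDB to reserve only what the current statement still needs when it is nearly done, and otherwise the full statement size (when that exceeds the configured prefetch) or the prefetch window itself. A standalone restatement with worked values; autoinc_cache_size() is a name made up for the sketch:

#include <cstdio>

typedef unsigned long long ha_rows_sketch;

/* Same selection logic as the cache_size expression above: fetch just
   what's left at the tail of a statement, otherwise fetch generously. */
ha_rows_sketch autoinc_cache_size(ha_rows_sketch rows_to_insert,
                                  ha_rows_sketch rows_inserted,
                                  ha_rows_sketch prefetch)
{
  ha_rows_sketch remaining= rows_to_insert - rows_inserted;
  if (remaining < prefetch)
    return remaining;                     /* tail of a multi-row insert */
  return (rows_to_insert > prefetch) ? rows_to_insert : prefetch;
}

int main()
{
  std::printf("%llu\n", autoinc_cache_size(1000, 990, 32)); /* 10   */
  std::printf("%llu\n", autoinc_cache_size(1000,   0, 32)); /* 1000 */
  std::printf("%llu\n", autoinc_cache_size(   1,   0, 32)); /* 1    */
  return 0;
}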
@ -3648,9 +3723,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_PREFIX_CHAR_KEYS),
m_share(0),
m_use_write(FALSE),
m_ignore_dup_key_not_supported(FALSE),
m_ignore_dup_key(FALSE),
m_primary_key_update(FALSE),
m_retrieve_all_fields(FALSE),
m_retrieve_primary_key(FALSE),
m_rows_to_insert(1),
m_rows_inserted(0),
m_bulk_insert_rows(1024),
@ -3660,7 +3736,11 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_blobs_pending(0),
m_blobs_buffer(0),
m_blobs_buffer_size(0),
m_dupkey((uint) -1)
m_dupkey((uint) -1),
m_ha_not_exact_count(FALSE),
m_force_send(TRUE),
m_autoincrement_prefetch(32),
m_transaction_on(TRUE)
{
int i;
@ -4311,6 +4391,62 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
DBUG_RETURN(10); /* Good guess when you don't know anything */
}
ulong ha_ndbcluster::table_flags(void) const
{
if (m_ha_not_exact_count)
return m_table_flags | HA_NOT_EXACT_COUNT;
else
return m_table_flags;
}
const char * ha_ndbcluster::table_type() const
{
return("ndbcluster");
}
uint ha_ndbcluster::max_supported_record_length() const
{
return NDB_MAX_TUPLE_SIZE;
}
uint ha_ndbcluster::max_supported_keys() const
{
return MAX_KEY;
}
uint ha_ndbcluster::max_supported_key_parts() const
{
return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY;
}
uint ha_ndbcluster::max_supported_key_length() const
{
return NDB_MAX_KEY_SIZE;
}
bool ha_ndbcluster::low_byte_first() const
{
#ifdef WORDS_BIGENDIAN
return FALSE;
#else
return TRUE;
#endif
}
bool ha_ndbcluster::has_transactions()
{
return TRUE;
}
const char* ha_ndbcluster::index_type(uint key_number)
{
switch (get_index_type(key_number)) {
case ORDERED_INDEX:
case UNIQUE_ORDERED_INDEX:
case PRIMARY_KEY_ORDERED_INDEX:
return "BTREE";
case UNIQUE_INDEX:
case PRIMARY_KEY_INDEX:
default:
return "HASH";
}
}
uint8 ha_ndbcluster::table_cache_type()
{
return HA_CACHE_TBL_NOCACHE;
}
/*
Handling the shared NDB_SHARE structure that is needed to

View file

@ -118,15 +118,14 @@ class ha_ndbcluster: public handler
int reset();
int external_lock(THD *thd, int lock_type);
int start_stmt(THD *thd);
const char * table_type() const { return("ndbcluster");}
const char * table_type() const;
const char ** bas_ext() const;
ulong table_flags(void) const { return m_table_flags; }
ulong table_flags(void) const;
ulong index_flags(uint idx, uint part, bool all_parts) const;
uint max_supported_record_length() const { return NDB_MAX_TUPLE_SIZE; };
uint max_supported_keys() const { return MAX_KEY; }
uint max_supported_key_parts() const
{ return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; };
uint max_supported_key_length() const { return NDB_MAX_KEY_SIZE;};
uint max_supported_record_length() const;
uint max_supported_keys() const;
uint max_supported_key_parts() const;
uint max_supported_key_length() const;
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
@ -135,28 +134,9 @@ class ha_ndbcluster: public handler
THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
bool low_byte_first() const
{
#ifdef WORDS_BIGENDIAN
return FALSE;
#else
return TRUE;
#endif
}
bool has_transactions() { return TRUE; }
const char* index_type(uint key_number) {
switch (get_index_type(key_number)) {
case ORDERED_INDEX:
case UNIQUE_ORDERED_INDEX:
case PRIMARY_KEY_ORDERED_INDEX:
return "BTREE";
case UNIQUE_INDEX:
case PRIMARY_KEY_INDEX:
default:
return "HASH";
}
}
bool low_byte_first() const;
bool has_transactions();
const char* index_type(uint key_number);
double scan_time();
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
@ -165,7 +145,7 @@ class ha_ndbcluster: public handler
static Thd_ndb* seize_thd_ndb();
static void release_thd_ndb(Thd_ndb* thd_ndb);
uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; }
uint8 table_cache_type();
private:
int alter_table_name(const char *from, const char *to);
@ -183,6 +163,7 @@ class ha_ndbcluster: public handler
int pk_read(const byte *key, uint key_len, byte *buf);
int complemented_pk_read(const byte *old_data, byte *new_data);
int peek_row();
int unique_index_read(const byte *key, uint key_len,
byte *buf);
int ordered_index_scan(const key_range *start_key,
@ -242,9 +223,10 @@ class ha_ndbcluster: public handler
typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
bool m_use_write;
bool m_ignore_dup_key_not_supported;
bool m_ignore_dup_key;
bool m_primary_key_update;
bool m_retrieve_all_fields;
bool m_retrieve_primary_key;
ha_rows m_rows_to_insert;
ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows;
@ -256,6 +238,10 @@ class ha_ndbcluster: public handler
char *m_blobs_buffer;
uint32 m_blobs_buffer_size;
uint m_dupkey;
bool m_ha_not_exact_count;
bool m_force_send;
ha_rows m_autoincrement_prefetch;
bool m_transaction_on;
void set_rec_per_key();
void records_update();
@ -265,6 +251,8 @@ class ha_ndbcluster: public handler
void no_uncommitted_rows_reset(THD *);
friend int execute_no_commit(ha_ndbcluster*, NdbConnection*);
friend int execute_commit(ha_ndbcluster*, NdbConnection*);
friend int execute_no_commit_ie(ha_ndbcluster*, NdbConnection*);
};
bool ndbcluster_init(void);

View file

@ -44,6 +44,7 @@
#define HA_ADMIN_INVALID -5
#define HA_ADMIN_REJECT -6
#define HA_ADMIN_TRY_ALTER -7
#define HA_ADMIN_WRONG_CHECKSUM -8
/* Bits in table_flags() to show what database can do */
#define HA_READ_RND_SAME (1 << 0) /* can switch index during the scan

View file

@ -1007,6 +1007,21 @@ void Item_param::set_double(double d)
}
/*
Set parameter value from TIME value.
SYNOPSIS
set_time()
tm - datetime value to set (time_type is ignored)
type - type of datetime value
max_length_arg - max length of datetime value as string
NOTE
If the value to be stored is not normalized, a zero value will be stored
instead and a proper warning will be produced. This function relies on
the fact that even a wrong value sent over the binary protocol fits into
a MAX_DATE_STRING_REP_LENGTH buffer.
*/
void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg)
{
DBUG_ENTER("Item_param::set_time");
@ -1014,6 +1029,17 @@ void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg)
value.time= *tm;
value.time.time_type= type;
if (value.time.year > 9999 || value.time.month > 12 ||
value.time.day > 31 ||
type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23 ||
value.time.minute > 59 || value.time.second > 59)
{
char buff[MAX_DATE_STRING_REP_LENGTH];
uint length= my_TIME_to_str(&value.time, buff);
make_truncated_value_warning(current_thd, buff, length, type, 0);
set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR);
}
state= TIME_VALUE;
maybe_null= 0;
max_length= max_length_arg;

View file

@ -395,6 +395,7 @@ void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array,
{
uint el= fields.elements;
Item *new_item= new Item_ref(ref_pointer_array + el, 0, item->name);
new_item->collation.set(item->collation);
fields.push_front(item);
ref_pointer_array[el]= item;
thd->change_item_tree(arg, new_item);

View file

@ -221,16 +221,13 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
hybrid_type= item->result_type();
if (hybrid_type == INT_RESULT)
{
cmp_charset= &my_charset_bin;
max_length=20;
}
else if (hybrid_type == REAL_RESULT)
{
cmp_charset= &my_charset_bin;
max_length=float_length(decimals);
}else
{
cmp_charset= item->collation.collation;
max_length=item->max_length;
}
decimals=item->decimals;
@ -676,7 +673,7 @@ bool Item_sum_min::add()
{
String *result=args[0]->val_str(&tmp_value);
if (!args[0]->null_value &&
(null_value || sortcmp(&value,result,cmp_charset) > 0))
(null_value || sortcmp(&value,result,collation.collation) > 0))
{
value.copy(*result);
null_value=0;
@ -729,7 +726,7 @@ bool Item_sum_max::add()
{
String *result=args[0]->val_str(&tmp_value);
if (!args[0]->null_value &&
(null_value || sortcmp(&value,result,cmp_charset) < 0))
(null_value || sortcmp(&value,result,collation.collation) < 0))
{
value.copy(*result);
null_value=0;
@ -1040,7 +1037,7 @@ Item_sum_hybrid::min_max_update_str_field()
result_field->val_str(&tmp_value);
if (result_field->is_null() ||
(cmp_sign * sortcmp(res_str,&tmp_value,cmp_charset)) < 0)
(cmp_sign * sortcmp(res_str,&tmp_value,collation.collation)) < 0)
result_field->store(res_str->ptr(),res_str->length(),res_str->charset());
result_field->set_notnull();
}

View file

@ -442,20 +442,19 @@ class Item_sum_hybrid :public Item_sum
enum_field_types hybrid_field_type;
int cmp_sign;
table_map used_table_cache;
CHARSET_INFO *cmp_charset;
public:
Item_sum_hybrid(Item *item_par,int sign)
:Item_sum(item_par), sum(0.0), sum_int(0),
hybrid_type(INT_RESULT), hybrid_field_type(FIELD_TYPE_LONGLONG),
cmp_sign(sign), used_table_cache(~(table_map) 0),
cmp_charset(&my_charset_bin)
{}
cmp_sign(sign), used_table_cache(~(table_map) 0)
{ collation.set(&my_charset_bin); }
Item_sum_hybrid(THD *thd, Item_sum_hybrid *item):
Item_sum(thd, item), value(item->value),
sum(item->sum), sum_int(item->sum_int), hybrid_type(item->hybrid_type),
hybrid_field_type(item->hybrid_field_type),cmp_sign(item->cmp_sign),
used_table_cache(item->used_table_cache), cmp_charset(item->cmp_charset) {}
used_table_cache(item->used_table_cache)
{ collation.set(item->collation); }
bool fix_fields(THD *, TABLE_LIST *, Item **);
table_map used_tables() const { return used_table_cache; }
bool const_item() const { return !used_table_cache; }
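
Folding cmp_charset into the inherited collation means MIN()/MAX() now order strings by the argument's actual collation rather than a separate member that could fall out of sync. The toy comparator below (an invention of this sketch; real collations are far richer than case folding) shows why the choice matters: 'B' sorts before 'a' in binary but after it case-insensitively:

#include <cctype>
#include <cstdio>
#include <cstring>

/* Toy stand-in for sortcmp(): only models case sensitivity */
int toy_sortcmp(const char *a, const char *b, bool case_insensitive)
{
  if (!case_insensitive)
    return std::strcmp(a, b);             /* binary comparison */
  for (; *a && *b; a++, b++)
  {
    int d= std::tolower((unsigned char) *a) -
           std::tolower((unsigned char) *b);
    if (d) return d;
  }
  return (unsigned char) *a - (unsigned char) *b;
}

int main()
{
  /* MIN('B','a') depends on collation: binary orders 'B' < 'a',
     a case-insensitive collation orders 'a' < 'B'. */
  std::printf("binary: %d\n", toy_sortcmp("B", "a", false)); /* negative */
  std::printf("ci:     %d\n", toy_sortcmp("B", "a", true));  /* positive */
  return 0;
}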

View file

@ -1607,50 +1607,46 @@ void Item_func_from_unixtime::fix_length_and_dec()
String *Item_func_from_unixtime::val_str(String *str)
{
TIME time_tmp;
my_time_t tmp;
DBUG_ASSERT(fixed == 1);
tmp= (time_t) args[0]->val_int();
if ((null_value=args[0]->null_value))
goto null_date;
thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, tmp);
if (get_date(&time_tmp, 0))
return 0;
if (str->alloc(20*MY_CHARSET_BIN_MB_MAXLEN))
goto null_date;
{
null_value= 1;
return 0;
}
make_datetime((DATE_TIME_FORMAT *) 0, &time_tmp, str);
return str;
null_date:
null_value=1;
return 0;
}
longlong Item_func_from_unixtime::val_int()
{
TIME time_tmp;
my_time_t tmp;
DBUG_ASSERT(fixed == 1);
tmp= (time_t) (ulong) args[0]->val_int();
if ((null_value=args[0]->null_value))
if (get_date(&time_tmp, 0))
return 0;
current_thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, tmp);
return (longlong) TIME_to_ulonglong_datetime(&time_tmp);
}
bool Item_func_from_unixtime::get_date(TIME *ltime,
uint fuzzy_date __attribute__((unused)))
{
my_time_t tmp=(my_time_t) args[0]->val_int();
if ((null_value=args[0]->null_value))
longlong tmp= args[0]->val_int();
if ((null_value= (args[0]->null_value ||
tmp < TIMESTAMP_MIN_VALUE ||
tmp > TIMESTAMP_MAX_VALUE)))
return 1;
current_thd->variables.time_zone->gmt_sec_to_TIME(ltime, tmp);
thd->variables.time_zone->gmt_sec_to_TIME(ltime, (my_time_t)tmp);
return 0;
}
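
All three entry points now funnel through get_date(), which rejects arguments outside the valid TIMESTAMP range before narrowing to my_time_t, instead of letting the cast silently wrap. A sketch of the guard; the *_SKETCH bounds are stand-ins, since the real TIMESTAMP_MIN_VALUE/TIMESTAMP_MAX_VALUE come from the server headers:

/* Hypothetical bounds; the server defines the real ones in its headers */
const long long TIMESTAMP_MIN_VALUE_SKETCH= 1;
const long long TIMESTAMP_MAX_VALUE_SKETCH= 2145916799LL;

/* Returns true (leaving *out untouched) when the argument cannot be a
   valid my_time_t -- the pattern used by get_date() above. */
bool check_unixtime(long long arg, long long *out)
{
  if (arg < TIMESTAMP_MIN_VALUE_SKETCH || arg > TIMESTAMP_MAX_VALUE_SKETCH)
    return true;       /* caller sets null_value and returns NULL */
  *out= arg;           /* safe to narrow and hand to gmt_sec_to_TIME() */
  return false;
}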

View file

@ -3611,7 +3611,7 @@ void Create_file_log_event::print(FILE* file, bool short_form,
if (enable_local)
{
Load_log_event::print(file, 1, last_event_info, !check_fname_outside_temp_buf());
Load_log_event::print(file, short_form, last_event_info, !check_fname_outside_temp_buf());
/*
That one is for "file_id: etc" below: in mysqlbinlog we want the #, in
SHOW BINLOG EVENTS we don't.

View file

@ -4041,7 +4041,10 @@ enum options_mysqld
OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB,
OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG,
OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG,
OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_SKIP_SAFEMALLOC,
OPT_INNODB, OPT_ISAM,
OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT,
OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL,
@ -4495,9 +4498,26 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 1, 0, 0,
0, 0, 0},
#ifdef HAVE_NDBCLUSTER_DB
{"ndb-connectstring", OPT_NDB_CONNECTSTRING, "Connect string for ndbcluster.",
(gptr*) &ndbcluster_connectstring, (gptr*) &ndbcluster_connectstring, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"ndb-connectstring", OPT_NDB_CONNECTSTRING,
"Connect string for ndbcluster.",
(gptr*) &ndbcluster_connectstring, (gptr*) &ndbcluster_connectstring,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"ndb_autoincrement_prefetch_sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
"Specify number of autoincrement values that are prefetched",
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
0, GET_INT, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
{"ndb_force_send", OPT_NDB_FORCE_SEND,
"Force send of buffers to ndb immediately without waiting for other threads",
(gptr*) &global_system_variables.ndb_force_send,
(gptr*) &global_system_variables.ndb_force_send,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb_use_exact_count", OPT_NDB_USE_EXACT_COUNT,
"Use exact records count during query planning and for "
"fast select count(*)",
(gptr*) &global_system_variables.ndb_use_exact_count,
(gptr*) &global_system_variables.ndb_use_exact_count,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
#endif
{"new", 'n', "Use very new possible 'unsafe' functions.",
(gptr*) &global_system_variables.new_mode,

View file

@ -374,6 +374,23 @@ sys_var_thd_bool sys_innodb_table_locks("innodb_table_locks",
sys_var_long_ptr sys_innodb_autoextend_increment("innodb_autoextend_increment",
&srv_auto_extend_increment);
#endif
#ifdef HAVE_NDBCLUSTER_DB
// ndb thread specific variable settings
sys_var_thd_ulong
sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz",
&SV::ndb_autoincrement_prefetch_sz);
sys_var_thd_bool
sys_ndb_force_send("ndb_force_send",
&SV::ndb_force_send);
sys_var_thd_bool
sys_ndb_use_exact_count("ndb_use_exact_count",
&SV::ndb_use_exact_count);
sys_var_thd_bool
sys_ndb_use_transactions("ndb_use_transactions",
&SV::ndb_use_transactions);
// ndb server global variable settings
// none
#endif
/* Time/date/datetime formats */
@ -631,6 +648,12 @@ sys_var *sys_variables[]=
&sys_innodb_table_locks,
&sys_innodb_max_purge_lag,
&sys_innodb_autoextend_increment,
#endif
#ifdef HAVE_NDBCLUSTER_DB
&sys_ndb_autoincrement_prefetch_sz,
&sys_ndb_force_send,
&sys_ndb_use_exact_count,
&sys_ndb_use_transactions,
#endif
&sys_unique_checks,
&sys_updatable_views_with_limit,
@ -794,6 +817,13 @@ struct show_var_st init_vars[]= {
{sys_myisam_sort_buffer_size.name, (char*) &sys_myisam_sort_buffer_size, SHOW_SYS},
#ifdef __NT__
{"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL},
#endif
#ifdef HAVE_NDBCLUSTER_DB
{sys_ndb_autoincrement_prefetch_sz.name,
(char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS},
{sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS},
{sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS},
{sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS},
#endif
{sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS},
{sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS},
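
For reference, the per-session variable plumbing in this hunk follows a three-step pattern: declare a sys_var_thd_* object bound to a member of the per-thread variables struct (SV), list it in sys_variables[] so SET SESSION/GLOBAL can resolve it, and list it in init_vars[] so SHOW VARIABLES prints it. Condensed from the ndb_force_send lines above; these are fragments of server code, not a standalone program:

/* 1) declare, binding the name to the per-thread member */
sys_var_thd_bool sys_ndb_force_send("ndb_force_send", &SV::ndb_force_send);

/* 2) register so SET [SESSION|GLOBAL] can find it */
sys_var *sys_variables[]= { /* ... */ &sys_ndb_force_send, /* ... */ };

/* 3) expose it in SHOW VARIABLES */
struct show_var_st init_vars[]= {
  /* ... */
  {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS},
  /* ... */
};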

View file

@ -420,3 +420,4 @@ character-set=latin2
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -411,3 +411,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -420,3 +420,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -408,3 +408,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -413,3 +413,4 @@ character-set=latin7
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -408,3 +408,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -421,3 +421,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -408,3 +408,4 @@ character-set=greek
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -413,3 +413,4 @@ character-set=latin2
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -408,3 +408,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -412,3 +412,4 @@ character-set=ujis
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -408,3 +408,4 @@ character-set=euckr
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -410,3 +410,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

View file

@ -410,3 +410,4 @@ character-set=latin1
"Wrong magic in %-.64s"
"Prepared statement contains too many placeholders"
"Key part '%-.64s' length cannot be 0"
"View text checksum failed"

Some files were not shown because too many files have changed in this diff.