Merge c-4908e253.1238-1-64736c10.cust.bredbandsbolaget.se:/home/pappa/clean-mysql-5.1-new
into  c-4908e253.1238-1-64736c10.cust.bredbandsbolaget.se:/home/pappa/bug18198

Author: mikael@c-4908e253.1238-1-64736c10.cust.bredbandsbolaget.se
Date:   2006-04-08 18:14:03 -04:00
Commit: e50b98e6c3
86 changed files with 10281 additions and 652 deletions

View file

@ -1,98 +1,128 @@
#!/bin/sh
########################################################################
get_key_value()
{
echo "$1" | sed 's/^--[a-zA-Z_-]*=//'
}
usage()
{
cat <<EOF
Usage: $0 [-h|-n] [configure-options]
-h, --help Show this help message.
-n, --just-print Don't actually run any commands; just print them.
-c, --just-configure Stop after running configure.
--with-debug=full Build with full debug.
--warning-mode=[old|pedantic]
Influences the debug flags. Old is default.
--prefix=path Build with prefix 'path'.
Note: this script is intended for internal use by MySQL developers.
EOF
}
parse_options()
{
while test $# -gt 0
do
case "$1" in
--prefix=*)
prefix=`get_key_value "$1"`;;
--with-debug=full)
full_debug="=full";;
--warning-mode=*)
warning_mode=`get_key_value "$1"`;;
-c | --just-configure)
just_configure=1;;
-n | --just-print | --print)
just_print=1;;
-h | --help)
usage
exit 0;;
*)
echo "Unknown option '$1'"
exit 1;;
esac
shift
done
}
########################################################################
if ! test -f sql/mysqld.cc
then
echo "You must run this script from the MySQL top-level directory"
exit 1
fi
prefix_configs="--prefix=/usr/local/mysql"
prefix="/usr/local/mysql"
just_print=
just_configure=
full_debug=
warning_mode=
parse_options "$@"
if test -n "$MYSQL_BUILD_PREFIX"
then
prefix_configs="--prefix=$MYSQL_BUILD_PREFIX"
prefix="$MYSQL_BUILD_PREFIX"
fi
while test $# -gt 0
do
case "$1" in
--prefix=* ) prefix_configs="$1"; shift ;;
--with-debug=full ) full_debug="=full"; shift ;;
-c | --just-configure ) just_configure=1; shift ;;
-n | --just-print | --print ) just_print=1; shift ;;
-h | --help ) cat <<EOF; exit 0 ;;
Usage: $0 [-h|-n] [configure-options]
-h, --help Show this help message.
-n, --just-print Don't actually run any commands; just print them.
-c, --just-configure Stop after running configure.
--with-debug=full Build with full debug.
--prefix=path Build with prefix 'path'.
Note: this script is intended for internal use by MySQL developers.
EOF
* )
echo "Unknown option '$1'"
exit 1
break ;;
esac
done
set -e
#
# Check for the CPU and set up CPU specific flags. We may reset them
# later.
#
path=`dirname $0`
. "$path/check-cpu"
export AM_MAKEFLAGS
AM_MAKEFLAGS="-j 4"
# SSL library to use.
SSL_LIBRARY=--with-yassl
# If you are not using codefusion add "-Wpointer-arith" to WARNINGS
# The following warning flags give too many warnings:
# -Wshadow -Wunused -Winline (The latter isn't usable in C++ as
# __attribute()__ doesn't work with gnu C++)
global_warnings="-Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings"
c_warnings="$global_warnings -Wunused"
cxx_warnings="$global_warnings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor"
base_max_configs="--with-innodb --with-berkeley-db --with-ndbcluster --with-archive-storage-engine --with-big-tables --with-blackhole-storage-engine --with-federated-storage-engine --with-csv-storage-engine --with-example-storage-engine --with-partition $SSL_LIBRARY"
base_max_no_ndb_configs="--with-innodb --with-berkeley-db --without-ndbcluster --with-archive-storage-engine --with-big-tables --with-blackhole-storage-engine --with-federated-storage-engine --with-csv-storage-engine --with-example-storage-engine --with-partition $SSL_LIBRARY"
max_configs="$base_max_configs --with-embedded-server"
max_no_ndb_configs="$base_max_no_ndb_configs --with-embedded-server"
valgrind_flags="-USAFEMALLOC -UFORCE_INIT_OF_VARS -DHAVE_purify -DMYSQL_SERVER_SUFFIX=-valgrind-max"
path=`dirname $0`
. "$path/check-cpu"
alpha_cflags="$check_cpu_cflags -Wa,-m$cpu_flag"
amd64_cflags="$check_cpu_cflags"
pentium_cflags="$check_cpu_cflags"
pentium64_cflags="$check_cpu_cflags -m64"
ppc_cflags="$check_cpu_cflags"
sparc_cflags=""
# be as fast as we can be without losing our ability to backtrace
fast_cflags="-O3 -fno-omit-frame-pointer"
# this one is for someone who thinks a 1% speedup is worth not being
# able to backtrace
reckless_cflags="-O3 -fomit-frame-pointer "
debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS -DSAFEMALLOC -DPEDANTIC_SAFEMALLOC -DSAFE_MUTEX"
debug_extra_cflags="-O1 -Wuninitialized"
if [ "x$warning_mode" != "xpedantic" ]; then
# Both C and C++ warnings
warnings="-Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W"
warnings="$warnings -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare"
warnings="$warnings -Wwrite-strings"
# C warnings
c_warnings="$warnings -Wunused"
# C++ warnings
cxx_warnings="$warnings -Woverloaded-virtual -Wsign-promo -Wreorder"
cxx_warnings="$warnings -Wctor-dtor-privacy -Wnon-virtual-dtor"
# Added unless --with-debug=full
debug_extra_cflags="-O1 -Wuninitialized"
else
warnings="-W -Wall -ansi -pedantic -Wno-long-long -D_POSIX_SOURCE"
c_warnings="$warnings"
cxx_warnings="$warnings -std=c++98"
# NOTE: warning mode should not influence optimize/debug mode.
# Please feel free to add a separate option if you don't feel it's overkill.
debug_extra_flags="-O0"
# Reset CPU flags (-mtune), they don't work in -pedantic mode
check_cpu_cflags=""
fi
# Set flags for various build configurations.
# Used in -valgrind builds
valgrind_flags="-USAFEMALLOC -UFORCE_INIT_OF_VARS -DHAVE_purify "
valgrind_flags="$valgrind_flags -DMYSQL_SERVER_SUFFIX=-valgrind-max"
#
# Used in -debug builds
debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS "
debug_cflags="$debug_cflags -DSAFEMALLOC -DPEDANTIC_SAFEMALLOC -DSAFE_MUTEX"
#
# Base C++ flags for all builds
base_cxxflags="-felide-constructors -fno-exceptions -fno-rtti"
amd64_cxxflags="" # If dropping '--with-big-tables', add here "-DBIG_TABLES"
base_configs="$prefix_configs --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-readline --with-big-tables"
static_link="--with-mysqld-ldflags=-all-static --with-client-ldflags=-all-static"
amd64_configs=""
alpha_configs="" # Not used yet
pentium_configs=""
sparc_configs=""
# we need local-infile in all binaries for rpl000001
# if you need to disable local-infile in the client, write a build script
# and unset local_infile_configs
local_infile_configs="--enable-local-infile"
#
# Flags for optimizing builds.
# Be as fast as we can be without losing our ability to backtrace.
fast_cflags="-O3 -fno-omit-frame-pointer"
debug_configs="--with-debug$full_debug"
if [ -z "$full_debug" ]
@ -100,6 +130,45 @@ then
debug_cflags="$debug_cflags $debug_extra_cflags"
fi
#
# Configuration options.
#
base_configs="--prefix=$prefix --enable-assembler "
base_configs="$base_configs --with-extra-charsets=complex "
base_configs="$base_configs --enable-thread-safe-client --with-readline "
base_configs="$base_configs --with-big-tables"
static_link="--with-mysqld-ldflags=-all-static "
static_link="$static_link --with-client-ldflags=-all-static"
# we need local-infile in all binaries for rpl000001
# if you need to disable local-infile in the client, write a build script
# and unset local_infile_configs
local_infile_configs="--enable-local-infile"
max_configs="--with-innodb --with-berkeley-db"
max_configs="$max_configs --with-archive-storage-engine"
max_configs="$max_configs --with-big-tables"
max_configs="$max_configs --with-blackhole-storage-engine"
max_configs="$max_configs --with-federated-storage-engine"
max_configs="$max_configs --with-csv-storage-engine"
max_configs="$max_configs --with-example-storage-engine"
max_configs="$max_configs --with-partition $SSL_LIBRARY"
max_no_embedded_configs="$max_configs --with-ndbcluster"
max_no_ndb_configs="$max_configs --without-ndbcluster --with-embedded-server"
max_configs="$max_configs --with-ndbcluster --with-embedded-server"
#
# CPU and platform specific compilation flags.
#
alpha_cflags="$check_cpu_cflags -Wa,-m$cpu_flag"
amd64_cflags="$check_cpu_cflags"
amd64_cxxflags="" # If dropping '--with-big-tables', add here "-DBIG_TABLES"
pentium64_cflags="$check_cpu_cflags -m64"
ppc_cflags="$check_cpu_cflags"
sparc_cflags=""
if gmake --version > /dev/null 2>&1
then
make=gmake

View file

@ -1,7 +1,7 @@
#! /bin/sh
path=`dirname $0`
. "$path/SETUP.sh" $@ --with-debug=full
. "$path/SETUP.sh" "$@" --with-debug=full
extra_flags="$pentium_cflags $debug_cflags"
extra_configs="$pentium_configs $debug_configs $max_configs"

View file

@ -4,6 +4,6 @@ path=`dirname $0`
. "$path/SETUP.sh"
extra_flags="$pentium_cflags $debug_cflags"
extra_configs="$pentium_configs $debug_configs $base_max_configs"
extra_configs="$pentium_configs $debug_configs $max_no_embedded_configs"
. "$path/FINISH.sh"

View file

@ -1,7 +1,7 @@
#! /bin/sh
path=`dirname $0`
. "$path/SETUP.sh"
. "$path/SETUP.sh" "$@"
extra_flags="$pentium_cflags $debug_cflags $valgrind_flags"
extra_configs="$pentium_configs $debug_configs $max_configs"

View file

@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc)
AC_CANONICAL_SYSTEM
# The Docs Makefile.am parses this line!
# remember to also change ndb version below and update version.c in ndb
AM_INIT_AUTOMAKE(mysql, 5.1.9-beta)
AM_INIT_AUTOMAKE(mysql, 5.1.10-beta)
AM_CONFIG_HEADER(config.h)
PROTOCOL_VERSION=10

View file

@ -917,7 +917,7 @@ typedef unsigned long uint32; /* Short for unsigned integer >= 32 bits */
#error "Neither int or long is of 4 bytes width"
#endif
#if !defined(HAVE_ULONG) && !defined(TARGET_OS_LINUX) && !defined(__USE_MISC)
#if !defined(HAVE_ULONG) && !defined(__USE_MISC)
typedef unsigned long ulong; /* Short for unsigned long */
#endif
#ifndef longlong_defined

View file

@ -850,6 +850,7 @@ my_bool my_gethwaddr(uchar *to);
#define PROT_WRITE 2
#define MAP_NORESERVE 0
#define MAP_SHARED 0x0001
#define MAP_PRIVATE 0x0002
#define MAP_NOSYNC 0x0800
#define MAP_FAILED ((void *)-1)
#define MS_SYNC 0x0000

View file

@ -1380,35 +1380,6 @@ mysql_get_server_info(MYSQL *mysql)
}
/*
Get version number for server in a form easy to test on
SYNOPSIS
mysql_get_server_version()
mysql Connection
EXAMPLE
4.1.0-alfa -> 40100
NOTES
We will ensure that a newer server always has a bigger number.
RETURN
Signed number > 323000
*/
ulong STDCALL
mysql_get_server_version(MYSQL *mysql)
{
uint major, minor, version;
char *pos= mysql->server_version, *end_pos;
major= (uint) strtoul(pos, &end_pos, 10); pos=end_pos+1;
minor= (uint) strtoul(pos, &end_pos, 10); pos=end_pos+1;
version= (uint) strtoul(pos, &end_pos, 10);
return (ulong) major*10000L+(ulong) (minor*100+version);
}
const char * STDCALL
mysql_get_host_info(MYSQL *mysql)
{

View file

@ -465,8 +465,7 @@ sub mtr_kill_leftovers () {
if ( kill(0, @pids) ) # Check if some left
{
# FIXME maybe just mtr_warning() ?
mtr_error("can't kill process(es) " . join(" ", @pids));
mtr_warning("can't kill process(es) " . join(" ", @pids));
}
}
}
@ -479,7 +478,7 @@ sub mtr_kill_leftovers () {
{
if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) )
{
mtr_error("can't kill old mysqld holding port $srv->{'port'}");
mtr_warning("can't kill old mysqld holding port $srv->{'port'}");
}
}
}

View file

@ -230,6 +230,8 @@ our $opt_client_ddd;
our $opt_manual_gdb;
our $opt_manual_ddd;
our $opt_manual_debug;
our $opt_debugger;
our $opt_client_debugger;
our $opt_gprof;
our $opt_gprof_dir;
@ -633,6 +635,8 @@ sub command_line_setup () {
'manual-debug' => \$opt_manual_debug,
'ddd' => \$opt_ddd,
'client-ddd' => \$opt_client_ddd,
'debugger=s' => \$opt_debugger,
'client-debugger=s' => \$opt_client_debugger,
'strace-client' => \$opt_strace_client,
'master-binary=s' => \$exe_master_mysqld,
'slave-binary=s' => \$exe_slave_mysqld,
@ -812,9 +816,10 @@ sub command_line_setup () {
# Check debug related options
if ( $opt_gdb || $opt_client_gdb || $opt_ddd || $opt_client_ddd ||
$opt_manual_gdb || $opt_manual_ddd || $opt_manual_debug)
$opt_manual_gdb || $opt_manual_ddd || $opt_manual_debug ||
$opt_debugger || $opt_client_debugger )
{
# Indicate that we are using debugger
# Indicate that we are using debugger
$glob_debugger= 1;
# Increase timeouts
$opt_wait_timeout= 300;
@ -2798,6 +2803,10 @@ sub mysqld_start ($$$$$) {
{
ddd_arguments(\$args, \$exe, "$type"."_$idx");
}
elsif ( $opt_debugger )
{
debugger_arguments(\$args, \$exe, "$type"."_$idx");
}
elsif ( $opt_manual_debug )
{
print "\nStart $type in your debugger\n" .
@ -3324,6 +3333,10 @@ sub run_mysqltest ($) {
{
ddd_arguments(\$args, \$exe, "client");
}
elsif ( $opt_client_debugger )
{
debugger_arguments(\$args, \$exe, "client");
}
if ($glob_use_libtool and $opt_valgrind)
{
@ -3476,6 +3489,42 @@ sub ddd_arguments {
mtr_add_arg($$args, "$save_exe");
}
#
# Modify the exe and args so that program is run in the selected debugger
#
sub debugger_arguments {
my $args= shift;
my $exe= shift;
my $debugger= $opt_debugger || $opt_client_debugger;
if ( $debugger eq "vcexpress" or $debugger eq "vc")
{
# vc[express] /debugexe exe arg1 .. argn
# Add /debugexe and name of the exe before args
unshift(@$$args, "/debugexe");
unshift(@$$args, "$$exe");
}
elsif ( $debugger eq "windbg" )
{
# windbg exe arg1 .. argn
# Add name of the exe before args
unshift(@$$args, "$$exe");
}
else
{
mtr_error("Unknown argument \"$debugger\" passed to --debugger");
}
# Set exe to debuggername
$$exe= $debugger;
}
#
# Modify the exe and args so that program is run in valgrind
#
@ -3588,6 +3637,8 @@ Options for debugging the product
client-gdb Start mysqltest client in gdb
ddd Start mysqld in ddd
client-ddd Start mysqltest client in ddd
debugger=NAME Start mysqld in the selected debugger
client-debugger=NAME Start mysqltest in the selected debugger
strace-client FIXME
master-binary=PATH Specify the master "mysqld" to use
slave-binary=PATH Specify the slave "mysqld" to use

View file

@ -369,3 +369,25 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
SELECT '„a' as str;
str
„a
set @str= _latin1 'ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc';
SELECT convert(@str collate latin1_bin using utf8);
convert(@str collate latin1_bin using utf8)
ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc
SELECT convert(@str collate latin1_general_ci using utf8);
convert(@str collate latin1_general_ci using utf8)
ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc
SELECT convert(@str collate latin1_german1_ci using utf8);
convert(@str collate latin1_german1_ci using utf8)
ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc
SELECT convert(@str collate latin1_danish_ci using utf8);
convert(@str collate latin1_danish_ci using utf8)
ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc
SELECT convert(@str collate latin1_spanish_ci using utf8);
convert(@str collate latin1_spanish_ci using utf8)
ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc
SELECT convert(@str collate latin1_german2_ci using utf8);
convert(@str collate latin1_german2_ci using utf8)
ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc
SELECT convert(@str collate latin1_swedish_ci using utf8);
convert(@str collate latin1_swedish_ci using utf8)
ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc

View file

@ -719,3 +719,11 @@ lily
river
drop table t1;
deallocate prepare stmt;
create table t1(a blob, b text charset utf8, c text charset ucs2);
select data_type, character_octet_length, character_maximum_length
from information_schema.columns where table_name='t1';
data_type character_octet_length character_maximum_length
blob 65535 65535
text 65535 65535
text 65535 32767
drop table t1;

View file

@ -106,7 +106,6 @@ drop event if exists event3;
Warnings:
Note 1305 Event event3 does not exist
create event event3 on schedule every 50 + 10 minute starts date_add("20100101", interval 5 minute) ends date_add("20151010", interval 5 day) comment "portokala_comment" DO insert into t_event3 values (unix_timestamp(), rand());
set max_allowed_packet=128000000;
select count(*) from t_event3;
count(*)
0
@ -232,6 +231,9 @@ Db Name Definer Type Execute at Interval value Interval field Starts Ends Status
events_test intact_check root@localhost RECURRING NULL 10 HOUR # # ENABLED
CREATE TABLE event_like LIKE mysql.event;
INSERT INTO event_like SELECT * FROM mysql.event;
ALTER TABLE mysql.event MODIFY db char(64) character set cp1251 default '';
SELECT event_name FROM INFORMATION_SCHEMA.EVENTS;
ERROR HY000: Cannot load from mysql.event. Table probably corrupted. See error log.
ALTER TABLE mysql.event MODIFY db char(20) character set utf8 collate utf8_bin default '';
SHOW CREATE TABLE mysql.event;
Table Create Table

View file

@ -0,0 +1,46 @@
CREATE DATABASE IF NOT EXISTS events_test;
USE events_test;
CREATE TABLE table_1(a int);
CREATE TABLE table_2(a int);
CREATE TABLE table_3(a int);
CREATE TABLE table_4(a int);
SET GLOBAL event_scheduler=1;
CREATE EVENT two_sec ON SCHEDULE EVERY 2 SECOND DO INSERT INTO table_1 VALUES(1);
CREATE EVENT start_n_end
ON SCHEDULE EVERY 1 SECOND
ENDS NOW() + INTERVAL 6 SECOND
ON COMPLETION PRESERVE
DO INSERT INTO table_2 VALUES(1);
CREATE EVENT only_one_time ON SCHEDULE EVERY 2 SECOND ENDS NOW() + INTERVAL 1 SECOND DO INSERT INTO table_3 VALUES(1);
CREATE EVENT two_time ON SCHEDULE EVERY 1 SECOND ENDS NOW() + INTERVAL 1 SECOND DO INSERT INTO table_4 VALUES(1);
SELECT IF(SUM(a) >= 4, 'OK', 'ERROR') FROM table_1;
IF(SUM(a) >= 4, 'OK', 'ERROR')
OK
SELECT IF(SUM(a) >= 5, 'OK', 'ERROR') FROM table_2;
IF(SUM(a) >= 5, 'OK', 'ERROR')
OK
SELECT IF(SUM(a) > 0, 'OK', 'ERROR') FROM table_3;
IF(SUM(a) > 0, 'OK', 'ERROR')
OK
SELECT IF(SUM(a) > 0, 'OK', 'ERROR') FROM table_4;
IF(SUM(a) > 0, 'OK', 'ERROR')
OK
DROP EVENT two_sec;
SELECT IF(TIME_TO_SEC(TIMEDIFF(ENDS,STARTS))=6, 'OK', 'ERROR') FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_SCHEMA=DATABASE() AND EVENT_NAME='start_n_end' AND ENDS IS NOT NULL;
IF(TIME_TO_SEC(TIMEDIFF(ENDS,STARTS))=6, 'OK', 'ERROR')
OK
SELECT IF(LAST_EXECUTED-ENDS < 2, 'OK', 'ERROR') FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_SCHEMA=DATABASE() AND EVENT_NAME='start_n_end' AND ENDS IS NOT NULL;
IF(LAST_EXECUTED-ENDS < 2, 'OK', 'ERROR')
OK
DROP EVENT start_n_end;
"Already dropped because ended. Therefore an error."
DROP EVENT only_one_time;
ERROR HY000: Unknown event 'only_one_time'
"Already dropped because ended. Therefore an error."
DROP EVENT two_time;
ERROR HY000: Unknown event 'two_time'
DROP TABLE table_1;
DROP TABLE table_2;
DROP TABLE table_3;
DROP TABLE table_4;
DROP DATABASE events_test;

View file

@ -626,3 +626,8 @@ latin1
latin1
drop table t1, t2, t3;
set names default;
create table t1 (c1 varchar(10), c2 int);
select charset(group_concat(c1 order by c2)) from t1;
charset(group_concat(c1 order by c2))
latin1
drop table t1;

View file

@ -360,6 +360,42 @@ extract(SECOND FROM "1999-01-02 10:11:12")
select extract(MONTH FROM "2001-02-00");
extract(MONTH FROM "2001-02-00")
2
SELECT EXTRACT(QUARTER FROM '2004-01-15') AS quarter;
quarter
1
SELECT EXTRACT(QUARTER FROM '2004-02-15') AS quarter;
quarter
1
SELECT EXTRACT(QUARTER FROM '2004-03-15') AS quarter;
quarter
1
SELECT EXTRACT(QUARTER FROM '2004-04-15') AS quarter;
quarter
2
SELECT EXTRACT(QUARTER FROM '2004-05-15') AS quarter;
quarter
2
SELECT EXTRACT(QUARTER FROM '2004-06-15') AS quarter;
quarter
2
SELECT EXTRACT(QUARTER FROM '2004-07-15') AS quarter;
quarter
3
SELECT EXTRACT(QUARTER FROM '2004-08-15') AS quarter;
quarter
3
SELECT EXTRACT(QUARTER FROM '2004-09-15') AS quarter;
quarter
3
SELECT EXTRACT(QUARTER FROM '2004-10-15') AS quarter;
quarter
4
SELECT EXTRACT(QUARTER FROM '2004-11-15') AS quarter;
quarter
4
SELECT EXTRACT(QUARTER FROM '2004-12-15') AS quarter;
quarter
4
SELECT "1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND;
"1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND
1968-01-20 03:14:08

View file

@ -359,3 +359,38 @@ group by s1 collate latin1_swedish_ci having s1 = 'y';
s1 count(s1)
y 1
drop table t1;
DROP SCHEMA IF EXISTS HU;
Warnings:
Note 1008 Can't drop database 'HU'; database doesn't exist
CREATE SCHEMA HU ;
USE HU ;
CREATE TABLE STAFF
(EMPNUM CHAR(3) NOT NULL UNIQUE,
EMPNAME CHAR(20),
GRADE DECIMAL(4),
CITY CHAR(15));
CREATE TABLE PROJ
(PNUM CHAR(3) NOT NULL UNIQUE,
PNAME CHAR(20),
PTYPE CHAR(6),
BUDGET DECIMAL(9),
CITY CHAR(15));
INSERT INTO STAFF VALUES ('E1','Alice',12,'Deale');
INSERT INTO STAFF VALUES ('E2','Betty',10,'Vienna');
INSERT INTO STAFF VALUES ('E3','Carmen',13,'Vienna');
INSERT INTO STAFF VALUES ('E4','Don',12,'Deale');
INSERT INTO STAFF VALUES ('E5','Ed',13,'Akron');
INSERT INTO PROJ VALUES ('P1','MXSS','Design',10000,'Deale');
INSERT INTO PROJ VALUES ('P2','CALM','Code',30000,'Vienna');
INSERT INTO PROJ VALUES ('P3','SDP','Test',30000,'Tampa');
INSERT INTO PROJ VALUES ('P4','SDP','Design',20000,'Deale');
INSERT INTO PROJ VALUES ('P5','IRM','Test',10000,'Vienna');
INSERT INTO PROJ VALUES ('P6','PAYR','Design',50000,'Deale');
SELECT EMPNUM, GRADE*1000
FROM HU.STAFF WHERE GRADE * 1000 >
ANY (SELECT SUM(BUDGET) FROM HU.PROJ
GROUP BY CITY, PTYPE
HAVING HU.PROJ.CITY = HU.STAFF.CITY);
EMPNUM GRADE*1000
E3 13000
DROP SCHEMA HU;

View file

@ -214,34 +214,34 @@ latin1 cp1252 West European latin1_swedish_ci 1
select * from information_schema.COLLATIONS
where COLLATION_NAME like 'latin1%';
COLLATION_NAME CHARACTER_SET_NAME ID IS_DEFAULT IS_COMPILED SORTLEN
latin1_german1_ci latin1 5 0
latin1_swedish_ci latin1 8 Yes Yes 1
latin1_danish_ci latin1 15 0
latin1_german2_ci latin1 31 Yes 2
latin1_bin latin1 47 Yes 1
latin1_general_ci latin1 48 0
latin1_general_cs latin1 49 0
latin1_spanish_ci latin1 94 0
latin1_german1_ci latin1 5 # 1
latin1_swedish_ci latin1 8 Yes # 1
latin1_danish_ci latin1 15 # 1
latin1_german2_ci latin1 31 # 2
latin1_bin latin1 47 # 1
latin1_general_ci latin1 48 # 1
latin1_general_cs latin1 49 # 1
latin1_spanish_ci latin1 94 # 1
SHOW COLLATION LIKE 'latin1%';
Collation Charset Id Default Compiled Sortlen
latin1_german1_ci latin1 5 0
latin1_swedish_ci latin1 8 Yes Yes 1
latin1_danish_ci latin1 15 0
latin1_german2_ci latin1 31 Yes 2
latin1_bin latin1 47 Yes 1
latin1_general_ci latin1 48 0
latin1_general_cs latin1 49 0
latin1_spanish_ci latin1 94 0
latin1_german1_ci latin1 5 # 1
latin1_swedish_ci latin1 8 Yes # 1
latin1_danish_ci latin1 15 # 1
latin1_german2_ci latin1 31 # 2
latin1_bin latin1 47 # 1
latin1_general_ci latin1 48 # 1
latin1_general_cs latin1 49 # 1
latin1_spanish_ci latin1 94 # 1
SHOW COLLATION WHERE collation like 'latin1%';
Collation Charset Id Default Compiled Sortlen
latin1_german1_ci latin1 5 0
latin1_swedish_ci latin1 8 Yes Yes 1
latin1_danish_ci latin1 15 0
latin1_german2_ci latin1 31 Yes 2
latin1_bin latin1 47 Yes 1
latin1_general_ci latin1 48 0
latin1_general_cs latin1 49 0
latin1_spanish_ci latin1 94 0
latin1_german1_ci latin1 5 # 1
latin1_swedish_ci latin1 8 Yes # 1
latin1_danish_ci latin1 15 # 1
latin1_german2_ci latin1 31 # 2
latin1_bin latin1 47 # 1
latin1_general_ci latin1 48 # 1
latin1_general_cs latin1 49 # 1
latin1_spanish_ci latin1 94 # 1
select * from information_schema.COLLATION_CHARACTER_SET_APPLICABILITY
where COLLATION_NAME like 'latin1%';
COLLATION_NAME CHARACTER_SET_NAME
@ -1084,14 +1084,6 @@ select 1 from (select 1 from test.t1) a;
1
use test;
drop table t1;
create table t1(a blob, b text charset utf8, c text charset ucs2);
select data_type, character_octet_length, character_maximum_length
from information_schema.columns where table_name='t1';
data_type character_octet_length character_maximum_length
blob 65535 65535
text 65535 65535
text 65535 32767
drop table t1;
create table t1 (f1 int(11));
create view v1 as select * from t1;
drop table t1;

View file

@ -3133,9 +3133,7 @@ SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL,
KEY `t2_ibfk_0` (`a`),
CONSTRAINT `t2_ibfk_0` FOREIGN KEY (`a`) REFERENCES `t1` (`a`),
CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`)
KEY `t2_ibfk_0` (`a`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t2,t1;
create table t1(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
@ -3214,3 +3212,34 @@ UPDATE t1 SET field1 = 'other' WHERE field2 = 'somevalu';
ERROR 23000: Upholding foreign key constraints for table 't1', entry 'other-somevalu', key 1 would lead to a duplicate entry
DROP TABLE t2;
DROP TABLE t1;
create table t1 (
c1 bigint not null,
c2 bigint not null,
primary key (c1),
unique key (c2)
) engine=innodb;
create table t2 (
c1 bigint not null,
primary key (c1)
) engine=innodb;
alter table t1 add constraint c2_fk foreign key (c2)
references t2(c1) on delete cascade;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` bigint(20) NOT NULL,
`c2` bigint(20) NOT NULL,
PRIMARY KEY (`c1`),
UNIQUE KEY `c2` (`c2`),
CONSTRAINT `c2_fk` FOREIGN KEY (`c2`) REFERENCES `t2` (`c1`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t1 drop foreign key c2_fk;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` bigint(20) NOT NULL,
`c2` bigint(20) NOT NULL,
PRIMARY KEY (`c1`),
UNIQUE KEY `c2` (`c2`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t1, t2;

View file

@ -47,6 +47,17 @@ unlock tables;
lock tables t1 write, t1 as t1_alias read;
insert into t1 select index1,nr from t1 as t1_alias;
drop table t1,t2;
create table t1 (c1 int);
create table t2 (c1 int);
create table t3 (c1 int);
lock tables t1 write, t2 write, t3 write;
drop table t2, t3, t1;
create table t1 (c1 int);
create table t2 (c1 int);
create table t3 (c1 int);
lock tables t1 write, t2 write, t3 write, t1 as t4 read;
alter table t2 add column c2 int;
drop table t1, t2, t3;
create table t1 ( a int(11) not null auto_increment, primary key(a));
create table t2 ( a int(11) not null auto_increment, primary key(a));
lock tables t1 write, t2 read;

View file

@ -597,3 +597,65 @@ NULL
explain partitions select * from t1 where f_int1 is null;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 part4_p2sp0 system NULL NULL NULL NULL 1
drop table t1;
create table t1 (a int not null, b int not null)
partition by list(a)
subpartition by hash(b) subpartitions 4
(
partition p0 values in (1),
partition p1 values in (2),
partition p2 values in (3)
);
insert into t1 values (1,1),(1,2),(1,3),(1,4),
(2,1),(2,2),(2,3),(2,4);
explain partitions select * from t1 where a=1 AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0_p0sp1,p0_p0sp2 ALL NULL NULL NULL NULL 2 Using where
drop table t1;
create table t1 (a int, b int not null)
partition by list(a)
subpartition by hash(b) subpartitions 2
(
partition p0 values in (1),
partition p1 values in (2),
partition p2 values in (3),
partition pn values in (NULL)
);
insert into t1 values (1,1),(1,2),(1,3),(1,4),
(2,1),(2,2),(2,3),(2,4), (NULL,1);
explain partitions select * from t1 where a IS NULL AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pn_p3sp0,pn_p3sp1 system NULL NULL NULL NULL 1
explain partitions select * from t1 where (a IS NULL or a < 1) AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pn_p3sp0,pn_p3sp1 system NULL NULL NULL NULL 1
explain partitions select * from t1 where (a IS NULL or a < 2) AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0_p0sp0,p0_p0sp1,pn_p3sp0,pn_p3sp1 ALL NULL NULL NULL NULL 5 Using where
explain partitions select * from t1 where (a IS NULL or a <= 1) AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0_p0sp0,p0_p0sp1,pn_p3sp0,pn_p3sp1 ALL NULL NULL NULL NULL 5 Using where
drop table t1;
create table t1 ( a int) partition by list (MOD(a, 10))
( partition p0 values in (0), partition p1 values in (1),
partition p2 values in (2), partition p3 values in (3),
partition p4 values in (4), partition p5 values in (5),
partition p6 values in (6), partition pn values in (NULL)
);
insert into t1 values (NULL), (0),(1),(2),(3),(4),(5),(6);
explain partitions select * from t1 where a is null or a < 2;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0,p1,p2,p3,p4,p5,p6,pn ALL NULL NULL NULL NULL 8 Using where
drop table t1;
create table t1 (s1 int) partition by list (s1)
(partition p1 values in (0),
partition p2 values in (1),
partition p3 values in (null));
insert into t1 values (0),(1),(null);
select count(*) from t1 where s1 < 0 or s1 is null;
count(*)
1
explain partitions select count(*) from t1 where s1 < 0 or s1 is null;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p3 system NULL NULL NULL NULL 1
drop table t1;

View file

@ -4821,6 +4821,17 @@ begin
declare x int;
select id from t1 order by x;
end|
drop procedure if exists bug14945|
create table t3 (id int not null auto_increment primary key)|
create procedure bug14945() deterministic truncate t3|
insert into t3 values (null)|
call bug14945()|
insert into t3 values (null)|
select * from t3|
id
1
drop table t3|
drop procedure bug14945|
create procedure bug16474_2(x int)
select id from t1 order by x|
call bug16474_1()|

View file

@ -2562,3 +2562,20 @@ my_sqrt
1.4142135623731
DROP VIEW v1;
DROP TABLE t1;
CREATE TABLE t1 (id int PRIMARY KEY);
CREATE TABLE t2 (id int PRIMARY KEY);
INSERT INTO t1 VALUES (1), (3);
INSERT INTO t2 VALUES (1), (2), (3);
CREATE VIEW v2 AS SELECT * FROM t2;
SELECT COUNT(*) FROM t1 LEFT JOIN t2 ON t1.id=t2.id;
COUNT(*)
2
SELECT * FROM t1 LEFT JOIN t2 ON t1.id=t2.id;
id id
1 1
3 3
SELECT COUNT(*) FROM t1 LEFT JOIN v2 ON t1.id=v2.id;
COUNT(*)
2
DROP VIEW v2;
DROP TABLE t1, t2;

View file

@ -95,4 +95,18 @@ SET collation_connection='latin1_bin';
CREATE TABLE „a (a int);
SELECT '„a' as str;
#
# Bug#18321: Can't store EuroSign with latin1_german1_ci and latin1_general_ci
# The problem was in latin1->utf8->latin1 round trip.
#
set @str= _latin1 'ABC €°§ß²³µ~ äöüÄÖÜ áéíóú ÀÈÌÒÙ @ abc';
SELECT convert(@str collate latin1_bin using utf8);
SELECT convert(@str collate latin1_general_ci using utf8);
SELECT convert(@str collate latin1_german1_ci using utf8);
SELECT convert(@str collate latin1_danish_ci using utf8);
SELECT convert(@str collate latin1_spanish_ci using utf8);
SELECT convert(@str collate latin1_german2_ci using utf8);
SELECT convert(@str collate latin1_swedish_ci using utf8);
# End of 4.1 tests

View file

@ -455,3 +455,11 @@ execute stmt using @param1;
select utext from t1 where utext like '%%';
drop table t1;
deallocate prepare stmt;
#
# Bug #14290: character_maximum_length for text fields
#
create table t1(a blob, b text charset utf8, c text charset ucs2);
select data_type, character_octet_length, character_maximum_length
from information_schema.columns where table_name='t1';
drop table t1;

View file

@ -101,7 +101,6 @@ set global event_scheduler = 0;
create table t_event3 (a int, b float);
drop event if exists event3;
create event event3 on schedule every 50 + 10 minute starts date_add("20100101", interval 5 minute) ends date_add("20151010", interval 5 day) comment "portokala_comment" DO insert into t_event3 values (unix_timestamp(), rand());
set max_allowed_packet=128000000;
select count(*) from t_event3;
drop event event3;
drop table t_event3;
@ -202,6 +201,9 @@ CREATE TABLE event_like LIKE mysql.event;
INSERT INTO event_like SELECT * FROM mysql.event;
#sleep a bit or we won't catch the change of time
--sleep 1
ALTER TABLE mysql.event MODIFY db char(64) character set cp1251 default '';
--error ER_CANNOT_LOAD_FROM_TABLE
SELECT event_name FROM INFORMATION_SCHEMA.EVENTS;
ALTER TABLE mysql.event MODIFY db char(20) character set utf8 collate utf8_bin default '';
#wait a bit or we won't see the difference because of seconds resolution
--sleep 1
@ -220,6 +222,7 @@ ALTER TABLE mysql.event MODIFY db char(64) character set cp1251 default '';
SELECT event_name FROM INFORMATION_SCHEMA.EVENTS;
--sleep 1
ALTER TABLE mysql.event MODIFY db varchar(64) character set utf8 collate utf8_bin default '';
--sleep 1
--error ER_CANNOT_LOAD_FROM_TABLE
SELECT event_name FROM INFORMATION_SCHEMA.EVENTS;
--sleep 1
@ -412,7 +415,8 @@ select 1;
select event_schema, event_name, definer, event_body from information_schema.events where event_name='white_space';
drop event white_space;
create event white_space on schedule every 10 hour disable do
select 2;
select 2;
select event_schema, event_name, definer, event_body from information_schema.events where event_name='white_space';
drop event white_space;
create event white_space on schedule every 10 hour disable do select 3;
@ -422,7 +426,7 @@ drop event white_space;
# END: BUG #17453: Creating Event crash the server
#
#
##set global event_scheduler=1;
# Bug#17403 "Events: packets out of order with show create event"
#
create event e1 on schedule every 1 year do set @a = 5;
@ -436,7 +440,7 @@ drop event e1;
##select get_lock("test_lock3", 20);
##create event закачка on schedule every 10 hour do select get_lock("test_lock3", 20);
##select sleep(2);
##select /*7*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
##show processlist;
##drop event закачка;
##select release_lock("test_lock3");
@ -446,13 +450,14 @@ drop event e1;
##select get_lock("test_lock4", 20);
##create event закачка4 on schedule every 1 second do select get_lock("test_lock4", 20);
##select sleep(3);
##select /*8*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
##--replace_column 1 # 6 #
##drop event закачка4;
##select release_lock("test_lock4");
##set global event_scheduler=0;
##select sleep(2);
##select /*9*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
##--replace_column 1 # 6 #
##select count(*) from mysql.event;
drop database events_test;

View file

@ -0,0 +1,36 @@
CREATE DATABASE IF NOT EXISTS events_test;
USE events_test;
CREATE TABLE table_1(a int);
CREATE TABLE table_2(a int);
CREATE TABLE table_3(a int);
CREATE TABLE table_4(a int);
SET GLOBAL event_scheduler=1;
CREATE EVENT two_sec ON SCHEDULE EVERY 2 SECOND DO INSERT INTO table_1 VALUES(1);
CREATE EVENT start_n_end
ON SCHEDULE EVERY 1 SECOND
ENDS NOW() + INTERVAL 6 SECOND
ON COMPLETION PRESERVE
DO INSERT INTO table_2 VALUES(1);
--sleep 5
CREATE EVENT only_one_time ON SCHEDULE EVERY 2 SECOND ENDS NOW() + INTERVAL 1 SECOND DO INSERT INTO table_3 VALUES(1);
CREATE EVENT two_time ON SCHEDULE EVERY 1 SECOND ENDS NOW() + INTERVAL 1 SECOND DO INSERT INTO table_4 VALUES(1);
--sleep 5
SELECT IF(SUM(a) >= 4, 'OK', 'ERROR') FROM table_1;
SELECT IF(SUM(a) >= 5, 'OK', 'ERROR') FROM table_2;
SELECT IF(SUM(a) > 0, 'OK', 'ERROR') FROM table_3;
SELECT IF(SUM(a) > 0, 'OK', 'ERROR') FROM table_4;
DROP EVENT two_sec;
SELECT IF(TIME_TO_SEC(TIMEDIFF(ENDS,STARTS))=6, 'OK', 'ERROR') FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_SCHEMA=DATABASE() AND EVENT_NAME='start_n_end' AND ENDS IS NOT NULL;
SELECT IF(LAST_EXECUTED-ENDS < 2, 'OK', 'ERROR') FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_SCHEMA=DATABASE() AND EVENT_NAME='start_n_end' AND ENDS IS NOT NULL;
DROP EVENT start_n_end;
--echo "Already dropped because ended. Therefore an error."
--error ER_EVENT_DOES_NOT_EXIST
DROP EVENT only_one_time;
--echo "Already dropped because ended. Therefore an error."
--error ER_EVENT_DOES_NOT_EXIST
DROP EVENT two_time;
DROP TABLE table_1;
DROP TABLE table_2;
DROP TABLE table_3;
DROP TABLE table_4;
DROP DATABASE events_test;

View file

@ -414,3 +414,11 @@ select charset(a) from t2;
select charset(a) from t3;
drop table t1, t2, t3;
set names default;
#
# Bug#18281 group_concat changes charset to binary
#
create table t1 (c1 varchar(10), c2 int);
select charset(group_concat(c1 order by c2)) from t1;
drop table t1;

View file

@ -139,6 +139,24 @@ select extract(MINUTE_SECOND FROM "10:11:12");
select extract(SECOND FROM "1999-01-02 10:11:12");
select extract(MONTH FROM "2001-02-00");
#
# test EXTRACT QUARTER (Bug #18100)
#
SELECT EXTRACT(QUARTER FROM '2004-01-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-02-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-03-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-04-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-05-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-06-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-07-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-08-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-09-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-10-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-11-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-12-15') AS quarter;
#
# Test big intervals (Bug #3498)
#

View file

@ -347,3 +347,47 @@ group by s1 collate latin1_swedish_ci having s1 = 'y';
# MySQL returns: 1 row, with count(s1) = 1
drop table t1;
#
# Bug #15917: unexpected complaint about a name in the having clause
# when the server is run on Windows or with --lower-case-table-names=1
#
DROP SCHEMA IF EXISTS HU;
CREATE SCHEMA HU ;
USE HU ;
CREATE TABLE STAFF
(EMPNUM CHAR(3) NOT NULL UNIQUE,
EMPNAME CHAR(20),
GRADE DECIMAL(4),
CITY CHAR(15));
CREATE TABLE PROJ
(PNUM CHAR(3) NOT NULL UNIQUE,
PNAME CHAR(20),
PTYPE CHAR(6),
BUDGET DECIMAL(9),
CITY CHAR(15));
INSERT INTO STAFF VALUES ('E1','Alice',12,'Deale');
INSERT INTO STAFF VALUES ('E2','Betty',10,'Vienna');
INSERT INTO STAFF VALUES ('E3','Carmen',13,'Vienna');
INSERT INTO STAFF VALUES ('E4','Don',12,'Deale');
INSERT INTO STAFF VALUES ('E5','Ed',13,'Akron');
INSERT INTO PROJ VALUES ('P1','MXSS','Design',10000,'Deale');
INSERT INTO PROJ VALUES ('P2','CALM','Code',30000,'Vienna');
INSERT INTO PROJ VALUES ('P3','SDP','Test',30000,'Tampa');
INSERT INTO PROJ VALUES ('P4','SDP','Design',20000,'Deale');
INSERT INTO PROJ VALUES ('P5','IRM','Test',10000,'Vienna');
INSERT INTO PROJ VALUES ('P6','PAYR','Design',50000,'Deale');
SELECT EMPNUM, GRADE*1000
FROM HU.STAFF WHERE GRADE * 1000 >
ANY (SELECT SUM(BUDGET) FROM HU.PROJ
GROUP BY CITY, PTYPE
HAVING HU.PROJ.CITY = HU.STAFF.CITY);
DROP SCHEMA HU;

View file

@ -97,9 +97,12 @@ SHOW CHARACTER SET WHERE charset like 'latin1%';
# Test for information_schema.COLLATIONS &
# SHOW COLLATION
--replace_column 5 #
select * from information_schema.COLLATIONS
where COLLATION_NAME like 'latin1%';
--replace_column 5 #
SHOW COLLATION LIKE 'latin1%';
--replace_column 5 #
SHOW COLLATION WHERE collation like 'latin1%';
select * from information_schema.COLLATION_CHARACTER_SET_APPLICABILITY
@ -739,15 +742,6 @@ select 1 from (select 1 from test.t1) a;
use test;
drop table t1;
#
# Bug #14290: character_maximum_length for text fields
#
create table t1(a blob, b text charset utf8, c text charset ucs2);
select data_type, character_octet_length, character_maximum_length
from information_schema.columns where table_name='t1';
drop table t1;
#
# Bug#14476 `information_schema`.`TABLES`.`TABLE_TYPE` with empty value
#

View file

@ -2113,3 +2113,28 @@ UPDATE t1 SET field1 = 'other' WHERE field2 = 'somevalu';
DROP TABLE t2;
DROP TABLE t1;
#
# Bug#18477 - MySQL/InnoDB Ignoring Foreign Keys in ALTER TABLE
#
create table t1 (
c1 bigint not null,
c2 bigint not null,
primary key (c1),
unique key (c2)
) engine=innodb;
#
create table t2 (
c1 bigint not null,
primary key (c1)
) engine=innodb;
#
alter table t1 add constraint c2_fk foreign key (c2)
references t2(c1) on delete cascade;
show create table t1;
#
alter table t1 drop foreign key c2_fk;
show create table t1;
#
drop table t1, t2;

View file

@ -61,6 +61,24 @@ insert into t1 select index1,nr from t1 as t1_alias;
drop table t1,t2;
#
# BUG#5390 - problems with merge tables
# Supplement test for the after-fix optimization
# Check that a dropped table is correctly removed from a lock.
create table t1 (c1 int);
create table t2 (c1 int);
create table t3 (c1 int);
lock tables t1 write, t2 write, t3 write;
# This removes one table after the other from the lock.
drop table t2, t3, t1;
#
# Check that a lock merge works.
create table t1 (c1 int);
create table t2 (c1 int);
create table t3 (c1 int);
lock tables t1 write, t2 write, t3 write, t1 as t4 read;
alter table t2 add column c2 int;
drop table t1, t2, t3;
# Bug7241 - Invalid response when DELETE .. USING and LOCK TABLES used.
#
create table t1 ( a int(11) not null auto_increment, primary key(a));

View file

@ -492,6 +492,65 @@ insert into t1 set f_int1 = null;
select * from t1 where f_int1 is null;
explain partitions select * from t1 where f_int1 is null;
drop table t1;
#
# BUG#18558
#
create table t1 (a int not null, b int not null)
partition by list(a)
subpartition by hash(b) subpartitions 4
(
partition p0 values in (1),
partition p1 values in (2),
partition p2 values in (3)
);
insert into t1 values (1,1),(1,2),(1,3),(1,4),
(2,1),(2,2),(2,3),(2,4);
explain partitions select * from t1 where a=1 AND (b=1 OR b=2);
drop table t1;
create table t1 (a int, b int not null)
partition by list(a)
subpartition by hash(b) subpartitions 2
(
partition p0 values in (1),
partition p1 values in (2),
partition p2 values in (3),
partition pn values in (NULL)
);
insert into t1 values (1,1),(1,2),(1,3),(1,4),
(2,1),(2,2),(2,3),(2,4), (NULL,1);
explain partitions select * from t1 where a IS NULL AND (b=1 OR b=2);
explain partitions select * from t1 where (a IS NULL or a < 1) AND (b=1 OR b=2);
explain partitions select * from t1 where (a IS NULL or a < 2) AND (b=1 OR b=2);
explain partitions select * from t1 where (a IS NULL or a <= 1) AND (b=1 OR b=2);
drop table t1;
create table t1 ( a int) partition by list (MOD(a, 10))
( partition p0 values in (0), partition p1 values in (1),
partition p2 values in (2), partition p3 values in (3),
partition p4 values in (4), partition p5 values in (5),
partition p6 values in (6), partition pn values in (NULL)
);
insert into t1 values (NULL), (0),(1),(2),(3),(4),(5),(6);
explain partitions select * from t1 where a is null or a < 2;
drop table t1;
# Testcase from BUG#18751
create table t1 (s1 int) partition by list (s1)
(partition p1 values in (0),
partition p2 values in (1),
partition p3 values in (null));
insert into t1 values (0),(1),(null);
select count(*) from t1 where s1 < 0 or s1 is null;
explain partitions select count(*) from t1 where s1 < 0 or s1 is null;
drop table t1;
# No tests for NULLs in RANGE(monotonic_expr()) - they depend on BUG#15447
# being fixed.

View file

@ -2,5 +2,6 @@
# By JBM 2006-02-14 Test wrapping to #
# Share test code between engine tests #
#########################################
--source include/have_ndb.inc
let $engine_type=NDB;
-- source extra/rpl_tests/rpl_delete_no_where.test

View file

@ -5672,6 +5672,21 @@ begin
select id from t1 order by x;
end|
#
# BUG#14945: Truncate table doesn't reset the auto_increment counter
#
--disable_warnings
drop procedure if exists bug14945|
--enable_warnings
create table t3 (id int not null auto_increment primary key)|
create procedure bug14945() deterministic truncate t3|
insert into t3 values (null)|
call bug14945()|
insert into t3 values (null)|
select * from t3|
drop table t3|
drop procedure bug14945|
# This does NOT order by column index; variable is an expression.
create procedure bug16474_2(x int)
select id from t1 order by x|

View file

@ -2418,3 +2418,24 @@ SELECT my_sqrt FROM v1 ORDER BY my_sqrt;
DROP VIEW v1;
DROP TABLE t1;
#
# Bug #18237: invalid count optimization applied to an outer join with a view
#
CREATE TABLE t1 (id int PRIMARY KEY);
CREATE TABLE t2 (id int PRIMARY KEY);
INSERT INTO t1 VALUES (1), (3);
INSERT INTO t2 VALUES (1), (2), (3);
CREATE VIEW v2 AS SELECT * FROM t2;
SELECT COUNT(*) FROM t1 LEFT JOIN t2 ON t1.id=t2.id;
SELECT * FROM t1 LEFT JOIN t2 ON t1.id=t2.id;
SELECT COUNT(*) FROM t1 LEFT JOIN v2 ON t1.id=v2.id;
DROP VIEW v2;
DROP TABLE t1, t2;

View file

@ -43,22 +43,19 @@ int my_getpagesize(void)
void *my_mmap(void *addr, size_t len, int prot,
int flags, int fd, my_off_t offset)
{
DWORD flProtect=0;
HANDLE hFileMap;
LPVOID ptr;
HANDLE hFile= (HANDLE)_get_osfhandle(fd);
if (hFile == INVALID_HANDLE_VALUE)
return MAP_FAILED;
flProtect|=SEC_COMMIT;
hFileMap=CreateFileMapping(hFile, &mmap_security_attributes,
PAGE_READWRITE, 0, (DWORD) len, NULL);
if (hFileMap == 0)
return MAP_FAILED;
ptr=MapViewOfFile(hFileMap,
flags & PROT_WRITE ? FILE_MAP_WRITE : FILE_MAP_READ,
prot & PROT_WRITE ? FILE_MAP_WRITE : FILE_MAP_READ,
(DWORD)(offset >> 32), (DWORD)offset, len);
/*

plugin/fulltext/AUTHORS Normal file
View file

@ -0,0 +1 @@
AUTHORS file example for a plugin

View file

@ -0,0 +1 @@
ChangeLog file example for a plugin

View file

@ -1,4 +1,9 @@
#Makefile.am example for a plugin
pkglibdir=$(libdir)/mysql
INCLUDES= -I$(top_builddir)/include -I$(top_srcdir)/include
noinst_LTLIBRARIES= mypluglib.la
#pkglib_LTLIBRARIES= mypluglib.la
mypluglib_la_SOURCES= plugin_example.c
mypluglib_la_LDFLAGS= -module -rpath $(pkglibdir)

plugin/fulltext/NEWS Normal file
View file

@ -0,0 +1 @@
NEWS file example for a plugin

plugin/fulltext/README Normal file
View file

@ -0,0 +1 @@
README file example for a plugin

View file

@ -0,0 +1,9 @@
# configure.in example for a plugin
AC_INIT(plugin_example, 0.1)
AM_INIT_AUTOMAKE
AC_DISABLE_STATIC
AC_PROG_LIBTOOL
AC_CONFIG_FILES([Makefile])
AC_OUTPUT

View file

@ -17,7 +17,7 @@
#include <ctype.h>
#include <mysql/plugin.h>
long number_of_calls= 0; /* for SHOW STATUS, see below */
static long number_of_calls= 0; /* for SHOW STATUS, see below */
/*
Simple full-text parser plugin that acts as a replacement for the
@ -84,7 +84,7 @@ static int simple_parser_plugin_deinit(void)
/*
Initialize the parser at ... [WHEN]
Initialize the parser on the first use in the query
SYNOPSIS
simple_parser_init()
@ -104,7 +104,7 @@ static int simple_parser_init(MYSQL_FTPARSER_PARAM *param)
/*
Terminate the parser at ... [WHEN]
Terminate the parser at the end of the query
SYNOPSIS
simple_parser_deinit()
@ -164,7 +164,7 @@ static void add_word(MYSQL_FTPARSER_PARAM *param, char *word, size_t len)
and passes every word to the MySQL full-text indexing engine.
*/
int simple_parser_parse(MYSQL_FTPARSER_PARAM *param)
static int simple_parser_parse(MYSQL_FTPARSER_PARAM *param)
{
char *end, *start, *docend= param->doc + param->length;
@ -205,7 +205,7 @@ static struct st_mysql_ftparser simple_parser_descriptor=
Plugin status variables for SHOW STATUS
*/
struct st_mysql_show_var simple_status[]=
static struct st_mysql_show_var simple_status[]=
{
{"static", (char *)"just a static text", SHOW_CHAR},
{"called", (char *)&number_of_calls, SHOW_LONG},
@ -229,3 +229,4 @@ mysql_declare_plugin
simple_status /* status variables */
}
mysql_declare_plugin_end;

View file

@ -2817,6 +2817,36 @@ const char * STDCALL mysql_error(MYSQL *mysql)
return mysql->net.last_error;
}
/*
Get version number for server in a form easy to test on
SYNOPSIS
mysql_get_server_version()
mysql Connection
EXAMPLE
4.1.0-alfa -> 40100
NOTES
We will ensure that a newer server always has a bigger number.
RETURN
Signed number > 323000
*/
ulong STDCALL
mysql_get_server_version(MYSQL *mysql)
{
uint major, minor, version;
char *pos= mysql->server_version, *end_pos;
major= (uint) strtoul(pos, &end_pos, 10); pos=end_pos+1;
minor= (uint) strtoul(pos, &end_pos, 10); pos=end_pos+1;
version= (uint) strtoul(pos, &end_pos, 10);
return (ulong) major*10000L+(ulong) (minor*100+version);
}
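/*
  Editorial aside, not part of the patch: a minimal standalone sketch of the
  version encoding documented above, using only the standard C library. The
  helper name version_number() is hypothetical; the real implementation is
  mysql_get_server_version() shown right above. "5.1.10-beta" encodes as
  5*10000 + 1*100 + 10 = 50110.
*/
#include <stdio.h>
#include <stdlib.h>
static unsigned long version_number(const char *server_version)
{
  char *end;
  unsigned long major, minor, version;
  major=   strtoul(server_version, &end, 10);   /* "5"  */
  minor=   strtoul(end + 1, &end, 10);          /* "1"  */
  version= strtoul(end + 1, &end, 10);          /* "10" */
  return major*10000UL + minor*100UL + version;
}
int main(void)
{
  printf("%lu\n", version_number("5.1.10-beta"));  /* prints 50110 */
  return 0;
}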
/*
mysql_set_character_set function sends SET NAMES cs_name to
the server (which changes character_set_client, character_set_result
@ -2836,6 +2866,9 @@ int STDCALL mysql_set_character_set(MYSQL *mysql, const char *cs_name)
{
char buff[MY_CS_NAME_SIZE + 10];
charsets_dir= save_csdir;
/* Skip execution of "SET NAMES" for pre-4.1 servers */
if (mysql_get_server_version(mysql) < 40100)
return 0;
sprintf(buff, "SET NAMES %s", cs_name);
if (!mysql_real_query(mysql, buff, strlen(buff)))
{
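/*
  Editorial aside, not part of the patch: a caller-side sketch of
  mysql_set_character_set() with the pre-4.1 guard added above. It assumes a
  reachable server and libmysqlclient; host, user and password are
  placeholders. Build with something like: gcc demo.c $(mysql_config --cflags --libs)
*/
#include <stdio.h>
#include <mysql.h>
int main(void)
{
  MYSQL *conn= mysql_init(NULL);
  if (!mysql_real_connect(conn, "localhost", "user", "password", NULL, 0, NULL, 0))
    return 1;
  /* Against a 4.1+ server this sends "SET NAMES latin1"; against an older
     server the new guard makes the call return 0 without sending anything. */
  if (mysql_set_character_set(conn, "latin1") == 0)
    printf("connection charset: %s\n", mysql_character_set_name(conn));
  mysql_close(conn);
  return 0;
}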

View file

@ -1051,13 +1051,6 @@ evex_load_and_compile_event(THD * thd, sp_name *spn, LEX_STRING definer,
thd->restore_backup_open_tables_state(&backup);
if (ret)
goto done;
/*
allocate on evex_mem_root. if you call without evex_mem_root
then sphead will not be cleared!
*/
if ((ret= ett->compile(thd, &evex_mem_root)))
goto done;
ett->compute_next_execution_time();
if (use_lock)

View file

@ -42,6 +42,8 @@ pthread_mutex_t LOCK_event_arrays, // mutex for when working with t
LOCK_workers_count, // mutex for when inc/dec uint workers_count
LOCK_evex_running; // mutes for managing bool evex_is_running
static pthread_mutex_t LOCK_evex_main_thread; // mutex for when working with the queue
bool scheduler_main_thread_running= false;
bool evex_is_running= false;
@ -111,6 +113,7 @@ evex_init_mutexes()
pthread_mutex_init(&LOCK_event_arrays, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&LOCK_workers_count, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&LOCK_evex_running, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&LOCK_evex_main_thread, MY_MUTEX_INIT_FAST);
event_executor_running_global_var= opt_event_executor;
}
@ -241,6 +244,7 @@ shutdown_events()
pthread_mutex_destroy(&LOCK_event_arrays);
pthread_mutex_destroy(&LOCK_workers_count);
pthread_mutex_destroy(&LOCK_evex_running);
pthread_mutex_destroy(&LOCK_evex_main_thread);
}
DBUG_VOID_RETURN;
}
@ -351,6 +355,7 @@ executor_wait_till_next_event_exec(THD *thd)
t2sleep= evex_time_diff(&et->execute_at, &time_now);
VOID(pthread_mutex_unlock(&LOCK_event_arrays));
t2sleep*=20;
DBUG_PRINT("evex main thread",("unlocked LOCK_event_arrays"));
if (t2sleep > 0)
{
@ -366,7 +371,7 @@ executor_wait_till_next_event_exec(THD *thd)
modified))
{
DBUG_PRINT("evex main thread",("will sleep a bit more."));
my_sleep(1000000);
my_sleep(50000);
}
DBUG_PRINT("info",("saved_modified=%llu current=%llu", modified,
evex_queue_num_elements(EVEX_EQ_NAME)?
@ -407,10 +412,23 @@ event_executor_main(void *arg)
THD *thd; /* needs to be first for thread_stack */
uint i=0, j=0;
my_ulonglong cnt= 0;
DBUG_ENTER("event_executor_main");
DBUG_PRINT("event_executor_main", ("EVEX thread started"));
pthread_mutex_lock(&LOCK_evex_main_thread);
if (!scheduler_main_thread_running)
scheduler_main_thread_running= true;
else
{
DBUG_PRINT("event_executor_main", ("already running. thd_id=%d",
evex_main_thread_id));
pthread_mutex_unlock(&LOCK_evex_main_thread);
my_thread_end();
pthread_exit(0);
DBUG_RETURN(0); // Can't return anything here
}
pthread_mutex_unlock(&LOCK_evex_main_thread);
/* init memory root */
init_alloc_root(&evex_mem_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC);
@ -489,7 +507,7 @@ event_executor_main(void *arg)
if (!evex_queue_num_elements(EVEX_EQ_NAME))
{
my_sleep(1000000);// sleep 1s
my_sleep(100000);// sleep 0.1s
continue;
}
@ -652,12 +670,17 @@ finish:
err_no_thd:
VOID(pthread_mutex_lock(&LOCK_evex_running));
evex_is_running= false;
event_executor_running_global_var= false;
VOID(pthread_mutex_unlock(&LOCK_evex_running));
free_root(&evex_mem_root, MYF(0));
sql_print_information("SCHEDULER: Stopped.");
#ifndef DBUG_FAULTY_THR
pthread_mutex_lock(&LOCK_evex_main_thread);
scheduler_main_thread_running= false;
pthread_mutex_unlock(&LOCK_evex_main_thread);
my_thread_end();
pthread_exit(0);
#endif

View file

@ -593,28 +593,9 @@ Event_timed::load_from_row(MEM_ROOT *mem_root, TABLE *table)
et->created= table->field[EVEX_FIELD_CREATED]->val_int();
et->modified= table->field[EVEX_FIELD_MODIFIED]->val_int();
/*
ToDo Andrey : Ask PeterG & Serg what to do in this case.
Whether on load last_executed_at should be loaded
or it must be 0ed. If last_executed_at is loaded
then an event can be scheduled for execution
instantly. Let's say an event has to be executed
every 15 mins. The server has been stopped for
more than this time and then started. If L_E_AT
is loaded from DB, execution at L_E_AT+15min
will be scheduled. However this time is in the past.
Hence immediate execution. Due to patch of
::mark_last_executed() last_executed gets time_now
and not execute_at. If not like this a big
queue can be scheduled for times which are still in
the past (2, 3 and more executions which will be
consequent).
*/
set_zero_time(&last_executed, MYSQL_TIMESTAMP_DATETIME);
#ifdef ANDREY_0
table->field[EVEX_FIELD_LAST_EXECUTED]->
get_date(&et->last_executed, TIME_NO_ZERO_DATE);
#endif
last_executed_changed= false;
/* ToDo : Andrey . Find a way not to allocate ptr on event_mem_root */
@ -648,70 +629,164 @@ error:
/*
Computes the sum of a timestamp plus interval
Computes the sum of a timestamp plus an interval. It is presumed that at least
one previous execution has occurred.
SYNOPSIS
get_next_time(TIME *start, int interval_value, interval_type interval)
next the sum
start add interval_value to this time
time_now current time
i_value quantity of time type interval to add
i_type type of interval to add (SECOND, MINUTE, HOUR, WEEK ...)
RETURNS
0 OK
1 Error
NOTES
1) If the interval is convertible to SECOND, like MINUTE, HOUR, DAY, WEEK,
then we use TIMEDIFF()'s implementation as the underlying machinery and
the number of seconds as the resolution for the computation.
2) In all other cases - MONTH, QUARTER, YEAR - we use MONTH as the resolution
and PERIOD_DIFF()'s implementation.
3) We get the difference between time_now and `start`, then divide it
by the months (respectively seconds) and round up. Then we multiply the
months/seconds by the rounded value and add it to `start` -> we get
the next execution time.
*/
static
bool get_next_time(TIME *next, TIME *start, int i_value, interval_type i_type)
bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
int i_value, interval_type i_type)
{
bool ret;
INTERVAL interval;
TIME tmp;
longlong months=0, seconds=0;
DBUG_ENTER("get_next_time");
DBUG_PRINT("enter", ("start=%llu now=%llu", TIME_to_ulonglong_datetime(start),
TIME_to_ulonglong_datetime(time_now)));
bzero(&interval, sizeof(interval));
switch (i_type) {
case INTERVAL_YEAR:
interval.year= (ulong) i_value;
months= i_value*12;
break;
case INTERVAL_QUARTER:
interval.month= (ulong)(i_value*3);
break;
/* Has already been converted to months */
case INTERVAL_YEAR_MONTH:
case INTERVAL_MONTH:
interval.month= (ulong) i_value;
months= i_value;
break;
case INTERVAL_WEEK:
interval.day= (ulong)(i_value*7);
break;
/* WEEK has already been converted to days */
case INTERVAL_DAY:
interval.day= (ulong) i_value;
seconds= i_value*24*3600;
break;
case INTERVAL_DAY_HOUR:
case INTERVAL_HOUR:
interval.hour= (ulong) i_value;
seconds= i_value*3600;
break;
case INTERVAL_DAY_MINUTE:
case INTERVAL_HOUR_MINUTE:
case INTERVAL_MINUTE:
interval.minute=i_value;
seconds= i_value*60;
break;
case INTERVAL_DAY_SECOND:
case INTERVAL_HOUR_SECOND:
case INTERVAL_MINUTE_SECOND:
case INTERVAL_SECOND:
interval.second=i_value;
seconds= i_value;
break;
case INTERVAL_DAY_MICROSECOND:
case INTERVAL_HOUR_MICROSECOND:
case INTERVAL_MINUTE_MICROSECOND:
case INTERVAL_SECOND_MICROSECOND:
case INTERVAL_MICROSECOND:
interval.second_part=i_value;
/*
We should return an error here so SHOW EVENTS/ SELECT FROM I_S.EVENTS
would give an error then.
*/
DBUG_RETURN(1);
break;
}
tmp= *start;
if (!(ret= date_add_interval(&tmp, i_type, interval)))
*next= tmp;
DBUG_PRINT("info", ("seconds=%ld months=%ld", seconds, months));
if (seconds)
{
longlong seconds_diff;
long microsec_diff;
if (calc_time_diff(time_now, start, 1, &seconds_diff, &microsec_diff))
{
DBUG_PRINT("error", ("negative difference"));
DBUG_ASSERT(0);
}
uint multiplier= seconds_diff / seconds;
/*
Increase the multiplier if the modulus is not zero, to round up.
Also, if time_now==start we should not execute the same event
twice for the same time, so move on to the next execution.
*/
DBUG_PRINT("info", ("multiplier=%d", multiplier));
if (seconds_diff % seconds || (!seconds_diff && last_exec->year))
++multiplier;
interval.second= seconds * multiplier;
DBUG_PRINT("info", ("multiplier=%u interval.second=%u", multiplier,
interval.second));
tmp= *start;
if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval)))
*next= tmp;
}
else
{
/* It is presumed that at least one execution has already taken place */
int diff_months= (time_now->year - start->year)*12 +
(time_now->month - start->month);
/*
Note: If diff_months is 0 that means we are in the same month as the
last execution which is also the first execution.
*/
/*
First we try with the smaller multiple; if that is still in the past we add
one more interval, because jumping directly to the larger multiple could put
us after the current date but one month too far ahead, so 2 steps are necessary.
*/
interval.month= (diff_months / months)*months;
/*
Check whether we are in the same month as last_exec (always set - prerequisite).
An event happens at most once per month, so there is no way to schedule
it twice for the current month. This saves us two calls to
date_add_interval() if the event was just executed. But if the scheduler
is started and at least one scheduled date was skipped, this check does
not help and two calls to date_add_interval() will be done, which is a
bit more expensive but negligible given how rare the case is.
*/
if (time_now->year==last_exec->year && time_now->month==last_exec->month)
interval.month+= months;
return ret;
tmp= *start;
if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval)))
goto done;
/* If `tmp` is still before time_now, add the interval one more time */
if (my_time_compare(&tmp, time_now) == -1)
{
interval.month+= months;
tmp= *start;
if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval)))
goto done;
}
*next= tmp;
/* assert that `next` is after now */
DBUG_ASSERT(1==my_time_compare(next, time_now));
}
done:
DBUG_PRINT("info", ("next=%llu", TIME_to_ulonglong_datetime(next)));
DBUG_RETURN(ret);
}
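
/*
  A minimal standalone sketch (plain C++, not the server code) of what the
  seconds branch above computes: how many whole intervals fit between `start`
  and `now`, rounded up when `now` falls between boundaries or equals `start`
  for an event that has already run. All names below are illustrative.
*/
#include <cassert>
#include <ctime>

static time_t next_execution(time_t start, time_t now, time_t interval_sec,
                             bool executed_before)
{
  assert(interval_sec > 0 && now >= start);
  time_t diff= now - start;
  time_t multiplier= diff / interval_sec;
  /* Round up between boundaries, or on the start boundary after a run. */
  if (diff % interval_sec || (diff == 0 && executed_before))
    multiplier++;
  return start + multiplier * interval_sec;    /* e.g. 0, 130, 60 -> 180 */
}
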
@ -734,6 +809,10 @@ Event_timed::compute_next_execution_time()
int tmp;
DBUG_ENTER("Event_timed::compute_next_execution_time");
DBUG_PRINT("enter", ("starts=%llu ends=%llu last_executed=%llu",
TIME_to_ulonglong_datetime(&starts),
TIME_to_ulonglong_datetime(&ends),
TIME_to_ulonglong_datetime(&last_executed)));
if (status == MYSQL_EVENT_DISABLED)
{
@ -757,29 +836,14 @@ Event_timed::compute_next_execution_time()
}
goto ret;
}
time((time_t *)&now);
my_tz_UTC->gmt_sec_to_TIME(&time_now, now);
my_tz_UTC->gmt_sec_to_TIME(&time_now, current_thd->query_start());
#ifdef ANDREY_0
sql_print_information("[%s.%s]", dbname.str, name.str);
sql_print_information("time_now : [%d-%d-%d %d:%d:%d ]",
time_now.year, time_now.month, time_now.day,
time_now.hour, time_now.minute, time_now.second);
sql_print_information("starts : [%d-%d-%d %d:%d:%d ]", starts.year,
starts.month, starts.day, starts.hour,
starts.minute, starts.second);
sql_print_information("ends : [%d-%d-%d %d:%d:%d ]", ends.year,
ends.month, ends.day, ends.hour,
ends.minute, ends.second);
sql_print_information("m_last_ex: [%d-%d-%d %d:%d:%d ]", last_executed.year,
last_executed.month, last_executed.day,
last_executed.hour, last_executed.minute,
last_executed.second);
#endif
DBUG_PRINT("info",("NOW=[%llu]", TIME_to_ulonglong_datetime(&time_now)));
/* if time_now is after ends don't execute anymore */
if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1)
{
DBUG_PRINT("info", ("NOW after ENDS, don't execute anymore"));
/* time_now is after ends. don't execute anymore */
set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
execute_at_null= TRUE;
@ -807,6 +871,7 @@ Event_timed::compute_next_execution_time()
}
else
{
DBUG_PRINT("info", ("STARTS is future, NOW <= STARTS,sched for STARTS"));
/*
starts is in the future
time_now before starts. Scheduling for starts
@ -825,8 +890,10 @@ Event_timed::compute_next_execution_time()
after m_ends set execute_at to 0. And check for on_completion
If not set then schedule for now.
*/
DBUG_PRINT("info", ("Both STARTS & ENDS are set"));
if (!last_executed.year)
{
DBUG_PRINT("info", ("Not executed so far. Execute NOW."));
execute_at= time_now;
execute_at_null= FALSE;
}
@ -834,12 +901,15 @@ Event_timed::compute_next_execution_time()
{
TIME next_exec;
if (get_next_time(&next_exec, &last_executed, expression, interval))
DBUG_PRINT("info", ("Executed at least once"));
if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
expression, interval))
goto err;
/* There was previous execution */
if (my_time_compare(&ends, &next_exec) == -1)
{
DBUG_PRINT("info", ("Next execution after ENDS. Stop executing."));
/* Next execution after ends. No more executions */
set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
execute_at_null= TRUE;
@ -848,6 +918,7 @@ Event_timed::compute_next_execution_time()
}
else
{
DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec)));
execute_at= next_exec;
execute_at_null= FALSE;
}
@ -856,18 +927,24 @@ Event_timed::compute_next_execution_time()
}
else if (starts_null && ends_null)
{
DBUG_PRINT("info", ("Neither STARTS nor ENDS are set"));
/*
Both starts and m_ends are not set, so we schedule for the next
based on last_executed.
*/
if (last_executed.year)
{
if (get_next_time(&execute_at, &last_executed, expression, interval))
TIME next_exec;
if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
expression, interval))
goto err;
execute_at= next_exec;
DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec)));
}
else
{
/* last_executed not set. Schedule the event for now */
DBUG_PRINT("info", ("Execute NOW"));
execute_at= time_now;
}
execute_at_null= FALSE;
@ -877,6 +954,7 @@ Event_timed::compute_next_execution_time()
/* either starts or m_ends is set */
if (!starts_null)
{
DBUG_PRINT("info", ("STARTS is set"));
/*
- starts is set.
- starts is not in the future according to check made before
@ -885,15 +963,24 @@ Event_timed::compute_next_execution_time()
*/
if (last_executed.year)
{
if (get_next_time(&execute_at, &last_executed, expression, interval))
TIME next_exec;
DBUG_PRINT("info", ("Executed at least once."));
if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
expression, interval))
goto err;
execute_at= next_exec;
DBUG_PRINT("info",("Next[%llu]",TIME_to_ulonglong_datetime(&next_exec)));
}
else
{
DBUG_PRINT("info", ("Not executed so far. Execute at STARTS"));
execute_at= starts;
}
execute_at_null= FALSE;
}
else
{
DBUG_PRINT("info", ("STARTS is not set. ENDS is set"));
/*
- m_ends is set
- m_ends is after time_now or is equal
@ -907,11 +994,13 @@ Event_timed::compute_next_execution_time()
{
TIME next_exec;
if (get_next_time(&next_exec, &last_executed, expression, interval))
if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
expression, interval))
goto err;
if (my_time_compare(&ends, &next_exec) == -1)
{
DBUG_PRINT("info", ("Next execution after ENDS. Stop executing."));
set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
execute_at_null= TRUE;
if (on_completion == MYSQL_EVENT_ON_COMPLETION_DROP)
@ -919,6 +1008,8 @@ Event_timed::compute_next_execution_time()
}
else
{
DBUG_PRINT("info", ("Next[%llu]",
TIME_to_ulonglong_datetime(&next_exec)));
execute_at= next_exec;
execute_at_null= FALSE;
}
@ -927,9 +1018,10 @@ Event_timed::compute_next_execution_time()
goto ret;
}
ret:
DBUG_PRINT("info", ("ret=0"));
DBUG_RETURN(false);
err:
DBUG_PRINT("info", ("ret=1"));
DBUG_RETURN(true);
}
@ -1462,6 +1554,7 @@ Event_timed::spawn_now(void * (*thread_func)(void*))
int ret= EVENT_EXEC_STARTED;
static uint exec_num= 0;
DBUG_ENTER("Event_timed::spawn_now");
DBUG_PRINT("info", ("this=0x%lx", this));
DBUG_PRINT("info", ("[%s.%s]", dbname.str, name.str));
VOID(pthread_mutex_lock(&this->LOCK_running));

View file

@ -1000,20 +1000,23 @@ public:
class Field_string :public Field_longstr {
public:
bool can_alter_field_type;
Field_string(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
CHARSET_INFO *cs)
:Field_longstr(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, cs) {};
unireg_check_arg, field_name_arg, cs),
can_alter_field_type(1) {};
Field_string(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
CHARSET_INFO *cs)
:Field_longstr((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
NONE, field_name_arg, cs) {};
NONE, field_name_arg, cs),
can_alter_field_type(1) {};
enum_field_types type() const
{
return ((orig_table &&
return ((can_alter_field_type && orig_table &&
orig_table->s->db_create_options & HA_OPTION_PACK_RECORD &&
field_length >= 4) &&
orig_table->s->frm_version < FRM_VER_TRUE_VARCHAR ?

View file

@ -3061,6 +3061,7 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list)
int found_match_degree= 0;
Item_ident *cur_field;
int cur_match_degree= 0;
char name_buff[NAME_LEN+1];
if (find_item->type() == Item::FIELD_ITEM ||
find_item->type() == Item::REF_ITEM)
@ -3072,6 +3073,14 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list)
else
return NULL;
if (db_name && lower_case_table_names)
{
/* Convert database to lower case for comparison */
strmake(name_buff, db_name, sizeof(name_buff)-1);
my_casedn_str(files_charset_info, name_buff);
db_name= name_buff;
}
DBUG_ASSERT(field_name != 0);
for (ORDER *cur_group= group_list ; cur_group ; cur_group= cur_group->next)

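/*
  A standalone sketch (plain C++) of the normalization added to
  find_field_in_group_list() above: when identifiers compare case-insensitively
  the database name is copied into a local buffer, lower-cased there, and the
  copy is compared, so the original string stays untouched. The buffer size and
  helpers below stand in for NAME_LEN, my_casedn_str() and files_charset_info.
*/
#include <cctype>
#include <cstdio>
#include <cstring>

enum { NAME_BUF_LEN = 64 };            /* stand-in for NAME_LEN */

static bool db_names_equal(const char *a, const char *b, bool lower_case_names)
{
  char buf_a[NAME_BUF_LEN + 1], buf_b[NAME_BUF_LEN + 1];
  if (lower_case_names)
  {
    /* Work on copies; leave the originals untouched. */
    std::snprintf(buf_a, sizeof(buf_a), "%s", a);
    std::snprintf(buf_b, sizeof(buf_b), "%s", b);
    for (char *p= buf_a; *p; p++) *p= (char) std::tolower((unsigned char) *p);
    for (char *p= buf_b; *p; p++) *p= (char) std::tolower((unsigned char) *p);
    a= buf_a;
    b= buf_b;
  }
  return std::strcmp(a, b) == 0;       /* db_names_equal("Test","test",true) */
}
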
View file

@ -3249,7 +3249,10 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
}
if (agg_item_charsets(collation, func_name(),
args, arg_count, MY_COLL_ALLOW_CONV))
args,
/* skip charset aggregation for order columns */
arg_count - arg_count_order,
MY_COLL_ALLOW_CONV))
return 1;
result.set_charset(collation.collation);

View file

@ -772,81 +772,6 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs,
}
/*
Calculate difference between two datetime values as seconds + microseconds.
SYNOPSIS
calc_time_diff()
l_time1 - TIME/DATE/DATETIME value
l_time2 - TIME/DATE/DATETIME value
l_sign - 1 absolute values are subtracted,
-1 absolute values are added.
seconds_out - Out parameter where difference between
l_time1 and l_time2 in seconds is stored.
microseconds_out- Out parameter where microsecond part of difference
between l_time1 and l_time2 is stored.
NOTE
This function calculates difference between l_time1 and l_time2 absolute
values. So one should set l_sign and correct the result if one wants to take
signs into account (i.e. for TIME values).
RETURN VALUES
Returns sign of difference.
1 means negative result
0 means positive result
*/
static bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign,
longlong *seconds_out, long *microseconds_out)
{
long days;
bool neg;
longlong microseconds;
/*
We suppose that if first argument is MYSQL_TIMESTAMP_TIME
the second argument should be TIMESTAMP_TIME also.
We should check it before calc_time_diff call.
*/
if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value
days= (long)l_time1->day - l_sign * (long)l_time2->day;
else
{
days= calc_daynr((uint) l_time1->year,
(uint) l_time1->month,
(uint) l_time1->day);
if (l_time2->time_type == MYSQL_TIMESTAMP_TIME)
days-= l_sign * (long)l_time2->day;
else
days-= l_sign*calc_daynr((uint) l_time2->year,
(uint) l_time2->month,
(uint) l_time2->day);
}
microseconds= ((longlong)days*LL(86400) +
(longlong)(l_time1->hour*3600L +
l_time1->minute*60L +
l_time1->second) -
l_sign*(longlong)(l_time2->hour*3600L +
l_time2->minute*60L +
l_time2->second)) * LL(1000000) +
(longlong)l_time1->second_part -
l_sign*(longlong)l_time2->second_part;
neg= 0;
if (microseconds < 0)
{
microseconds= -microseconds;
neg= 1;
}
*seconds_out= microseconds/1000000L;
*microseconds_out= (long) (microseconds%1000000L);
return neg;
}
longlong Item_func_period_add::val_int()
{
DBUG_ASSERT(fixed == 1);
@ -2031,16 +1956,13 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date)
INTERVAL interval;
if (args[0]->get_date(ltime, TIME_NO_ZERO_DATE) ||
get_interval_value(args[1],int_type,&value,&interval))
goto null_date;
get_interval_value(args[1], int_type, &value, &interval))
return (null_value=1);
if (date_sub_interval)
interval.neg = !interval.neg;
return (null_value= date_add_interval(ltime, int_type, interval));
null_date:
return (null_value=1);
}
@ -2168,7 +2090,7 @@ longlong Item_extract::val_int()
switch (int_type) {
case INTERVAL_YEAR: return ltime.year;
case INTERVAL_YEAR_MONTH: return ltime.year*100L+ltime.month;
case INTERVAL_QUARTER: return ltime.month/3 + 1;
case INTERVAL_QUARTER: return (ltime.month+2)/3;
case INTERVAL_MONTH: return ltime.month;
case INTERVAL_WEEK:
{

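/*
  A standalone check of the quarter formula changed above: (month + 2) / 3 maps
  months 1-3 to quarter 1, 4-6 to quarter 2 and so on, whereas the old
  month/3 + 1 wrongly pushed months 3, 6, 9 and 12 into the following quarter.
*/
#include <cassert>

static int quarter_of(int month)                 /* month in 1..12 */
{
  return (month + 2) / 3;
}

int main()
{
  assert(quarter_of(1) == 1 && quarter_of(3) == 1);  /* old formula gave 2 for month 3 */
  assert(quarter_of(4) == 2 && quarter_of(12) == 4);
  return 0;
}
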
View file

@ -396,6 +396,7 @@ static SYMBOL symbols[] = {
{ "PASSWORD", SYM(PASSWORD)},
{ "PHASE", SYM(PHASE_SYM)},
{ "PLUGIN", SYM(PLUGIN_SYM)},
{ "PLUGINS", SYM(PLUGINS_SYM)},
{ "POINT", SYM(POINT_SYM)},
{ "POLYGON", SYM(POLYGON)},
{ "PRECISION", SYM(PRECISION)},

View file

@ -68,20 +68,20 @@ TODO:
#include "mysql_priv.h"
#include <hash.h>
#include "ha_myisammrg.h"
#ifndef MASTER
#include "../srclib/myisammrg/myrg_def.h"
#else
#include "../storage/myisammrg/myrg_def.h"
#endif
#include <assert.h>
extern HASH open_cache;
/* flags for get_lock_data */
#define GET_LOCK_UNLOCK 1
#define GET_LOCK_STORE_LOCKS 2
static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count,
bool unlock, TABLE **write_locked);
uint flags, TABLE **write_locked);
static int lock_external(THD *thd, TABLE **table,uint count);
static int unlock_external(THD *thd, TABLE **table,uint count);
static void print_lock_error(int error, const char *);
/*
Lock tables.
@ -122,7 +122,8 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count,
for (;;)
{
if (!(sql_lock = get_lock_data(thd,tables,count, 0,&write_lock_used)))
if (! (sql_lock= get_lock_data(thd, tables, count, GET_LOCK_STORE_LOCKS,
&write_lock_used)))
break;
if (global_read_lock && write_lock_used &&
@ -156,7 +157,12 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count,
thd->proc_info="Table lock";
DBUG_PRINT("info", ("thd->proc_info %s", thd->proc_info));
thd->locked=1;
rc= thr_lock_errno_to_mysql[(int) thr_multi_lock(sql_lock->locks,
/* Copy the lock data array. thr_multi_lock() reorders its contents. */
memcpy(sql_lock->locks + sql_lock->lock_count, sql_lock->locks,
sql_lock->lock_count * sizeof(*sql_lock->locks));
/* Lock on the copied half of the lock data array. */
rc= thr_lock_errno_to_mysql[(int) thr_multi_lock(sql_lock->locks +
sql_lock->lock_count,
sql_lock->lock_count,
thd->lock_id)];
if (rc > 1) /* a timeout or a deadlock */
@ -269,7 +275,8 @@ void mysql_unlock_some_tables(THD *thd, TABLE **table,uint count)
{
MYSQL_LOCK *sql_lock;
TABLE *write_lock_used;
if ((sql_lock = get_lock_data(thd, table, count, 1, &write_lock_used)))
if ((sql_lock= get_lock_data(thd, table, count, GET_LOCK_UNLOCK,
&write_lock_used)))
mysql_unlock_tables(thd, sql_lock);
}
@ -306,6 +313,7 @@ void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock)
TABLE **table=sql_lock->table;
for (i=found=0 ; i < sql_lock->table_count ; i++)
{
DBUG_ASSERT(sql_lock->table[i]->lock_position == i);
if ((uint) sql_lock->table[i]->reginfo.lock_type >= TL_WRITE_ALLOW_READ)
{
swap_variables(TABLE *, *table, sql_lock->table[i]);
@ -319,6 +327,17 @@ void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock)
VOID(unlock_external(thd,table,i-found));
sql_lock->table_count=found;
}
/* Fix the lock positions in TABLE */
table= sql_lock->table;
found= 0;
for (i= 0; i < sql_lock->table_count; i++)
{
TABLE *tbl= *table;
tbl->lock_position= table - sql_lock->table;
tbl->lock_data_start= found;
found+= tbl->lock_count;
table++;
}
DBUG_VOID_RETURN;
}
@ -334,20 +353,51 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table)
{
if (locked->table[i] == table)
{
locked->table_count--;
uint j, removed_locks, old_tables;
TABLE *tbl;
uint lock_data_end;
DBUG_ASSERT(table->lock_position == i);
/* Decrement table_count in advance, making below expressions easier */
old_tables= --locked->table_count;
/* The table has 'removed_locks' lock data elements in locked->locks */
removed_locks= table->lock_count;
/* Move down all table pointers above 'i'. */
bmove((char*) (locked->table+i),
(char*) (locked->table+i+1),
(locked->table_count-i)* sizeof(TABLE*));
(old_tables - i) * sizeof(TABLE*));
lock_data_end= table->lock_data_start + table->lock_count;
/* Move down all lock data pointers above 'table->lock_data_end-1' */
bmove((char*) (locked->locks + table->lock_data_start),
(char*) (locked->locks + lock_data_end),
(locked->lock_count - lock_data_end) *
sizeof(THR_LOCK_DATA*));
/*
Fix moved table elements.
lock_position is the index in the 'locked->table' array;
it must be decremented by one.
table->lock_data_start is the offset of this table's lock data
in the 'locked->locks' array; it must be decremented by 'removed_locks',
the lock data count of the removed table.
*/
for (j= i ; j < old_tables; j++)
{
tbl= locked->table[j];
tbl->lock_position--;
DBUG_ASSERT(tbl->lock_position == j);
tbl->lock_data_start-= removed_locks;
}
/* Finally adjust lock_count. */
locked->lock_count-= removed_locks;
break;
}
}
THR_LOCK_DATA **prev=locked->locks;
for (i=0 ; i < locked->lock_count ; i++)
{
if (locked->locks[i]->type != TL_UNLOCK)
*prev++ = locked->locks[i];
}
locked->lock_count=(uint) (prev - locked->locks);
}
}
@ -375,7 +425,8 @@ void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock)
TABLE *write_lock_used;
DBUG_ENTER("mysql_lock_abort");
if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
if ((locked= get_lock_data(thd, &table, 1, GET_LOCK_UNLOCK,
&write_lock_used)))
{
for (uint i=0; i < locked->lock_count; i++)
thr_abort_locks(locked->locks[i]->lock, upgrade_lock);
@ -405,7 +456,8 @@ bool mysql_lock_abort_for_thread(THD *thd, TABLE *table)
bool result= FALSE;
DBUG_ENTER("mysql_lock_abort_for_thread");
if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
if ((locked= get_lock_data(thd, &table, 1, GET_LOCK_UNLOCK,
&write_lock_used)))
{
for (uint i=0; i < locked->lock_count; i++)
{
@ -422,7 +474,9 @@ bool mysql_lock_abort_for_thread(THD *thd, TABLE *table)
MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b)
{
MYSQL_LOCK *sql_lock;
TABLE **table, **end_table;
DBUG_ENTER("mysql_lock_merge");
if (!(sql_lock= (MYSQL_LOCK*)
my_malloc(sizeof(*sql_lock)+
sizeof(THR_LOCK_DATA*)*(a->lock_count+b->lock_count)+
@ -438,6 +492,21 @@ MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b)
memcpy(sql_lock->table,a->table,a->table_count*sizeof(*a->table));
memcpy(sql_lock->table+a->table_count,b->table,
b->table_count*sizeof(*b->table));
/*
Now adjust lock_position and lock_data_start for all objects that were
moved from 'b' (since all objects from 'a' now come before them).
*/
for (table= sql_lock->table + a->table_count,
end_table= table + b->table_count;
table < end_table;
table++)
{
(*table)->lock_position+= a->table_count;
(*table)->lock_data_start+= a->lock_count;
}
/* Delete old, not needed locks */
my_free((gptr) a,MYF(0));
my_free((gptr) b,MYF(0));
DBUG_RETURN(sql_lock);
@ -456,112 +525,96 @@ MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b)
NOTE
This is mainly meant for MERGE tables in INSERT ... SELECT
situations. The 'real', underlying tables can be found only after
the table is opened. The easier way is to check this after the
tables are locked.
the MERGE tables are opened. This function assumes that the tables are
already locked.
Temporary tables are ignored here like they are ignored in
get_lock_data(). If we allow two opens on temporary tables later,
both functions should be checked.
RETURN
1 A table from 'tables' matches a lock on 'table'.
0 No duplicate lock is present.
-1 Error.
NULL No duplicate lock found.
! NULL First table from 'haystack' that matches a lock on 'needle'.
*/
TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle,
TABLE_LIST *haystack)
{
uint count;
uint dup_pos;
TABLE *write_lock_used; /* dummy */
TABLE **tables1;
TABLE **tables2;
TABLE **table_ptr;
TABLE_LIST *tlist_ptr;
MYSQL_LOCK *sql_lock1;
MYSQL_LOCK *sql_lock2;
THR_LOCK_DATA **lock_data1;
THR_LOCK_DATA **end_data1;
MYSQL_LOCK *mylock;
TABLE **lock_tables;
TABLE *table;
TABLE *table2;
THR_LOCK_DATA **lock_locks;
THR_LOCK_DATA **table_lock_data;
THR_LOCK_DATA **end_data;
THR_LOCK_DATA **lock_data2;
THR_LOCK_DATA **end_data2;
THR_LOCK *lock1;
DBUG_ENTER("mysql_lock_have_duplicate");
/* Table may not be defined for derived or view tables. */
if (! needle->table)
DBUG_RETURN(NULL);
/* Get lock(s) for needle. */
tables1= &needle->table;
if (! (sql_lock1= get_lock_data(thd, tables1, 1, 1, &write_lock_used)))
goto err0;
/* Count real tables in list. */
count=0;
for (tlist_ptr = haystack; tlist_ptr; tlist_ptr= tlist_ptr->next_global)
if (! tlist_ptr->placeholder() && ! tlist_ptr->schema_table)
count++;
/* Allocate a table array. */
if (! (tables2= (TABLE**) sql_alloc(sizeof(TABLE*) * count)))
goto err1;
table_ptr= tables2;
/* Assign table pointers. */
for (tlist_ptr = haystack; tlist_ptr; tlist_ptr= tlist_ptr->next_global)
if (! tlist_ptr->placeholder() && ! tlist_ptr->schema_table)
*(table_ptr++)= tlist_ptr->table;
/* Get lock(s) for haystack. */
if (! (sql_lock2= get_lock_data(thd, tables2, count, 1, &write_lock_used)))
goto err1;
/* Initialize duplicate position to an impossible value. */
dup_pos= UINT_MAX;
/*
Find a duplicate lock.
In case of merge tables, sql_lock1 can have more than 1 lock.
Table may not be defined for derived or view tables.
Table may not be part of a lock for delayed operations.
*/
for (lock_data1= sql_lock1->locks,
end_data1= lock_data1 + sql_lock1->lock_count;
lock_data1 < end_data1;
lock_data1++)
if (! (table= needle->table) || ! table->lock_count)
goto end;
/* A temporary table does not have locks. */
if (table->s->tmp_table == TMP_TABLE)
goto end;
/* Get command lock or LOCK TABLES lock. Maybe empty for INSERT DELAYED. */
if (! (mylock= thd->lock ? thd->lock : thd->locked_tables))
goto end;
/* If we have less than two tables, we cannot have duplicates. */
if (mylock->table_count < 2)
goto end;
lock_locks= mylock->locks;
lock_tables= mylock->table;
/* Prepare table related variables that don't change in loop. */
DBUG_ASSERT((table->lock_position < mylock->table_count) &&
(table == lock_tables[table->lock_position]));
table_lock_data= lock_locks + table->lock_data_start;
end_data= table_lock_data + table->lock_count;
for (; haystack; haystack= haystack->next_global)
{
lock1= (*lock_data1)->lock;
for (lock_data2= sql_lock2->locks,
end_data2= lock_data2 + sql_lock2->lock_count;
if (haystack->placeholder() || haystack->schema_table)
continue;
table2= haystack->table;
if (table2->s->tmp_table == TMP_TABLE)
continue;
/* All tables in list must be in lock. */
DBUG_ASSERT((table2->lock_position < mylock->table_count) &&
(table2 == lock_tables[table2->lock_position]));
for (lock_data2= lock_locks + table2->lock_data_start,
end_data2= lock_data2 + table2->lock_count;
lock_data2 < end_data2;
lock_data2++)
{
if ((*lock_data2)->lock == lock1)
THR_LOCK_DATA **lock_data;
THR_LOCK *lock2= (*lock_data2)->lock;
for (lock_data= table_lock_data;
lock_data < end_data;
lock_data++)
{
DBUG_PRINT("ingo", ("duplicate lock found"));
/* Change duplicate position to the real value. */
dup_pos= lock_data2 - sql_lock2->locks;
goto end;
if ((*lock_data)->lock == lock2)
{
DBUG_PRINT("info", ("haystack match: '%s'", haystack->table_name));
DBUG_RETURN(haystack);
}
}
}
}
end:
tlist_ptr= NULL; /* In case that no duplicate was found. */
if (dup_pos != UINT_MAX)
{
/* Duplicate found. Search the matching TABLE_LIST object. */
count= 0;
for (tlist_ptr = haystack; tlist_ptr; tlist_ptr= tlist_ptr->next_global)
{
if (! tlist_ptr->placeholder() && ! tlist_ptr->schema_table)
{
count+= tlist_ptr->table->file->lock_count();
if (count > dup_pos)
break;
}
}
}
my_free((gptr) sql_lock2, MYF(0));
my_free((gptr) sql_lock1, MYF(0));
DBUG_RETURN(tlist_ptr);
err1:
my_free((gptr) sql_lock1, MYF(0));
err0:
/* This non-null but special value indicates error, if caller cares. */
DBUG_RETURN(needle);
DBUG_PRINT("info", ("no duplicate found"));
DBUG_RETURN(NULL);
}
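
/*
  A reduced standalone sketch (hypothetical types) of the duplicate check the
  rewritten mysql_lock_have_duplicate() performs above: each table remembers
  the slice [lock_data_start, lock_data_start + lock_count) of the already
  acquired lock array, and two tables clash when any entries of their slices
  point at the same underlying lock object (the same THR_LOCK in the server).
*/
#include <cstddef>

struct ThrLock { int dummy; };                   /* stand-in for THR_LOCK */

struct TableRef
{
  std::size_t lock_data_start;                   /* first slot in locks[] */
  std::size_t lock_count;                        /* number of slots */
};

static bool tables_share_a_lock(ThrLock *const *locks,
                                const TableRef &a, const TableRef &b)
{
  for (std::size_t i= a.lock_data_start; i < a.lock_data_start + a.lock_count; i++)
    for (std::size_t j= b.lock_data_start; j < b.lock_data_start + b.lock_count; j++)
      if (locks[i] == locks[j])                  /* same lock -> same table underneath */
        return true;
  return false;
}
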
@ -591,17 +644,27 @@ static int unlock_external(THD *thd, TABLE **table,uint count)
/*
** Get lock structures from table structs and initialize locks
Get lock structures from table structs and initialize locks
SYNOPSIS
get_lock_data()
thd Thread handler
table_ptr Pointer to tables that should be locked
flags One of:
GET_LOCK_UNLOCK: If we should send TL_IGNORE to
store lock
GET_LOCK_STORE_LOCKS: Store lock info in TABLE
write_lock_used Store pointer to last table with WRITE_ALLOW_WRITE
*/
static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
bool get_old_locks, TABLE **write_lock_used)
uint flags, TABLE **write_lock_used)
{
uint i,tables,lock_count;
MYSQL_LOCK *sql_lock;
THR_LOCK_DATA **locks;
TABLE **to;
THR_LOCK_DATA **locks, **locks_buf, **locks_start;
TABLE **to, **table_buf;
DBUG_ENTER("get_lock_data");
DBUG_PRINT("info", ("count %d", count));
@ -625,13 +688,20 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
DBUG_RETURN(0);
}
/*
Allocating twice the number of pointers for lock data for use in
thr_multi_lock(). This function reorders the lock data, but cannot
update the table values. So the second part of the array is copied
from the first part immediately before calling thr_multi_lock().
*/
if (!(sql_lock= (MYSQL_LOCK*)
my_malloc(sizeof(*sql_lock)+
sizeof(THR_LOCK_DATA*)*tables+sizeof(table_ptr)*lock_count,
my_malloc(sizeof(*sql_lock) +
sizeof(THR_LOCK_DATA*) * tables * 2 +
sizeof(table_ptr) * lock_count,
MYF(0))))
DBUG_RETURN(0);
locks=sql_lock->locks=(THR_LOCK_DATA**) (sql_lock+1);
to=sql_lock->table=(TABLE**) (locks+tables);
locks= locks_buf= sql_lock->locks= (THR_LOCK_DATA**) (sql_lock + 1);
to= table_buf= sql_lock->table= (TABLE**) (locks + tables * 2);
sql_lock->table_count=lock_count;
sql_lock->lock_count=tables;
DBUG_PRINT("info", ("sql_lock->table_count %d sql_lock->lock_count %d",
@ -640,10 +710,11 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
for (i=0 ; i < count ; i++)
{
TABLE *table;
enum thr_lock_type lock_type;
if ((table=table_ptr[i])->s->tmp_table == TMP_TABLE)
continue;
*to++=table;
enum thr_lock_type lock_type= table->reginfo.lock_type;
lock_type= table->reginfo.lock_type;
if (lock_type >= TL_WRITE_ALLOW_WRITE)
{
*write_lock_used=table;
@ -655,8 +726,17 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
}
}
THR_LOCK_DATA **org_locks = locks;
locks=table->file->store_lock(thd, locks, get_old_locks ? TL_IGNORE :
lock_type);
locks_start= locks;
locks= table->file->store_lock(thd, locks,
(flags & GET_LOCK_UNLOCK) ? TL_IGNORE :
lock_type);
if (flags & GET_LOCK_STORE_LOCKS)
{
table->lock_position= (uint) (to - table_buf);
table->lock_data_start= (uint) (locks_start - locks_buf);
table->lock_count= (uint) (locks - locks_start);
}
*to++= table;
if (locks)
for ( ; org_locks != locks ; org_locks++)
(*org_locks)->debug_print_param= (void *) table;
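
/*
  An illustrative standalone sketch of the allocation trick described in the
  comment inside get_lock_data() above: the locking routine may reorder the
  pointer array it is given, so the array is allocated with twice the needed
  slots and the second half is re-copied from the stable first half right
  before every call. lock_all() stands in for thr_multi_lock().
*/
#include <algorithm>
#include <cstring>
#include <vector>

struct LockData { int priority; };

static void lock_all(LockData **data, std::size_t count)
{
  /* Emulate a routine that reorders its input while acquiring locks. */
  std::sort(data, data + count,
            [](LockData *a, LockData *b) { return a->priority < b->priority; });
}

/* locks_2x must have been allocated with 2 * lock_count slots. */
static void lock_with_stable_copy(std::vector<LockData*> &locks_2x,
                                  std::size_t lock_count)
{
  /* The first half keeps the per-table order; work on the second half. */
  std::memcpy(&locks_2x[lock_count], &locks_2x[0],
              lock_count * sizeof(LockData*));
  lock_all(&locks_2x[lock_count], lock_count);
}
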

View file

@ -1549,6 +1549,8 @@ void make_truncated_value_warning(THD *thd, const char *str_val,
const char *field_name);
bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval);
bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign,
longlong *seconds_out, long *microseconds_out);
extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type,
const char *format_str,

View file

@ -2296,8 +2296,6 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
RANGE_OPT_PARAM *range_par= &prune_param.range_param;
prune_param.part_info= part_info;
prune_param.part_iter.has_null_value= FALSE;
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
range_par->mem_root= &alloc;
range_par->old_root= thd->mem_root;
@ -2730,7 +2728,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
key_tree->min_flag | key_tree->max_flag,
&ppar->part_iter);
if (!res)
goto go_right; /* res=0 --> no satisfying partitions */
goto go_right; /* res==0 --> no satisfying partitions */
if (res == -1)
{
//get a full range iterator

View file

@ -96,8 +96,14 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
*/
for (TABLE_LIST *tl= tables; tl; tl= tl->next_leaf)
{
TABLE_LIST *embedded;
for (embedded= tl ; embedded; embedded= embedded->embedding)
{
if (embedded->on_expr)
break;
}
if (embedded)
/* Don't replace expression on a table that is part of an outer join */
if (tl->on_expr)
{
outer_tables|= tl->table->map;
@ -117,8 +123,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
If the storage manager of 'tl' gives exact row count, compute the total
number of rows. If there are no outer table dependencies, this count
may be used as the real count.
Schema tables are filled after this function is invoked, so we can't
get the row count
*/
if (tl->table->file->table_flags() & HA_NOT_EXACT_COUNT)
if ((tl->table->file->table_flags() & HA_NOT_EXACT_COUNT) ||
tl->schema_table)
{
is_exact_count= FALSE;
count= 1; // ensure count != 0
@ -143,31 +152,15 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
switch (item_sum->sum_func()) {
case Item_sum::COUNT_FUNC:
/*
If the expr in count(expr) can never be null we can change this
If the expr in COUNT(expr) can never be null we can change this
to the number of rows in the tables if this number is exact and
there are no outer joins.
*/
if (!conds && !((Item_sum_count*) item)->args[0]->maybe_null &&
!outer_tables && is_exact_count)
{
longlong count= 1;
TABLE_LIST *table;
for (table= tables; table; table= table->next_leaf)
{
if (outer_tables || (table->table->file->table_flags() &
HA_NOT_EXACT_COUNT) || table->schema_table)
{
const_result= 0; // Can't optimize left join
break;
}
tables->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
count*= table->table->file->records;
}
if (!table)
{
((Item_sum_count*) item)->make_const(count);
recalc_const_item= 1;
}
((Item_sum_count*) item)->make_const(count);
recalc_const_item= 1;
}
else
const_result= 0;

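/*
  A standalone sketch (hypothetical struct names) of the check added at the top
  of this hunk: a table counts as being inside an outer join if the table
  itself or any join nest it is embedded in carries an ON expression, so the
  test has to walk the embedding chain instead of looking at the table alone.
*/
struct JoinNest
{
  const JoinNest *embedding;           /* enclosing join nest, NULL at top level */
  bool has_on_expr;                    /* stand-in for TABLE_LIST::on_expr != NULL */
};

static bool inside_outer_join(const JoinNest *tl)
{
  for (const JoinNest *e= tl; e; e= e->embedding)
    if (e->has_on_expr)
      return true;
  return false;
}
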
View file

@ -267,7 +267,7 @@ uint32 get_next_partition_id_range(struct st_partition_iter* part_iter);
static inline void init_single_partition_iterator(uint32 part_id,
PARTITION_ITERATOR *part_iter)
{
part_iter->part_nums.start= part_id;
part_iter->part_nums.start= part_iter->part_nums.cur= part_id;
part_iter->part_nums.end= part_id+1;
part_iter->get_next= get_next_partition_id_range;
}
@ -277,7 +277,7 @@ static inline
void init_all_partitions_iterator(partition_info *part_info,
PARTITION_ITERATOR *part_iter)
{
part_iter->part_nums.start= 0;
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
part_iter->part_nums.end= part_info->no_parts;
part_iter->get_next= get_next_partition_id_range;
}

View file

@ -115,7 +115,10 @@ To make maintaining easier please:
<alias>l1</alias>
<alias>latin1</alias>
<collation name="latin1_german1_ci" id="5" order="German Duden"/>
<collation name="latin1_swedish_ci" id="8" order="Finnish, Swedish" flag="primary"/>
<collation name="latin1_swedish_ci" id="8" order="Finnish, Swedish">
<flag>primary</flag>
<flag>compiled</flag>
</collation>
<collation name="latin1_danish_ci" id="15" order="Danish"/>
<collation name="latin1_german2_ci" id="31" order="German Phonebook" flag="compiled"/>
<collation name="latin1_spanish_ci" id="94" order="Spanish"/>

View file

@ -33,14 +33,14 @@
01 01 01 01 01 01 01 01 01 01 01 10 10 10 10 10
10 82 82 82 82 82 82 02 02 02 02 02 02 02 02 02
02 02 02 02 02 02 02 02 02 02 02 10 10 10 10 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
10 00 10 02 10 10 10 10 10 10 01 10 01 00 01 00
00 10 10 10 10 10 10 10 10 10 02 10 02 00 02 01
48 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01
01 01 01 01 01 01 01 00 01 01 01 01 01 01 01 02
01 01 01 01 01 01 01 10 01 01 01 01 01 01 01 02
02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02
02 02 02 02 02 02 02 00 02 02 02 02 02 02 02 02
02 02 02 02 02 02 02 10 02 02 02 02 02 02 02 02
</map>
</ctype>
@ -99,8 +99,8 @@
0050 0051 0052 0053 0054 0055 0056 0057 0058 0059 005A 005B 005C 005D 005E 005F
0060 0061 0062 0063 0064 0065 0066 0067 0068 0069 006A 006B 006C 006D 006E 006F
0070 0071 0072 0073 0074 0075 0076 0077 0078 0079 007A 007B 007C 007D 007E 007F
0080 0081 0082 0083 0084 0085 0086 0087 0088 0089 008A 008B 008C 008D 008E 008F
0090 0091 0092 0093 0094 0095 0096 0097 0098 0099 009A 009B 009C 009D 009E 009F
20AC 0081 201A 0192 201E 2026 2020 2021 02C6 2030 0160 2039 0152 008D 017D 008F
0090 2018 2019 201C 201D 2022 2013 2014 02DC 2122 0161 203A 0153 009D 017E 0178
00A0 00A1 00A2 00A3 00A4 00A5 00A6 00A7 00A8 00A9 00AA 00AB 00AC 00AD 00AE 00AF
00B0 00B1 00B2 00B3 00B4 00B5 00B6 00B7 00B8 00B9 00BA 00BB 00BC 00BD 00BE 00BF
00C0 00C1 00C2 00C3 00C4 00C5 00C6 00C7 00C8 00C9 00CA 00CB 00CC 00CD 00CE 00CF

View file

@ -916,8 +916,7 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
DBUG_RETURN(TRUE);
}
if (!ha_check_storage_engine_flag(ha_resolve_by_legacy_type(thd, table_type),
HTON_CAN_RECREATE)
|| thd->lex->sphead)
HTON_CAN_RECREATE))
goto trunc_by_del;
if (lock_and_wait_for_table_name(thd, table_list))

View file

@ -1571,7 +1571,10 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
/* Adjust in_use for pointing to client thread */
copy->in_use= client_thd;
/* Adjust lock_count. This table object is not part of a lock. */
copy->lock_count= 0;
return copy;
/* Got fatal error */

View file

@ -706,6 +706,7 @@ typedef class st_select_lex SELECT_LEX;
#define ALTER_CHECK_PARTITION (1L << 23)
#define ALTER_REPAIR_PARTITION (1L << 24)
#define ALTER_REMOVE_PARTITIONING (1L << 25)
#define ALTER_FOREIGN_KEY (1L << 26)
typedef struct st_alter_info
{

View file

@ -3387,7 +3387,7 @@ end_with_restore_list:
Don't allow this within a transaction because we want to use
re-generate table
*/
if ((thd->locked_tables && !lex->sphead) || thd->active_transaction())
if (thd->locked_tables || thd->active_transaction())
{
my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));

View file

@ -5269,7 +5269,7 @@ static void set_up_range_analysis_info(partition_info *part_info)
}
/*
Check get_part_iter_for_interval_via_walking() can be used for
Check if get_part_iter_for_interval_via_walking() can be used for
partitioning
*/
if (part_info->no_part_fields == 1)
@ -5291,7 +5291,7 @@ static void set_up_range_analysis_info(partition_info *part_info)
setup_subparts:
/*
Check get_part_iter_for_interval_via_walking() can be used for
Check if get_part_iter_for_interval_via_walking() can be used for
subpartitioning
*/
if (part_info->no_subpart_fields == 1)
@ -5374,40 +5374,47 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
max_endpoint_val= part_info->no_list_values;
part_iter->get_next= get_next_partition_id_list;
part_iter->part_info= part_info;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
}
else
DBUG_ASSERT(0);
if (field->real_maybe_null() && part_info->has_null_value)
/*
Find minimum: do special handling if the interval has a left bound of the form
" NULL <= X ":
*/
if (field->real_maybe_null() && part_info->has_null_value &&
!(flags & (NO_MIN_RANGE | NEAR_MIN)) && *min_value)
{
if (*min_value)
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
if (*max_value && !(flags & NO_MAX_RANGE))
{
if (*max_value && !(flags & (NO_MIN_RANGE | NO_MAX_RANGE)))
{
init_single_partition_iterator(part_info->has_null_part_id, part_iter);
return 1;
}
if (!(flags & NEAR_MIN))
part_iter->has_null_value= TRUE;
/* The right bound is X <= NULL, i.e. it is a "X IS NULL" interval */
part_iter->part_nums.end= 0;
return 1;
}
}
/* Find minimum */
if (flags & NO_MIN_RANGE)
part_iter->part_nums.start= 0;
else
{
/*
Store the interval edge in the record buffer, and call the
function that maps the edge in table-field space to an edge
in ordered-set-of-partitions (for RANGE partitioning) or
index-in-ordered-array-of-list-constants (for LIST) space.
*/
store_key_image_to_rec(field, min_value, field_len);
bool include_endp= part_info->range_analysis_include_bounds ||
!test(flags & NEAR_MIN);
part_iter->part_nums.start= get_endpoint(part_info, 1, include_endp);
if (part_iter->part_nums.start == max_endpoint_val)
return 0; /* No partitions */
if (flags & NO_MIN_RANGE)
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
else
{
/*
Store the interval edge in the record buffer, and call the
function that maps the edge in table-field space to an edge
in ordered-set-of-partitions (for RANGE partitioning) or
index-in-ordered-array-of-list-constants (for LIST) space.
*/
store_key_image_to_rec(field, min_value, field_len);
bool include_endp= part_info->range_analysis_include_bounds ||
!test(flags & NEAR_MIN);
part_iter->part_nums.start= get_endpoint(part_info, 1, include_endp);
part_iter->part_nums.cur= part_iter->part_nums.start;
if (part_iter->part_nums.start == max_endpoint_val)
return 0; /* No partitions */
}
}
/* Find maximum, do the same as above but for right interval bound */
@ -5419,7 +5426,8 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
bool include_endp= part_info->range_analysis_include_bounds ||
!test(flags & NEAR_MAX);
part_iter->part_nums.end= get_endpoint(part_info, 0, include_endp);
if (part_iter->part_nums.start== part_iter->part_nums.end)
if (part_iter->part_nums.start == part_iter->part_nums.end &&
!part_iter->ret_null_part)
return 0; /* No partitions */
}
return 1; /* Ok, iterator initialized */
@ -5534,8 +5542,13 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
return 0; /* No partitions match */
}
if (flags & (NO_MIN_RANGE | NO_MAX_RANGE))
if ((field->real_maybe_null() &&
((!(flags & NO_MIN_RANGE) && *min_value) || // NULL <? X
(!(flags & NO_MAX_RANGE) && *max_value))) || // X <? NULL
(flags & (NO_MIN_RANGE | NO_MAX_RANGE))) // -inf at any bound
{
return -1; /* Can't handle this interval, have to use all partitions */
}
/* Get integers for left and right interval bound */
longlong a, b;
@ -5553,7 +5566,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
if (n_values > total_parts || n_values > MAX_RANGE_TO_WALK)
return -1;
part_iter->field_vals.start= a;
part_iter->field_vals.start= part_iter->field_vals.cur= a;
part_iter->field_vals.end= b;
part_iter->part_info= part_info;
part_iter->get_next= get_next_func;
@ -5565,12 +5578,13 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
PARTITION_ITERATOR::get_next implementation: enumerate partitions in range
SYNOPSIS
get_next_partition_id_list()
get_next_partition_id_range()
part_iter Partition set iterator structure
DESCRIPTION
This is implementation of PARTITION_ITERATOR::get_next() that returns
[sub]partition ids in [min_partition_id, max_partition_id] range.
The function conforms to partition_iter_func type.
RETURN
partition id
@ -5579,10 +5593,13 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
{
if (part_iter->part_nums.start== part_iter->part_nums.end)
if (part_iter->part_nums.cur == part_iter->part_nums.end)
{
part_iter->part_nums.cur= part_iter->part_nums.start;
return NOT_A_PARTITION_ID;
}
else
return part_iter->part_nums.start++;
return part_iter->part_nums.cur++;
}
@ -5597,6 +5614,7 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
This implementation of PARTITION_ITERATOR::get_next() is special for
LIST partitioning: it enumerates partition ids in
part_info->list_array[i] where i runs over [min_idx, max_idx] interval.
The function conforms to partition_iter_func type.
RETURN
partition id
@ -5605,18 +5623,20 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
{
if (part_iter->part_nums.start == part_iter->part_nums.end)
if (part_iter->part_nums.cur == part_iter->part_nums.end)
{
if (part_iter->has_null_value)
if (part_iter->ret_null_part)
{
part_iter->has_null_value= FALSE;
part_iter->ret_null_part= FALSE;
return part_iter->part_info->has_null_part_id;
}
part_iter->part_nums.cur= part_iter->part_nums.start;
part_iter->ret_null_part= part_iter->ret_null_part_orig;
return NOT_A_PARTITION_ID;
}
else
return part_iter->part_info->list_array[part_iter->
part_nums.start++].partition_id;
part_nums.cur++].partition_id;
}
@ -5631,6 +5651,7 @@ uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
This implementation of PARTITION_ITERATOR::get_next() returns ids of
partitions that contain records with partitioning field value within
[start_val, end_val] interval.
The function conforms to partition_iter_func type.
RETURN
partition id
@ -5641,11 +5662,10 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
{
uint32 part_id;
Field *field= part_iter->part_info->part_field_array[0];
while (part_iter->field_vals.start != part_iter->field_vals.end)
while (part_iter->field_vals.cur != part_iter->field_vals.end)
{
field->store(part_iter->field_vals.start, FALSE);
part_iter->field_vals.start++;
longlong dummy;
field->store(part_iter->field_vals.cur++, FALSE);
if (part_iter->part_info->is_sub_partitioned() &&
!part_iter->part_info->get_part_partition_id(part_iter->part_info,
&part_id, &dummy) ||
@ -5653,6 +5673,9 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
&part_id, &dummy))
return part_id;
}
//psergey-todo: return partition(part_func(NULL)) here...
part_iter->field_vals.cur= part_iter->field_vals.start;
return NOT_A_PARTITION_ID;
}
@ -5663,10 +5686,12 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
{
uint32 part_id;
Field *field= part_iter->part_info->subpart_field_array[0];
if (part_iter->field_vals.start == part_iter->field_vals.end)
if (part_iter->field_vals.cur == part_iter->field_vals.end)
{
part_iter->field_vals.cur= part_iter->field_vals.start;
return NOT_A_PARTITION_ID;
field->store(part_iter->field_vals.start, FALSE);
part_iter->field_vals.start++;
}
field->store(part_iter->field_vals.cur++, FALSE);
return part_iter->part_info->get_subpartition_id(part_iter->part_info);
}
#endif
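
/*
  A compact standalone model of the iterator change made throughout this file:
  the new `cur` member advances during iteration while `start` stays fixed, and
  on reaching `end` the iterator returns a sentinel and rewinds itself so the
  next get_next() call starts the enumeration over. Names are illustrative.
*/
#include <cstdint>

static const uint32_t NOT_A_PARTITION= 0xFFFFFFFFu;

struct PartRangeIterator
{
  uint32_t start, cur, end;            /* cur is initialised to start */

  uint32_t get_next()
  {
    if (cur == end)
    {
      cur= start;                      /* auto-reset for the next full scan */
      return NOT_A_PARTITION;
    }
    return cur++;
  }
};

/* PartRangeIterator it{2, 2, 5}; yields 2, 3, 4, NOT_A_PARTITION, then 2 again. */
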

View file

@ -94,10 +94,20 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
/*
A "Get next" function for partition iterator.
SYNOPSIS
partition_iter_func()
part_iter Partition iterator, you call only "iter.get_next(&iter)"
DESCRIPTION
Depending on whether partitions or sub-partitions are iterated, the
function returns the next subpartition id / partition number. The sequence of
returned numbers is not ordered and may contain duplicates.
When the end of sequence is reached, NOT_A_PARTITION_ID is returned, and
the iterator resets itself (so next get_next() call will start to
enumerate the set all over again).
RETURN
NOT_A_PARTITION_ID if there are no more partitions.
[sub]partition_id of the next partition
@ -124,16 +134,22 @@ typedef uint32 (*partition_iter_func)(st_partition_iter* part_iter);
typedef struct st_partition_iter
{
partition_iter_func get_next;
bool has_null_value;
/*
Valid for "Interval mapping" in LIST partitioning: if true, let the
iterator also produce the id of the partition that contains the NULL value.
*/
bool ret_null_part, ret_null_part_orig;
struct st_part_num_range
{
uint32 start;
uint32 cur;
uint32 end;
};
struct st_field_value_range
{
longlong start;
longlong cur;
longlong end;
};

View file

@ -3980,7 +3980,7 @@ fill_events_copy_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
if (!(!wild || !wild[0] || !wild_compare(et.name.str, wild, 0)))
DBUG_RETURN(0);
//->field[0] is EVENT_CATALOG and is by default NULL
/* ->field[0] is EVENT_CATALOG and is by default NULL */
sch_table->field[1]->store(et.dbname.str, et.dbname.length, scs);
sch_table->field[2]->store(et.name.str, et.name.length, scs);
@ -4000,12 +4000,9 @@ fill_events_copy_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
if (et.expression)
{
String show_str;
//type
/* type */
sch_table->field[5]->store(STRING_WITH_LEN("RECURRING"), scs);
/* execute_at */
sch_table->field[6]->set_null();
/* interval_value */
//interval_type
if (event_reconstruct_interval_expression(&show_str, et.interval,
et.expression))
DBUG_RETURN(1);
@ -4058,9 +4055,10 @@ fill_events_copy_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
sch_table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
if (et.last_executed.year)
{
sch_table->field[16]->set_notnull();
sch_table->field[16]->store_time(&et.last_executed,MYSQL_TIMESTAMP_DATETIME);
else
sch_table->field[16]->set_null();
}
sch_table->field[17]->store(et.comment.str, et.comment.length, scs);

View file

@ -3796,7 +3796,7 @@ static uint compare_tables(TABLE *table, List<create_field> *create_list,
create_info->used_fields & HA_CREATE_USED_ENGINE ||
create_info->used_fields & HA_CREATE_USED_CHARSET ||
create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET ||
(alter_info->flags & ALTER_RECREATE) ||
(alter_info->flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) ||
order_num)
DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);

View file

@ -1229,6 +1229,11 @@ multi_update::initialize_tables(JOIN *join)
Field_string offset(table->file->ref_length, 0, "offset",
&my_charset_bin);
offset.init(table);
/*
The field would be converted to a varstring when the tmp table is created if
the table being updated was created by MySQL 4.1. Prevent this.
*/
offset.can_alter_field_type= 0;
if (!(ifield= new Item_field(((Field *) &offset))))
DBUG_RETURN(1);
ifield->maybe_null= 0;

View file

@ -501,6 +501,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token PARAM_MARKER
%token PHASE_SYM
%token PLUGIN_SYM
%token PLUGINS_SYM
%token POINTFROMTEXT
%token POINT_SYM
%token POLYFROMTEXT
@ -4167,6 +4168,9 @@ key_def:
HA_KEY_ALG_UNDEF, 1,
lex->col_list));
lex->col_list.empty(); /* Alloced by sql_alloc */
/* Only used for ALTER TABLE. Ignored otherwise. */
lex->alter_info.flags|= ALTER_FOREIGN_KEY;
}
| constraint opt_check_constraint
{
@ -5137,7 +5141,7 @@ alter_list_item:
}
| DROP FOREIGN KEY_SYM opt_ident
{
Lex->alter_info.flags|= ALTER_DROP_INDEX;
Lex->alter_info.flags|= ALTER_DROP_INDEX | ALTER_FOREIGN_KEY;
}
| DROP PRIMARY_SYM KEY_SYM
{
@ -8170,6 +8174,15 @@ show_param:
YYABORT;
}
| PLUGIN_SYM
{
LEX *lex= Lex;
WARN_DEPRECATED(yythd, "5.2", "SHOW PLUGIN", "'SHOW PLUGINS'");
lex->sql_command= SQLCOM_SELECT;
lex->orig_sql_command= SQLCOM_SHOW_PLUGINS;
if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS))
YYABORT;
}
| PLUGINS_SYM
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SELECT;
@ -9358,7 +9371,6 @@ keyword:
| OPEN_SYM {}
| PARSER_SYM {}
| PARTITION_SYM {}
| PLUGIN_SYM {}
| PREPARE_SYM {}
| REMOVE_SYM {}
| REPAIR {}
@ -9539,6 +9551,8 @@ keyword_sp:
| PARTITIONS_SYM {}
| PASSWORD {}
| PHASE_SYM {}
| PLUGIN_SYM {}
| PLUGINS_SYM {}
| POINT_SYM {}
| POLYGON {}
| PRESERVE_SYM {}
@ -9552,7 +9566,7 @@ keyword_sp:
| REBUILD_SYM {}
| RECOVER_SYM {}
| REDO_BUFFER_SIZE_SYM {}
| REDOFILE_SYM {}
| REDOFILE_SYM {}
| REDUNDANT_SYM {}
| RELAY_LOG_FILE_SYM {}
| RELAY_LOG_POS_SYM {}

View file

@ -26,8 +26,8 @@
void open_table_error(TABLE_SHARE *share, int error, int db_errno,
myf errortype, int errarg);
static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
File file);
static int open_binary_frm(THD *thd, TABLE_SHARE *share,
uchar *head, File file);
static void fix_type_pointers(const char ***array, TYPELIB *point_to_type,
uint types, char **names);
static uint find_field(Field **fields, uint start, uint length);
@ -717,8 +717,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
keyinfo->parser= plugin_lock(&parser_name, MYSQL_FTPARSER_PLUGIN);
if (! keyinfo->parser)
{
my_free(buff, MYF(0));
my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), parser_name.str);
my_free(buff, MYF(0));
goto err;
}
}

View file

@ -277,7 +277,10 @@ struct st_table {
*/
timestamp_auto_set_type timestamp_field_type;
table_map map; /* ID bit of table (1,2,4,8,16...) */
uint lock_position; /* Position in MYSQL_LOCK.table */
uint lock_data_start; /* Start pos. in MYSQL_LOCK.locks */
uint lock_count; /* Number of locks */
uint tablenr,used_fields;
uint temp_pool_slot; /* Used by intern temp tables */
uint status; /* What's in record[0] */
@ -286,8 +289,8 @@ struct st_table {
uint derived_select_number;
int current_lock; /* Type of lock on table */
my_bool copy_blobs; /* copy_blobs when storing */
/*
/*
0 or JOIN_TYPE_{LEFT|RIGHT}. Currently this is only compared to 0.
If maybe_null !=0, this table is inner w.r.t. some outer join operation,
and null_row may be true.

View file

@ -833,4 +833,80 @@ invalid_date:
}
/*
Calculate difference between two datetime values as seconds + microseconds.
SYNOPSIS
calc_time_diff()
l_time1 - TIME/DATE/DATETIME value
l_time2 - TIME/DATE/DATETIME value
l_sign - 1 absolute values are subtracted,
-1 absolute values are added.
seconds_out - Out parameter where difference between
l_time1 and l_time2 in seconds is stored.
microseconds_out- Out parameter where microsecond part of difference
between l_time1 and l_time2 is stored.
NOTE
This function calculates difference between l_time1 and l_time2 absolute
values. So one should set l_sign and correct the result if one wants to take
signs into account (i.e. for TIME values).
RETURN VALUES
Returns sign of difference.
1 means negative result
0 means positive result
*/
bool
calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, longlong *seconds_out,
long *microseconds_out)
{
long days;
bool neg;
longlong microseconds;
/*
We suppose that if first argument is MYSQL_TIMESTAMP_TIME
the second argument should be TIMESTAMP_TIME also.
We should check it before calc_time_diff call.
*/
if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value
days= (long)l_time1->day - l_sign * (long)l_time2->day;
else
{
days= calc_daynr((uint) l_time1->year,
(uint) l_time1->month,
(uint) l_time1->day);
if (l_time2->time_type == MYSQL_TIMESTAMP_TIME)
days-= l_sign * (long)l_time2->day;
else
days-= l_sign*calc_daynr((uint) l_time2->year,
(uint) l_time2->month,
(uint) l_time2->day);
}
microseconds= ((longlong)days*LL(86400) +
(longlong)(l_time1->hour*3600L +
l_time1->minute*60L +
l_time1->second) -
l_sign*(longlong)(l_time2->hour*3600L +
l_time2->minute*60L +
l_time2->second)) * LL(1000000) +
(longlong)l_time1->second_part -
l_sign*(longlong)l_time2->second_part;
neg= 0;
if (microseconds < 0)
{
microseconds= -microseconds;
neg= 1;
}
*seconds_out= microseconds/1000000L;
*microseconds_out= (long) (microseconds%1000000L);
return neg;
}
#endif
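
/*
  A self-contained illustration of what the relocated calc_time_diff() above
  computes: the absolute distance between two datetimes as whole seconds plus
  a microsecond remainder. day_number is a stand-in for calc_daynr(); the real
  function additionally handles pure TIME values, a sign argument, and returns
  whether the difference was negative.
*/
struct SimpleDatetime
{
  long day_number;                     /* days since some fixed epoch */
  int hour, minute, second;
  long microsecond;
};

static void datetime_diff(const SimpleDatetime &a, const SimpleDatetime &b,
                          long long *seconds_out, long *microseconds_out)
{
  long long us= ((long long) (a.day_number - b.day_number) * 86400LL +
                 (a.hour * 3600L + a.minute * 60L + a.second) -
                 (b.hour * 3600L + b.minute * 60L + b.second)) * 1000000LL +
                a.microsecond - b.microsecond;
  if (us < 0)
    us= -us;                           /* drop the sign, as the caller above does */
  *seconds_out= us / 1000000LL;
  *microseconds_out= (long) (us % 1000000LL);
}
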

View file

@ -49,7 +49,6 @@ TODO:
#include "mysql_priv.h"
#include "ha_tina.h"
#include <sys/mman.h>
#include <mysql/plugin.h>
@ -161,7 +160,7 @@ int get_mmap(TINA_SHARE *share, int write)
share->mapped_file= (byte *)my_mmap(NULL, share->file_stat.st_size,
PROT_READ, MAP_PRIVATE,
share->data_file, 0);
if ((share->mapped_file ==(caddr_t)-1))
if ((share->mapped_file == MAP_FAILED))
{
/*
Bad idea you think? See the problem is that nothing actually checks
@ -499,7 +498,7 @@ ha_tina::ha_tina(TABLE_SHARE *table_arg)
records_is_known(0)
{
/* Set our original buffers from pre-allocated memory */
buffer.set(byte_buffer, IO_SIZE, system_charset_info);
buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info);
chain= chain_buffer;
}
@ -877,7 +876,8 @@ int ha_tina::write_row(byte * buf)
size= encode_quote(buf);
if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
if (my_write(share->data_file, (byte*)buffer.ptr(), size,
MYF(MY_WME | MY_NABP)))
DBUG_RETURN(-1);
/*
@ -929,7 +929,8 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
if (chain_append())
DBUG_RETURN(-1);
if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
if (my_write(share->data_file, (byte*)buffer.ptr(), size,
MYF(MY_WME | MY_NABP)))
DBUG_RETURN(-1);
/* UPDATE should never happen on the log tables */
@ -1130,7 +1131,7 @@ int ha_tina::rnd_end()
if ((chain_ptr - chain) > 0)
{
tina_set *ptr;
off_t length;
size_t length;
/*
Setting up writable map, this will contain all of the data after the
@ -1154,15 +1155,16 @@ int ha_tina::rnd_end()
length= length - (size_t)(ptr->end - ptr->begin);
}
/* Truncate the file to the new size */
/* Unmap the file before the new size is set */
if (my_munmap(share->mapped_file, share->file_stat.st_size))
DBUG_RETURN(-1);
/* We set it to null so that get_mmap() won't try to unmap it */
share->mapped_file= NULL;
/* Set the file to the new size */
if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
DBUG_RETURN(-1);
if (my_munmap(share->mapped_file, length))
DBUG_RETURN(-1);
/* We set it to null so that get_mmap() won't try to unmap it */
share->mapped_file= NULL;
if (get_mmap(share, 0) > 0)
DBUG_RETURN(-1);
}
@ -1281,6 +1283,13 @@ int ha_tina::delete_all_rows()
if (!records_is_known)
DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND);
/* Unmap the file before the new size is set */
if (share->mapped_file && my_munmap(share->mapped_file,
share->file_stat.st_size))
DBUG_RETURN(-1);
share->mapped_file= NULL;
/* Truncate the file to zero size */
rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME));
if (get_mmap(share, 0) > 0)

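/*
  A standalone POSIX sketch of the ordering fix above: the file must be
  unmapped with its old length before it is truncated, and remapped only
  afterwards at the new length. ftruncate() stands in for my_chsize() and the
  error handling is reduced to returning -1.
*/
#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

static int shrink_mapped_file(int fd, void **map, size_t old_len, size_t new_len)
{
  /* 1. Unmap using the length the mapping was created with. */
  if (*map && munmap(*map, old_len))
    return -1;
  *map= 0;                             /* so a failed remap is not reused */

  /* 2. Only now is it safe to change the file size. */
  if (ftruncate(fd, (off_t) new_len))
    return -1;

  /* 3. Re-create the mapping at the new size (skip if the file is now empty). */
  if (new_len &&
      (*map= mmap(0, new_len, PROT_READ, MAP_PRIVATE, fd, 0)) == MAP_FAILED)
  {
    *map= 0;
    return -1;
  }
  return 0;
}
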
View file

@ -51,9 +51,9 @@ typedef struct st_tina_share {
ha_rows rows_recorded; /* Number of rows in tables */
} TINA_SHARE;
typedef struct tina_set {
off_t begin;
off_t end;
struct tina_set {
off_t begin;
off_t end;
};
class ha_tina: public handler

View file

@ -3,8 +3,7 @@
ADD_DEFINITIONS(-DMYSQL_SERVER -D_WIN32 -DWIN32 -D_LIB)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include include)
SET_SOURCE_FILES_PROPERTIES(ib_config.h PROPERTIES GENERATED 1)
ADD_LIBRARY(innobase ib_config.h btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
ADD_LIBRARY(innobase btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
buf/buf0buf.c buf/buf0flu.c buf/buf0lru.c buf/buf0rea.c
data/data0data.c data/data0type.c
dict/dict0boot.c dict/dict0crea.c dict/dict0dict.c dict/dict0load.c dict/dict0mem.c
@ -34,8 +33,3 @@ ADD_LIBRARY(innobase ib_config.h btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/
trx/trx0purge.c trx/trx0rec.c trx/trx0roll.c trx/trx0rseg.c trx/trx0sys.c trx/trx0trx.c trx/trx0undo.c
usr/usr0sess.c
ut/ut0byte.c ut/ut0dbg.c ut/ut0mem.c ut/ut0rnd.c ut/ut0ut.c)
ADD_CUSTOM_COMMAND(
OUTPUT ib_config.h
COMMAND echo /*Generated file*/ > ib_config.h
)

View file

@ -160,6 +160,7 @@ static int FTB_WORD_cmp_list(CHARSET_INFO *cs, FTB_WORD **a, FTB_WORD **b)
typedef struct st_my_ftb_param
{
MYSQL_FTPARSER_PARAM *up;
FTB *ftb;
FTB_EXPR *ftbe;
byte *up_quot;
@ -280,7 +281,7 @@ static int ftb_parse_query_internal(void *param, char *query, int len)
info.prev= ' ';
info.quot= 0;
while (ft_get_word(cs, start, end, &w, &info))
ftb_query_add_word(param, w.pos, w.len, &info);
ftb_param->up->mysql_add_word(param, w.pos, w.len, &info);
return(0);
}
@ -295,14 +296,15 @@ static void _ftb_parse_query(FTB *ftb, byte *query, uint len,
if (ftb->state != UNINITIALIZED)
DBUG_VOID_RETURN;
if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr, 0)))
DBUG_VOID_RETURN;
ftb_param.up= param;
ftb_param.ftb= ftb;
ftb_param.depth= 0;
ftb_param.ftbe= ftb->root;
ftb_param.up_quot= 0;
if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr)))
DBUG_VOID_RETURN;
param->mysql_parse= ftb_parse_query_internal;
param->mysql_add_word= ftb_query_add_word;
param->mysql_ftparam= (void *)&ftb_param;
@ -313,7 +315,7 @@ static void _ftb_parse_query(FTB *ftb, byte *query, uint len,
parser->parse(param);
DBUG_VOID_RETURN;
}
static int _ftb_no_dupes_cmp(void* not_used __attribute__((unused)),
const void *a,const void *b)
@ -569,6 +571,7 @@ err:
typedef struct st_my_ftb_phrase_param
{
MYSQL_FTPARSER_PARAM *up;
LIST *phrase;
LIST *document;
CHARSET_INFO *cs;
@ -615,7 +618,7 @@ static int ftb_check_phrase_internal(void *param, char *document, int len)
const char *docend= document + len;
while (ft_simple_get_word(phrase_param->cs, &document, docend, &word, FALSE))
{
ftb_phrase_add_word(param, word.pos, word.len, 0);
phrase_param->up->mysql_add_word(param, word.pos, word.len, 0);
if (phrase_param->match)
return 1;
}
@ -644,8 +647,11 @@ static int _ftb_check_phrase(FTB *ftb, const byte *document, uint len,
MYSQL_FTPARSER_PARAM *param;
DBUG_ENTER("_ftb_check_phrase");
DBUG_ASSERT(parser);
if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr)))
if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr, 1)))
DBUG_RETURN(0);
ftb_param.up= param;
ftb_param.phrase= ftbe->phrase;
ftb_param.document= ftbe->document;
ftb_param.cs= ftb->charset;
@ -814,6 +820,7 @@ err:
typedef struct st_my_ftb_find_param
{
MYSQL_FTPARSER_PARAM *up;
FT_INFO *ftb;
FT_SEG_ITERATOR *ftsi;
} MY_FTB_FIND_PARAM;
@ -854,11 +861,12 @@ static int ftb_find_relevance_add_word(void *param, char *word, int len,
static int ftb_find_relevance_parse(void *param, char *doc, int len)
{
FT_INFO *ftb= ((MY_FTB_FIND_PARAM *)param)->ftb;
MY_FTB_FIND_PARAM *ftb_param=(MY_FTB_FIND_PARAM *)param;
FT_INFO *ftb= ftb_param->ftb;
char *end= doc + len;
FT_WORD w;
while (ft_simple_get_word(ftb->charset, &doc, end, &w, TRUE))
ftb_find_relevance_add_word(param, w.pos, w.len, 0);
ftb_param->up->mysql_add_word(param, w.pos, w.len, 0);
return(0);
}
@ -878,7 +886,7 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
return -2.0;
if (!ftb->queue.elements)
return 0;
if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr)))
if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr, 0)))
return 0;
if (ftb->state != INDEX_SEARCH && docid <= ftb->lastpos)
@ -902,19 +910,18 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
_mi_ft_segiterator_init(ftb->info, ftb->keynr, record, &ftsi);
memcpy(&ftsi2, &ftsi, sizeof(ftsi));
ftb_param.up= param;
ftb_param.ftb= ftb;
ftb_param.ftsi= &ftsi2;
param->mysql_parse= ftb_find_relevance_parse;
param->mysql_add_word= ftb_find_relevance_add_word;
param->mysql_ftparam= (void *)&ftb_param;
param->cs= ftb->charset;
param->mode= MYSQL_FTPARSER_SIMPLE_MODE;
while (_mi_ft_segiterator(&ftsi))
{
if (!ftsi.pos)
continue;
/* Since a subsequent call to _ftb_check_phrase overwrites the param elements,
they must be reinitialized at each iteration, _inside_ the loop. */
param->mysql_parse= ftb_find_relevance_parse;
param->mysql_add_word= ftb_find_relevance_add_word;
param->mysql_ftparam= (void *)&ftb_param;
param->cs= ftb->charset;
param->mode= MYSQL_FTPARSER_SIMPLE_MODE;
param->doc= (byte *)ftsi.pos;
param->length= ftsi.len;
parser->parse(param);
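
The comment in this hunk explains why the assignments were moved inside the segment loop. A compilable toy version of that situation (hypothetical names, not the MyISAM structures): the nested call overwrites the shared parameter slot, so each outer iteration has to restore it before parsing.

  #include <stdio.h>

  /* A simplified, hypothetical shared parameter slot; not the real
     MYSQL_FTPARSER_PARAM. */
  typedef struct
  {
    const char *doc;
    int (*parse)(void);
  } SHARED_PARAM;

  static SHARED_PARAM shared;

  static int phrase_parse(void)
  {
    return printf("  nested phrase check: %s\n", shared.doc);
  }

  static int relevance_parse(void)
  {
    printf("relevance pass: %s\n", shared.doc);
    /* The nested phrase check reuses (and overwrites) the shared slot. */
    shared.doc= "some phrase";
    shared.parse= phrase_parse;
    return shared.parse();
  }

  int main(void)
  {
    const char *segments[]= { "segment one", "segment two" };
    int i;
    for (i= 0; i < 2; i++)
    {
      /* Reinitialize inside the loop: the previous iteration's nested call
         left the slot pointing at the phrase parser and its document. */
      shared.doc= segments[i];
      shared.parse= relevance_parse;
      shared.parse();
    }
    return 0;
  }

If the two assignments in main() were hoisted out of the loop, the second iteration would run the phrase parser on the wrong document, which is the failure mode the moved initialization guards against.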


@ -226,7 +226,7 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, byte *query,
aio.charset=info->s->keyinfo[keynr].seg->charset;
aio.keybuff=info->lastkey+info->s->base.max_key_length;
parser= info->s->keyinfo[keynr].parser;
if (! (ftparser_param= ftparser_call_initializer(info, keynr)))
if (! (ftparser_param= ftparser_call_initializer(info, keynr, 0)))
goto err;
bzero(&wtree,sizeof(wtree));


@ -27,6 +27,7 @@ typedef struct st_ft_docstat {
typedef struct st_my_ft_parser_param
{
MYSQL_FTPARSER_PARAM *up;
TREE *wtree;
my_bool with_alloc;
} MY_FT_PARSER_PARAM;
@ -268,16 +269,16 @@ static int ft_add_word(void *param, byte *word, uint word_len,
}
static int ft_parse_internal(void *param, byte *doc, uint doc_len)
static int ft_parse_internal(void *param, byte *doc, int doc_len)
{
byte *end=doc+doc_len;
MY_FT_PARSER_PARAM *ft_param=(MY_FT_PARSER_PARAM *)param;
TREE *wtree= ft_param->wtree;
FT_WORD w;
TREE *wtree;
DBUG_ENTER("ft_parse_internal");
wtree= ((MY_FT_PARSER_PARAM *)param)->wtree;
while (ft_simple_get_word(wtree->custom_arg, &doc, end, &w, TRUE))
if (ft_add_word(param, w.pos, w.len, 0))
if (ft_param->up->mysql_add_word(param, w.pos, w.len, 0))
DBUG_RETURN(1);
DBUG_RETURN(0);
}
@ -290,6 +291,8 @@ int ft_parse(TREE *wtree, byte *doc, int doclen, my_bool with_alloc,
MY_FT_PARSER_PARAM my_param;
DBUG_ENTER("ft_parse");
DBUG_ASSERT(parser);
my_param.up= param;
my_param.wtree= wtree;
my_param.with_alloc= with_alloc;
@ -300,11 +303,12 @@ int ft_parse(TREE *wtree, byte *doc, int doclen, my_bool with_alloc,
param->doc= doc;
param->length= doclen;
param->mode= MYSQL_FTPARSER_SIMPLE_MODE;
DBUG_RETURN(parser->parse(param));
}
MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info, uint keynr)
#define MAX_PARAM_NR 2
MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info,
uint keynr, uint paramnr)
{
uint32 ftparser_nr;
struct st_mysql_ftparser *parser;
@ -343,8 +347,14 @@ MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info, uint keynr)
}
info->s->ftparsers= ftparsers;
}
/*
We have to allocate two MYSQL_FTPARSER_PARAM structures per plugin,
because in a boolean search the parser is called recursively:
ftb_find_relevance* calls ftb_check_phrase* (MAX_PARAM_NR=2).
*/
info->ftparser_param= (MYSQL_FTPARSER_PARAM *)
my_malloc(sizeof(MYSQL_FTPARSER_PARAM) *
my_malloc(MAX_PARAM_NR * sizeof(MYSQL_FTPARSER_PARAM) *
info->s->ftparsers, MYF(MY_WME|MY_ZEROFILL));
if (! info->ftparser_param)
return 0;
@ -359,6 +369,8 @@ MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info, uint keynr)
ftparser_nr= info->s->keyinfo[keynr].ftparser_nr;
parser= info->s->keyinfo[keynr].parser;
}
DBUG_ASSERT(paramnr < MAX_PARAM_NR);
ftparser_nr= ftparser_nr*MAX_PARAM_NR + paramnr;
if (! info->ftparser_param[ftparser_nr].mysql_add_word)
{
/* Note that mysql_add_word is used here as a flag:
@ -372,22 +384,27 @@ MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info, uint keynr)
return &info->ftparser_param[ftparser_nr];
}
void ftparser_call_deinitializer(MI_INFO *info)
{
uint i, keys= info->s->state.header.keys;
uint i, j, keys= info->s->state.header.keys;
if (! info->ftparser_param)
return;
for (i= 0; i < keys; i++)
{
MI_KEYDEF *keyinfo= &info->s->keyinfo[i];
MYSQL_FTPARSER_PARAM *ftparser_param=
&info->ftparser_param[keyinfo->ftparser_nr];
if (keyinfo->flag & HA_FULLTEXT && ftparser_param->mysql_add_word)
for (j=0; j < MAX_PARAM_NR; j++)
{
if (keyinfo->parser->deinit)
keyinfo->parser->deinit(ftparser_param);
ftparser_param->mysql_add_word= 0;
MYSQL_FTPARSER_PARAM *ftparser_param=
&info->ftparser_param[keyinfo->ftparser_nr*MAX_PARAM_NR + j];
if (keyinfo->flag & HA_FULLTEXT && ftparser_param->mysql_add_word)
{
if (keyinfo->parser->deinit)
keyinfo->parser->deinit(ftparser_param);
ftparser_param->mysql_add_word= 0;
}
else
break;
}
}
}
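
A small standalone illustration (hypothetical names and counts) of the slot layout the new indexing implies: the per-share array now holds MAX_PARAM_NR consecutive parameter slots per parser, addressed as ftparser_nr * MAX_PARAM_NR + paramnr, and deinitialization visits every slot of every key, mirroring the loops above.

  #include <stdio.h>
  #include <stdlib.h>

  #define MAX_PARAM_NR 2          /* parameter slots per full-text parser */

  /* Simplified, hypothetical parameter slot; mysql_add_word doubles as the
     "this slot was initialized" flag, as in the patch. */
  typedef struct
  {
    void *mysql_add_word;
  } PARAM;

  int main(void)
  {
    unsigned n_parsers= 3;        /* hypothetical number of FT parsers */
    unsigned parser_nr, param_nr, i, j;
    PARAM *params= calloc(n_parsers * MAX_PARAM_NR, sizeof(PARAM));
    if (!params)
      return 1;

    /* The nested (phrase-check) invocation of parser 1 gets its own slot. */
    parser_nr= 1;
    param_nr= 1;
    params[parser_nr * MAX_PARAM_NR + param_nr].mysql_add_word= (void *) params;

    /* Deinitialization walks every slot of every parser. */
    for (i= 0; i < n_parsers; i++)
      for (j= 0; j < MAX_PARAM_NR; j++)
        if (params[i * MAX_PARAM_NR + j].mysql_add_word)
          printf("deinit parser %u, slot %u\n", i, j);

    free(params);
    return 0;
  }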


@ -122,7 +122,7 @@ FT_WORD * _mi_ft_parserecord(MI_INFO *info, uint keynr, const byte *record)
TREE ptree;
MYSQL_FTPARSER_PARAM *param;
DBUG_ENTER("_mi_ft_parserecord");
if (! (param= ftparser_call_initializer(info, keynr)))
if (! (param= ftparser_call_initializer(info, keynr, 0)))
DBUG_RETURN(NULL);
bzero((char*) &ptree, sizeof(ptree));
if (_mi_ft_parse(&ptree, info, keynr, record, 0, param))


@ -145,5 +145,6 @@ float ft_boolean_get_relevance(FT_INFO *);
my_off_t ft_boolean_get_docid(FT_INFO *);
void ft_boolean_reinit_search(FT_INFO *);
extern MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info,
uint keynr);
uint keynr,
uint paramnr);
extern void ftparser_call_deinitializer(MI_INFO *info);


@ -233,7 +233,7 @@ sizeof(SimpleProperties::SP2StructMapping);
void
DictFilegroupInfo::Filegroup::init(){
memset(FilegroupName, sizeof(FilegroupName), 0);
memset(FilegroupName, 0, sizeof(FilegroupName));
FilegroupType = ~0;
FilegroupId = ~0;
FilegroupVersion = ~0;
@ -244,8 +244,10 @@ DictFilegroupInfo::Filegroup::init(){
TS_DataGrow.GrowLimit = 0;
TS_DataGrow.GrowSizeHi = 0;
TS_DataGrow.GrowSizeLo = 0;
memset(TS_DataGrow.GrowPattern, sizeof(TS_DataGrow.GrowPattern), 0);
memset(TS_DataGrow.GrowPattern, 0, sizeof(TS_DataGrow.GrowPattern));
TS_DataGrow.GrowMaxSize = 0;
LF_UndoFreeWordsHi= 0;
LF_UndoFreeWordsLo= 0;
}
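
The two memset() hunks above fix swapped arguments. A minimal demonstration, using a hypothetical buffer, of why the original calls were effectively no-ops: memset(dst, n, 0) writes zero bytes of value n, while memset(dst, 0, n) zeroes n bytes.

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
    char name[8];

    memset(name, 'x', sizeof(name));      /* pre-fill so the effect shows */
    memset(name, sizeof(name), 0);        /* bug: fills 0 bytes            */
    printf("after swapped args: %c\n", name[0]);   /* still 'x'            */

    memset(name, 0, sizeof(name));        /* fix: zeroes all 8 bytes       */
    printf("after correct call: %d\n", name[0]);   /* 0                    */
    return 0;
  }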
void


@ -22,7 +22,7 @@
#define ROW_LEN 16
#define ROW16_LEN 8
#define MAX_BUF 16*1024
#define MAX_BUF 64*1024
static CHARSET_INFO all_charsets[256];
@ -156,6 +156,7 @@ static int my_read_charset_file(const char *filename)
}
len=read(fd,buf,MAX_BUF);
DBUG_ASSERT(len < MAX_BUF);
close(fd);
if (my_parse_charset_xml(buf,len,add_collation))
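
A short sketch of the two fixes in this file's read path (the file name here is hypothetical): assert that the charset file fits in the enlarged MAX_BUF buffer, and close the descriptor instead of leaking it.

  #include <assert.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  #define MAX_BUF 64*1024

  int main(void)
  {
    static char buf[MAX_BUF];
    ssize_t len;
    int fd= open("Index.xml", O_RDONLY);  /* hypothetical charset file */
    if (fd < 0)
      return 1;
    len= read(fd, buf, MAX_BUF);
    if (len < 0)
    {
      close(fd);
      return 1;
    }
    assert(len < MAX_BUF);                /* the file must fit in the buffer */
    close(fd);                            /* do not leak the descriptor */
    printf("read %zd bytes\n", len);
    return 0;
  }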
@ -221,15 +222,19 @@ void dispcset(FILE *f,CHARSET_INFO *cs)
}
fprintf(f," NULL, /* from_uni */\n");
fprintf(f," my_unicase_default, /* caseinfo */\n");
fprintf(f," NULL, /* state map */\n");
fprintf(f," NULL, /* ident map */\n");
fprintf(f," 1, /* strxfrm_multiply*/\n");
fprintf(f," 1, /* caseup_multiply*/\n");
fprintf(f," 1, /* casedn_multiply*/\n");
fprintf(f," 1, /* mbminlen */\n");
fprintf(f," 1, /* mbmaxlen */\n");
fprintf(f," 0, /* min_sort_char */\n");
fprintf(f," 255, /* max_sort_char */\n");
fprintf(f," ' ', /* pad_char */\n");
fprintf(f," 0, /* escape_with_backslash_is_dangerous */\n");
fprintf(f," &my_charset_8bit_handler,\n");
if (cs->state & MY_CS_BINSORT)
fprintf(f," &my_collation_8bit_bin_handler,\n");

File diff suppressed because it is too large.


@ -72,6 +72,10 @@ else
libexecdir="$basedir/libexec"
fi
# datadir_set records whether --datadir was given explicitly (in which case
# datadir must *not* be overridden by the --basedir= handler).
datadir_set=
#
# Use LSB init script functions for printing messages, if possible
#
@ -105,11 +109,15 @@ parse_server_arguments() {
case "$arg" in
--basedir=*) basedir=`echo "$arg" | sed -e 's/^[^=]*=//'`
bindir="$basedir/bin"
datadir="$basedir/data"
if test -z "$datadir_set"; then
datadir="$basedir/data"
fi
sbindir="$basedir/sbin"
libexecdir="$basedir/libexec"
;;
--datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
--datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'`
datadir_set=1
;;
--user=*) user=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
--pid-file=*) server_pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
--use-mysqld_safe) use_mysqld_safe=1;;