Fixes for innobase
Added test for ALTER TABLE ORDER BY

BUILD/FINISH.sh: Fixes for innobase
BUILD/compile-pentium-debug: Fixes for innobase
Docs/manual.texi: Fixes for innobase
include/my_pthread.h: Fixes for innobase
innobase/Makefile.am: Fixes for innobase
innobase/btr/Makefile.am: Fixes for innobase
innobase/data/Makefile.am: Fixes for innobase
innobase/eval/Makefile.am: Fixes for innobase
innobase/include/Makefile.i: Fixes for innobase
innobase/os/Makefile.am: Fixes for innobase
mysql-test/t/alter_table.test: Added test for ALTER TABLE ORDER BY
mysys/my_error.c: Skip 'l' in '%lu'
mysys/my_vsnprintf.c: cleanup
sql/ha_innobase.cc: Fixed type + remove warnings
sql/ha_innobase.h: Remove warnings
sql/handler.cc: cleanup
sql/sql_class.cc: remove warnings
sql/sql_parse.cc: remove warnings
sql/sql_table.cc: Fixed bug in ALTER TABLE ... ORDER BY
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted
parent 0897a73a26
commit 6839b1728d
20 changed files with 132 additions and 48 deletions
BUILD/FINISH.sh
@@ -13,6 +13,9 @@ $make -k clean || true
/bin/rm -f */.deps/*.P config.cache
aclocal; autoheader; aclocal; automake; autoconf
cd innobase
aclocal; autoheader; aclocal; automake; autoconf
cd ..
CFLAGS=\"$cflags\" CXX=gcc CXXFLAGS=\"$cxxflags\" $configure
BUILD/compile-pentium-debug
@@ -13,6 +13,6 @@ if test -d /usr/local/BerkeleyDB-dbug/
then
extra_configs="$extra_configs --with-berkeley-db=/usr/local/BerkeleyDB-dbug/"
fi
extra_configs="$extra_configs --with-innobase"
extra_configs="$extra_configs --with-innobase-db"
. "$path/FINISH.sh"
BitKeeper/etc/logging_ok
@@ -1,15 +1 @@
jcole@tetra.spaceapes.com
monty@donna.mysql.com
monty@work.mysql.com
mwagner@evoq.mwagner.org
mwagner@work.mysql.com
paul@central.snake.net
sasha@mysql.sashanet.com
sasha@work.mysql.com
serg@donna.mysql.com
serg@serg.mysql.com
tfr@coyote.emotion.ee
tim@cane.mysql.fi
tim@threads.polyesthetic.msg
tim@work.mysql.com
tim@donna.mysql.com
monty@donna.mysql.fi
Docs/manual.texi
@@ -487,7 +487,7 @@ MySQL Table Types
* ISAM:: ISAM tables
* HEAP:: HEAP tables
* BDB:: BDB or Berkeley_db tables
* INNOBASE::
* INNOBASE:: Innobase tables
MyISAM Tables
Docs/manual.texi
@@ -2062,7 +2062,6 @@ report about lost data because of bugs in @strong{MySQL}.
@cindex retrieving, data
@cindex data, ISAM table handler
@item The MyISAM table handler --- Gamma
This is new in @strong{MySQL} Version 3.23. It's largely based on the ISAM
table code but has a lot of new and very useful features.
Docs/manual.texi
@@ -2154,6 +2153,9 @@ The Berkeley DB code is very stable, but we are still improving the interface
between @strong{MySQL} and BDB tables, so it will take some time before this
is as tested as the other table types.
@item Innobase Tables -- Alpha
This is a very recent addition to @code{MySQL} and are not very tested yet.
@item Automatic recovery of MyISAM tables - Beta.
This only affects the new code that checks if the table was closed properly
on open and executes an automatic check/repair of the table if it wasn't.
Docs/manual.texi
@@ -12241,7 +12243,7 @@ connections:
@item @code{'x.y.%'} @tab @code{'fred'} @tab @code{fred}, connecting from @code{x.y.net}, @code{x.y.com},@code{x.y.edu}, etc. (this is probably not useful)
@item @code{'144.155.166.177'} @tab @code{'fred'} @tab @code{fred}, connecting from the host with IP address @code{144.155.166.177}
@item @code{'144.155.166.%'} @tab @code{'fred'} @tab @code{fred}, connecting from any host in the @code{144.155.166} class C subnet
@item @code{'144.155.166.0/24'} @tab @code{'fred'} @tab Same as previous example
@item @code{'144.155.166.0/255.255.255.0'} @tab @code{'fred'} @tab Same as previous example
@end multitable
Because you can use IP wild-card values in the @code{Host} field (for example,
Docs/manual.texi
@@ -18322,12 +18324,12 @@ If you specify a @code{SELECT} after the @code{CREATE STATEMENT},
@example
mysql> CREATE TABLE test (a int not null auto_increment,
primary key (a), key(b))
TYPE=HEAP SELECT b,c from test2;
TYPE=MyISAM SELECT b,c from test2;
@end example
This will create a @code{HEAP} table with 3 columns. Note that the table will
automatically be deleted if any errors occur while copying data
into the table.
This will create a @code{MyISAM} table with 3 columns. Note that the
table will automatically be deleted if any errors occur while copying
data into the table.
@item
The @code{RAID_TYPE} option will help you to break the 2G/4G limit for
the MyISAM data file (not the index file) on
Docs/manual.texi
@@ -23504,6 +23506,60 @@ Innobase cannot notice. In cases like this the timeout is useful to
resolve the situation.
@end multitable
You can query the amount of free space in the Innobase tablespace (=
data files you specified in my.cnf) by issuing the table status command
of @strong{MySQL} for any table you have created with @code{TYPE =
INNOBASE}. Then the amount of free space in the tablespace appears in
the table comment section in the output of SHOW. An example:
@example
SHOW TABLE STATUS FROM TEST LIKE 'CUSTOMER'
@end example
if you have created a table of name CUSTOMER in a database you have named
TEST. Note that the statistics SHOW gives about Innobase tables
are only approximate: they are used in SQL optimization. Table and
index reserved sizes in bytes are accurate, though.
Note that in addition to your tables, the rollback segment uses space
from the tablespace.
Since Innobase is a multiversioned database, it must keep information
of old versions of rows in the tablespace. This information is stored
in a data structure called a rollback segment, like in Oracle. In contrast
to Oracle, you do not need to configure the rollback segment in any way in
Innobase. If you issue SELECTs, which by default do a consistent read in
Innobase, remember to commit your transaction regularly. Otherwise
the rollback segment will grow because it has to preserve the information
needed for further consistent reads in your transaction: in Innobase
all consistent reads within one transaction will see the same timepoint
snapshot of the database: the reads are also 'consistent' with
respect to each other.
Some Innobase errors: If you run out of file space in the tablespace,
you will get the MySQL 'Table is full' error. If you want to make your
tablespace bigger, you have to shut down MySQL and add a new datafile
specification to my.conf, to the innobase_data_file_path parameter.
A transaction deadlock or a timeout in a lock wait will give 'Table handler
error 1000000'.
Contact information of Innobase Oy, producer of the Innobase engine:
Website: Being registered, probably @uref{http://www.innobase.fi}.
This should open about March 3rd, 2001.
@email{Heikki.Tuuri@@innobase.inet.fi}
@example
phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile)
Innobase Oy Inc.
World Trade Center Helsinki
Aleksanterinkatu 17
P.O.Box 800
00101 Helsinki
Finland
@end example
@cindex tutorial
@cindex terminal monitor, defined
@cindex monitor, terminal
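The manual text above describes reading the Innobase free-space figure from the table comment shown by SHOW TABLE STATUS. Below is a minimal client-side sketch using the MySQL C API; the connection parameters are placeholders, the database/table names (TEST, CUSTOMER) are taken from the example, and the column is located by its "Comment" name rather than by position.

// Sketch: read the table comment (which carries "Innobase free: N kB")
// from SHOW TABLE STATUS. Connection parameters are placeholders.
#include <mysql.h>
#include <cstdio>
#include <cstring>

int main()
{
    MYSQL *conn = mysql_init(NULL);
    if (!conn)
        return 1;
    if (!mysql_real_connect(conn, "localhost", "user", "password",
                            "TEST", 0, NULL, 0))
    {
        fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
        return 1;
    }
    if (mysql_query(conn, "SHOW TABLE STATUS FROM TEST LIKE 'CUSTOMER'"))
    {
        fprintf(stderr, "query failed: %s\n", mysql_error(conn));
        mysql_close(conn);
        return 1;
    }
    MYSQL_RES *res = mysql_store_result(conn);
    if (res)
    {
        MYSQL_FIELD *fields = mysql_fetch_fields(res);
        unsigned int nfields = mysql_num_fields(res);
        MYSQL_ROW row = mysql_fetch_row(res);
        if (row)
        {
            for (unsigned int i = 0; i < nfields; i++)
            {
                /* The free-space figure appears in the Comment column. */
                if (strcmp(fields[i].name, "Comment") == 0 && row[i])
                    printf("Comment: %s\n", row[i]);
            }
        }
        mysql_free_result(res);
    }
    mysql_close(conn);
    return 0;
}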
Docs/manual.texi
@@ -40853,6 +40909,10 @@ This converter can't handle MEMO fields.
Convert between FoxPro @file{.dbf} files and @strong{MySQL} tables on Windows.
By Alexander Eltsyn, @email{ae@@nica.ru} or @email{ae@@usa.net}.
@item @uref{http://www.mysql.com/Downloads/Contrib/dbf2sql.zip, dbf2sql.zip}
Short and simple prg that can help you transport your data from foxpro
table into @strong{MySQL} table. By Danko Josic.
@item @uref{http://www.mysql.com/Downloads/Contrib/dump2h-1.20.gz, dump2h-1.20.gz}
Convert from @code{mysqldump} output to a C header file. By Harry Brueckner,
@email{brueckner@@mail.respublica.de}.
Docs/manual.texi
@@ -41594,6 +41654,8 @@ not yet 100 % confident in this code.
@appendixsubsec Changes in release 3.23.34
@itemize @bullet
@item
Fixed bug in @code{ALTER TABLE ... ORDER BY}.
@item
Added option @code{max_user_connections} to @code{mysqld}.
@item
Limit query length for replication by max_allowed_packet, not the arbitrary
include/my_pthread.h
@@ -441,6 +441,7 @@ int safe_cond_timedwait(pthread_cond_t *cond, safe_mutex_t *mp,
#define pthread_mutex_destroy(A) safe_mutex_destroy((A),__FILE__,__LINE__)
#define pthread_cond_wait(A,B) safe_cond_wait((A),(B),__FILE__,__LINE__)
#define pthread_cond_timedwait(A,B,C) safe_cond_timedwait((A),(B),(C),__FILE__,__LINE__)
#define pthread_mutex_trylock(A) pthread_mutex_lock(A)
#define pthread_mutex_t safe_mutex_t
#endif /* SAFE_MUTEX */
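These SAFE_MUTEX macros route pthread calls through wrappers that record the call site via __FILE__ and __LINE__. The sketch below shows the general wrapper-macro pattern with hypothetical names (checked_lock, my_lock); it is not the server's safe_mutex implementation.

// Sketch of the call-site-tracking macro pattern used by SAFE_MUTEX.
// Names (checked_lock, my_lock) are hypothetical.
#include <cstdio>
#include <pthread.h>

static int checked_lock(pthread_mutex_t *m, const char *file, unsigned line)
{
    int rc = pthread_mutex_lock(m);
    if (rc != 0)
        fprintf(stderr, "lock failed (%d) at %s:%u\n", rc, file, line);
    return rc;
}

// Callers keep writing my_lock(&m); the macro supplies the location.
#define my_lock(M) checked_lock((M), __FILE__, __LINE__)

int main()
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    my_lock(&m);                 // expands to checked_lock(&m, __FILE__, __LINE__)
    pthread_mutex_unlock(&m);
    return 0;
}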
innobase/Makefile.am
@@ -24,3 +24,5 @@ SUBDIRS = os ut btr buf com data dict dyn eval fil fsp fut \
ha ibuf lock log mach mem mtr odbc page pars que \
read rem row srv sync thr trx usr
# Don't update the files from bitkeeper
%::SCCS/s.%
innobase/btr/Makefile.am
@@ -22,4 +22,3 @@ libs_LIBRARIES = libbtr.a
libbtr_a_SOURCES = btr0btr.c btr0cur.c btr0pcur.c btr0sea.c
EXTRA_PROGRAMS =
innobase/data/Makefile.am
@@ -22,4 +22,3 @@ libs_LIBRARIES = libdata.a
libdata_a_SOURCES = data0data.c data0type.c
EXTRA_PROGRAMS =
innobase/eval/Makefile.am
@@ -22,4 +22,3 @@ libs_LIBRARIES = libeval.a
libeval_a_SOURCES = eval0eval.c eval0proc.c
EXTRA_PROGRAMS =
innobase/include/Makefile.i
@@ -3,3 +3,6 @@
libsdir = ../libs
INCLUDES = -I../../include -I../include
# Don't update the files from bitkeeper
%::SCCS/s.%
innobase/os/Makefile.am
@@ -22,3 +22,6 @@ libs_LIBRARIES = libos.a
libos_a_SOURCES = os0proc.c os0shm.c os0sync.c os0thread.c os0file.c
EXTRA_PROGRAMS =
# Don't update the files from bitkeeper
%::SCCS/s.%
mysql-test/t/alter_table.test
@@ -46,3 +46,24 @@ insert into t1 values(9),(3),(12),(10);
alter table t1 order by n;
select * from t1;
drop table t1;
CREATE TABLE t1 (
id int(11) unsigned NOT NULL default '0',
category_id tinyint(4) unsigned NOT NULL default '0',
type_id tinyint(4) unsigned NOT NULL default '0',
body text NOT NULL,
user_id int(11) unsigned NOT NULL default '0',
status enum('new','old') NOT NULL default 'new',
PRIMARY KEY (id)
) TYPE=MyISAM;
ALTER TABLE
t1
ORDER BY
t1.id,
t1.status,
t1.type_id,
t1.user_id,
t1.body;
drop table t1;
mysys/my_error.c
@@ -65,6 +65,8 @@ int my_error(int nr,myf MyFlags, ...)
/* Skipp if max size is used (to be compatible with printf) */
while (isdigit(*tpos) || *tpos == '.' || *tpos == '-')
tpos++;
if (*tpos == 'l') /* Skipp 'l' argument */
*tpos++;
if (*tpos == 's') /* String parameter */
{
par = va_arg(ap, char *);
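The change above makes the format scanner step over an 'l' length modifier (as in %lu) before looking at the conversion character, matching the "Skip 'l' in '%lu'" note in the commit message. A standalone sketch of that scanning step follows; it is illustrative only, not the my_error() code.

// Sketch: after '%', skip width/precision ('-', '.', digits) and an
// optional 'l' length modifier, then return the conversion character.
#include <cctype>
#include <cstdio>

static char conversion_char(const char *spec)      // spec points just past '%'
{
    while (isdigit((unsigned char) *spec) || *spec == '.' || *spec == '-')
        spec++;                                     // skip width/precision
    if (*spec == 'l')                               // skip length modifier
        spec++;
    return *spec;                                   // 'u', 's', 'd', ...
}

int main()
{
    printf("%c\n", conversion_char("lu"));          // prints 'u' (from "%lu")
    printf("%c\n", conversion_char("-.64s"));       // prints 's' (from "%-.64s")
    return 0;
}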
mysys/my_vsnprintf.c
@@ -44,13 +44,13 @@ int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
fmt++;
while (isdigit(*fmt) || *fmt == '.' || *fmt == '-')
fmt++;
if(*fmt == 'l')
if (*fmt == 'l')
fmt++;
if (*fmt == 's') /* String parameter */
{
reg2 char *par = va_arg(ap, char *);
uint plen;
if(!par) par = (char*)"(null)";
if (!par) par = (char*)"(null)";
plen = (uint) strlen(par);
if ((uint) (end-to) > plen) /* Replace if possible */
{
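Besides the whitespace cleanup, the code above substitutes "(null)" for a NULL %s argument and only copies the string when it fits in the remaining buffer. A minimal standalone sketch of that bounded append, with hypothetical names, is shown below.

// Sketch: append a %s argument only if it fits, substituting "(null)"
// for a NULL pointer, in the spirit of the my_vsnprintf() code above.
#include <cstdio>
#include <cstring>

static char *append_str(char *to, char *end, const char *par)
{
    if (!par)
        par = "(null)";                    // never dereference NULL
    size_t plen = strlen(par);
    if ((size_t) (end - to) > plen)        // copy only when it fits
    {
        memcpy(to, par, plen);
        to += plen;
    }
    return to;
}

int main()
{
    char buf[32];
    char *pos = append_str(buf, buf + sizeof(buf) - 1, NULL);
    pos = append_str(pos, buf + sizeof(buf) - 1, " and more");
    *pos = '\0';
    printf("%s\n", buf);                   // prints: (null) and more
    return 0;
}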
sql/ha_innobase.cc
@@ -418,7 +418,7 @@ innobase_init(void)
int err;
bool ret;
ibool test_bool;
static char *current_dir[3];
static char current_dir[3];
DBUG_ENTER("innobase_init");
/* Use current_dir if no paths are set */
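The "Fixed type" note in the commit message corresponds to the declaration change above: current_dir becomes a small character buffer rather than an array of three char pointers. The sketch below, with illustrative names, shows why the distinction matters when the name is later used as a single path string.

// Sketch: an array of char (decays to char*) vs. an array of char*
// (decays to char**) when passed where a path string is expected.
#include <cstdio>
#include <cstring>

static void use_path(const char *path) { printf("path = %s\n", path); }

int main()
{
    static char  dir_buf[3];        // holds a short path such as "."
    static char *dir_ptrs[3];       // three separate pointers, not a string

    strcpy(dir_buf, ".");
    use_path(dir_buf);              // OK: char[3] decays to char*

    dir_ptrs[0] = dir_buf;
    use_path(dir_ptrs[0]);          // OK, but use_path(dir_ptrs) would not
                                    // compile: char** is not const char*
    return 0;
}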
sql/ha_innobase.cc
@@ -431,13 +431,14 @@ innobase_init(void)
if (!innobase_data_file_path)
{
fprintf(stderr,"Can't initialize innobase as 'innobase_data_file_path' is not set\n");
DBUG_RETURN(TRUE);
fprintf(stderr,"Can't initialize Innobase as 'innobase_data_file_path' is not set\n");
innobase_skip=1;
DBUG_RETURN(FALSE); // Continue without innobase
}
srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir :
current_dir);
srv_logs_home = "";
srv_logs_home = (char*) "";
srv_arch_dir = (innobase_log_arch_dir ? innobase_log_arch_dir :
current_dir);
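The change above turns a missing innobase_data_file_path from a fatal startup error into a warning: the handler sets a skip flag and returns FALSE (no error) so the server continues without the engine. A generic sketch of that pattern, with hypothetical names, follows.

// Sketch: skip an optional storage engine when its configuration is
// missing instead of failing startup. Names are hypothetical.
#include <cstdio>

static const char *engine_data_file_path = nullptr;  // not configured
static bool engine_skip = false;

static bool engine_init()
{
    if (!engine_data_file_path)
    {
        fprintf(stderr,
                "Can't initialize engine: data file path is not set\n");
        engine_skip = true;     // remember that the engine is unavailable
        return false;           // "no error" -- continue without the engine
    }
    /* ... real initialization would go here ... */
    return false;
}

int main()
{
    if (engine_init())
        return 1;               // only a genuine init failure aborts
    if (engine_skip)
        printf("running without the optional engine\n");
    return 0;
}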
sql/ha_innobase.cc
@@ -2167,8 +2168,9 @@ create_clustered_index_when_no_primary(
/* The first '0' below specifies that everything in Innobase is
currently created in file space 0 */
index = dict_mem_index_create((char*) table_name, "GEN_CLUST_INDEX",
0, DICT_CLUSTERED, 0);
index = dict_mem_index_create((char*) table_name,
(char*) "GEN_CLUST_INDEX",
0, DICT_CLUSTERED, 0);
error = row_create_index_for_mysql(index, trx);
error = convert_error_code_to_mysql(error);
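The second argument gains an explicit (char*) cast because the callee takes a non-const char* while "GEN_CLUST_INDEX" is a string literal. Below is a small sketch of the warning and the two usual remedies (cast at the call site, or a const-correct parameter); the function names are made up.

// Sketch: passing a string literal where a char* is expected.
#include <cstdio>

static void takes_name(char *name)              // legacy, non-const parameter
{
    printf("name = %s\n", name);
}

static void takes_name_const(const char *name)  // const-correct alternative
{
    printf("name = %s\n", name);
}

int main()
{
    // takes_name("GEN_CLUST_INDEX");           // warns/errors in C++:
                                                // a literal is const char[]
    takes_name((char *) "GEN_CLUST_INDEX");     // cast, as in the diff above
    takes_name_const("GEN_CLUST_INDEX");        // preferred when possible
    return 0;
}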
sql/ha_innobase.cc
@@ -2208,7 +2210,7 @@ ha_innobase::create(
/* Create the table definition in Innobase */
if (error = create_table_def(trx, form, norm_name)) {
if ((error = create_table_def(trx, form, norm_name))) {
trx_commit_for_mysql(trx);
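Wrapping the assignment in an extra pair of parentheses tells the compiler the assignment inside the condition is intentional, which silences gcc's "suggest parentheses around assignment used as truth value" warning. A minimal sketch:

// Sketch: assignment used as a condition, with the extra parentheses
// that mark it as intentional for -Wparentheses.
#include <cstdio>

static int next_error() { static int n = 3; return n--; }

int main()
{
    int error;
    // while (error = next_error())     // would draw a gcc warning
    while ((error = next_error()))      // extra parens: intentional
        printf("error code %d\n", error);
    return 0;
}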
sql/ha_innobase.cc
@@ -2248,8 +2250,8 @@ ha_innobase::create(
if (primary_key_no != -1) {
/* In Innobase the clustered index must always be created
first */
if (error = create_index(trx, form, norm_name,
(uint) primary_key_no)) {
if ((error = create_index(trx, form, norm_name,
(uint) primary_key_no))) {
trx_commit_for_mysql(trx);
trx_free_for_mysql(trx);
sql/ha_innobase.cc
@@ -2262,7 +2264,7 @@ ha_innobase::create(
if (i != (uint) primary_key_no) {
if (error = create_index(trx, form, norm_name, i)) {
if ((error = create_index(trx, form, norm_name, i))) {
trx_commit_for_mysql(trx);
sql/ha_innobase.cc
@@ -2564,7 +2566,8 @@ ha_innobase::update_table_comment(
if (!str)
return (char*)comment;
sprintf(str,"%s Innobase free: %lu kB", comment,innobase_get_free_space());
sprintf(str,"%s Innobase free: %lu kB", comment,
(ulong) innobase_get_free_space());
return((char*) str);
}
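The sprintf call is split so the value passed for %lu is explicitly cast to (ulong); if the function's return type is not unsigned long on a given platform, the uncast argument would not match the conversion. A small standalone illustration:

// Sketch: make the argument type match the %lu conversion explicitly.
#include <cstdio>

static unsigned int free_kb() { return 42; }   // stand-in for the real call

int main()
{
    char str[64];
    // sprintf(str, "free: %lu kB", free_kb());              // type mismatch
                                                              // on LP64 builds
    sprintf(str, "free: %lu kB", (unsigned long) free_kb());  // always matches
    printf("%s\n", str);
    return 0;
}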
sql/ha_innobase.h
@@ -53,10 +53,10 @@ class ha_innobase: public handler
'ref' buffer of the handle, if any */
ulong int_option_flag;
uint primary_key;
uint last_dup_key;
ulong start_of_scan; /* this is set to 1 when we are
starting a table scan but have not
yet fetched any row, else 0 */
uint last_dup_key;
uint last_match_mode;/* match mode of the latest search:
ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
sql/handler.cc
@@ -138,9 +138,8 @@ int ha_init()
#ifdef HAVE_INNOBASE_DB
if (!innobase_skip)
{
int error;
if ((error=innobase_init()))
return error;
if (innobase_init())
return -1;
}
#endif
return 0;
sql/sql_class.cc
@@ -75,9 +75,8 @@ static void free_var(user_var_entry *entry)
****************************************************************************/
THD::THD():user_time(0),fatal_error(0),last_insert_id_used(0),
insert_id_used(0),
bootstrap(0),in_lock_tables(0),
global_read_lock(0)
insert_id_used(0),in_lock_tables(0),
global_read_lock(0),bootstrap(0)
{
proc_info="login";
host=user=priv_user=db=query=ip=0;
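The THD constructor's initializer list is reordered (bootstrap moves to the end) so it matches the order in which the members are declared; gcc warns with -Wreorder when they differ, because members are always initialized in declaration order regardless of the list. A compact illustration with made-up members:

// Sketch: initializer-list order should follow declaration order.
#include <cstdio>

struct Session
{
    int insert_id_used;
    int in_lock_tables;
    int global_read_lock;
    int bootstrap;                 // declared last, so initialized last

    // Listing bootstrap(0) first would trigger -Wreorder; this order
    // matches the declarations above, as in the diff.
    Session()
        : insert_id_used(0), in_lock_tables(0),
          global_read_lock(0), bootstrap(0)
    {}
};

int main()
{
    Session s;
    printf("bootstrap=%d\n", s.bootstrap);
    return 0;
}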
sql/sql_parse.cc
@@ -214,7 +214,7 @@ static int check_for_max_user_connections(const char *user, int u_length,
(byte*) temp_user, temp_len);
if (uc) /* user found ; check for no. of connections */
{
if (max_user_connections == uc->connections)
if ((uint) max_user_connections == uc->connections)
{
net_printf(&(current_thd->net),ER_TOO_MANY_USER_CONNECTIONS, temp_user);
pthread_mutex_unlock(&LOCK_user_conn);
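Casting max_user_connections to uint before comparing it with the unsigned connection counter removes gcc's signed/unsigned comparison warning. A short illustration with stand-in variables:

// Sketch: compare a signed limit with an unsigned counter explicitly.
#include <cstdio>

int main()
{
    long         max_user_connections = 10;   // configured limit (signed here)
    unsigned int connections = 10;            // current count (unsigned)

    // if (max_user_connections == connections)          // -Wsign-compare
    if ((unsigned int) max_user_connections == connections)
        printf("limit reached\n");
    return 0;
}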
sql/sql_table.cc
@@ -1644,11 +1644,14 @@ copy_data_between_tables(TABLE *from,TABLE *to,
found_count=delete_count=0;
if(order) {
if (order)
{
from->io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
bzero((char*) &tables,sizeof(tables));
tables.table = from;
tables.name = tables.real_name= from->real_name;
tables.db = from->table_cache_key;
error=1;
if (setup_order(thd, &tables, fields, all_fields, order) ||
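This hunk is part of the ALTER TABLE ... ORDER BY fix: when an ORDER BY is given, copy_data_between_tables() sets up the source table and the sort order before rows are copied into the new table. The sketch below shows only the underlying idea (read, sort by the requested columns, write in that order); the server itself does this through its filesort and IO_CACHE machinery, not std::sort.

// Sketch of the idea behind ALTER TABLE ... ORDER BY: rows are rewritten
// into the new table in the requested order.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Row { int id; int type_id; };

int main()
{
    std::vector<Row> from = { {9, 2}, {3, 1}, {12, 1}, {10, 3} };

    // "ORDER BY id, type_id" -- sort the copy buffer by the named columns.
    std::sort(from.begin(), from.end(), [](const Row &a, const Row &b) {
        if (a.id != b.id) return a.id < b.id;
        return a.type_id < b.type_id;
    });

    // Copy into the "new" table in sorted order.
    std::vector<Row> to(from.begin(), from.end());
    for (const Row &r : to)
        printf("id=%d type_id=%d\n", r.id, r.type_id);
    return 0;
}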